text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# Copyright 2000-2002 Andrew Dalke.
# Copyright 2002-2004 Brad Chapman.
# Copyright 2006-2010 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Represent a Sequence Record, a sequence with annotation."""
__docformat__ = "epytext en" #Simple markup to show doctests nicely
# NEEDS TO BE SYNCH WITH THE REST OF BIOPYTHON AND BIOPERL
# In particular, the SeqRecord and BioSQL.BioSeq.DBSeqRecord classes
# need to be in sync (this is the BioSQL "Database SeqRecord", see
# also BioSQL.BioSeq.DBSeq which is the "Database Seq" class)
class _RestrictedDict(dict):
"""Dict which only allows sequences of given length as values (PRIVATE).
This simple subclass of the Python dictionary is used in the SeqRecord
object for holding per-letter-annotations. This class is intended to
prevent simple errors by only allowing python sequences (e.g. lists,
strings and tuples) to be stored, and only if their length matches that
expected (the length of the SeqRecord's seq object). It cannot however
prevent the entries being edited in situ (for example appending entries
to a list).
>>> x = _RestrictedDict(5)
>>> x["test"] = "hello"
>>> x
{'test': 'hello'}
Adding entries which don't have the expected length are blocked:
>>> x["test"] = "hello world"
Traceback (most recent call last):
...
TypeError: We only allow python sequences (lists, tuples or strings) of length 5.
The expected length is stored as a private attribute,
>>> x._length
5
In order that the SeqRecord (and other objects using this class) can be
pickled, for example for use in the multiprocessing library, we need to
be able to pickle the restricted dictionary objects.
Using the default protocol, which is 0 on Python 2.x,
>>> import pickle
>>> y = pickle.loads(pickle.dumps(x))
>>> y
{'test': 'hello'}
>>> y._length
5
Using the highest protocol, which is 2 on Python 2.x,
>>> import pickle
>>> z = pickle.loads(pickle.dumps(x, pickle.HIGHEST_PROTOCOL))
>>> z
{'test': 'hello'}
>>> z._length
5
"""
def __init__(self, length):
"""Create an EMPTY restricted dictionary."""
dict.__init__(self)
self._length = int(length)
def __setitem__(self, key, value):
#The check hasattr(self, "_length") is to cope with pickle protocol 2
#I couldn't seem to avoid this with __getstate__ and __setstate__
if not hasattr(value,"__len__") or not hasattr(value,"__getitem__") \
or (hasattr(self, "_length") and len(value) != self._length):
raise TypeError("We only allow python sequences (lists, tuples or "
"strings) of length %i." % self._length)
dict.__setitem__(self, key, value)
def update(self, new_dict):
#Force this to go via our strict __setitem__ method
for (key, value) in new_dict.iteritems():
self[key] = value
class SeqRecord(object):
"""A SeqRecord object holds a sequence and information about it.
Main attributes:
- id - Identifier such as a locus tag (string)
- seq - The sequence itself (Seq object or similar)
Additional attributes:
- name - Sequence name, e.g. gene name (string)
- description - Additional text (string)
- dbxrefs - List of database cross references (list of strings)
- features - Any (sub)features defined (list of SeqFeature objects)
- annotations - Further information about the whole sequence (dictionary)
Most entries are strings, or lists of strings.
- letter_annotations - Per letter/symbol annotation (restricted
dictionary). This holds Python sequences (lists, strings
or tuples) whose length matches that of the sequence.
A typical use would be to hold a list of integers
representing sequencing quality scores, or a string
representing the secondary structure.
You will typically use Bio.SeqIO to read in sequences from files as
SeqRecord objects. However, you may want to create your own SeqRecord
objects directly (see the __init__ method for further details):
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Alphabet import IUPAC
>>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF",
... IUPAC.protein),
... id="YP_025292.1", name="HokC",
... description="toxic membrane protein")
>>> print record
ID: YP_025292.1
Name: HokC
Description: toxic membrane protein
Number of features: 0
Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF', IUPACProtein())
If you want to save SeqRecord objects to a sequence file, use Bio.SeqIO
for this. For the special case where you want the SeqRecord turned into
a string in a particular file format there is a format method which uses
Bio.SeqIO internally:
>>> print record.format("fasta")
>YP_025292.1 toxic membrane protein
MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
<BLANKLINE>
You can also do things like slicing a SeqRecord, checking its length, etc
>>> len(record)
44
>>> edited = record[:10] + record[11:]
>>> print edited.seq
MKQHKAMIVAIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
>>> print record.seq
MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
"""
def __init__(self, seq, id = "<unknown id>", name = "<unknown name>",
             description = "<unknown description>", dbxrefs = None,
             features = None, annotations = None,
             letter_annotations = None):
    """Create a SeqRecord.

    Arguments:
     - seq         - Sequence, required (Seq, MutableSeq or UnknownSeq)
     - id          - Sequence identifier, recommended (string)
     - name        - Sequence name, optional (string)
     - description - Sequence description, optional (string)
     - dbxrefs     - Database cross references, optional (list of strings)
     - features    - Any (sub)features, optional (list of SeqFeature objects)
     - annotations - Dictionary of annotations for the whole sequence
     - letter_annotations - Dictionary of per-letter-annotations, values
                   should be strings, list or tuples of the same length
                   as the full sequence.

    You will typically use Bio.SeqIO to read in sequences from files as
    SeqRecord objects.  However, you may want to create your own SeqRecord
    objects directly.

    Note that while an id is optional, we strongly recommend you supply a
    unique id string for each record.  This is especially important
    if you wish to write your sequences to a file.

    If you don't have the actual sequence, but you do know its length,
    then using the UnknownSeq object from Bio.Seq is appropriate.

    You can create a 'blank' SeqRecord object, and then populate the
    attributes later.

    Raises a TypeError if any argument has an unacceptable type.
    """
    if id is not None and not isinstance(id, basestring):
        #Lots of existing code uses id=None... this may be a bad idea.
        raise TypeError("id argument should be a string")
    if not isinstance(name, basestring):
        raise TypeError("name argument should be a string")
    if not isinstance(description, basestring):
        raise TypeError("description argument should be a string")
    self._seq = seq
    self.id = id
    self.name = name
    self.description = description
    # database cross references (for the whole sequence)
    if dbxrefs is None:
        dbxrefs = []
    elif not isinstance(dbxrefs, list):
        raise TypeError("dbxrefs argument should be a list (of strings)")
    self.dbxrefs = dbxrefs
    # annotations about the whole sequence
    if annotations is None:
        annotations = {}
    elif not isinstance(annotations, dict):
        raise TypeError("annotations argument should be a dict")
    self.annotations = annotations
    if letter_annotations is None:
        # annotations about each letter in the sequence
        if seq is None:
            #Should we allow this and use a normal unrestricted dict?
            self._per_letter_annotations = _RestrictedDict(length=0)
        else:
            try:
                self._per_letter_annotations = \
                    _RestrictedDict(length=len(seq))
            except (TypeError, AttributeError):
                #len() on something unsuitable raises TypeError (or, for
                #odd objects, AttributeError).  This was a bare except,
                #which would also have masked KeyboardInterrupt etc.
                raise TypeError("seq argument should be a Seq object or similar")
    else:
        #This will be handled via the property set function, which will
        #turn this into a _RestrictedDict and thus ensure all the values
        #in the dict are the right length
        self.letter_annotations = letter_annotations
    # annotations about parts of the sequence
    if features is None:
        features = []
    elif not isinstance(features, list):
        raise TypeError("features argument should be a list (of SeqFeature objects)")
    self.features = features
#TODO - Just make this a read only property?
def _set_per_letter_annotations(self, value):
    """Property setter: re-wrap value in a length-checked dict (PRIVATE)."""
    if not isinstance(value, dict):
        raise TypeError("The per-letter-annotations should be a "
                        "(restricted) dictionary.")
    #Work out the required value length from the sequence; if the
    #sequence attribute is unusable fall back to length zero.
    try:
        expected = len(self.seq)
    except AttributeError:
        #e.g. seq is None
        expected = 0
    #Rebuild as a restricted dictionary, then copy the entries across so
    #that each one is length checked on the way in.
    self._per_letter_annotations = _RestrictedDict(length=expected)
    self._per_letter_annotations.update(value)
letter_annotations = property( \
fget=lambda self : self._per_letter_annotations,
fset=_set_per_letter_annotations,
doc="""Dictionary of per-letter-annotation for the sequence.
For example, this can hold quality scores used in FASTQ or QUAL files.
Consider this example using Bio.SeqIO to read in an example Solexa
variant FASTQ file as a SeqRecord:
>>> from Bio import SeqIO
>>> handle = open("Quality/solexa_faked.fastq", "rU")
>>> record = SeqIO.read(handle, "fastq-solexa")
>>> handle.close()
>>> print record.id, record.seq
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print record.letter_annotations.keys()
['solexa_quality']
>>> print record.letter_annotations["solexa_quality"]
[40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5]
The letter_annotations get sliced automatically if you slice the
parent SeqRecord, for example taking the last ten bases:
>>> sub_record = record[-10:]
>>> print sub_record.id, sub_record.seq
slxa_0001_1_0001_01 ACGTNNNNNN
>>> print sub_record.letter_annotations["solexa_quality"]
[4, 3, 2, 1, 0, -1, -2, -3, -4, -5]
Any python sequence (i.e. list, tuple or string) can be recorded in
the SeqRecord's letter_annotations dictionary as long as the length
matches that of the SeqRecord's sequence. e.g.
>>> len(sub_record.letter_annotations)
1
>>> sub_record.letter_annotations["dummy"] = "abcdefghij"
>>> len(sub_record.letter_annotations)
2
You can delete entries from the letter_annotations dictionary as usual:
>>> del sub_record.letter_annotations["solexa_quality"]
>>> sub_record.letter_annotations
{'dummy': 'abcdefghij'}
You can completely clear the dictionary easily as follows:
>>> sub_record.letter_annotations = {}
>>> sub_record.letter_annotations
{}
""")
def _set_seq(self, value):
    """Property setter: replace the sequence (PRIVATE).

    Refuses to replace the sequence while any per-letter-annotations
    exist, since they would no longer be guaranteed to match its length.
    """
    #TODO - Add a deprecation warning that the seq should be write only?
    if self._per_letter_annotations:
        #TODO - Make this a warning? Silently empty the dictionary?
        raise ValueError("You must empty the letter annotations first!")
    self._seq = value
    #Reset the per-letter-annotations with the new expected length,
    #falling back to zero if the new sequence has no usable length.
    try:
        new_length = len(self.seq)
    except AttributeError:
        #e.g. seq is None
        new_length = 0
    self._per_letter_annotations = _RestrictedDict(length=new_length)

#Read via the private attribute, write via _set_seq above.
seq = property(fget=lambda self : self._seq,
               fset=_set_seq,
               doc="The sequence itself, as a Seq or MutableSeq object.")
def __getitem__(self, index):
    """Returns a sub-sequence or an individual letter.

    Slicing, e.g. my_record[5:10], returns a new SeqRecord for
    that sub-sequence with approriate annotation preserved.  The
    name, id and description are kept.

    Any per-letter-annotations are sliced to match the requested
    sub-sequence.  Unless a stride is used, all those features
    which fall fully within the subsequence are included (with
    their locations adjusted accordingly).

    However, the annotations dictionary and the dbxrefs list are
    not used for the new SeqRecord, as in general they may not
    apply to the subsequence.  If you want to preserve them, you
    must explictly copy them to the new SeqRecord yourself.

    Using an integer index, e.g. my_record[5] is shorthand for
    extracting that letter from the sequence, my_record.seq[5].

    For example, consider this short protein and its secondary
    structure as encoded by the PDB (e.g. H for alpha helices),
    plus a simple feature for its histidine self phosphorylation
    site:

    >>> from Bio.Seq import Seq
    >>> from Bio.SeqRecord import SeqRecord
    >>> from Bio.SeqFeature import SeqFeature, FeatureLocation
    >>> from Bio.Alphabet import IUPAC
    >>> rec = SeqRecord(Seq("MAAGVKQLADDRTLLMAGVSHDLRTPLTRIRLAT"
    ...                     "EMMSEQDGYLAESINKDIEECNAIIEQFIDYLR",
    ...                     IUPAC.protein),
    ...                 id="1JOY", name="EnvZ",
    ...                 description="Homodimeric domain of EnvZ from E. coli")
    >>> rec.letter_annotations["secondary_structure"] = "  S  SSSSSSHHHHHTTTHHHHHHHHHHHHHHHHHHHHHHTHHHHHHHHHHHHHHHHHHHHHTT  "
    >>> rec.features.append(SeqFeature(FeatureLocation(20,21),
    ...                     type = "Site"))

    Now let's have a quick look at the full record,

    >>> print rec
    ID: 1JOY
    Name: EnvZ
    Description: Homodimeric domain of EnvZ from E. coli
    Number of features: 1
    Per letter annotation for: secondary_structure
    Seq('MAAGVKQLADDRTLLMAGVSHDLRTPLTRIRLATEMMSEQDGYLAESINKDIEE...YLR', IUPACProtein())
    >>> print rec.letter_annotations["secondary_structure"]
      S  SSSSSSHHHHHTTTHHHHHHHHHHHHHHHHHHHHHHTHHHHHHHHHHHHHHHHHHHHHTT  
    >>> print rec.features[0].location
    [20:21]

    Now let's take a sub sequence, here chosen as the first (fractured)
    alpha helix which includes the histidine phosphorylation site:

    >>> sub = rec[11:41]
    >>> print sub
    ID: 1JOY
    Name: EnvZ
    Description: Homodimeric domain of EnvZ from E. coli
    Number of features: 1
    Per letter annotation for: secondary_structure
    Seq('RTLLMAGVSHDLRTPLTRIRLATEMMSEQD', IUPACProtein())
    >>> print sub.letter_annotations["secondary_structure"]
    HHHHHTTTHHHHHHHHHHHHHHHHHHHHHH
    >>> print sub.features[0].location
    [9:10]

    You can also of course omit the start or end values, for
    example to get the first ten letters only:

    >>> print rec[:10]
    ID: 1JOY
    Name: EnvZ
    Description: Homodimeric domain of EnvZ from E. coli
    Number of features: 0
    Per letter annotation for: secondary_structure
    Seq('MAAGVKQLAD', IUPACProtein())

    Or for the last ten letters:

    >>> print rec[-10:]
    ID: 1JOY
    Name: EnvZ
    Description: Homodimeric domain of EnvZ from E. coli
    Number of features: 0
    Per letter annotation for: secondary_structure
    Seq('IIEQFIDYLR', IUPACProtein())

    If you omit both, then you get a copy of the original record (although
    lacking the annotations and dbxrefs):

    >>> print rec[:]
    ID: 1JOY
    Name: EnvZ
    Description: Homodimeric domain of EnvZ from E. coli
    Number of features: 1
    Per letter annotation for: secondary_structure
    Seq('MAAGVKQLADDRTLLMAGVSHDLRTPLTRIRLATEMMSEQDGYLAESINKDIEE...YLR', IUPACProtein())

    Finally, indexing with a simple integer is shorthand for pulling out
    that letter from the sequence directly:

    >>> rec[5]
    'K'
    >>> rec.seq[5]
    'K'

    Raises a ValueError if the index is neither an integer nor a slice,
    or if a slice is requested when the sequence is None.
    """
    if isinstance(index, int):
        #NOTE - The sequence level annotation like the id, name, etc
        #do not really apply to a single character.  However, should
        #we try and expose any per-letter-annotation here?  If so how?
        return self.seq[index]
    elif isinstance(index, slice):
        if self.seq is None:
            raise ValueError("If the sequence is None, we cannot slice it.")
        parent_length = len(self)
        answer = self.__class__(self.seq[index],
                                id=self.id,
                                name=self.name,
                                description=self.description)
        #TODO - The desription may no longer apply.
        #It would be safer to change it to something
        #generic like "edited" or the default value.

        #Don't copy the annotation dict and dbxefs list,
        #they may not apply to a subsequence.
        #answer.annotations = dict(self.annotations.iteritems())
        #answer.dbxrefs = self.dbxrefs[:]
        #TODO - Review this in light of adding SeqRecord objects?

        #TODO - Cope with strides by generating ambiguous locations?
        start, stop, step = index.indices(parent_length)
        if step == 1:
            #Select relevant features, add them with shifted locations
            #assert str(self.seq)[index] == str(self.seq)[start:stop]
            for f in self.features:
                if f.ref or f.ref_db:
                    #TODO - Implement this (with lots of tests)?
                    import warnings
                    warnings.warn("When slicing SeqRecord objects, any "
                                  "SeqFeature referencing other sequences (e.g. "
                                  "from segmented GenBank records) are ignored.")
                    continue
                if start <= f.location.nofuzzy_start \
                        and f.location.nofuzzy_end <= stop:
                    answer.features.append(f._shift(-start))
        #Slice all the values to match the sliced sequence
        #(this should also work with strides, even negative strides).
        #Use items() not the Python 2 only iteritems() - equivalent here.
        for key, value in self.letter_annotations.items():
            answer._per_letter_annotations[key] = value[index]
        return answer
    #Use the call form of raise, consistent with the other raise
    #statements in this file (the old "raise ValueError, msg" statement
    #syntax is Python 2 only).
    raise ValueError("Invalid index")
def __iter__(self):
    """Iterate over the letters in the sequence.

    For example, using Bio.SeqIO to read in a protein FASTA file:

    >>> from Bio import SeqIO
    >>> record = SeqIO.read(open("Fasta/loveliesbleeding.pro"),"fasta")
    >>> for amino in record:
    ...     print amino
    ...     if amino == "L": break
    X
    A
    G
    L
    >>> print record.seq[3]
    L

    This is just a shortcut for iterating over the sequence directly:

    >>> for amino in record.seq:
    ...     print amino
    ...     if amino == "L": break
    X
    A
    G
    L
    >>> print record.seq[3]
    L

    Note that this does not facilitate iteration together with any
    per-letter-annotation.  However, you can achieve that using the
    python zip function on the record (or its sequence) and the relevant
    per-letter-annotation:

    >>> from Bio import SeqIO
    >>> rec = SeqIO.read(open("Quality/solexa_faked.fastq", "rU"),
    ...                  "fastq-solexa")
    >>> print rec.id, rec.seq
    slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
    >>> print rec.letter_annotations.keys()
    ['solexa_quality']
    >>> for nuc, qual in zip(rec,rec.letter_annotations["solexa_quality"]):
    ...     if qual > 35:
    ...         print nuc, qual
    A 40
    C 39
    G 38
    T 37
    A 36

    You may agree that using zip(rec.seq, ...) is more explicit than using
    zip(rec, ...) as shown above.
    """
    #Simply delegate to the sequence object's own iterator.
    return iter(self.seq)
def __contains__(self, char):
    """Implements the 'in' keyword, searches the sequence.

    e.g.

    >>> from Bio import SeqIO
    >>> record = SeqIO.read(open("Fasta/sweetpea.nu"), "fasta")
    >>> "GAATTC" in record
    False
    >>> "AAA" in record
    True

    This essentially acts as a proxy for using "in" on the sequence:

    >>> "GAATTC" in record.seq
    False
    >>> "AAA" in record.seq
    True

    Note that you can also use Seq objects as the query,

    >>> from Bio.Seq import Seq
    >>> from Bio.Alphabet import generic_dna
    >>> Seq("AAA") in record
    True
    >>> Seq("AAA", generic_dna) in record
    True

    See also the Seq object's __contains__ method.
    """
    #Simply delegate to the sequence object's __contains__ method.
    return char in self.seq
def __str__(self):
    """A human readable summary of the record and its annotation (string).

    The python built in function str works by calling the object's ___str__
    method.  e.g.

    >>> from Bio.Seq import Seq
    >>> from Bio.SeqRecord import SeqRecord
    >>> from Bio.Alphabet import IUPAC
    >>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF",
    ...                        IUPAC.protein),
    ...                    id="YP_025292.1", name="HokC",
    ...                    description="toxic membrane protein, small")
    >>> print str(record)
    ID: YP_025292.1
    Name: HokC
    Description: toxic membrane protein, small
    Number of features: 0
    Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF', IUPACProtein())

    In this example you don't actually need to call str explicity, as the
    print command does this automatically:

    >>> print record
    ID: YP_025292.1
    Name: HokC
    Description: toxic membrane protein, small
    Number of features: 0
    Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF', IUPACProtein())

    Note that long sequences are shown truncated.
    """
    #Build up the summary one line at a time, skipping empty fields.
    parts = []
    if self.id:
        parts.append("ID: %s" % self.id)
    if self.name:
        parts.append("Name: %s" % self.name)
    if self.description:
        parts.append("Description: %s" % self.description)
    if self.dbxrefs:
        parts.append("Database cross-references: "
                     + ", ".join(self.dbxrefs))
    parts.append("Number of features: %i" % len(self.features))
    for key in self.annotations:
        parts.append("/%s=%s" % (key, str(self.annotations[key])))
    if self.letter_annotations:
        parts.append("Per letter annotation for: "
                     + ", ".join(self.letter_annotations.keys()))
    #Don't want to include the entire sequence,
    #and showing the alphabet is useful:
    parts.append(repr(self.seq))
    return "\n".join(parts)
def __repr__(self):
    """A concise summary of the record for debugging (string).

    The python built in function repr works by calling the object's ___repr__
    method.  e.g.

    >>> from Bio.Seq import Seq
    >>> from Bio.SeqRecord import SeqRecord
    >>> from Bio.Alphabet import generic_protein
    >>> rec = SeqRecord(Seq("MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKAT"
    ...                     +"GEMKEQTEWHRVVLFGKLAEVASEYLRKGSQVYIEGQLRTRKWTDQ"
    ...                     +"SGQDRYTTEVVVNVGGTMQMLGGRQGGGAPAGGNIGGGQPQGGWGQ"
    ...                     +"PQQPQGGNQFSGGAQSRPQQSAPAAPSNEPPMDFDDDIPF",
    ...                     generic_protein),
    ...                 id="NP_418483.1", name="b4059",
    ...                 description="ssDNA-binding protein",
    ...                 dbxrefs=["ASAP:13298", "GI:16131885", "GeneID:948570"])
    >>> print repr(rec)
    SeqRecord(seq=Seq('MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKATGEMKEQTE...IPF', ProteinAlphabet()), id='NP_418483.1', name='b4059', description='ssDNA-binding protein', dbxrefs=['ASAP:13298', 'GI:16131885', 'GeneID:948570'])

    At the python prompt you can also use this shorthand:

    >>> rec
    SeqRecord(seq=Seq('MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKATGEMKEQTE...IPF', ProteinAlphabet()), id='NP_418483.1', name='b4059', description='ssDNA-binding protein', dbxrefs=['ASAP:13298', 'GI:16131885', 'GeneID:948570'])

    Note that long sequences are shown truncated.  Also note that any
    annotations, letter_annotations and features are not shown (as they
    would lead to a very long string).
    """
    #Use %r conversions rather than mapping repr over a tuple - the
    #resulting string is identical.
    return "%s(seq=%r, id=%r, name=%r, description=%r, dbxrefs=%r)" \
           % (self.__class__.__name__, self.seq, self.id,
              self.name, self.description, self.dbxrefs)
def format(self, format):
    r"""Returns the record as a string in the specified file format.

    The format should be a lower case string supported as an output
    format by Bio.SeqIO, which is used to turn the SeqRecord into a
    string.  e.g.

    >>> from Bio.Seq import Seq
    >>> from Bio.SeqRecord import SeqRecord
    >>> from Bio.Alphabet import IUPAC
    >>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF",
    ...                        IUPAC.protein),
    ...                    id="YP_025292.1", name="HokC",
    ...                    description="toxic membrane protein")
    >>> record.format("fasta")
    '>YP_025292.1 toxic membrane protein\nMKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF\n'
    >>> print record.format("fasta")
    >YP_025292.1 toxic membrane protein
    MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
    <BLANKLINE>

    The python print command automatically appends a new line, meaning
    in this example a blank line is shown.  If you look at the string
    representation you can see there is a trailing new line (shown as
    slash n) which is important when writing to a file or if
    concatenating mutliple sequence strings together.

    Note that this method will NOT work on every possible file format
    supported by Bio.SeqIO (e.g. some are for multiple sequences only).
    """
    #NOTE - The parameter name "format" shadows the builtin of the same
    #name, but it is part of the established public API so is kept.
    #See also the __format__ added for Python 2.6 / 3.0, PEP 3101
    #See also the Bio.Align.Generic.Alignment class and its format()
    return self.__format__(format)
def __format__(self, format_spec):
    """Returns the record as a string in the specified file format.

    This method supports the python format() function added in
    Python 2.6/3.0.  The format_spec should be a lower case string
    supported by Bio.SeqIO as an output file format.  See also the
    SeqRecord's format() method.
    """
    if not format_spec:
        #Follow python convention and default to using __str__
        return str(self)
    from Bio import SeqIO
    if format_spec in SeqIO._BinaryFormats:
        #Return bytes on Python 3
        try:
            #This is in Python 2.6+, but we need it on Python 3
            from io import BytesIO
        except ImportError:
            #Must be on Python 2.5 or older
            from StringIO import StringIO
            handle = StringIO()
        else:
            handle = BytesIO()
    else:
        from StringIO import StringIO
        handle = StringIO()
    SeqIO.write(self, handle, format_spec)
    return handle.getvalue()
def __len__(self):
    """Returns the length of the sequence.

    For example, using Bio.SeqIO to read in a FASTA nucleotide file:

    >>> from Bio import SeqIO
    >>> record = SeqIO.read(open("Fasta/sweetpea.nu"),"fasta")
    >>> len(record)
    309
    >>> len(record.seq)
    309
    """
    #Simply delegate to the sequence object's length.
    return len(self.seq)
def __nonzero__(self):
    """Returns True regardless of the length of the sequence.

    This behaviour is for backwards compatibility, since until the
    __len__ method was added, a SeqRecord always evaluated as True.

    Note that in comparison, a Seq object will evaluate to False if it
    has a zero length sequence.

    WARNING: The SeqRecord may in future evaluate to False when its
    sequence is of zero length (in order to better match the Seq
    object behaviour)!
    """
    #Deliberately a constant - see the docstring warning above (this is
    #the Python 2 truth-value hook; on Python 3 it would be __bool__).
    return True
def __add__(self, other):
    """Add another sequence or string to this sequence.

    The other sequence can be a SeqRecord object, a Seq object (or
    similar, e.g. a MutableSeq) or a plain Python string.  If you add
    a plain string or a Seq (like) object, the new SeqRecord will simply
    have this appended to the existing data.  However, any per letter
    annotation will be lost:

    >>> from Bio import SeqIO
    >>> handle = open("Quality/solexa_faked.fastq", "rU")
    >>> record = SeqIO.read(handle, "fastq-solexa")
    >>> handle.close()
    >>> print record.id, record.seq
    slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
    >>> print record.letter_annotations.keys()
    ['solexa_quality']
    >>> new = record + "ACT"
    >>> print new.id, new.seq
    slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNNACT
    >>> print new.letter_annotations.keys()
    []

    The new record will attempt to combine the annotation, but for any
    ambiguities (e.g. different names) it defaults to omitting that
    annotation.

    >>> from Bio import SeqIO
    >>> handle = open("GenBank/pBAD30.gb")
    >>> plasmid = SeqIO.read(handle, "gb")
    >>> handle.close()
    >>> print plasmid.id, len(plasmid)
    pBAD30 4923

    Now let's cut the plasmid into two pieces, and join them back up the
    other way round (i.e. shift the starting point on this plasmid, have
    a look at the annotated features in the original file to see why this
    particular split point might make sense):

    >>> left = plasmid[:3765]
    >>> right = plasmid[3765:]
    >>> new = right + left
    >>> print new.id, len(new)
    pBAD30 4923
    >>> str(new.seq) == str(right.seq + left.seq)
    True
    >>> len(new.features) == len(left.features) + len(right.features)
    True

    When we add the left and right SeqRecord objects, their annotation
    is all consistent, so it is all conserved in the new SeqRecord:

    >>> new.id == left.id == right.id == plasmid.id
    True
    >>> new.name == left.name == right.name == plasmid.name
    True
    >>> new.description == plasmid.description
    True
    >>> new.annotations == left.annotations == right.annotations
    True
    >>> new.letter_annotations == plasmid.letter_annotations
    True
    >>> new.dbxrefs == left.dbxrefs == right.dbxrefs
    True

    However, we should point out that when we sliced the SeqRecord,
    any annotations dictionary or dbxrefs list entries were lost.
    You can explicitly copy them like this:

    >>> new.annotations = plasmid.annotations.copy()
    >>> new.dbxrefs = plasmid.dbxrefs[:]
    """
    if not isinstance(other, SeqRecord):
        #Assume it is a string or a Seq.
        #Note can't transfer any per-letter-annotations
        return SeqRecord(self.seq + other,
                         id = self.id, name = self.name,
                         description = self.description,
                         features = self.features[:],
                         annotations = self.annotations.copy(),
                         dbxrefs = self.dbxrefs[:])
    #Adding two SeqRecord objects... must merge annotation.
    answer = SeqRecord(self.seq + other.seq,
                       features = self.features[:],
                       dbxrefs = self.dbxrefs[:])
    #Will take all the features and all the db cross refs,
    #shifting the other record's features to their new coordinates.
    length = len(self)
    for f in other.features:
        answer.features.append(f._shift(length))
    for ref in other.dbxrefs:
        if ref not in answer.dbxrefs:
            answer.dbxrefs.append(ref)
    #Take common id/name/description/annotation
    if self.id == other.id:
        answer.id = self.id
    if self.name == other.name:
        answer.name = self.name
    if self.description == other.description:
        answer.description = self.description
    #Use items() not the Python 2 only iteritems() - for these read-only
    #loops the behaviour is identical, and items() also works on Python 3.
    for k, v in self.annotations.items():
        if k in other.annotations and other.annotations[k] == v:
            answer.annotations[k] = v
    #Can append matching per-letter-annotation
    for k, v in self.letter_annotations.items():
        if k in other.letter_annotations:
            answer.letter_annotations[k] = v + other.letter_annotations[k]
    return answer
def __radd__(self, other):
    """Add another sequence or string to this sequence (from the left).

    This method handles adding a Seq object (or similar, e.g. MutableSeq)
    or a plain Python string (on the left) to a SeqRecord (on the right).
    See the __add__ method for more details, but for example:

    >>> from Bio import SeqIO
    >>> handle = open("Quality/solexa_faked.fastq", "rU")
    >>> record = SeqIO.read(handle, "fastq-solexa")
    >>> handle.close()
    >>> print record.id, record.seq
    slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
    >>> print record.letter_annotations.keys()
    ['solexa_quality']
    >>> new = "ACT" + record
    >>> print new.id, new.seq
    slxa_0001_1_0001_01 ACTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
    >>> print new.letter_annotations.keys()
    []
    """
    if isinstance(other, SeqRecord):
        #Two SeqRecord objects should have been dispatched to the left
        #operand's __add__ instead of reaching this method.
        raise RuntimeError("This should have happened via the __add__ of "
                           "the other SeqRecord being added!")
    #Assume it is a string or a Seq.
    #Note can't transfer any per-letter-annotations.
    #All our features must be shifted right by the prefix length.
    shift = len(other)
    shifted_features = [f._shift(shift) for f in self.features]
    return SeqRecord(other + self.seq,
                     id = self.id, name = self.name,
                     description = self.description,
                     features = shifted_features,
                     annotations = self.annotations.copy(),
                     dbxrefs = self.dbxrefs[:])
def upper(self):
    """Returns a copy of the record with an upper case sequence.

    All the annotation is preserved unchanged.  e.g.

    >>> from Bio.Alphabet import generic_dna
    >>> from Bio.Seq import Seq
    >>> from Bio.SeqRecord import SeqRecord
    >>> record = SeqRecord(Seq("acgtACGT", generic_dna), id="Test",
    ...                    description = "Made up for this example")
    >>> record.letter_annotations["phred_quality"] = [1,2,3,4,5,6,7,8]
    >>> print record.upper().format("fastq")
    @Test Made up for this example
    ACGTACGT
    +
    "#$%&'()
    <BLANKLINE>

    Naturally, there is a matching lower method:

    >>> print record.lower().format("fastq")
    @Test Made up for this example
    acgtacgt
    +
    "#$%&'()
    <BLANKLINE>
    """
    #Delegate to the sequence's upper method, taking shallow copies of
    #all the mutable annotation containers so the new record is
    #independent of this one.
    return SeqRecord(self.seq.upper(),
                     id = self.id,
                     name = self.name,
                     description = self.description,
                     dbxrefs = list(self.dbxrefs),
                     features = list(self.features),
                     annotations = dict(self.annotations),
                     letter_annotations = dict(self.letter_annotations))
def lower(self):
    """Returns a copy of the record with a lower case sequence.

    All the annotation is preserved unchanged.  e.g.

    >>> from Bio import SeqIO
    >>> record = SeqIO.read("Fasta/aster.pro", "fasta")
    >>> print record.format("fasta")
    >gi|3298468|dbj|BAA31520.1| SAMIPF
    GGHVNPAVTFGAFVGGNITLLRGIVYIIAQLLGSTVACLLLKFVTNDMAVGVFSLSAGVG
    VTNALVFEIVMTFGLVYTVYATAIDPKKGSLGTIAPIAIGFIVGANI
    <BLANKLINE>
    >>> print record.lower().format("fasta")
    >gi|3298468|dbj|BAA31520.1| SAMIPF
    gghvnpavtfgafvggnitllrgivyiiaqllgstvaclllkfvtndmavgvfslsagvg
    vtnalvfeivmtfglvytvyataidpkkgslgtiapiaigfivgani
    <BLANKLINE>

    To take a more annotation rich example,

    >>> from Bio import SeqIO
    >>> old = SeqIO.read("EMBL/TRBG361.embl", "embl")
    >>> len(old.features)
    3
    >>> new = old.lower()
    >>> len(old.features) == len(new.features)
    True
    >>> old.annotations["organism"] == new.annotations["organism"]
    True
    >>> old.dbxrefs == new.dbxrefs
    True
    """
    #Delegate to the sequence's lower method, taking shallow copies of
    #all the mutable annotation containers so the new record is
    #independent of this one.
    return SeqRecord(self.seq.lower(),
                     id = self.id,
                     name = self.name,
                     description = self.description,
                     dbxrefs = list(self.dbxrefs),
                     features = list(self.features),
                     annotations = dict(self.annotations),
                     letter_annotations = dict(self.letter_annotations))
    def reverse_complement(self, id=False, name=False, description=False,
                           features=True, annotations=False,
                           letter_annotations=True, dbxrefs=False):
        """Returns new SeqRecord with reverse complement sequence.

        You can specify the returned record's id, name and description as
        strings, or True to keep that of the parent, or False for a default.

        You can specify the returned record's features with a list of
        SeqFeature objects, or True to keep that of the parent, or False to
        omit them. The default is to keep the original features (with the
        strand and locations adjusted).

        You can also specify both the returned record's annotations and
        letter_annotations as dictionaries, True to keep that of the parent,
        or False to omit them. The default is to keep the original
        annotations (with the letter annotations reversed).

        To show what happens to the per-letter annotations, consider an
        example Solexa variant FASTQ file with a single entry, which we'll
        read in as a SeqRecord:

        >>> from Bio import SeqIO
        >>> handle = open("Quality/solexa_faked.fastq", "rU")
        >>> record = SeqIO.read(handle, "fastq-solexa")
        >>> handle.close()
        >>> print record.id, record.seq
        slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
        >>> print record.letter_annotations.keys()
        ['solexa_quality']
        >>> print record.letter_annotations["solexa_quality"]
        [40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5]

        Now take the reverse complement,

        >>> rc_record = record.reverse_complement(id=record.id+"_rc")
        >>> print rc_record.id, rc_record.seq
        slxa_0001_1_0001_01_rc NNNNNNACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT

        Notice that the per-letter-annotations have also been reversed,
        although this may not be appropriate for all cases.

        >>> print rc_record.letter_annotations["solexa_quality"]
        [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40]

        Now for the features, we need a different example. Parsing a GenBank
        file is probably the easiest way to get a nice example with features
        in it...

        >>> from Bio import SeqIO
        >>> handle = open("GenBank/pBAD30.gb")
        >>> plasmid = SeqIO.read(handle, "gb")
        >>> handle.close()
        >>> print plasmid.id, len(plasmid)
        pBAD30 4923
        >>> plasmid.seq
        Seq('GCTAGCGGAGTGTATACTGGCTTACTATGTTGGCACTGATGAGGGTGTCAGTGA...ATG', IUPACAmbiguousDNA())
        >>> len(plasmid.features)
        13

        Now, let's take the reverse complement of this whole plasmid:

        >>> rc_plasmid = plasmid.reverse_complement(id=plasmid.id+"_rc")
        >>> print rc_plasmid.id, len(rc_plasmid)
        pBAD30_rc 4923
        >>> rc_plasmid.seq
        Seq('CATGGGCAAATATTATACGCAAGGCGACAAGGTGCTGATGCCGCTGGCGATTCA...AGC', IUPACAmbiguousDNA())
        >>> len(rc_plasmid.features)
        13

        Let's compare the first CDS feature - it has gone from being the
        second feature (index 1) to the second last feature (index -2), its
        strand has changed, and the location switched round.

        >>> print plasmid.features[1]
        type: CDS
        location: [1081:1960](-)
        qualifiers:
            Key: label, Value: ['araC']
            Key: note, Value: ['araC regulator of the arabinose BAD promoter']
            Key: vntifkey, Value: ['4']
        <BLANKLINE>
        >>> print rc_plasmid.features[-2]
        type: CDS
        location: [2963:3842](+)
        qualifiers:
            Key: label, Value: ['araC']
            Key: note, Value: ['araC regulator of the arabinose BAD promoter']
            Key: vntifkey, Value: ['4']
        <BLANKLINE>

        You can check this new location, based on the length of the plasmid:

        >>> len(plasmid) - 1081
        3842
        >>> len(plasmid) - 1960
        2963

        Note that if the SeqFeature annotation includes any strand specific
        information (e.g. base changes for a SNP), this information is not
        amended, and would need correction after the reverse complement.

        Note trying to reverse complement a protein SeqRecord raises an
        exception:

        >>> from Bio.SeqRecord import SeqRecord
        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet import IUPAC
        >>> protein_rec = SeqRecord(Seq("MAIVMGR", IUPAC.protein), id="Test")
        >>> protein_rec.reverse_complement()
        Traceback (most recent call last):
        ...
        ValueError: Proteins do not have complements!

        Also note you can reverse complement a SeqRecord using a MutableSeq:

        >>> from Bio.SeqRecord import SeqRecord
        >>> from Bio.Seq import MutableSeq
        >>> from Bio.Alphabet import generic_dna
        >>> rec = SeqRecord(MutableSeq("ACGT", generic_dna), id="Test")
        >>> rec.seq[0] = "T"
        >>> print rec.id, rec.seq
        Test TCGT
        >>> rc = rec.reverse_complement(id=True)
        >>> print rc.id, rc.seq
        Test ACGA
        """
        from Bio.Seq import MutableSeq #Lazy to avoid circular imports
        if isinstance(self.seq, MutableSeq):
            #Currently the MutableSeq reverse complement is in situ
            answer = SeqRecord(self.seq.toseq().reverse_complement())
        else:
            answer = SeqRecord(self.seq.reverse_complement())
        #Each attribute below follows the same pattern: an explicit value
        #replaces it, True copies it from the parent, False leaves the
        #SeqRecord default. (basestring/iteritems are Python 2 idioms.)
        if isinstance(id, basestring):
            answer.id = id
        elif id:
            answer.id = self.id
        if isinstance(name, basestring):
            answer.name = name
        elif name:
            answer.name = self.name
        if isinstance(description, basestring):
            answer.description = description
        elif description:
            answer.description = self.description
        if isinstance(dbxrefs, list):
            answer.dbxrefs = dbxrefs
        elif dbxrefs:
            #Copy the old dbxrefs
            answer.dbxrefs = self.dbxrefs[:]
        if isinstance(features, list):
            answer.features = features
        elif features:
            #Copy the old features, adjusting location and string
            l = len(answer)
            answer.features = [f._flip(l) for f in self.features]
            #The old list should have been sorted by start location,
            #reversing it will leave it sorted by what is now the end position,
            #so we need to resort in case of overlapping features.
            #NOTE - In the common case of gene before CDS (and similar) with
            #the exact same locations, this will still maintain gene before CDS
            answer.features.sort(key=lambda x : x.location.start.position)
        if isinstance(annotations, dict):
            answer.annotations = annotations
        elif annotations:
            #Copy the old annotations,
            answer.annotations = self.annotations.copy()
        if isinstance(letter_annotations, dict):
            answer.letter_annotations = letter_annotations
        elif letter_annotations:
            #Copy the old per letter annotations, reversing them
            for key, value in self.letter_annotations.iteritems():
                answer._per_letter_annotations[key] = value[::-1]
        return answer
def _test():
"""Run the Bio.SeqRecord module's doctests (PRIVATE).
This will try and locate the unit tests directory, and run the doctests
from there in order that the relative paths used in the examples work.
"""
import doctest
import os
if os.path.isdir(os.path.join("..","Tests")):
print "Runing doctests..."
cur_dir = os.path.abspath(os.curdir)
os.chdir(os.path.join("..","Tests"))
doctest.testmod()
os.chdir(cur_dir)
del cur_dir
print "Done"
elif os.path.isdir(os.path.join("Tests")):
print "Runing doctests..."
cur_dir = os.path.abspath(os.curdir)
os.chdir(os.path.join("Tests"))
doctest.testmod()
os.chdir(cur_dir)
del cur_dir
print "Done"
#Run the doctests only when executed as a script, not on import.
if __name__ == "__main__":
    _test()
|
bryback/quickseq
|
genescript/Bio/SeqRecord.py
|
Python
|
mit
| 47,663
|
[
"BioPerl",
"Biopython"
] |
96343ae9842a1bfacfd9bfbb8338348672006c0c6db84d73ac436dbd850e6071
|
# TODO:
# * Deal with hot plugging / unplugging of sticks
# * Can I get a unique ID for a model / instance of a stick?
import pyglet
from direct.showbase.ShowBase import ShowBase
from panda3d.core import WindowProperties
from panda3d.core import Filename
class World(ShowBase):
    """Minimal Panda3D application that polls a pyglet Joypad every frame."""
    def __init__(self):
        ShowBase.__init__(self)
        self.j = Joypad()
        # Pump the joystick event loop once per rendered frame.
        self.taskMgr.add(self.update, 'updateJoypad')
    def update(self, task):
        # NOTE(review): dt is unused; ``globalClock`` is not defined in this
        # file -- presumably a global injected by Panda3D at runtime. Confirm
        # before removing this line.
        dt = globalClock.getDt()
        self.j.update()
        return task.cont
class Joypad:
    """Wire every joystick pyglet can find to a single Controller handler."""

    def __init__(self):
        pyglet.app.platform_event_loop.start()
        handler = Controller()
        sticks = pyglet.input.get_joysticks()
        print("Found %s joysticks" % (len(sticks), ))
        for stick in sticks:
            stick.push_handlers(handler)
            stick.open()

    def clean(self):
        """Close every joystick device again."""
        for stick in pyglet.input.get_joysticks():
            stick.close()

    def update(self):
        """Pump the pyglet platform event loop for a short time slice."""
        pyglet.app.platform_event_loop.step(0.003)
class Controller:
    """Debug event handler that echoes every joystick event to stdout."""

    def on_joybutton_press(self, joystick, button):
        """Log a button press."""
        print(joystick, button)

    def on_joybutton_release(self, joystick, button):
        """Log a button release."""
        print(joystick, button)

    def on_joyaxis_motion(self, joystick, axis, value):
        """Log motion on an analog axis."""
        print(joystick, axis, value)

    def on_joyhat_motion(self, joystick, hat_x, hat_y):
        """Log motion on the hat (d-pad)."""
        print(joystick, hat_x, hat_y)
def main():
    """Configure the game window, build the World and enter the main loop."""
    window_props = WindowProperties()
    window_props.setTitle('Hostil Galaxy')
    window_props.setCursorFilename(Filename.binaryFilename('cursor.ico'))
    window_props.setCursorHidden(False)
    window_props.setFullscreen(False)
    window_props.setSize(800, 600)

    world = World()
    world.win.setClearColor((1, 1, 1, 1))
    world.win.requestProperties(window_props)
    world.setFrameRateMeter(True)
    world.run()


if __name__ == "__main__":
    main()
|
TurBoss/HostilGalaxy
|
test_joystick.py
|
Python
|
gpl-3.0
| 1,858
|
[
"Galaxy"
] |
3b6fa0f76bc6ce92ab69cb65bc18eb73cfb5f42945d3473c4170a2baed2e1595
|
# -*- coding: utf-8 -*-
"""
weight.py
RAPIDpy
Created by Alan D Snow, 2016.
Based on RAPID_Toolbox for ArcMap
License: BSD 3-Clause
"""
import csv
from datetime import datetime
from functools import partial
from netCDF4 import Dataset
import numpy as np
from pyproj import Proj, transform
from shapely.wkb import loads as shapely_loads
from shapely.ops import transform as shapely_transform
from shapely.geos import TopologicalError
import rtree # http://toblerity.org/rtree/install.html
from osgeo import gdal, ogr, osr
# local
from .voronoi import pointsToVoronoiGridArray
from ..helper_functions import log, open_csv
gdal.UseExceptions()
def get_poly_area_geo(poly):
    """
    Calculates the area in meters squared of the individual polygon

    Parameters
    ----------
    poly: shapely geometry in geographic (lon/lat, EPSG:4326) coordinates.

    Returns
    -------
    float: the polygon area in square meters.
    """
    minx, miny, maxx, maxy = poly.bounds
    # reproject polygon to an Albers Equal Area projection centered on the
    # polygon's bounding box to get a metric area.
    # BUGFIX: the second standard parallel must be +lat_2 -- it was passed
    # as a duplicate +lat_1, so the first parallel was silently overridden
    # and the computed areas were wrong.
    reprojected_for_area = Proj("+proj=aea +lat_1={0} +lat_2={1} "
                                "+lat_0={2} +lon_0={3}"
                                .format(miny,
                                        maxy,
                                        (miny + maxy) / 2.0,
                                        (minx + maxx) / 2.0))
    geographic_proj = Proj(init='epsg:4326')
    project_func = partial(transform,
                           geographic_proj,
                           reprojected_for_area)
    reprojected_poly = shapely_transform(project_func, poly)
    return reprojected_poly.area
def _get_lat_lon_indices(lsm_lat_array, lsm_lon_array, lat, lon):
"""
Determines the index in the array (1D or 2D) where the
lat/lon point is
"""
if lsm_lat_array.ndim == 2 and lsm_lon_array.ndim == 2:
lsm_lat_indices_from_lat, lsm_lon_indices_from_lat = \
np.where((lsm_lat_array == lat))
lsm_lat_indices_from_lon, lsm_lon_indices_from_lon = \
np.where((lsm_lon_array == lon))
index_lsm_grid_lat = np.intersect1d(lsm_lat_indices_from_lat,
lsm_lat_indices_from_lon)[0]
index_lsm_grid_lon = np.intersect1d(lsm_lon_indices_from_lat,
lsm_lon_indices_from_lon)[0]
elif lsm_lat_array.ndim == 1 and lsm_lon_array.ndim == 1:
index_lsm_grid_lon = np.where(lsm_lon_array == lon)[0][0]
index_lsm_grid_lat = np.where(lsm_lat_array == lat)[0][0]
else:
raise IndexError("Lat/Lon lists have invalid dimensions. "
"Only 1D or 2D arrays allowed ...")
return index_lsm_grid_lat, index_lsm_grid_lon
def find_nearest(array, value):
    """
    Get the nearest index to value searching for
    """
    distances = np.abs(array - value)
    return distances.argmin()
def rtree_create_weight_table(lsm_grid_lat, lsm_grid_lon,
                              in_catchment_shapefile, river_id,
                              in_rapid_connect, out_weight_table,
                              file_geodatabase=None, area_id=None):
    """
    Create Weight Table for Land Surface Model Grids

    Parameters
    ----------
    lsm_grid_lat: LSM grid latitudes (1D or 2D; a 3D array is assumed to
        have time as its first axis and is reduced to the first slice).
    lsm_grid_lon: LSM grid longitudes (same shape rules as the latitudes).
    in_catchment_shapefile: str
        Path to the catchment shapefile, or the layer name inside the
        geodatabase when *file_geodatabase* is given.
    river_id: str
        Name of the attribute field holding the river ID.
    in_rapid_connect: str
        Path to the RAPID connectivity CSV file.
    out_weight_table: str
        Path of the weight table CSV file to write.
    file_geodatabase: str, optional
        Path to a file geodatabase containing the catchment layer.
    area_id: str, optional
        Field with precomputed catchment areas in m^2; when absent the
        intersection areas are computed geodesically.
    """
    time_start_all = datetime.utcnow()
    if lsm_grid_lat.ndim == 3 and lsm_grid_lon.ndim == 3:
        # assume first dimension is time
        lsm_grid_lat = lsm_grid_lat[0]
        lsm_grid_lon = lsm_grid_lon[0]
    log("Generating LSM Grid Thiessen Array ...")
    if file_geodatabase:
        gdb_driver = ogr.GetDriverByName("OpenFileGDB")
        ogr_file_geodatabase = gdb_driver.Open(file_geodatabase, 0)
        ogr_catchment_shapefile_lyr = \
            ogr_file_geodatabase.GetLayer(in_catchment_shapefile)
    else:
        ogr_catchment_shapefile = ogr.Open(in_catchment_shapefile)
        ogr_catchment_shapefile_lyr = ogr_catchment_shapefile.GetLayer()
    ogr_catchment_shapefile_lyr_proj = \
        ogr_catchment_shapefile_lyr.GetSpatialRef()
    original_catchment_proj = \
        Proj(ogr_catchment_shapefile_lyr_proj.ExportToProj4())
    geographic_proj = Proj(init='EPSG:4326')
    extent = ogr_catchment_shapefile_lyr.GetExtent()
    if original_catchment_proj != geographic_proj:
        # Reproject the layer extent to geographic coordinates so the
        # Thiessen grid is built over the right area.
        x, y = transform(original_catchment_proj,
                         geographic_proj,
                         [extent[0], extent[1]],
                         [extent[2], extent[3]])
        extent = [min(x), max(x), min(y), max(y)]
    lsm_grid_feature_list = \
        pointsToVoronoiGridArray(lsm_grid_lat, lsm_grid_lon, extent)
    # ##COMMENTED LINES FOR TESTING
    # import os
    # from .voronoi import pointsToVoronoiGridShapefile
    # vor_shp_path = \
    #     os.path.join(os.path.dirname(in_catchment_shapefile), "test_grid.shp")
    # pointsToVoronoiGridShapefile(lsm_grid_lat, lsm_grid_lon,
    #                              vor_shp_path, extent)
    time_end_lsm_grid_thiessen = datetime.utcnow()
    log(time_end_lsm_grid_thiessen - time_start_all)
    log("Generating LSM Grid Rtree ...")
    rtree_idx = rtree.index.Index()
    # Populate R-tree index with bounds of ECMWF grid cells
    for lsm_grid_pos, lsm_grid_feature in enumerate(lsm_grid_feature_list):
        rtree_idx.insert(lsm_grid_pos, lsm_grid_feature['polygon'].bounds)
    time_end_lsm_grid_rtree = datetime.utcnow()
    log(time_end_lsm_grid_rtree - time_end_lsm_grid_thiessen)
    log("Retrieving catchment river id list ...")
    number_of_catchment_features = \
        ogr_catchment_shapefile_lyr.GetFeatureCount()
    catchment_rivid_list = \
        np.zeros(number_of_catchment_features, dtype=np.int32)
    for feature_idx, catchment_feature in \
            enumerate(ogr_catchment_shapefile_lyr):
        catchment_rivid_list[feature_idx] = \
            catchment_feature.GetField(river_id)
    log("Reading in RAPID connect file ...")
    rapid_connect_rivid_list = np.loadtxt(in_rapid_connect,
                                          delimiter=",",
                                          usecols=(0,),
                                          ndmin=1,
                                          dtype=int)
    log("Find LSM grid cells that intersect with each catchment")
    log("and write out weight table ...")
    # The dummy row points at an arbitrary (the first) grid cell with zero
    # area; it keeps the weight table aligned with the connectivity file
    # when a river ID has no catchment or no grid intersection.
    dummy_lat_index, dummy_lon_index = \
        _get_lat_lon_indices(lsm_grid_lat,
                             lsm_grid_lon,
                             lsm_grid_feature_list[0]['lat'],
                             lsm_grid_feature_list[0]['lon'])
    dummy_row_end = [
        0,
        dummy_lon_index,
        dummy_lat_index,
        1,
        lsm_grid_feature_list[0]['lon'],
        lsm_grid_feature_list[0]['lat']
    ]
    with open_csv(out_weight_table, 'w') as csvfile:
        connectwriter = csv.writer(csvfile)
        connectwriter.writerow(['rivid', 'area_sqm', 'lon_index', 'lat_index',
                                'npoints', 'lsm_grid_lon', 'lsm_grid_lat'])
        geographic_proj = Proj(init='EPSG:4326')
        osr_geographic_proj = osr.SpatialReference()
        osr_geographic_proj.ImportFromEPSG(4326)
        proj_transform = None
        if original_catchment_proj != geographic_proj:
            proj_transform = \
                osr.CoordinateTransformation(ogr_catchment_shapefile_lyr_proj,
                                             osr_geographic_proj)
        for rapid_connect_rivid in rapid_connect_rivid_list:
            intersect_grid_info_list = []
            try:
                catchment_pos = \
                    np.where(catchment_rivid_list == rapid_connect_rivid)[0][0]
            except IndexError:
                # if it is not in the catchment, add dummy row in its place
                connectwriter.writerow([rapid_connect_rivid] + dummy_row_end)
                continue
            get_catchment_feature = \
                ogr_catchment_shapefile_lyr.GetFeature(catchment_pos)
            feat_geom = get_catchment_feature.GetGeometryRef()
            # make sure coordinates are geographic
            if proj_transform:
                feat_geom.Transform(proj_transform)
            catchment_polygon = shapely_loads(feat_geom.ExportToWkb())
            for sub_lsm_grid_pos in \
                    rtree_idx.intersection(catchment_polygon.bounds):
                lsm_grid_polygon = \
                    lsm_grid_feature_list[sub_lsm_grid_pos]['polygon']
                if catchment_polygon.intersects(lsm_grid_polygon):
                    try:
                        intersect_poly = \
                            catchment_polygon.intersection(lsm_grid_polygon)
                    except TopologicalError:
                        log('The catchment polygon with id {0} was '
                            'invalid. Attempting to self clean...'
                            .format(rapid_connect_rivid))
                        original_area = catchment_polygon.area
                        catchment_polygon = catchment_polygon.buffer(0)
                        area_ratio = original_area/catchment_polygon.area
                        log('AREA_RATIO: {0}'.format(area_ratio))
                        msg_level = "INFO"
                        if round(area_ratio, 5) != 1:
                            msg_level = "WARNING"
                        # BUGFIX: the message previously used '{1}' with a
                        # single positional argument, so this log call raised
                        # IndexError exactly when a warning should be logged.
                        log('The cleaned catchment polygon area '
                            'differs from the original area by {0}%.'
                            .format(abs(area_ratio - 1)), severity=msg_level)
                        intersect_poly = \
                            catchment_polygon.intersection(lsm_grid_polygon)
                    if not area_id:
                        # attempt to calculate AREA
                        poly_area = get_poly_area_geo(intersect_poly)
                    else:
                        poly_area = \
                            float(get_catchment_feature.GetField(area_id)) * \
                            intersect_poly.area/catchment_polygon.area
                    index_lsm_grid_lat, index_lsm_grid_lon = \
                        _get_lat_lon_indices(
                            lsm_grid_lat,
                            lsm_grid_lon,
                            lsm_grid_feature_list[sub_lsm_grid_pos]['lat'],
                            lsm_grid_feature_list[sub_lsm_grid_pos]['lon'])
                    intersect_grid_info_list.append({
                        'rivid': rapid_connect_rivid,
                        'area': poly_area,
                        'lsm_grid_lat':
                            lsm_grid_feature_list[sub_lsm_grid_pos]['lat'],
                        'lsm_grid_lon':
                            lsm_grid_feature_list[sub_lsm_grid_pos]['lon'],
                        'index_lsm_grid_lon': index_lsm_grid_lon,
                        'index_lsm_grid_lat': index_lsm_grid_lat
                    })
            npoints = len(intersect_grid_info_list)
            # If no intersection found, add dummy row
            if npoints <= 0:
                connectwriter.writerow([rapid_connect_rivid] + dummy_row_end)
            for intersect_grid_info in intersect_grid_info_list:
                connectwriter.writerow([
                    intersect_grid_info['rivid'],
                    intersect_grid_info['area'],
                    intersect_grid_info['index_lsm_grid_lon'],
                    intersect_grid_info['index_lsm_grid_lat'],
                    npoints,
                    intersect_grid_info['lsm_grid_lon'],
                    intersect_grid_info['lsm_grid_lat']
                ])
    time_end_all = datetime.utcnow()
    log(time_end_all - time_end_lsm_grid_rtree)
    log("TOTAL TIME: {0}".format(time_end_all - time_start_all))
def CreateWeightTableECMWF(in_ecmwf_nc,
                           in_catchment_shapefile,
                           river_id,
                           in_connectivity_file,
                           out_weight_table,
                           area_id=None,
                           file_geodatabase=None):
    """
    Create Weight Table for ECMWF Grids

    .. note:: The grids are in the RAPIDpy package under
              the gis/lsm_grids folder.

    Parameters
    ----------
    in_ecmwf_nc: str
        Path to the ECMWF NetCDF grid.
    in_catchment_shapefile: str
        Path to the Catchment shapefile.
    river_id: str
        The name of the field with the river ID (Ex. 'DrainLnID' or 'LINKNO').
    in_connectivity_file: str
        The path to the RAPID connectivity file.
    out_weight_table: str
        The path to the output weight table file.
    area_id: str, optional
        The name of the field with the area of each catchment stored in meters
        squared. Default is to calculate the area.
    file_geodatabase: str, optional
        Path to the file geodatabase. If you use this option, in_drainage_line
        is the name of the stream network feature class.
        (WARNING: Not always stable with GDAL.)

    Example:

    .. code:: python

        from RAPIDpy.gis.weight import CreateWeightTableECMWF

        CreateWeightTableECMWF(
            in_ecmwf_nc='/path/to/runoff_ecmwf_grid.nc',
            in_catchment_shapefile='/path/to/catchment.shp',
            river_id='LINKNO',
            in_connectivity_file='/path/to/rapid_connect.csv',
            out_weight_table='/path/to/ecmwf_weight.csv',
        )
    """
    # extract ECMWF GRID
    data_ecmwf_nc = Dataset(in_ecmwf_nc)
    variables_list = data_ecmwf_nc.variables.keys()
    # accept either short or long coordinate variable names
    in_ecmwf_lat_var = 'lat'
    if 'latitude' in variables_list:
        in_ecmwf_lat_var = 'latitude'
    in_ecmwf_lon_var = 'lon'
    if 'longitude' in variables_list:
        in_ecmwf_lon_var = 'longitude'
    # convert [0, 360] to [-180, 180]
    ecmwf_lon = \
        (data_ecmwf_nc.variables[in_ecmwf_lon_var][:] + 180) % 360 - 180
    # assume [-90, 90]
    ecmwf_lat = data_ecmwf_nc.variables[in_ecmwf_lat_var][:]
    data_ecmwf_nc.close()
    rtree_create_weight_table(ecmwf_lat, ecmwf_lon,
                              in_catchment_shapefile, river_id,
                              in_connectivity_file, out_weight_table,
                              file_geodatabase, area_id)
def CreateWeightTableLDAS(in_ldas_nc,
                          in_nc_lon_var,
                          in_nc_lat_var,
                          in_catchment_shapefile,
                          river_id,
                          in_connectivity_file,
                          out_weight_table,
                          area_id=None,
                          file_geodatabase=None):
    """
    Create Weight Table for NLDAS, GLDAS grids as well as for 2D Joules,
    or LIS Grids

    Parameters
    ----------
    in_ldas_nc: str
        Path to the land surface model NetCDF grid.
    in_nc_lon_var: str
        The variable name in the NetCDF file for the longitude.
    in_nc_lat_var: str
        The variable name in the NetCDF file for the latitude.
    in_catchment_shapefile: str
        Path to the Catchment shapefile.
    river_id: str
        The name of the field with the river ID (Ex. 'DrainLnID' or 'LINKNO').
    in_connectivity_file: str
        The path to the RAPID connectivity file.
    out_weight_table: str
        The path to the output weight table file.
    area_id: str, optional
        The name of the field with the area of each catchment stored in meters
        squared. Default is to calculate the area.
    file_geodatabase: str, optional
        Path to the file geodatabase. If you use this option, in_drainage_line
        is the name of the stream network feature class.
        (WARNING: Not always stable with GDAL.)

    Example:

    .. code:: python

        from RAPIDpy.gis.weight import CreateWeightTableLDAS

        CreateWeightTableLDAS(
            in_ldas_nc='/path/to/runoff_grid.nc',
            in_nc_lon_var="lon_110",
            in_nc_lat_var="lat_110",
            in_catchment_shapefile='/path/to/catchment.shp',
            river_id='LINKNO',
            in_connectivity_file='/path/to/rapid_connect.csv',
            out_weight_table='/path/to/ldas_weight.csv',
        )
    """
    # extract the coordinate arrays from the LDAS grid, validating the
    # requested variable names first
    data_ldas_nc = Dataset(in_ldas_nc)
    variables_list = data_ldas_nc.variables.keys()
    if in_nc_lon_var not in variables_list:
        raise Exception("Invalid longitude variable. Choose from: {0}"
                        .format(variables_list))
    if in_nc_lat_var not in variables_list:
        raise Exception("Invalid latitude variable. Choose from: {0}"
                        .format(variables_list))
    ldas_lon = data_ldas_nc.variables[in_nc_lon_var][:]  # assume [-180, 180]
    ldas_lat = data_ldas_nc.variables[in_nc_lat_var][:]  # assume [-90,90]
    data_ldas_nc.close()
    rtree_create_weight_table(ldas_lat, ldas_lon,
                              in_catchment_shapefile, river_id,
                              in_connectivity_file, out_weight_table,
                              file_geodatabase, area_id)
|
erdc-cm/RAPIDpy
|
RAPIDpy/gis/weight.py
|
Python
|
bsd-3-clause
| 16,840
|
[
"NetCDF"
] |
c1ece6667c019943d3abac4edf98fea5463e415dce05f26ddd1916e6006935f7
|
# Copyright 2004 by James Casbon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Code to deal with COMPASS output, a program for profile/profile comparison.
Compass is described in:
Sadreyev R, Grishin N. COMPASS: a tool for comparison of multiple protein
alignments with assessment of statistical significance. J Mol Biol. 2003 Feb
7;326(1):317-36.
Tested with COMPASS 1.24.
Functions:
read Reads a COMPASS file containing one COMPASS record
parse Iterates over records in a COMPASS file.
Classes:
Record One result of a COMPASS file
"""
import re
def read(handle):
    """Read a COMPASS file containing exactly one COMPASS record.

    Raises ValueError if the handle is empty or truncated mid-record.
    NOTE(review): relies on the Python 2 ``handle.next()`` iterator method.
    """
    record = None
    try:
        # Five fixed header lines: names, threshold, lengths, widths, scores.
        line = handle.next()
        record = Record()
        __read_names(record, line)
        line = handle.next()
        __read_threshold(record, line)
        line = handle.next()
        __read_lengths(record, line)
        line = handle.next()
        __read_profilewidth(record, line)
        line = handle.next()
        __read_scores(record, line)
    except StopIteration:
        if not record:
            raise ValueError("No record found in handle")
        else:
            raise ValueError("Unexpected end of stream.")
    # The remaining lines are the alignment, in interleaved triplets:
    # query line, "positives" consensus line, hit line.
    for line in handle:
        if not line.strip(): # skip empty lines
            continue
        __read_query_alignment(record, line)
        try:
            line = handle.next()
            __read_positive_alignment(record, line)
            line = handle.next()
            __read_hit_alignment(record, line)
        except StopIteration:
            raise ValueError("Unexpected end of stream.")
    return record
def parse(handle):
    """Iterate over the records in a COMPASS file, yielding Record objects.

    NOTE(review): relies on the Python 2 ``handle.next()`` iterator method.
    """
    record = None
    try:
        line = handle.next()
    except StopIteration:
        # Empty handle: nothing to yield.
        return
    while True:
        try:
            # Five fixed header lines per record (see read()).
            record = Record()
            __read_names(record, line)
            line = handle.next()
            __read_threshold(record, line)
            line = handle.next()
            __read_lengths(record, line)
            line = handle.next()
            __read_profilewidth(record, line)
            line = handle.next()
            __read_scores(record, line)
        except StopIteration:
            raise ValueError("Unexpected end of stream.")
        for line in handle:
            if not line.strip():
                continue
            if "Ali1:" in line:
                # Header of the next record: yield the current one and
                # restart the while-loop with this header line.
                yield record
                break
            __read_query_alignment(record, line)
            try:
                line = handle.next()
                __read_positive_alignment(record, line)
                line = handle.next()
                __read_hit_alignment(record, line)
            except StopIteration:
                raise ValueError("Unexpected end of stream.")
        else:
            # End of file: yield the final record and stop iterating.
            yield record
            break
class Record(object):
    """
    Hold information from one compass hit.
    Ali1 one is the query, Ali2 the hit.
    """
    def __init__(self):
        # Profile names
        self.query = ''
        self.hit = ''
        # Effective gap content threshold
        self.gap_threshold = 0
        # Query profile statistics
        self.query_length = 0
        self.query_filtered_length = 0
        self.query_nseqs = 0
        self.query_neffseqs = 0
        # Hit profile statistics
        self.hit_length = 0
        self.hit_filtered_length = 0
        self.hit_nseqs = 0
        self.hit_neffseqs = 0
        # Scores (-1 evalue / -1 starts mean "not parsed")
        self.sw_score = 0
        self.evalue = -1
        # Alignment strings and start positions
        self.query_start = -1
        self.hit_start = -1
        self.query_aln = ''
        self.hit_aln = ''
        self.positives = ''

    def query_coverage(self):
        """Return the length of the query covered in alignment"""
        return len(self.query_aln.replace("=", ""))

    def hit_coverage(self):
        """Return the length of the hit covered in the alignment"""
        return len(self.hit_aln.replace("=", ""))
# Everything below is private

# Pre-compiled regular expressions used by the __read_* helpers below.
# "align" grabs the residues after the fixed-width 15-character margin,
# while "positive_alignment" keeps whitespace so columns stay aligned.
__regex = {"names": re.compile("Ali1:\s+(\S+)\s+Ali2:\s+(\S+)\s+"),
           "threshold": re.compile("Threshold of effective gap content in columns: (\S+)"),
           "lengths": re.compile("length1=(\S+)\s+filtered_length1=(\S+)\s+length2=(\S+)\s+filtered_length2=(\S+)"),
           "profilewidth": re.compile("Nseqs1=(\S+)\s+Neff1=(\S+)\s+Nseqs2=(\S+)\s+Neff2=(\S+)"),
           "scores": re.compile("Smith-Waterman score = (\S+)\s+Evalue = (\S+)"),
           "start": re.compile("(\d+)"),
           "align": re.compile("^.{15}(\S+)"),
           "positive_alignment": re.compile("^.{15}(.+)"),
           }
def __read_names(record, line):
    """Parse the profile names header line (PRIVATE).

    Example line::

        Ali1: 60456.blo.gz.aln  Ali2: allscop//14984.blo.gz.aln

    where Ali1 is the query profile and Ali2 the hit profile.
    """
    if "Ali1:" not in line:
        raise ValueError("Line does not contain 'Ali1:':\n%s" % line)
    match = __regex["names"].search(line)
    record.query = match.group(1)
    record.hit = match.group(2)
def __read_threshold(record, line):
    """Parse the effective gap content threshold line (PRIVATE)."""
    if not line.startswith("Threshold"):
        raise ValueError("Line does not start with 'Threshold':\n%s" % line)
    match = __regex["threshold"].search(line)
    record.gap_threshold = float(match.group(1))
def __read_lengths(record, line):
    """Parse the raw/filtered profile lengths line (PRIVATE)."""
    if not line.startswith("length1="):
        raise ValueError("Line does not start with 'length1=':\n%s" % line)
    match = __regex["lengths"].search(line)
    record.query_length = int(match.group(1))
    record.query_filtered_length = float(match.group(2))
    record.hit_length = int(match.group(3))
    record.hit_filtered_length = float(match.group(4))
def __read_profilewidth(record, line):
    """Parse the sequence count / effective count line (PRIVATE)."""
    if "Nseqs1" not in line:
        raise ValueError("Line does not contain 'Nseqs1':\n%s" % line)
    match = __regex["profilewidth"].search(line)
    record.query_nseqs = int(match.group(1))
    record.query_neffseqs = float(match.group(2))
    record.hit_nseqs = int(match.group(3))
    record.hit_neffseqs = float(match.group(4))
def __read_scores(record, line):
    """Parse the Smith-Waterman score and Evalue line (PRIVATE).

    When the scores are absent from the line, the record keeps the
    "unset" defaults (score 0, evalue -1.0).
    """
    if not line.startswith("Smith-Waterman"):
        raise ValueError("Line does not start with 'Smith-Waterman':\n%s" % line)
    match = __regex["scores"].search(line)
    if match is None:
        record.sw_score = 0
        record.evalue = -1.0
    else:
        record.sw_score = int(match.group(1))
        record.evalue = float(match.group(2))
def __read_query_alignment(record, line):
    """Append one query alignment line, noting its start position (PRIVATE)."""
    start_match = __regex["start"].search(line)
    if start_match:
        record.query_start = int(start_match.group(1))
    align_match = __regex["align"].match(line)
    assert align_match is not None, "invalid match"
    record.query_aln += align_match.group(1)
def __read_positive_alignment(record, line):
    """Append one "positives" consensus line, whitespace included (PRIVATE)."""
    match = __regex["positive_alignment"].match(line)
    assert match is not None, "invalid match"
    record.positives += match.group(1)
def __read_hit_alignment(record, line):
    """Append one hit alignment line, noting its start position (PRIVATE)."""
    start_match = __regex["start"].search(line)
    if start_match:
        record.hit_start = int(start_match.group(1))
    align_match = __regex["align"].match(line)
    assert align_match is not None, "invalid match"
    record.hit_aln += align_match.group(1)
|
bryback/quickseq
|
genescript/Bio/Compass/__init__.py
|
Python
|
mit
| 6,865
|
[
"Biopython"
] |
0af25a39727bab8e50d425ec5539f4c1bcc6ae59387747548e4b85752a25d5f7
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import zoo.orca.automl.hp as hp
# Model names accepted by AutoModelFactory (matched case-insensitively).
AUTO_MODEL_SUPPORT_LIST = ["lstm", "tcn", "seq2seq"]

# Default hyper-parameter search spaces, keyed first by model name and then
# by computing resource level ("minimal" / "normal" / "large"). Larger
# levels widen the grid/random/log-uniform search ranges.
AUTO_MODEL_DEFAULT_SEARCH_SPACE = {
    "lstm": {"minimal": {"hidden_dim": hp.grid_search([16, 32]),
                         "layer_num": hp.randint(1, 2),
                         "lr": hp.loguniform(0.001, 0.005),
                         "dropout": hp.uniform(0.1, 0.2)},
             "normal": {"hidden_dim": hp.grid_search([16, 32, 64]),
                        "layer_num": hp.grid_search([1, 2]),
                        "lr": hp.loguniform(0.0005, 0.01),
                        "dropout": hp.uniform(0, 0.2)},
             "large": {"hidden_dim": hp.grid_search([16, 32, 64, 128]),
                       "layer_num": hp.grid_search([1, 2, 3, 4]),
                       "lr": hp.loguniform(0.0005, 0.01),
                       "dropout": hp.uniform(0, 0.3)}},
    "tcn": {"minimal": {"hidden_units": hp.grid_search([16, 32]),
                        "levels": hp.randint(4, 6),
                        "kernel_size": 3,
                        "lr": hp.loguniform(0.001, 0.005),
                        "dropout": hp.uniform(0.1, 0.2)},
            "normal": {"hidden_units": hp.grid_search([16, 32, 48]),
                       "levels": hp.grid_search([6, 8]),
                       "kernel_size": hp.grid_search([3, 5]),
                       "lr": hp.loguniform(0.001, 0.01),
                       "dropout": hp.uniform(0, 0.2)},
            "large": {"hidden_units": hp.grid_search([16, 32, 48, 64]),
                      "levels": hp.grid_search([4, 5, 6, 7, 8]),
                      "kernel_size": hp.grid_search([3, 5, 7]),
                      "lr": hp.loguniform(0.0005, 0.015),
                      "dropout": hp.uniform(0, 0.25)}},
    "seq2seq": {"minimal": {"lr": hp.loguniform(0.001, 0.005),
                            "lstm_hidden_dim": hp.grid_search([16, 32]),
                            "lstm_layer_num": hp.randint(1, 2),
                            "dropout": hp.uniform(0, 0.3),
                            "teacher_forcing": False},
                "normal": {"lr": hp.loguniform(0.001, 0.005),
                           "lstm_hidden_dim": hp.grid_search([16, 32, 64]),
                           "lstm_layer_num": hp.grid_search([1, 2]),
                           "dropout": hp.uniform(0, 0.3),
                           "teacher_forcing": hp.grid_search([True, False])},
                "large": {"lr": hp.loguniform(0.0005, 0.005),
                          "lstm_hidden_dim": hp.grid_search([16, 32, 64, 128]),
                          "lstm_layer_num": hp.grid_search([1, 2, 4]),
                          "dropout": hp.uniform(0, 0.3),
                          "teacher_forcing": hp.grid_search([True, False])}}
}
class AutoModelFactory:
    """Factory for the chronos auto models (lstm, tcn, seq2seq)."""

    @staticmethod
    def create_auto_model(name, search_space):
        """Instantiate the auto model matching *name* (case-insensitive).

        :param name: model name; one of AUTO_MODEL_SUPPORT_LIST.
        :param search_space: dict of constructor kwargs / search spaces.
        :return: an AutoLSTM / AutoTCN / AutoSeq2Seq instance.
        :raises NotImplementedError: if the model name is not supported.
        """
        name = name.lower()
        if name == "lstm":
            from .auto_lstm import AutoLSTM
            revised_search_space = search_space.copy()
            # lstm only supports single-step forecasting.
            assert revised_search_space["future_seq_len"] == 1, \
                "future_seq_len should be set to 1 if you choose lstm model."
            del revised_search_space["future_seq_len"]  # future_seq_len should always be 1
            return AutoLSTM(**revised_search_space)
        if name == "tcn":
            from .auto_tcn import AutoTCN
            return AutoTCN(**search_space)
        if name == "seq2seq":
            from .auto_seq2seq import AutoSeq2Seq
            return AutoSeq2Seq(**search_space)
        # BUGFIX: the error was previously *returned* instead of raised, so
        # callers silently received an exception object for unknown names.
        raise NotImplementedError(f"{AUTO_MODEL_SUPPORT_LIST} are supported "
                                  f"for auto model, but get {name}.")

    @staticmethod
    def get_default_search_space(model, computing_resource="normal"):
        '''
        This function should be called internally to get a default search_space experimentally.

        :param model: model name, only tcn, lstm and seq2seq are supported
        :param computing_resource: one of "minimal", "normal", "large"
        :raises NotImplementedError: if the model name is not supported.
        '''
        model = model.lower()
        if model in AUTO_MODEL_SUPPORT_LIST:
            return AUTO_MODEL_DEFAULT_SEARCH_SPACE[model][computing_resource]
        # BUGFIX: raise (not return) for unsupported model names.
        raise NotImplementedError(f"{AUTO_MODEL_SUPPORT_LIST} are supported "
                                  f"for auto model, but get {model}.")
|
intel-analytics/analytics-zoo
|
pyzoo/zoo/chronos/autots/model/__init__.py
|
Python
|
apache-2.0
| 4,918
|
[
"ORCA"
] |
61fd979659cc231a436976a228d7d6a2cfce7b0f2bb758623c1b4f41fc85bddb
|
# This script is part of pymaid (http://www.github.com/schlegelp/pymaid).
# Copyright (C) 2017 Philipp Schlegel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
""" This module contains functions to analyse and manipulate neuron morphology.
"""
import itertools
import navis
import pandas as pd
import numpy as np
import networkx as nx
from navis import graph_utils, graph
from . import fetch, core, utils, config
# Set up logging
logger = config.logger

# Public API of this module (sorted for stable, reproducible docs).
__all__ = sorted(['arbor_confidence', 'remove_tagged_branches', 'time_machine',
                  'union_neurons', 'prune_by_length'])
def arbor_confidence(x, confidences=(1, 0.9, 0.6, 0.4, 0.2), inplace=True):
    """Calculate along-the-arbor confidence for each treenode.

    Calculates confidence for each treenode by walking from root to leafs
    starting with a confidence of 1. Each time a low confidence edge is
    encountered the downstream confidence is reduced (see ``confidences``).

    Parameters
    ----------
    x :             CatmaidNeuron | CatmaidNeuronList
                    Neuron(s) to calculate confidence for.
    confidences :   list of five floats, optional
                    Values by which the confidence of the downstream
                    branche is reduced upon encounter of a 5/4/3/2/1-
                    confidence edges.
    inplace :       bool, optional
                    If False, a copy of the neuron is returned.

    Returns
    -------
    Adds ``arbor_confidence`` column in ``neuron.nodes``.
    """
    def walk_to_leafs(this_node, this_confidence=1):
        # Walk downstream, multiplying in the confidence factor of each
        # traversed edge; recurse at branch points, stop at leafs.
        pbar.update(1)
        while True:
            this_confidence *= confidences[5 - nodes.loc[this_node].confidence]
            nodes.loc[this_node, 'arbor_confidence'] = this_confidence

            if len(loc[this_node]) > 1:
                # Branch point: recurse into every child.
                for c in loc[this_node]:
                    walk_to_leafs(c, this_confidence)
                break
            elif len(loc[this_node]) == 0:
                # Leaf: this segment is done.
                break
            this_node = loc[this_node][0]

    if not isinstance(x, (core.CatmaidNeuron, core.CatmaidNeuronList)):
        raise TypeError('Unable to process data of type %s' % str(type(x)))

    if isinstance(x, core.CatmaidNeuronList):
        # Bug fix: the inplace/not-inplace branches were swapped. When NOT
        # inplace we must collect and return the processed copies; when
        # inplace we process each neuron and return nothing - previously the
        # not-inplace path discarded the results and fell through to the
        # single-neuron code with a NeuronList.
        if not inplace:
            return core.CatmaidNeuronList([arbor_confidence(n,
                                                            confidences=confidences,
                                                            inplace=False) for n in x])
        for n in x:
            arbor_confidence(n, confidences=confidences, inplace=True)
        return

    if not inplace:
        x = x.copy()

    loc = graph_utils.generate_list_of_childs(x)

    x.nodes['arbor_confidence'] = [None] * x.nodes.shape[0]

    nodes = x.nodes.set_index('node_id')
    # Roots always start with full confidence.
    nodes.loc[x.root, 'arbor_confidence'] = 1

    with config.tqdm(total=len(x.segments), desc='Calc confidence', disable=config.pbar_hide, leave=config.pbar_leave) as pbar:
        for r in x.root:
            for c in loc[r]:
                walk_to_leafs(c)

    x.nodes['arbor_confidence'] = nodes['arbor_confidence'].values

    if not inplace:
        return x
def union_neurons(*x, limit=1, base_neuron=None, track=False, non_overlap='raise'):
    """Generate the union of a set of neurons.

    This implementation works by iteratively merging nodes in neuron A and B
    that are closer than given threshold. This requires neurons to have a
    certain amount of overlap.

    Parameters
    ----------
    x :             CatmaidNeuron/List
                    Neurons to be merged.
    limit :         int, optional
                    Max distance [microns] for nearest neighbour search.
    base_neuron :   skeleton_ID | CatmaidNeuron, optional
                    Neuron to use as template for union. Node IDs of this
                    neuron will survive. If not provided, the first neuron
                    in the list is used as template!
    track :         bool, optional
                    If True, will add new columns to node/connector table of
                    union neuron to keep track of original node IDs and origin:
                    `node_id_before`, `parent_id_before`, `origin_skeleton`
    non_overlap :   "raise" | "stitch" | "skip", optional
                    Determines how to deal with non-overlapping fragments. If
                    "raise" will raise an exception. If "stitch" will try
                    stitching the fragments using a minimum spanning tree
                    (see :func:`pymaid.stitch_neurons`).

    Returns
    -------
    core.CatmaidNeuron
                    Union of all input neurons.

    See Also
    --------
    :func:`~pymaid.stitch_neurons`
                    If you want to stitch neurons that do not overlap.

    Examples
    --------
    >>> # Get a single neuron
    >>> n = pymaid.get_neuron(16)
    >>> # Prune to its longest neurite
    >>> backbone = n.prune_by_longest_neurite(inplace=False)
    >>> # Remove longest neurite and keep only fine branches
    >>> branches = n.prune_by_longest_neurite(n=slice(1, None), inplace=False)
    >>> # For this exercise we have to make sure skeleton IDs are unique
    >>> branches.skeleton_id = 17
    >>> # Now put both back together using union
    >>> union = pymaid.union_neurons(backbone, branches, limit=2)

    """
    allowed = ['raise', 'stitch', 'skip']
    if non_overlap.lower() not in allowed:
        # Bug fix: the original template had only one "{}" placeholder, so
        # the list of allowed values was silently dropped from the message.
        msg = 'Unexpected value for non_overlap "{}". Please use either: {}'
        msg = msg.format(non_overlap,
                         ', '.join('"{}"'.format(a) for a in allowed))
        raise ValueError(msg)

    # Unpack neurons in *args
    x = utils._unpack_neurons(x)

    # Make sure we're working on copies and don't change originals
    x = [n.copy() for n in x]

    # This is just check on the off-chance that skeleton IDs are not unique
    # (e.g. if neurons come from different projects) -> this is relevant because
    # we identify the master ("base_neuron") via it's skeleton ID
    skids = [n.skeleton_id for n in x]
    if len(skids) > len(np.unique(skids)):
        raise ValueError('Duplicate skeleton IDs found. Try manually assigning '
                         'unique skeleton IDs.')

    if any([not isinstance(n, core.CatmaidNeuron) for n in x]):
        raise TypeError('Input must only be CatmaidNeurons/List')

    if len(x) < 2:
        raise ValueError('Need at least 2 neurons to make a union!')

    # Make sure base_neuron is a skeleton ID
    if isinstance(base_neuron, core.CatmaidNeuron):
        base_skid = base_neuron.skeleton_id
    elif not isinstance(base_neuron, type(None)):
        if base_neuron not in skids:
            raise ValueError('Base neuron skeleton ID "{}" not in NeuronList'.format(base_neuron))
        base_skid = base_neuron
    else:
        base_skid = x[0].skeleton_id

    # Convert distance threshold from microns to nanometres
    limit *= 1000

    # Keep track of old IDs
    if track:
        for n in x:
            # Original skeleton of each node
            n.nodes['origin_skeletons'] = n.skeleton_id
            # Original skeleton of each connector
            n.connectors['origin_skeletons'] = n.skeleton_id
            # Old parent if this node gets rewired
            n.nodes['old_parent'] = n.nodes.parent_id.values

    # Now make unions
    all_clps_nodes = {}
    while len(x) > 1:
        # First we need to find a pair of overlapping neurons
        comb = itertools.combinations(x, 2)
        ol = False
        for c in comb:
            # If combination contains base_skid, make sure it's the master
            if c[0].skeleton_id == base_skid:
                master, minion = c[0], c[1]
            else:
                master, minion = c[1], c[0]

            # Generate KDTree for master neuron
            tree = graph.neuron2KDTree(master, tree_type='c', data='treenodes')

            # For each node in master get the nearest neighbor in minion
            coords = minion.nodes[['x', 'y', 'z']].values
            nn_dist, nn_ix = tree.query(coords, k=1, distance_upper_bound=limit)

            # Use this combination if overlap found
            if any(nn_dist <= limit):
                ol = True
                break

        # If no overlap between remaining fragments
        if not ol:
            miss = [n.skeleton_id for n in x if n.skeleton_id != base_skid]
            msg = "{} fragments do not overlap: {}.".format(len(x) - 1,
                                                            ", ".join(miss))
            # Raise ...
            if non_overlap.lower() == 'raise':
                raise ValueError(msg + " Try increasing the `limit` parameter")
            # ... or stitch up neurons using mst and break the loop...
            elif non_overlap.lower() == 'stitch':
                logger.warning(msg + " Stitching.")
                x = navis.stitch_neurons(x, method='LEAFS', master=base_skid)
                x = core.CatmaidNeuronList(x)
                break
            # ... or just skip remaining fragments
            else:
                logger.warning(msg + " Skipping.")
                x = [n for n in x if n.skeleton_id == base_skid]
                break

        # Now collapse minion nodes that are within distance limits into master
        to_clps = minion.nodes.loc[nn_dist <= limit, 'node_id'].values
        clps_into = master.nodes.loc[nn_ix[nn_dist <= limit], 'node_id'].values

        # Generate a map: minion node -> master node to collapse into
        clps_map = dict(zip(to_clps, clps_into))

        # Track the collapsed node into the master
        if track:
            for n1, n2 in zip(to_clps, clps_into):
                all_clps_nodes[n2] = all_clps_nodes.get(n2, []) + [n1]

        # Reroot minion to one of the nodes that will be collapsed
        graph_utils.reroot_neuron(minion, to_clps[0], inplace=True)

        # Collapse nodes by first dropping all collapsed nodes
        minion.nodes = minion.nodes.loc[~minion.nodes.node_id.isin(to_clps)]

        # Make independent of original table to prevent warnings
        minion.nodes = minion.nodes.copy()

        # Reconnect children of the collapsed nodes to their new parents
        to_rewire = minion.nodes.parent_id.isin(to_clps)

        # Track old parents before rewiring
        if track:
            minion.nodes['old_parent'] = None
            minion.nodes.loc[to_rewire, 'old_parent'] = minion.nodes.loc[to_rewire, 'parent_id']

        # Now rewire
        new_parents = minion.nodes.loc[to_rewire, 'parent_id'].map(clps_map)
        minion.nodes.loc[to_rewire, 'parent_id'] = new_parents

        # Merge minion's node table into master
        master.nodes = pd.concat([master.nodes, minion.nodes],
                                 axis=0,
                                 sort=True,
                                 ignore_index=True)

        # Now some clean up! First up: node tags
        # Make sure tags in minion are mapped onto their new IDs
        tags = {k: [clps_map.get(n, n) for n in v] for k, v in minion.tags.items()}

        # Combine master and minion tags
        master.tags = {k: v + tags.get(k, []) for k, v in master.tags.items()}
        master.tags.update({k: v for k, v in tags.items() if k not in master.tags})

        # Last but not least: merge connector tables
        new_tn = minion.connectors.loc[minion.connectors.node_id.isin(to_clps),
                                       'node_id'].map(clps_map)
        if track:
            minion.connectors['old_treenode'] = None
            minion.connectors.loc[minion.connectors.node_id.isin(to_clps),
                                  'old_treenode'] = minion.connectors.loc[minion.connectors.node_id.isin(to_clps),
                                                                          'node_id']
        minion.connectors.loc[minion.connectors.node_id.isin(to_clps),
                              'node_id'] = new_tn

        master.connectors = pd.concat([master.connectors, minion.connectors],
                                      axis=0,
                                      sort=True,
                                      ignore_index=True)

        # Reset master's attributes (graph, node types, etc)
        master._clear_temp_attr()

        # Almost done. Just need to pop minion from "x"
        x = [n for n in x if n.skeleton_id != minion.skeleton_id]

    union = x[0]

    # Keep track of old IDs
    if track:
        # List of nodes merged into this node
        union.nodes['treenodes_merged'] = union.nodes.node_id.map(all_clps_nodes)

    # Return the last survivor
    return union
def remove_tagged_branches(x, tag, how='segment', preserve_connectors=False,
                           inplace=False):
    """Removes branches with a given treenode tag (e.g. ``not a branch``).

    Parameters
    ----------
    x :                   CatmaidNeuron | CatmaidNeuronList
                          Neuron(s) to be processed.
    tag :                 str
                          Treenode tag to use.
    how :                 'segment' | 'distal' | 'proximal', optional
                          Method of removal:
                          1. ``segment`` removes entire segment
                          2. ``distal``/``proximal`` removes everything
                             distal/proximal to tagged node(s), including
                             that node.
    preserve_connectors : bool, optional
                          If True, connectors that got disconnected during
                          branch removal will be reattached to the closest
                          surviving downstream node.
    inplace :             bool, optional
                          If False, a copy of the neuron is returned.

    Returns
    -------
    CatmaidNeuron/List
                          Pruned neuron(s). Only if ``inplace=False``.

    Examples
    --------
    1. Remove not-a-branch terminal branches

    >>> x = pymaid.get_neuron(16)
    >>> x_prun = pymaid.remove_tagged_branches(x,
    ...                                        'not a branch',
    ...                                        how='segment',
    ...                                        preserve_connectors=True)

    2. Prune neuron to microtubule-containing backbone

    >>> x_prun = pymaid.remove_tagged_branches(x,
    ...                                        'microtubule ends',
    ...                                        how='distal',
    ...                                        preserve_connectors=False)

    """
    def _find_next_remaining_parent(tn):
        """Helper function that walks from a treenode to the neurons root and
        returns the first parent that will not be removed.
        """
        this_nodes = x.nodes.set_index('node_id')
        while True:
            this_parent = this_nodes.loc[tn, 'parent_id']
            if this_parent not in to_remove:
                return tn
            tn = this_parent

    if isinstance(x, core.CatmaidNeuronList):
        if not inplace:
            x = x.copy()

        for n in config.tqdm(x, desc='Removing', disable=config.pbar_hide,
                             leave=config.pbar_leave):
            remove_tagged_branches(n, tag,
                                   how=how,
                                   preserve_connectors=preserve_connectors,
                                   inplace=True)

        if not inplace:
            return x
        # Bug fix: must return here - previously the inplace NeuronList case
        # fell through and re-ran the single-neuron code on the list.
        return
    elif not isinstance(x, core.CatmaidNeuron):
        raise TypeError('Can only process CatmaidNeuron or CatmaidNeuronList, '
                        'not "{0}"'.format(type(x)))

    # Check if method is valid
    VALID_METHODS = ['segment', 'distal', 'proximal']
    if how not in VALID_METHODS:
        raise ValueError('Invalid value for "how": '
                         '{0}. Valid methods are: {1}'.format(how,
                                                              ', '.join(VALID_METHODS)))

    # Skip if tag not present
    if tag not in x.tags:
        logger.info(
            'No "{0}" tag found on neuron #{1}... skipped'.format(tag, x.skeleton_id))
        if not inplace:
            return x
        return

    if not inplace:
        x = x.copy()

    tagged_nodes = set(x.tags[tag])

    if how == 'segment':
        # Find segments that have a tagged node
        tagged_segs = [s for s in x.small_segments if set(s) & tagged_nodes]

        # Sanity check: are any of these segments non-terminals?
        non_term = [s for s in tagged_segs if x.graph.degree(s[0]) > 1]

        if non_term:
            logger.warning(
                'Pruning {0} non-terminal segment(s)'.format(len(non_term)))

        # Get nodes to be removed (excluding the last node -> branch )
        to_remove = set([t for s in tagged_segs for t in s[:-1]])

        # Rewire connectors before we subset
        if preserve_connectors:
            # Get connectors that will be disconnected
            lost_cn = x.connectors[x.connectors.node_id.isin(to_remove)]

            # Map to a remaining treenode
            # IMPORTANT: we do currently not account for the possibility that
            # we might be removing the root segment
            new_tn = [_find_next_remaining_parent(tn) for tn in lost_cn.node_id.values]
            x.connectors.loc[x.connectors.node_id.isin(to_remove), 'node_id'] = new_tn

        # Subset to remaining nodes - skip the last node in each segment
        graph_utils.subset_neuron(x,
                                  subset=x.nodes[~x.nodes.node_id.isin(
                                      to_remove)].node_id.values,
                                  keep_disc_cn=preserve_connectors,
                                  inplace=True)

        if not inplace:
            return x
        return

    elif how in ['distal', 'proximal']:
        # Keep pruning until no more treenodes with our tag are left
        while tag in x.tags:
            # Find nodes distal to this tagged node (includes the tagged node)
            dist_graph = nx.bfs_tree(x.graph, x.tags[tag][0], reverse=True)

            if how == 'distal':
                to_remove = list(dist_graph.nodes)
            elif how == 'proximal':
                # Invert dist_graph
                # Bug fix: ``.values`` yields a numpy array; ``+= [node]`` on
                # an array does element-wise addition (corrupting every ID)
                # instead of appending. Convert to a Python list first.
                to_remove = x.nodes[~x.nodes.node_id.isin(dist_graph.nodes)].node_id.values.tolist()
                # Make sure the tagged treenode is there too
                to_remove += [x.tags[tag][0]]

            to_remove = set(to_remove)

            # Rewire connectors before we subset
            if preserve_connectors:
                # Get connectors that will be disconnected
                lost_cn = x.connectors[x.connectors.node_id.isin(
                    to_remove)]

                # Map to a remaining treenode
                # IMPORTANT: we do currently not account for the possibility
                # that we might be removing the root segment
                new_tn = [_find_next_remaining_parent(tn) for tn in lost_cn.node_id.values]
                x.connectors.loc[x.connectors.node_id.isin(to_remove), 'node_id'] = new_tn

            # Subset to remaining nodes
            graph_utils.subset_neuron(x,
                                      subset=x.nodes[~x.nodes.node_id.isin(
                                          to_remove)].node_id.values,
                                      keep_disc_cn=preserve_connectors,
                                      inplace=True)

    if not inplace:
        return x
    return
def time_machine(x, target, inplace=False, remote_instance=None):
    """Reverses time and make neurons young again!

    Prunes a neuron back to it's state at a given date. Here is what we can
    reverse:

    1. Creation and deletion of nodes
    2. Creation and deletion of connectors (and links)
    3. Movement of nodes and connectors
    4. Cuts and merges
    5. Addition of node tags & annotations (even deleted ones)

    Unfortunately time travel has not yet been perfected. We are oblivious to:

    1. Removal of tags/annotations: i.e. we know when e.g. a tag was added
       and that it was subsequently removed at some point but not when.

    Parameters
    ----------
    x :                 skeleton ID(s) | CatmaidNeuron | CatmaidNeuronList
                        Neuron(s) to rejuvenate.
    target :            str | datetime-like | pandas.Timestamp
                        Date or date + time to time-travel to. Must be
                        parsable by ``pandas.TimeStamp``. Format for string
                        is YEAR-MONTH-DAY.
    inplace :           bool, optional
                        If True, will perform time travel on and return a copy
                        of original.
    remote_instance :   CatmaidInstance, optional

    Returns
    -------
    CatmaidNeuron/List
                        A younger version of the neuron(s).

    Examples
    --------
    >>> n = pymaid.get_neuron(16)
    >>> previous_n = pymaid.time_machine(n, '2016-12-1')

    """
    remote_instance = utils._eval_remote_instance(remote_instance)

    if isinstance(x, core.CatmaidNeuronList):
        return core.CatmaidNeuronList([time_machine(n, target,
                                                    inplace=inplace,
                                                    remote_instance=remote_instance)
                                       for n in config.tqdm(x,
                                                            'Traveling time',
                                                            disable=config.pbar_hide,
                                                            leave=config.pbar_leave)])

    if not isinstance(x, core.CatmaidNeuron):
        x = fetch.get_neuron(x, remote_instance=remote_instance)

    if not inplace:
        x = x.copy()

    if not isinstance(target, pd.Timestamp):
        target = pd.Timestamp(target)

    # Need to localize all timestamps
    target = target.tz_localize('UTC')

    if target > pd.Timestamp.now().tz_localize('UTC'):
        raise ValueError("This is not Back to the Future II: for forward time "
                         "travel, you'll have to trace yourself.")

    # First get the entire history of the neuron
    url = remote_instance._get_compact_details_url(x.skeleton_id,
                                                   with_history=True,
                                                   with_merge_history=True,
                                                   with_connectors=True,
                                                   with_tags=True,
                                                   with_annotations=True)
    data = remote_instance.fetch(url)

    # Turn stuff into DataFrames for easier sifting/sorting
    nodes = pd.DataFrame(data[0], columns=['node_id', 'parent_id',
                                           'user_id', 'x', 'y', 'z', 'radius',
                                           'confidence', 'creation_timestamp',
                                           'modified_timestamp', 'ordering_by'])
    nodes.loc[:, 'parent_id'] = nodes.parent_id.values.astype(object)
    nodes.loc[~nodes.parent_id.isnull(), 'parent_id'] = nodes.loc[~nodes.parent_id.isnull(), 'parent_id'].map(int)
    nodes.loc[nodes.parent_id.isnull(), 'parent_id'] = None

    connectors = pd.DataFrame(data[1], columns=['node_id', 'connector_id',
                                                'relation', 'x', 'y', 'z',
                                                'creation_timestamp',
                                                'modified_timestamp'])

    # This is a dictionary with {'tag': [[node_id, date_tagged], ...]}
    tags = data[2]

    annotations = pd.DataFrame(data[4], columns=['annotation_id',
                                                 'annotated_timestamp'])
    an_list = fetch.get_annotation_list(remote_instance=remote_instance)
    annotations['annotation'] = annotations.annotation_id.map(an_list.set_index('id').name.to_dict())

    # Convert stuff to timestamps
    for ts in ['creation_timestamp', 'modified_timestamp']:
        nodes[ts] = nodes[ts].map(pd.Timestamp)
        connectors[ts] = connectors[ts].map(pd.Timestamp)
    annotations['annotated_timestamp'] = annotations['annotated_timestamp'].map(pd.Timestamp)
    tags = {t: [[e[0], pd.Timestamp(e[1])] for e in tags[t]] for t in tags}

    # General rules:
    # 1. creation_timestamp and modified timestamp represent a validity
    #    intervals.
    # 2. Nodes where creation_timestamp is older than modified_timestamp,
    #    represent the existing, most up-to-date versions.
    # 3. Nodes with creation_timestamp younger than modified_timestamp, and
    #    with NO future version of themselves, got cut off/deleted at
    #    modification time.
    # 4. Useful little detail: nodes/connectors are ordered by new -> old

    # First change the modified_timestamp for nodes that still exist
    # (see rule 2) to right now
    nodes.loc[nodes.creation_timestamp > nodes.modified_timestamp, 'modified_timestamp'] = pd.Timestamp.now().tz_localize('UTC')
    connectors.loc[connectors.creation_timestamp > connectors.modified_timestamp, 'modified_timestamp'] = pd.Timestamp.now().tz_localize('UTC')

    # Remove nodes without a window (these seems to be temporary states)
    nodes = nodes[nodes.creation_timestamp != nodes.modified_timestamp]
    connectors = connectors[connectors.creation_timestamp != connectors.modified_timestamp]

    # Second subset to versions of the nodes that existed at given time
    before_nodes = nodes[(nodes.creation_timestamp <= target) & (nodes.modified_timestamp >= target)]
    before_connectors = connectors[(connectors.creation_timestamp <= target) & (connectors.modified_timestamp >= target)]

    # Now fix tags and annotations
    before_annotations = annotations[annotations.annotated_timestamp <= target]
    before_tags = {t: [e[0] for e in tags[t] if e[1] <= target] for t in tags}
    before_tags = {t: before_tags[t] for t in before_tags if before_tags[t]}

    x.nodes = before_nodes.copy()
    x.connectors = before_connectors.copy()
    x.annotations = before_annotations.copy()
    x.tags = before_tags

    # We might end up with multiple disconnected pieces - I don't yet know why
    x.nodes.loc[~x.nodes.parent_id.isin(x.nodes.node_id), 'parent_id'] = None

    # If there is more than one root, we have to remove the disconnected
    # pieces and keep only the "oldest branch".
    # The theory for doing this is: if a node shows up as "root" and the very
    # next step is that it is a child to another node, we should consider
    # it a not-yet connected branch that needs to be removed.
    roots = x.nodes[x.nodes.parent_id.isnull()].node_id.tolist()
    if len(roots) > 1:
        after_nodes = nodes[nodes.modified_timestamp > target]
        # Bug fix: iterate over a COPY of ``roots`` - calling .remove() on
        # the list being iterated silently skips the element after each
        # removal.
        for r in list(roots):
            # Find the next version of this node
            nv = after_nodes[(after_nodes.node_id == r)]

            # If this node is not a root anymore in its next iteration, it's
            # not the "real" one
            if not nv.empty and nv.iloc[-1].parent_id is not None:
                roots.remove(r)

        # Get disconnected components
        g = graph.neuron2nx(x)
        subgraphs = [l for l in nx.connected_components(nx.to_undirected(g))]

        # If we have a winner root, keep the bit that it is part of
        if len(roots) == 1:
            keep = [l for l in subgraphs if roots[0] in l][0]
        # If we have multiple winners (unlikely) or none (e.g. if the "real"
        # root got rerooted too) go for the biggest branch
        else:
            keep = sorted(subgraphs, key=lambda x: len(x), reverse=True)[0]

        x.nodes = x.nodes[x.nodes.node_id.isin(keep)].copy()

    # Remove connectors where the treenode does not even exist yet
    x.connectors = x.connectors[x.connectors.node_id.isin(x.nodes.node_id)]

    # Take care of connectors where the treenode might exist but was not yet linked
    # Bug fix: pass the resolved remote instance explicitly instead of
    # implicitly relying on a globally defined one.
    links = fetch.get_connector_links(x.skeleton_id, remote_instance=remote_instance)
    #localize = lambda x: pd.Timestamp.tz_localize(x, 'UTC')
    #links['creation_time'] = links.creation_time.map(localize)
    links = links[links.creation_time <= target]
    links['connector_id'] = links.connector_id.astype(int)
    links['node_id'] = links.node_id.astype(int)

    # Get connector ID -> treenode combinations
    l = links[['connector_id', 'node_id']].T.apply(tuple)
    c = x.connectors[['connector_id', 'node_id']].T.apply(tuple)

    # Keep only those where connector->treenode connection is present
    x.connectors = x.connectors[c.isin(l)]

    x._clear_temp_attr()

    if not inplace:
        return x
    return
def prune_by_length(x, min_length=0, max_length=float('inf'), inplace=False):
    """Remove segments of given length.

    This uses :func:`pymaid.graph_utils._generate_segments` to generate
    segments that maximize segment lengths.

    Parameters
    ----------
    x :             CatmaidNeuron/List
    min_length :    int | float
                    Twigs shorter than this length [um] will be pruned.
    max_length :    int | float
                    Segments longer than this length [um] will be pruned.
    inplace :       bool, optional
                    If False, pruning is performed on copy of original neuron
                    which is then returned.

    Returns
    -------
    CatmaidNeuron/List
                    Pruned neuron(s).

    See Also
    --------
    :func:`pymaid.longest_neurite`
                    If you want to keep/remove just the N longest neurites
                    instead of using a length cut-off.
    :func:`pymaid.prune_twigs`
                    Use if you are looking to remove only terminal branches of
                    a given size.

    Examples
    --------
    >>> import pymaid
    >>> n = pymaid.get_neurons(16)
    >>> # Remove neurites longer than 100 microns
    >>> n_pr = pymaid.prune_by_length(n,
    ...                               min_length=0,
    ...                               max_length=100,
    ...                               inplace=False)
    >>> n.n_nodes > n_pr.n_nodes
    True

    """
    if isinstance(x, core.CatmaidNeuronList):
        if not inplace:
            x = x.copy()

        [prune_by_length(n,
                         min_length=min_length,
                         max_length=max_length,
                         inplace=True) for n in config.tqdm(x, desc='Pruning',
                                                            disable=config.pbar_hide,
                                                            leave=config.pbar_leave)]

        if not inplace:
            return x
        else:
            return None
    elif isinstance(x, core.CatmaidNeuron):
        neuron = x
    else:
        raise TypeError('Expected CatmaidNeuron/List, got {}'.format(type(x)))

    # Make a copy if necessary before making any changes
    if not inplace:
        neuron = neuron.copy()

    # Convert units to nanometres
    min_length *= 1000
    max_length *= 1000

    # Find terminal segments
    segs = graph_utils._generate_segments(neuron, weight='weight')
    # Bug fix: segments are ragged (different lengths); NumPy >= 1.24 raises
    # on implicit object-array creation, so request dtype=object explicitly.
    segs = np.array(segs, dtype=object)

    # Get segment lengths
    seg_lengths = np.array([graph_utils.segment_length(neuron, s) for s in segs])

    # Find out which to delete
    segs_to_delete = segs[(seg_lengths < min_length) | (seg_lengths > max_length)]

    if segs_to_delete.any():
        # Unravel the into list of node IDs -> skip the last parent
        nodes_to_delete = [n for s in segs_to_delete for n in s[:-1]]

        # Subset neuron
        nodes_to_keep = neuron.nodes[~neuron.nodes.node_id.isin(nodes_to_delete)].node_id.values
        graph_utils.subset_neuron(neuron,
                                  nodes_to_keep,
                                  inplace=True)

    if not inplace:
        return neuron
    else:
        return None
|
schlegelp/pymaid
|
pymaid/morpho.py
|
Python
|
gpl-3.0
| 32,509
|
[
"NEURON"
] |
09009e49388c6cbf2ff3dd6b72d70990b739d92eb6dd4e23eb704e39e4c8fbc8
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
from pytest import fixture, mark
@fixture
def vasp():
    """Vasp object stripped down to the relaxation-related keywords only."""
    from pylada.vasp import Vasp

    functional = Vasp()
    keep = ('isif', 'nsw', 'ibrion', 'relaxation')
    for name in [k for k in functional._input.keys() if k not in keep]:
        del functional._input[name]
    return functional
def test_fixture(vasp):
    """Makes sure fixture is empty of anything but relaxation stuff."""
    output = vasp.output_map(vasp=vasp)
    assert len(output) == 1
def test_default_is_static(vasp):
    """A fresh Vasp object defaults to a static (no-relaxation) calculation."""
    assert vasp.relaxation == 'static'
    for attribute in ('ibrion', 'isif', 'nsw'):
        assert getattr(vasp, attribute) is None
    assert vasp.output_map(vasp=vasp)['ibrion'] == '-1'
@mark.parametrize('prior', ['static', 'cellshape volume'])
def test_static(vasp, prior):
    """Switching (back) to static leaves the relaxation tags unset or reset."""
    vasp.relaxation = prior
    vasp.relaxation = 'static'

    assert vasp.relaxation == 'static'
    assert vasp.ibrion in (-1, None)
    assert vasp.nsw in (0, None)
    assert vasp.isif in (2, None)
    assert vasp.output_map(vasp=vasp)['ibrion'] == '-1'
def check_cellshape(vasp, nsw=50):
    """Assert that *vasp* is configured for a cellshape-only relaxation."""
    assert vasp.relaxation == 'cellshape'
    assert (vasp.isif, vasp.nsw, vasp.ibrion) == (5, nsw, 2)

    output = vasp.output_map(vasp=vasp)
    assert len(output) == 3
    assert output['ibrion'] == '2'
    assert output['isif'] == '5'
    assert output['nsw'] == str(nsw)
def test_cellshape(vasp):
    """Cellshape relaxation settings stick, including a custom nsw."""
    vasp.relaxation = 'cellshape'
    check_cellshape(vasp)

    vasp.nsw = 25
    check_cellshape(vasp, nsw=25)
def test_cellshape_and_pickle(vasp):
    """Relaxation settings must survive a pickle round-trip."""
    from pickle import dumps, loads

    vasp.relaxation = 'cellshape'
    check_cellshape(loads(dumps(vasp)))

    vasp.nsw = 25
    check_cellshape(loads(dumps(vasp)), nsw=25)
def test_cellshape_volume(vasp):
    """Cellshape + volume relaxation maps onto ISIF=6."""
    vasp.relaxation = 'cellshape volume'
    vasp.nsw = 25

    assert vasp.relaxation == 'cellshape volume'
    assert (vasp.isif, vasp.ibrion, vasp.nsw) == (6, 2, 25)

    output = vasp.output_map(vasp=vasp)
    assert len(output) == 3
    assert output['ibrion'] == '2'
    assert output['isif'] == '6'
    assert output['nsw'] == '25'
@mark.parametrize('relaxation', ['ions', 'ionic'])
def test_ions(vasp, relaxation):
    """Both 'ions' and 'ionic' spellings select an ionic relaxation."""
    vasp.relaxation = relaxation
    assert (vasp.relaxation, vasp.isif) == ('ionic', 2)
def test_cellshape_volume_ions(vasp):
    """Relaxing cellshape, volume and ions together maps onto ISIF=3."""
    vasp.relaxation = 'cellshape, volume ions'
    assert (vasp.relaxation, vasp.isif) == ('cellshape ionic volume', 3)
def test_cellshape_ionic(vasp):
    """Cellshape + ionic relaxation (fixed volume) maps onto ISIF=4."""
    vasp.relaxation = 'cellshape, ionic'
    assert (vasp.relaxation, vasp.isif) == ('cellshape ionic', 4)
def test_volume(vasp):
    """Volume-only relaxation maps onto ISIF=7."""
    vasp.relaxation = 'volume'
    assert (vasp.relaxation, vasp.isif) == ('volume', 7)
def test_ions_volume_fails(vasp):
    """Relaxing ions and volume while freezing the cellshape is impossible."""
    from pytest import raises

    with raises(ValueError):
        vasp.relaxation = "ions, volume"
|
pylada/pylada-light
|
tests/vasp/test_relax_attribute.py
|
Python
|
gpl-3.0
| 4,113
|
[
"CRYSTAL",
"VASP"
] |
79df7f9421d9df74ad7e4c16e70e4af2c0c8ab1cb26fa8714e1b64787661ada5
|
"""Reads the fasttext word embeddings
Visit: https://fasttext.cc/docs/en/english-vectors.html#format"""
import os
import numpy as np
import anna.data.utils as utils
# Subdirectory (under the data dir) where the embeddings are stored.
DESTINATION = "fasttext"
# Name of the extracted embeddings file.
NAME = "wiki-news-300d-1M-subword.vec"
# Name of the downloaded archive.
ZIP_NAME = NAME + ".zip"
# Official download location of the pre-trained vectors.
URL = "https://s3-us-west-1.amazonaws.com/fasttext-vectors/" + ZIP_NAME
def fetch_and_parse(data_dir, voc_size=None):
    """
    Fetches and parses the fasttext word embeddings dataset. The dataset is
    also cached as a pickle for further calls.

    Args:
        data_dir (str): absolute path to the dir where datasets are stored
        voc_size (int): maximum size of the vocabulary, None for no limit

    Returns:
        voc (list[str]): list of words, matching the index in `emb`
        emb (numpy.array): array of embeddings for each word in `voc`
    """
    fasttext_dir = fetch(data_dir)
    return parse(fasttext_dir, voc_size)
def parse(fasttext_dir, voc_size):
    """
    Parses the fasttext word embeddings.

    Args:
        fasttext_dir (str): absolute path to the extracted word embeddings
        voc_size (int): maximum size of the vocabulary, None for no limit

    Returns:
        voc (list[str]): list of words, matching the index in `emb`
        emb (numpy.array): array of embeddings for each word in `voc`
    """
    voc = []
    emb = []
    words = set()
    first = True
    fasttext_path = os.path.join(fasttext_dir, NAME)
    with open(fasttext_path) as f:
        for line in f:
            # First line contains # words and embedding sizes, skip
            if first:
                first = False
                continue
            parts = line.split(" ")
            word = parts[0]
            if word not in words:
                words.add(word)
                voc.append(word)
                emb.append([float(n) for n in parts[1:]])
                # Bug fix: the docstring allows voc_size=None ("no limit"),
                # but `len(voc) >= None` raises TypeError on Python 3 -
                # guard the comparison.
                if voc_size is not None and len(voc) >= voc_size:
                    break
    return utils.add_special_tokens(voc, np.array(emb))
def fetch(data_dir):
    """
    Fetches and extracts pre-trained fastText word vectors.

    Args:
        data_dir (str): absolute path to the folder where datasets are stored

    Returns:
        fasttext_dir (str): absolute path to the folder with fasttext data
    """
    base = os.path.join(data_dir, DESTINATION)
    archive_path = os.path.join(base, ZIP_NAME)
    extracted_path = os.path.join(base, NAME)
    return utils.fetch(URL, archive_path, extracted_path)
|
jpbottaro/anna
|
anna/data/dataset/fasttext.py
|
Python
|
mit
| 2,392
|
[
"VisIt"
] |
d8c105f0c04e60db873cc1518de087528650276dcbb06d87f4007c117bc24c2f
|
import director
import math
import textwrap
import drc as lcmdrc
import bot_core as lcmbotcore
import vtkAll as vtk
from director import transformUtils
from director import visualization as vis
from director import objectmodel as om
from director import lcmUtils
from director import ikconstraints
from director import cameraview
from director import affordanceupdater
from director import affordancemanager
from director import segmentation
from director import robotstate
from director.debugVis import DebugData
from director.utime import getUtime
from director.ikplanner import ConstraintSet
import director.tasks.robottasks as rt
from director.ikparameters import IkParameters
from director.timercallback import TimerCallback
import bot_core
import os
import functools
import numpy as np
import scipy.io
from director.tasks.taskuserpanel import TaskUserPanel
from director import drcargs
class DrivingPlanner(object):
def __init__(self, ikServer, robotSystem):
    """Set up the driving planner and its defaults.

    ikServer: matlab IK server; matlab-side setup commands are sent once it
        signals startup completion (see ``initialize``).
    robotSystem: director robot system; used here to read joint limits from
        the teleop robot model.
    """
    self.ikServer = ikServer
    self.robotSystem = robotSystem
    # Defer matlab-side setup until the ik server has finished starting up.
    self.ikServer.connectStartupCompleted(self.initialize)
    self.steeringAngleDegrees = 0.0
    self.maxTurningRadius = 9.5
    # Trajectory seed parameters for the driving plan
    # (units presumably meters/radians - TODO confirm against planner).
    self.trajectoryX = 0
    self.trajectoryY = 0.3
    self.trajectoryAngle = 0
    self.trajSegments = 25
    self.wheelDistance = 1.4
    # Identity transform until a tag detection updates it.
    self.tagToLocalTransform = transformUtils.transformFromPose([0,0,0],[1,0,0,0])
    self.commandStreamChannel = 'JOINT_POSITION_GOAL'
    # Joint names come from the director config; the indices below are their
    # positions in the drake pose joint-name list.
    self.drivingThrottleJoint = drcargs.getDirectorConfig()['drivingThrottleJoint']
    self.drivingSteeringJoint = drcargs.getDirectorConfig()['drivingSteeringJoint']
    self.akyIdx = robotstate.getDrakePoseJointNames().index( self.drivingThrottleJoint )
    self.lwyIdx = robotstate.getDrakePoseJointNames().index( self.drivingSteeringJoint )
    # NaN marks "not yet captured" ankle positions.
    self.anklePositions = np.array([np.nan,np.nan])
    # Per-joint limits, in drake pose joint order.
    self.jointLimitsMin = np.array([self.robotSystem.teleopRobotModel.model.getJointLimits(jointName)[0] for jointName in robotstate.getDrakePoseJointNames()])
    self.jointLimitsMax = np.array([self.robotSystem.teleopRobotModel.model.getJointLimits(jointName)[1] for jointName in robotstate.getDrakePoseJointNames()])
    self.idleAngleSlack = 10
    self.fineGrainedThrottleTravel = 10
    self.steeringAngleOffset = 0
    self.throttlePublishChannel = 'SINGLE_JOINT_POSITION_GOAL'
    self.steeringPublishChannel = 'SINGLE_JOINT_POSITION_GOAL'
    self.addSubscribers()
    # Grasp state is unknown until the wheel is actually grasped.
    self.graspWheelAngle = None
    self.graspWristAngle = None
    self.kneeInPedal = 0
    self.angleToleranceInDegrees = 10
    self.distanceAbovePedal = 0.05
    self.distanceAboveFootStartPose = 0.2
    self.plans = []
    # Timers re-publish the latest throttle/steering command at 5 Hz
    # while a command message is set.
    self.throttleCommandTimer = TimerCallback(targetFps=5)
    self.throttleCommandTimer.callback = self.publishThrottleCommand
    self.throttleCommandMsg = None
    self.steeringCommandTimer = TimerCallback(targetFps=5)
    self.steeringCommandTimer.callback = self.publishSteeringCommand
    self.steeringCommandMsg = None
@staticmethod
def isCompatibleWithConfig():
return 'drivingThrottleJoint' in drcargs.getDirectorConfig()
    def getInitCommands(self):
        """Return the MATLAB commands that construct the driving planner (dp)."""
        # textwrap.dedent strips the common indentation from the literal
        commands = [textwrap.dedent('''
        % ------ driving planner startup ------
        addpath([getenv('DRC_BASE'), '/software/control/matlab/planners/driving_planner']);
        clear driving_planner_options;
        driving_planner_options.listen_to_lcm_flag = 0;
        driving_planner_options.qstar = q_nom;
        dp = drivingPlanner(s.robot, driving_planner_options);
        % ------ driving planner startup end ------
        ''')]
        return commands
def addSubscribers(self):
lcmUtils.addSubscriber('THROTTLE_COMMAND', lcmdrc.trigger_finger_t , self.onThrottleCommand)
lcmUtils.addSubscriber('STEERING_COMMAND', lcmdrc.driving_control_cmd_t , self.onSteeringCommand)
def initialize(self, ikServer, success):
if ikServer.restarted:
return
commands = self.getInitCommands()
self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
self.ikServer.taskQueue.start()
# applies the properties to the driving planner object
def applyProperties(self):
commands = []
commands.append("dp.options.quat_tol = %r;" % self.quatTol)
commands.append("dp.options.tol = %r;" % self.positionTol)
commands.append("dp.options.seed_with_current = %r;" % self.seedWithCurrent)
self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
self.ikServer.taskQueue.start()
def updateWheelTransform(self, xyzquat):
commands = []
startPose = self.getPlanningStartPose()
commands.append("q0 = %s;" % ikconstraints.ConstraintBase.toColumnVectorString(startPose))
commands.append("xyzquat = %s;" % ikconstraints.ConstraintBase.toColumnVectorString(xyzquat))
commands.append("dp = dp.updateWheelTransform(xyzquat, q0);")
self.ikServer.comm.sendCommands(commands)
def planSafe(self, speed=1):
commands = []
commands.append("clear options;")
commands.append("options.speed = %r;" % speed)
startPose = self.getPlanningStartPose()
commands.append("dp.planSafe(options,%s);" % ikconstraints.ConstraintBase.toColumnVectorString(startPose))
self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
self.ikServer.taskQueue.start()
def planPreGrasp(self, depth=0.2, xyz_des=None, angle=0, speed=1, graspLocation='center', turnRadius=0.187):
commands = []
commands.append("clear options;")
commands.append("options = struct('depth',{%r});" % depth)
commands.append("options.turn_radius = %r;" % turnRadius)
commands.append("options.graspLocation = '%s';" % graspLocation)
commands.append("options.angle = %r;" % np.radians(angle))
commands.append("options.speed = %r;" % speed)
if xyz_des is not None:
commands.append("options.xyz_des = {%s};",ikconstraints.ConstraintBase.toColumnVectorString(xyz_des))
startPose = self.getPlanningStartPose()
commands.append("dp.planPreGrasp(options, %s);" % ikconstraints.ConstraintBase.toColumnVectorString(startPose))
listener = self.getManipPlanListener()
self.ikServer.comm.sendCommands(commands)
plan = listener.waitForResponse()
listener.finish()
self.addPlan(plan)
def planTouch(self, depth=0, xyz_des=None, speed=1):
commands = []
commands.append("clear options;")
commands.append("options = struct('depth',{%r});" % depth)
commands.append("options.speed = %r;" % speed)
startPose = self.getPlanningStartPose()
commands.append("dp.planTouch(options, %s);" % ikconstraints.ConstraintBase.toColumnVectorString(startPose))
listener = self.getManipPlanListener()
self.ikServer.comm.sendCommands(commands)
plan = listener.waitForResponse()
listener.finish()
self.addPlan(plan)
def planRetract(self, depth=0.2, speed=1):
commands = []
commands.append("clear options;")
commands.append("options = struct('depth',{%r});" % depth)
commands.append("options.speed = %s;" % speed)
startPose = self.getPlanningStartPose()
commands.append("dp.planRetract(options, %s);" % ikconstraints.ConstraintBase.toColumnVectorString(startPose))
listener = self.getManipPlanListener()
self.ikServer.comm.sendCommands(commands)
plan = listener.waitForResponse()
listener.finish()
self.addPlan(plan)
def planTurn(self, angle=0, speed=1):
commands = []
commands.append("clear options;")
commands.append("options.turn_angle = %r;" % np.radians(angle))
commands.append("options.speed = %r;" % speed)
commands.append("options.use_raw_angle = 1;")
startPose = self.getPlanningStartPose()
commands.append("dp.planTurn(options,%s);" % ikconstraints.ConstraintBase.toColumnVectorString(startPose))
listener = self.getManipPlanListener()
self.ikServer.comm.sendCommands(commands)
plan = listener.waitForResponse()
listener.finish()
self.addPlan(plan)
def planSteeringWheelTurn(self, speed=1, knotPoints=20, turnRadius=.187, gazeTol=0.3):
commands = []
commands.append("clear options;")
commands.append("options.speed = %r;" % speed)
commands.append("options.turn_radius = %r;" % turnRadius)
commands.append("options.N = %r;" % knotPoints)
commands.append("options.steering_gaze_tol = %r;" % gazeTol)
startPose = self.getPlanningStartPose()
commands.append("dp.planSteeringWheelTurn(options,%s);" % ikconstraints.ConstraintBase.toColumnVectorString(startPose))
self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
self.ikServer.taskQueue.start()
def planSeed(self):
commands = []
startPose = self.getPlanningStartPose()
commands.append("dp.planSeed(%s);" % ikconstraints.ConstraintBase.toColumnVectorString(startPose))
self.ikServer.taskQueue.addTask(functools.partial(self.ikServer.comm.sendCommandsAsync, commands))
self.ikServer.taskQueue.start()
def getPlanningStartPose(self):
return self.robotSystem.robotStateJointController.q
    # move left leg up a bit
    def planLegUp(self):
        """Plan raising the left foot to the 'left foot up frame' affordance."""
        ikPlanner = self.robotSystem.ikPlanner
        startPose = self.getPlanningStartPose()
        startPoseName = 'q_start_foot'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_foot_end'
        lFoot2World = self.robotSystem.ikPlanner.getLinkFrameAtPose('l_foot', startPose)
        # targetFrame = transformUtils.copyFrame(lFoot2World)
        # targetFrame.PreMultiply()
        # targetFrame.Translate([0.0,0.0, self.distanceAboveFootStartPose])
        targetFrame = transformUtils.copyFrame(om.findObjectByName('left foot up frame').transform)
        footPoseConstraint = self.createLeftFootPoseConstraint(targetFrame)
        # hold every joint except the left leg at the start pose
        allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
        constraints = [allButLeftLegPostureConstraint]
        constraints.extend(footPoseConstraint)
        cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
        cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
        # NOTE(review): 'q_start' is referenced here but the pose added above is
        # named 'q_start_foot' -- confirm 'q_start' exists in the pose database
        cs.seedPoseName = 'q_start'
        cs.nominalPoseName = 'q_start'
        endPose = cs.runIk()
        plan = cs.planEndPoseGoal()
        self.plans.append(plan)
        return plan
    def planLegSwingIn(self):
        """Plan swinging the left leg into the car toward the pedal area.

        Solves an end-pose IK seeded from the 'driving' database posture, then
        plans a trajectory that also passes through the 'left foot pedal swing'
        frame mid-way (tspan 0.3).
        """
        om.findObjectByName('left foot driving')  # NOTE(review): result unused -- confirm whether this was meant as an existence check
        ikPlanner = self.robotSystem.ikPlanner
        startPose = self.getPlanningStartPose()
        startPoseName = 'q_start_foot'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_foot_end'
        # target frame depends on whether the knee must tuck in over the pedal
        if self.kneeInPedal:
            legAbovePedalFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving knee in').transform)
            legAbovePedalFrame.PreMultiply()
            legAbovePedalFrame.Translate([0.0, 0.0, self.distanceAbovePedal])
        else:
            legAbovePedalFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving').transform)
            legAbovePedalFrame.PreMultiply()
            legAbovePedalFrame.Translate([-0.02, 0.0, 0.03])
        identityFrame = vtk.vtkTransform()  # NOTE(review): unused
        legAbovePedalConstraint = self.createLeftFootPoseConstraint(legAbovePedalFrame, tspan=[1,1], angleToleranceInDegrees=self.angleToleranceInDegrees)
        allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
        constraints = [allButLeftLegPostureConstraint]
        constraints.extend(legAbovePedalConstraint)
        seedPoseName = 'q_driving'
        seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
        self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
        cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
        cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
        cs.seedPoseName = 'q_driving'
        cs.nominalPoseName = 'q_driving'
        endPose = cs.runIk()
        # add constraint that we hit intermediate frame, maybe doesn't have to be exact???
        legSwingFrame = om.findObjectByName('left foot pedal swing').transform
        cs.constraints.extend(self.createLeftFootPoseConstraint(legSwingFrame, tspan=[0.3,0.3]))
        keyFramePlan = cs.runIkTraj()
        self.plans.append(keyFramePlan)
        return keyFramePlan
    def planLegAbovePedal(self, startPose=None):
        """Plan moving the left foot to hover above the pedal (end-pose goal).

        :param startPose: optional starting pose; defaults to the current pose.
        """
        om.findObjectByName('left foot driving')  # NOTE(review): result unused
        ikPlanner = self.robotSystem.ikPlanner
        if startPose is None:
            startPose = self.getPlanningStartPose()
        startPoseName = 'q_start_foot'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_foot_end'
        # target frame depends on whether the knee must tuck in over the pedal
        if self.kneeInPedal:
            legAbovePedalFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving knee in').transform)
            legAbovePedalFrame.PreMultiply()
            legAbovePedalFrame.Translate([0.0, 0, self.distanceAbovePedal])
        else:
            legAbovePedalFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving').transform)
            legAbovePedalFrame.PreMultiply()
            legAbovePedalFrame.Translate([-0.02, 0.0, 0.03])
        identityFrame = vtk.vtkTransform()  # NOTE(review): unused
        legAbovePedalConstraint = self.createLeftFootPoseConstraint(legAbovePedalFrame, tspan=[1,1], angleToleranceInDegrees=self.angleToleranceInDegrees)
        allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
        constraints = [allButLeftLegPostureConstraint]
        constraints.extend(legAbovePedalConstraint)
        seedPoseName = 'q_driving'
        seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
        self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
        cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
        cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
        cs.seedPoseName = 'q_driving'
        cs.nominalPoseName = 'q_driving'
        endPose = cs.runIk()
        plan = cs.planEndPoseGoal()
        self.plans.append(plan)
        return plan
    def planLegSwingOut(self, startPose=None):
        """Plan swinging the left leg out of the car to the raised 'up' frame.

        The trajectory also passes through the 'left foot pedal swing' frame
        at tspan 0.7.

        :param startPose: optional starting pose; defaults to the current pose.
        """
        om.findObjectByName('left foot driving')  # NOTE(review): result unused
        ikPlanner = self.robotSystem.ikPlanner
        if startPose is None:
            startPose = self.getPlanningStartPose()
        startPoseName = 'q_start_foot'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_foot_end'
        # legUpFrame = transformUtils.copyFrame(om.findObjectByName('left foot start').transform)
        # legUpFrame.PreMultiply()
        # legUpFrame.Translate([0.0,0.0, self.distanceAboveFootStartPose])
        legUpFrame = transformUtils.copyFrame(om.findObjectByName('left foot up frame').transform)
        identityFrame = vtk.vtkTransform()  # NOTE(review): unused
        legUpConstraint = self.createLeftFootPoseConstraint(legUpFrame, tspan=[1,1], angleToleranceInDegrees=10)
        allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
        constraints = [allButLeftLegPostureConstraint]
        constraints.extend(legUpConstraint)
        seedPoseName = 'q_driving'
        seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'car_entry_new')
        self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
        cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
        cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
        cs.seedPoseName = 'q_driving'
        cs.nominalPoseName = 'q_driving'
        endPose = cs.runIk()
        legSwingFrame = om.findObjectByName('left foot pedal swing').transform
        cs.constraints.extend(self.createLeftFootPoseConstraint(legSwingFrame, tspan=[0.7,0.7]))
        keyFramePlan = cs.runIkTraj()
        self.plans.append(keyFramePlan)
        return keyFramePlan
    def planLegEgressStart(self, startPose=None):
        """Plan lowering the left foot next to the right foot for egress.

        The target comes from the captured 'left foot to right foot' relative
        frame (see captureLeftFootToRightFootTransform) applied to the current
        right foot pose.

        :param startPose: optional starting pose; defaults to the current pose.
        """
        om.findObjectByName('left foot driving')  # NOTE(review): result unused
        ikPlanner = self.robotSystem.ikPlanner
        if startPose is None:
            startPose = self.getPlanningStartPose()
        startPoseName = 'q_start_foot'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_foot_end'
        lFoot2RFoot = om.findObjectByName('left foot to right foot')
        assert lFoot2RFoot  # captureLeftFootToRightFootTransform must run first
        rFoot2World = self.robotSystem.ikPlanner.getLinkFrameAtPose('r_foot', startPose)
        lFootGoalFrame = transformUtils.concatenateTransforms([transformUtils.copyFrame(lFoot2RFoot.transform), rFoot2World])
        legDownFrame = transformUtils.copyFrame(lFootGoalFrame)
        identityFrame = vtk.vtkTransform()  # NOTE(review): unused
        legDownConstraint = self.createLeftFootPoseConstraint(legDownFrame)
        allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
        constraints = [allButLeftLegPostureConstraint]
        constraints.extend(legDownConstraint)
        seedPoseName = 'q_driving'
        seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
        self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
        cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
        cs.ikParameters = IkParameters(maxDegreesPerSecond=10, usePointwise=False)
        cs.seedPoseName = 'q_driving'
        cs.nominalPoseName = 'q_driving'
        endPose = cs.runIk()
        plan = cs.planEndPoseGoal()
        self.plans.append(plan)
        return plan
    def planLegEgressFull(self):
        """Chain leg-above-pedal, swing-out and egress-start into one plan.

        Each sub-plan's trajectory is saved on the MATLAB side under a named
        variable, then all three are concatenated and rescaled into a single
        manip plan received via the CANDIDATE_MANIP_PLAN listener.
        """
        legAbovePedalName = 'qtraj_leg_above_pedal'
        self.planLegAbovePedal()
        self.saveOriginalTraj(legAbovePedalName)
        # chain each sub-plan from the final pose of the previous one
        nextStartPose = robotstate.convertStateMessageToDrakePose(self.plans[-1].plan[-1])
        self.planLegSwingOut(startPose=nextStartPose)
        legSwingOutName = 'qtraj_leg_swing_out'
        self.saveOriginalTraj(legSwingOutName)
        nextStartPose = robotstate.convertStateMessageToDrakePose(self.plans[-1].plan[-1])
        self.planLegEgressStart(startPose=nextStartPose)
        legEgressStartName = 'qtraj_leg_egress_start'
        self.saveOriginalTraj(legEgressStartName)
        ikParameters = IkParameters(usePointwise=False, maxDegreesPerSecond=10)
        ikParameters = self.robotSystem.ikPlanner.mergeWithDefaultIkParameters(ikParameters)
        listener = self.getManipPlanListener()
        _ = self.concatenateAndRescaleTrajectories([legAbovePedalName, legSwingOutName, legEgressStartName], 'qtraj_foot_egress_start', 'ts', ikParameters)
        plan = listener.waitForResponse()
        listener.finish()
        self.addPlan(plan)
    def planLegPedal(self):
        """Plan placing the left foot onto the pedal (end-pose goal)."""
        ikPlanner = self.robotSystem.ikPlanner
        startPose = self.getPlanningStartPose()
        startPoseName = 'q_start_foot'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_foot_end'
        # constraint frame depends on whether the knee tucks in over the pedal
        if self.kneeInPedal:
            lfootConstraintFrame = transformUtils.copyFrame(om.findObjectByName('left foot on pedal').transform)
        else:
            lfootConstraintFrame = transformUtils.copyFrame(om.findObjectByName('left foot driving').transform)
        identityFrame = vtk.vtkTransform()
        lfootPositionOrientationConstraint = ikPlanner.createPositionOrientationConstraint('l_foot', lfootConstraintFrame, identityFrame)
        allButLeftLegPostureConstraint = self.createAllButLeftLegPostureConstraint(startPoseName)
        constraints = [allButLeftLegPostureConstraint]
        constraints.extend(lfootPositionOrientationConstraint)
        seedPoseName = 'q_driving'
        seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
        self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
        cs = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
        cs.ikParameters = IkParameters(quasiStaticShrinkFactor=1, maxDegreesPerSecond=10, usePointwise=False)
        cs.seedPoseName = 'q_driving'
        cs.nominalPoseName = 'q_driving'
        endPose = cs.runIk()
        keyFramePlan = cs.planEndPoseGoal()
        self.plans.append(keyFramePlan)
        return keyFramePlan
def captureHandPose(self):
startPose = self.getPlanningStartPose()
self.wheelAngleBeforeReGrasp = self.getSteeringWheelAngle()
ikPlanner = self.robotSystem.ikPlanner
handName = 'left'
palmToHand = ikPlanner.getPalmToHandLink(handName)
palmToWorld = ikPlanner.newGraspToWorldFrame(startPose, handName, palmToHand)
self.palmToWorldBeforeRegrasp = palmToWorld
    def planSteeringWheelReGrasp(self, useLineConstraint=True):
        """Plan re-grasping the steering wheel with the left hand flipped 180 deg.

        Uses the palm pose captured by captureHandPose() as the reference; the
        final target is that pose rotated 180 degrees about the palm Y axis,
        approached through a pre-grasp at the current palm position.

        :param useLineConstraint: constrain the approach to a straight line.
        """
        ikPlanner = self.robotSystem.ikPlanner
        startPose = self.getPlanningStartPose()
        startPoseName = 'q_regrasp_start'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_regrasp_end'
        handName = 'left'
        handLinkName = 'l_hand'
        maxMetersPerSecond = 0.1
        retractDepth = 0.15  # NOTE(review): unused
        palmToHand = ikPlanner.getPalmToHandLink(handName)
        # palm pose recorded before releasing the wheel (see captureHandPose)
        palmToWorldBeforeRegrasp = self.palmToWorldBeforeRegrasp
        finalTargetFrame = transformUtils.copyFrame(palmToWorldBeforeRegrasp)
        finalTargetFrame.PreMultiply()
        finalTargetFrame.RotateY(180)
        finalPoseConstraint = self.createLeftPalmPoseConstraints(finalTargetFrame, tspan=[1,1])
        palmToWorld = ikPlanner.newGraspToWorldFrame(startPose, handName, palmToHand)
        palmPosition = palmToWorld.GetPosition()
        # pre-grasp: current palm position combined with the final (flipped) orientation
        _, finalPose = transformUtils.poseFromTransform(finalTargetFrame)
        preGraspTargetFrame = transformUtils.transformFromPose(palmPosition, finalPose)
        preGraspPoseConstraint = self.createLeftPalmPoseConstraints(preGraspTargetFrame, tspan=[0.5, 0.5])
        allButLeftArmPostureConstraint = self.createAllButLeftArmPostureConstraint(startPoseName)
        # NOTE(review): the next three locked constraints are created but never
        # added to the constraint list -- confirm whether that is intentional
        lockedBaseConstraint = ikPlanner.createLockedBasePostureConstraint(startPoseName)
        lockedRightArmConstraint = ikPlanner.createLockedRightArmPostureConstraint(startPoseName)
        lockedTorsoConstraint = ikPlanner.createLockedTorsoPostureConstraint(startPoseName)
        constraints = [allButLeftArmPostureConstraint]
        constraints.extend(finalPoseConstraint)
        seedPoseName = 'q_regrasp_seed'
        seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'driving')
        self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
        constraintSet = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
        constraintSet.ikParameters = IkParameters(quasiStaticShrinkFactor=10, usePointwise=False, maxDegreesPerSecond=60,
            maxBodyTranslationSpeed=maxMetersPerSecond, rescaleBodyNames=[handLinkName], rescaleBodyPts=list(ikPlanner.getPalmPoint()))
        constraintSet.seedPoseName = seedPoseName
        constraintSet.nominalPoseName = seedPoseName
        endPose = constraintSet.runIk()
        # move on line constraint
        motionVector = np.array(preGraspTargetFrame.GetPosition()) - np.array(finalTargetFrame.GetPosition())
        motionTargetFrame = transformUtils.getTransformFromOriginAndNormal(np.array(preGraspTargetFrame.GetPosition()), motionVector)
        p = ikPlanner.createLinePositionConstraint(handLinkName, palmToHand, motionTargetFrame, lineAxis=2, bounds=[-np.linalg.norm(motionVector)*1, 0], positionTolerance=0.001)
        p.tspan = np.linspace(0,1,5)
        endPose = constraintSet.runIk()
        constraintSet.constraints.extend(preGraspPoseConstraint)
        # orientation constraint for 0.5, 1
        _, orientationConstraint = ikPlanner.createPositionOrientationGraspConstraints(handName, finalTargetFrame, graspToHandLinkFrame=palmToHand, positionTolerance=0.0, angleToleranceInDegrees=0.0)
        orientationConstraint.tspan = np.array([0.5,0.6,0.8,1])
        constraintSet.constraints.append(orientationConstraint)
        if useLineConstraint:
            constraintSet.constraints.append(p)
            plan = constraintSet.runIkTraj()
        else:
            plan = constraintSet.runIkTraj()
        self.plans.append(plan)
        return plan
def createLeftFootPoseConstraint(self, targetFrame, tspan=[-np.inf, np.inf], angleToleranceInDegrees=0.0):
positionConstraint, orientationConstraint = self.robotSystem.ikPlanner.createPositionOrientationConstraint('l_foot', targetFrame, vtk.vtkTransform(), angleToleranceInDegrees=angleToleranceInDegrees)
positionConstraint.tspan = tspan
orientationConstraint.tspan = tspan
return positionConstraint, orientationConstraint
def createLeftPalmPoseConstraints(self, targetFrame, tspan=[-np.inf, np.inf]):
ikPlanner = self.robotSystem.ikPlanner
positionConstraint, orientationConstraint = ikPlanner.createPositionOrientationGraspConstraints('left', targetFrame)
positionConstraint.tspan = tspan
orientationConstraint.tspan = tspan
return positionConstraint, orientationConstraint
def createLeftPalmPoseConstraints(self, targetFrame, tspan=[-np.inf, np.inf]):
ikPlanner = self.robotSystem.ikPlanner
positionConstraint, orientationConstraint = ikPlanner.createPositionOrientationGraspConstraints('left', targetFrame)
positionConstraint.tspan = tspan
orientationConstraint.tspan = tspan
return positionConstraint, orientationConstraint
def createPalmPoseConstraints(self, side, targetFrame, tspan=[-np.inf, np.inf]):
ikPlanner = self.robotSystem.ikPlanner
positionConstraint, orientationConstraint = ikPlanner.createPositionOrientationGraspConstraints(side, targetFrame)
positionConstraint.tspan = tspan
orientationConstraint.tspan = tspan
return positionConstraint, orientationConstraint
def createLeftHandPoseConstraintOnWheel(self, depth=0.12, tspan=[-np.inf, np.inf]):
targetFrame = self.getSteeringWheelPalmFrame()
targetFrame.PreMultiply()
targetFrame.Translate([0.0, depth, 0.0])
positionConstraint, orientationConstraint = self.robotSystem.ikPlanner.createPositionOrientationConstraint('l_hand_face', targetFrame, vtk.vtkTransform())
positionConstraint.tspan = tspan
orientationConstraint.tspan = tspan
return positionConstraint, orientationConstraint
def getSteeringWheelPalmFrame(self):
frame = transformUtils.copyFrame(om.findObjectByName('Steering Wheel frame').transform)
frame.PreMultiply()
frame.RotateX(90)
frame.PreMultiply()
frame.RotateZ(-90)
return frame
    def planBarGrasp(self,depth=0.03, useLineConstraint=False):
        """Plan grasping the grab bar with the right hand.

        :param depth: extra approach depth (m) past the 'right hand grab bar' frame.
        :param useLineConstraint: if True, constrain the mid-approach to a straight line.
        """
        ikPlanner = self.robotSystem.ikPlanner
        handSide = 'right'
        handLinkName = 'r_hand'
        startPose = self.getPlanningStartPose()
        startPoseName = 'q_grasp_start'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_end_grasp'
        palmToHand = ikPlanner.getPalmToHandLink(handSide)
        palmToWorld = transformUtils.copyFrame(ikPlanner.newGraspToWorldFrame(startPose, handSide, palmToHand))
        targetFrame = transformUtils.copyFrame(om.findObjectByName('right hand grab bar').transform)
        targetFrame.PreMultiply()
        targetFrame.Translate([0.0,-depth,0.0])
        finalPoseConstraints = self.createPalmPoseConstraints(handSide, targetFrame, tspan=[1,1])
        allButRightArmPostureConstraint = self.createAllButRightArmPostureConstraint(startPoseName)
        seedPoseName = 'q_bar_grab'
        seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'bar_pre_grab', side=handSide)
        self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
        constraints = [allButRightArmPostureConstraint]
        constraints.extend(finalPoseConstraints)
        constraintSet = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
        constraintSet.ikParameters = IkParameters(quasiStaticShrinkFactor=10, usePointwise=False)
        constraintSet.seedPoseName = seedPoseName
        constraintSet.nominalPoseName = seedPoseName
        # move on line constraint: approach along the palm-to-target vector
        motionVector = np.array(targetFrame.GetPosition()) - np.array(palmToWorld.GetPosition())
        motionTargetFrame = transformUtils.getTransformFromOriginAndNormal(np.array(targetFrame.GetPosition()), motionVector)
        # vis.updateFrame(motionTargetFrame,'motion frame')
        # vis.updateFrame(targetFrame, 'target')
        # vis.updateFrame(currentFrame, 'current')
        p = ikPlanner.createLinePositionConstraint(handLinkName, palmToHand, motionTargetFrame, lineAxis=2, bounds=[-np.linalg.norm(motionVector)*1, 0], positionTolerance=0.001)
        p.tspan = np.linspace(0.2,0.8,5)
        endPose = constraintSet.runIk()
        if useLineConstraint:
            constraintSet.constraints.append(p)
            plan = constraintSet.runIkTraj()
        else:
            plan = constraintSet.planEndPoseGoal()
        self.plans.append(plan)
        return plan
def planBarRetract(self, depth=0.3, useLineConstraint=False):
ikPlanner = self.robotSystem.ikPlanner
handSide = 'right'
handLinkName = 'r_hand'
startPose = self.getPlanningStartPose()
startPoseName = 'q_grasp_start'
self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
endPoseName = 'q_end_grasp'
maxBodyTranslationSpeed = 0.3
palmToHand = ikPlanner.getPalmToHandLink(handSide)
palmToWorld = transformUtils.copyFrame(ikPlanner.newGraspToWorldFrame(startPose, handSide, palmToHand))
targetFrame = transformUtils.copyFrame(palmToWorld)
targetFrame.PreMultiply()
targetFrame.Translate([0.0,-depth,0.0])
finalPoseConstraints = self.createPalmPoseConstraints(handSide, targetFrame, tspan=[1,1])
allButRightArmPostureConstraint = self.createAllButRightArmPostureConstraint(startPoseName)
seedPoseName = 'q_bar_grab'
seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'bar_pre_grab', side=handSide)
self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
constraints = [allButRightArmPostureConstraint]
constraints.extend(finalPoseConstraints)
constraintSet = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
constraintSet.ikParameters = IkParameters(quasiStaticShrinkFactor=10, usePointwise=False, maxBodyTranslationSpeed=0.3)
constraintSet.seedPoseName = 'q_bar_grab'
constraintSet.nominalPoseName = 'q_bar_grab'
# move on line constraint
motionVector = np.array(targetFrame.GetPosition()) - np.array(palmToWorld.GetPosition())
motionTargetFrame = transformUtils.getTransformFromOriginAndNormal(np.array(targetFrame.GetPosition()), motionVector)
# vis.updateFrame(motionTargetFrame,'motion frame')
# vis.updateFrame(targetFrame, 'target')
# vis.updateFrame(currentFrame, 'current')
p = ikPlanner.createLinePositionConstraint(handLinkName, palmToHand, motionTargetFrame, lineAxis=2, bounds=[-np.linalg.norm(motionVector)*1, 0.0], positionTolerance=0.02)
p.tspan = np.linspace(0,1,5)
endPose = constraintSet.runIk()
if useLineConstraint:
constraintSet.constraints.append(p)
plan = constraintSet.runIkTraj()
else:
plan = constraintSet.planEndPoseGoal()
self.plans.append(plan)
return plan
def commitManipPlan(self):
self.robotSystem.manipPlanner.commitManipPlan(self.plans[-1])
def createAllButLeftLegPostureConstraint(self, poseName):
joints = robotstate.matchJoints('^(?!l_leg)')
return self.robotSystem.ikPlanner.createPostureConstraint(poseName, joints)
def createAllButLeftArmPostureConstraint(self, poseName):
joints = robotstate.matchJoints('^(?!l_arm)')
return self.robotSystem.ikPlanner.createPostureConstraint(poseName, joints)
def createAllButRightArmPostureConstraint(self, poseName):
joints = robotstate.matchJoints('^(?!r_arm)')
return self.robotSystem.ikPlanner.createPostureConstraint(poseName, joints)
def captureLeftFootToRightFootTransform(self):
startPose = self.getPlanningStartPose()
lFoot2World = self.robotSystem.ikPlanner.getLinkFrameAtPose('l_foot', startPose)
rFoot2World = self.robotSystem.ikPlanner.getLinkFrameAtPose('r_foot', startPose)
lFoot2RFoot = transformUtils.concatenateTransforms([lFoot2World, rFoot2World.GetLinearInverse()])
vis.showFrame(lFoot2RFoot, 'left foot to right foot', scale=0.2, visible=False)
def computeDrivingTrajectories(self, steeringAngleDegrees, maxTurningRadius = 10, numTrajPoints = 50):
angle = -steeringAngleDegrees
if abs(angle) < 0.1:
angle = 1e-8
turningRadius = 1.0 / (angle * (1 / (maxTurningRadius * 170.0)))
turningCenter = [0, turningRadius, 0]
trajPoints = list()
for i in range(0, numTrajPoints):
theta = math.radians((40 / turningRadius) * i - 90)
trajPoint = np.asarray(turningCenter)+turningRadius*np.asarray([math.cos(theta), math.sin(theta), 0])
trajPoints.append(trajPoint)
leftTraj = list()
rightTraj = list()
for i in range(0, numTrajPoints - 1):
v1 = trajPoints[i + 1] - trajPoints[i]
v2 = np.cross(v1, [0, 0, 1])
v2 /= np.linalg.norm(v2)
leftTraj.append(trajPoints[i] - 0.5 * self.wheelDistance * v2)
rightTraj.append(trajPoints[i] + 0.5 * self.wheelDistance * v2)
return leftTraj, rightTraj
def transformDrivingTrajectory(self, drivingTraj):
transformedDrivingTraj = list()
transform = vtk.vtkTransform()
z_axis = self.tagToLocalTransform.TransformVector([0,0,1])
tag_origin = self.tagToLocalTransform.TransformPoint([0,0,0])
z_norm = np.linalg.norm(z_axis[0:2])
if z_norm > 1e-6:
z_axis_proj = z_axis[0:2] / z_norm
angle = math.degrees(math.atan2(z_axis_proj[1], z_axis_proj[0]))
else:
angle = 0
transform.Translate([tag_origin[0] , tag_origin[1], 0])
transform.RotateZ(self.trajectoryAngle + angle)
transform.Translate([self.trajectoryX, self.trajectoryY, 0])
for p in drivingTraj:
transformedPoint = np.asarray(transform.TransformPoint(p))
transformedDrivingTraj.append(transformedPoint)
return transformedDrivingTraj
    def onThrottleCommand(self, msg):
        """Convert a THROTTLE_COMMAND (trigger_finger_t) into an ankle joint goal.

        The goal is cached in self.throttleCommandMsg and re-published at 5 Hz
        by publishThrottleCommand().
        """
        # slider 0 is the coarse grained slider, slider 1 is for fine grained adjustment
        slider = self.decodeThrottleMessage(msg)
        # ankle lower limit in degrees is the zero-throttle reference
        const = np.rad2deg(self.jointLimitsMin[self.akyIdx])
        # NOTE(review): coarseGrainedThrottleTravel is not set in __init__; it is
        # presumably assigned externally (e.g. by the task panel) -- confirm
        ankleGoalPosition = const + slider[0]*self.coarseGrainedThrottleTravel + (slider[1]-1/2.0)*self.fineGrainedThrottleTravel
        ankleGoalPositionRadians = np.deg2rad(ankleGoalPosition)
        # trip the safety if slider[3] is < 1/2, emergency come off the throttle
        if slider[3] < 0.5:
            print 'Emergency stop, coming off the throttle'
            print "setting l_leg_aky to it's min value"
            ankleGoalPositionRadians = self.jointLimitsMin[self.akyIdx]
        msg = lcmdrc.joint_position_goal_t()
        msg.utime = getUtime()
        msg.joint_position = ankleGoalPositionRadians
        msg.joint_name = drcargs.getDirectorConfig()['drivingThrottleJoint']
        self.throttleCommandMsg = msg
def publishThrottleCommand(self):
if not self.throttleStreaming:
return
if self.throttleCommandMsg is None:
return
lcmUtils.publish(self.throttlePublishChannel, self.throttleCommandMsg)
def publishSteeringCommand(self):
if not self.steeringStreaming:
return
if self.steeringCommandMsg is None:
return
lcmUtils.publish(self.steeringPublishChannel, self.steeringCommandMsg)
def onSteeringCommand(self, msg):
steeringAngle = -msg.steering_angle
lwyPositionGoal = steeringAngle + self.steeringAngleOffset
msg = lcmdrc.joint_position_goal_t()
msg.utime = getUtime()
msg.joint_position = lwyPositionGoal
msg.joint_name = self.drivingThrottleJoint
self.steeringCommandMsg = msg
def decodeThrottleMessage(self,msg):
slider = np.zeros(4)
slider[0] = msg.slider1
slider[1] = msg.slider2
slider[2] = msg.slider3
slider[3] = msg.slider4
return slider
    def captureRobotPoseFromStreaming(self):
        """Wait (up to 1 s) for a pose on the command-stream channel.

        :return: the pose converted to Drake ordering, or None on timeout.
        """
        helper = lcmUtils.MessageResponseHelper(self.commandStreamChannel, bot_core.robot_state_t)
        msg = helper.waitForResponse(timeout=1000, keepAlive=False)
        if msg is None:
            print "Didn't receive a JOINT_POSITION_GOAL message"
            print "Are you streaming?"
            return None
        pose = robotstate.convertStateMessageToDrakePose(msg)
        return pose
def planCarEntryPose(self):
ikPlanner = self.robotSystem.ikPlanner
startPose = self.getPlanningStartPose()
endPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'car_entry_new')
plan = ikPlanner.computePostureGoal(startPose, endPose, feetOnGround=False)
self.addPlan(plan)
def planArmsEgressPrep(self):
startPose = self.getPlanningStartPose()
ikPlanner = self.robotSystem.ikPlanner
ikParameters = IkParameters(maxDegreesPerSecond=60)
midPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'pre_egress_left_arm', side='left')
midPose = ikPlanner.getMergedPostureFromDatabase(midPose, 'driving', 'pre_egress_right_arm', side='right')
# endPose = ikPlanner.getMergedPostureFromDatabase(midPose, 'driving', 'egress-arms')
# plan = ikPlanner.computeMultiPostureGoal([startPose, midPose, endPose], feetOnGround=False, ikParameters=ikParameters)
plan = ikPlanner.computePostureGoal(startPose, midPose, feetOnGround=False, ikParameters=ikParameters)
self.addPlan(plan)
def planArmsEgressStart(self, startPose=None):
if startPose is None:
startPose = self.getPlanningStartPose()
ikPlanner = self.robotSystem.ikPlanner
endPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'egress-left-arm', side='left')
endPose = ikPlanner.getMergedPostureFromDatabase(endPose, 'General', 'crane', side='right')
ikParameters = IkParameters(maxDegreesPerSecond=60)
plan = ikPlanner.computePostureGoal(startPose, endPose, feetOnGround=False, ikParameters=ikParameters)
self.addPlan(plan)
    def planArmsEgress(self):
        """Chain arms-egress-prep and arms-egress-start into a single plan.

        Each sub-plan's trajectory is saved on the MATLAB side, then both are
        concatenated and rescaled into one manip plan received via the
        CANDIDATE_MANIP_PLAN listener.
        """
        self.planArmsEgressPrep()
        armsEgressPrepName = 'qtraj_arms_prep'
        self.saveOriginalTraj(armsEgressPrepName)
        # chain the second plan from the final pose of the first
        nextStartPose = robotstate.convertStateMessageToDrakePose(self.plans[-1].plan[-1])
        self.planArmsEgressStart(startPose=nextStartPose)
        armsEgressStartName = 'qtraj_arms_egress_start'
        self.saveOriginalTraj(armsEgressStartName)
        ikParameters = IkParameters(usePointwise=False, maxDegreesPerSecond=60)
        ikParameters = self.robotSystem.ikPlanner.mergeWithDefaultIkParameters(ikParameters)
        listener = self.getManipPlanListener()
        _ = self.concatenateAndRescaleTrajectories([armsEgressPrepName, armsEgressStartName], 'qtraj_arms_egress', 'ts', ikParameters)
        plan = listener.waitForResponse()
        listener.finish()
        self.addPlan(plan)
def setSteeringWheelAndWristGraspAngles(self):
self.graspWheelAngle = np.deg2rad(self.userSpecifiedGraspWheelAngleInDegrees)
pose = self.getPlanningStartPose()
self.graspWristAngle = pose[self.lwyIdx]
def getSteeringWheelAngle(self):
if self.graspWristAngle is None or self.graspWheelAngle is None:
# this means wrist and hand haven't been initialized yet
return 0
pose = self.getPlanningStartPose()
lwyAngle = pose[self.lwyIdx]
wheelAngle = self.graspWheelAngle + lwyAngle - self.graspWristAngle
return wheelAngle
# executes regrasp plan, updates graspWristAngle, graspWheelAngle
def updateGraspOffsets(self):
pose = self.getPlanningStartPose()
#now that plan has finished update our graspWristAngle
self.graspWristAngle = pose[self.lwyIdx]
self.graspWheelAngle = self.wheelAngleBeforeReGrasp
def printSteeringWheelAngleInDegrees(self):
print np.rad2deg(self.getSteeringWheelAngle())
    def addPlan(self, plan):
        """Append a newly computed plan to the plan history."""
        self.plans.append(plan)
def getManipPlanListener(self):
responseChannel = 'CANDIDATE_MANIP_PLAN'
responseMessageClass = lcmdrc.robot_plan_w_keyframes_t
return lcmUtils.MessageResponseHelper(responseChannel, responseMessageClass)
def saveOriginalTraj(self, name):
commands = ['%s = qtraj_orig;' % name]
self.robotSystem.ikServer.comm.sendCommands(commands)
    def concatenateAndRescaleTrajectories(self, trajectoryNames, concatenatedTrajectoryName, junctionTimesName, ikParameters):
        """Concatenate previously saved trajectories on the MATLAB ik server.

        Builds and sends a MATLAB command sequence that concatenates the named
        trajectories, rescales them to the velocity limits in ikParameters, and
        publishes the result (so the manip plan listener receives it).
        Returns the junction times between the individual trajectories.
        """
        commands = []
        # velocity limits: per-joint plus base translation/rotation
        commands.append('joint_v_max = repmat(%s*pi/180, r.getNumVelocities()-6, 1);' % ikParameters.maxDegreesPerSecond)
        commands.append('xyz_v_max = repmat(%s, 3, 1);' % ikParameters.maxBaseMetersPerSecond)
        commands.append('rpy_v_max = repmat(%s*pi/180, 3, 1);' % ikParameters.maxBaseRPYDegreesPerSecond)
        commands.append('v_max = [xyz_v_max; rpy_v_max; joint_v_max];')
        # body-point speed limits used by the rescaling
        commands.append("max_body_translation_speed = %r;" % ikParameters.maxBodyTranslationSpeed)
        commands.append("max_body_rotation_speed = %r;" % ikParameters.maxBodyRotationSpeed)
        commands.append('rescale_body_ids = [%s];' % (','.join(['links.%s' % linkName for linkName in ikParameters.rescaleBodyNames])))
        commands.append('rescale_body_pts = reshape(%s, 3, []);' % ikconstraints.ConstraintBase.toColumnVectorString(ikParameters.rescaleBodyPts))
        commands.append("body_rescale_options = struct('body_id',rescale_body_ids,'pts',rescale_body_pts,'max_v',max_body_translation_speed,'max_theta',max_body_rotation_speed,'robot',r);")
        # collect the named trajectories into a MATLAB cell array
        commands.append('trajectories = {};')
        for name in trajectoryNames:
            commands.append('trajectories{end+1} = %s;' % name)
        commands.append('[%s, %s] = concatAndRescaleTrajectories(trajectories, v_max, %s, %s, body_rescale_options);' % (concatenatedTrajectoryName, junctionTimesName, ikParameters.accelerationParam, ikParameters.accelerationFraction))
        commands.append('s.publishTraj(%s, 1);' % concatenatedTrajectoryName)
        self.robotSystem.ikServer.comm.sendCommands(commands)
        return self.robotSystem.ikServer.comm.getFloatArray(junctionTimesName)
class DrivingPlannerPanel(TaskUserPanel):
    """Task panel UI wrapping a DrivingPlanner: buttons, properties, task trees."""

    def __init__(self, robotSystem):
        TaskUserPanel.__init__(self, windowTitle='Driving Task')
        self.robotSystem = robotSystem
        self.drivingPlanner = DrivingPlanner(robotSystem.ikServer, robotSystem)
        self.addDefaultProperties()
        self.addButtons()
        self.addTasks()
        # track the steering wheel via april tag detections
        self.apriltagSub = lcmUtils.addSubscriber('APRIL_TAG_TO_CAMERA_LEFT', lcmbotcore.rigid_transform_t, self.onAprilTag)
        # camera views used to overlay the predicted driving trajectory
        self.imageView = cameraview.CameraImageView(cameraview.imageManager, 'CAMERACHEST_RIGHT', 'right image view')
        self.imageViewLeft = cameraview.CameraImageView(cameraview.imageManager, 'MULTISENSE_CAMERA_LEFT', 'left image view')
        self.imageView.view.orientationMarkerWidget().Off()
        self.imageView.view.backgroundRenderer().SetBackground([0,0,0])
        self.imageView.view.backgroundRenderer().SetBackground2([0,0,0])
        self.imageViewLeft.view.orientationMarkerWidget().Off()
        self.imageViewLeft.view.backgroundRenderer().SetBackground([0,0,0])
        self.imageViewLeft.view.backgroundRenderer().SetBackground2([0,0,0])
        self.affordanceUpdater = affordanceupdater.AffordanceInCameraUpdater(segmentation.affordanceManager, self.imageView)
        self.affordanceUpdaterLeft = affordanceupdater.AffordanceInCameraUpdater(segmentation.affordanceManager, self.imageViewLeft)
        self.affordanceUpdater.prependImageName = True
        self.affordanceUpdaterLeft.prependImageName = True
        self.affordanceUpdater.projectAffordances = False
        self.affordanceUpdaterLeft.projectAffordances = False
        self.imageViewLayout.addWidget(self.imageView.view)
        self.imageViewLayout.addWidget(self.imageViewLeft.view)
        # periodic redraw of the trajectory overlay
        self.timer = TimerCallback(targetFps=10)
        self.timer.callback = self.updateAndDrawTrajectory
def onAprilTag(self, msg):
cameraview.imageManager.queue.getTransform('april_tag_car_beam', 'local', msg.utime, self.drivingPlanner.tagToLocalTransform)
def addButtons(self):
self.addManualButton('Start', self.onStart)
self.addManualButton('Update Wheel Location', self.onUpdateWheelLocation)
self.addManualButton('Plan Safe', self.onPlanSafe)
self.addManualButton('Plan Pre Grasp', self.onPlanPreGrasp)
self.addManualButton('Plan Touch', self.onPlanTouch)
self.addManualButton('Plan Retract', self.onPlanRetract)
self.addManualButton('Plan Turn', self.onPlanTurn)
self.addManualButton('Plan Wheel Re-Grasp', self.drivingPlanner.planSteeringWheelReGrasp)
self.addManualButton('Plan Bar Grab', self.onPlanBarGrasp)
self.addManualButton('Plan Bar Retract', self.onPlanBarRetract)
# self.addManualButton('Plan Steering Wheel Turn', self.onPlanSteeringWheelTurn)
# self.addManualButton('Plan Seed', self.drivingPlanner.planSeed)
# self.addManualButton('Capture Ankle Angle Low', functools.partial(self.drivingPlanner.captureAnklePosition, 0))
# self.addManualButton('Capture Ankle Angle High', functools.partial(self.drivingPlanner.captureAnklePosition, 1))
self.addManualButton('Capture Wheel and Wrist grasp angles', self.drivingPlanner.setSteeringWheelAndWristGraspAngles)
self.addManualButton('Print Steering Wheel Angle', self.drivingPlanner.printSteeringWheelAngleInDegrees)
self.addManualSpacer()
self.addManualButton('Arms Egress Prep', self.drivingPlanner.planArmsEgressPrep)
self.addManualButton('Arms Egress Start', self.drivingPlanner.planArmsEgressStart)
self.addManualButton('Plan Left Leg Egress Start', self.drivingPlanner.planLegEgressStart)
def addDefaultProperties(self):
self.params.addProperty('PreGrasp/Retract Depth', 0.2, attributes=om.PropertyAttributes(singleStep=0.01, decimals=3))
self.params.addProperty('Touch Depth', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=3))
self.params.addProperty('PreGrasp Angle', 0, attributes=om.PropertyAttributes(singleStep=10))
self.params.addProperty('Turn Angle', 0, attributes=om.PropertyAttributes(singleStep=10))
# self.params.addProperty('Steering Wheel Radius (meters)', 0.1873, attributes=om.PropertyAttributes(singleStep=0.01))
# self.params.addProperty('Knot Points', 20, attributes=om.PropertyAttributes(singleStep=1))
# self.params.addProperty('Gaze Constraint Tol', 0.3, attributes=om.PropertyAttributes(singleStep=0.1, decimals=2))
self.params.addProperty('Position Constraint Tol', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
self.params.addProperty('Quat Constraint Tol', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
self.params.addProperty('Grasp Location', 0, attributes=om.PropertyAttributes(enumNames=['Center','Rim']))
self.params.addProperty('Seed with current posture', 0, attributes=om.PropertyAttributes(enumNames=['False','True']))
self.params.addProperty('Speed', 0.75, attributes=om.PropertyAttributes(singleStep=0.1, decimals=2))
# self.params.addProperty('Throttle Idle Angle Slack', 10, attributes=om.PropertyAttributes(singleStep=1))
self.params.addProperty('Coarse Grained Throttle Travel', 100, attributes=om.PropertyAttributes(singleStep=10))
self.params.addProperty('Fine Grained Throttle Travel', 30, attributes=om.PropertyAttributes(singleStep=1))
self.params.addProperty('Throttle Streaming', False)
self.params.addProperty('Steering Streaming', False)
self.params.addProperty('Bar Grasp/Retract Depth', 0.1, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
self.params.addProperty('Pedal Foot Location', 1, attributes=om.PropertyAttributes(enumNames=['Standard','Knee In']))
self.params.addProperty('Steering Wheel Angle when Grasped', 0, attributes=om.PropertyAttributes(singleStep=10))
self.params.addProperty('Turning Radius', 9.5, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
self.params.addProperty('Wheel Separation', 1.4, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
self.params.addProperty('Trajectory Segments', 25, attributes=om.PropertyAttributes(singleStep=1, decimals=0))
self.params.addProperty('Trajectory X Offset', 0.0, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2)),
self.params.addProperty('Trajectory Y Offset', 0.30, attributes=om.PropertyAttributes(singleStep=0.01, decimals=2))
self.params.addProperty('Trajectory Angle Offset', 0.0, attributes=om.PropertyAttributes(singleStep=1, decimals=0)),
self.params.addProperty('Show Trajectory', False)
self.params.addProperty('Show Driving/Regrasp Tasks',0, attributes=om.PropertyAttributes(enumNames=['Ingress','Regrasp', 'Egress']))
self._syncProperties()
    def _syncProperties(self):
        """Mirror the panel property values onto this panel and the planner."""
        self.preGraspDepth = self.params.getProperty('PreGrasp/Retract Depth')
        self.touchDepth = self.params.getProperty('Touch Depth')
        self.preGraspAngle = self.params.getProperty('PreGrasp Angle')
        self.turnAngle = self.params.getProperty('Turn Angle')
        self.speed = self.params.getProperty('Speed')
        # fixed values; the corresponding properties are currently commented out
        self.turnRadius = 0.18 #self.params.getProperty('Steering Wheel Radius (meters)')
        self.knotPoints = 20
        self.gazeTol = 0.3
        self.drivingPlanner.positionTol = 0.0
        self.drivingPlanner.quatTol = 0.0
        self.graspLocation = 'center'
        self.drivingPlanner.seedWithCurrent = self.params.getProperty('Seed with current posture')
        # self.drivingPlanner.throttleIdleAngleSlack = self.params.getProperty('Throttle Idle Angle Slack')
        self.drivingPlanner.fineGrainedThrottleTravel = self.params.getProperty('Fine Grained Throttle Travel')
        self.drivingPlanner.coarseGrainedThrottleTravel = self.params.getProperty('Coarse Grained Throttle Travel')
        self.drivingPlanner.throttleStreaming = self.params.getProperty('Throttle Streaming')
        self.drivingPlanner.steeringStreaming = self.params.getProperty('Steering Streaming')
        self.barGraspDepth = self.params.getProperty('Bar Grasp/Retract Depth')
        self.drivingPlanner.maxTurningRadius = self.params.getProperty('Turning Radius')
        self.drivingPlanner.userSpecifiedGraspWheelAngleInDegrees = self.params.getProperty('Steering Wheel Angle when Grasped')
        self.drivingPlanner.trajSegments = self.params.getProperty('Trajectory Segments')
        self.drivingPlanner.wheelDistance = self.params.getProperty('Wheel Separation')
        self.drivingPlanner.trajectoryX = self.params.getProperty('Trajectory X Offset')
        self.drivingPlanner.trajectoryY = self.params.getProperty('Trajectory Y Offset')
        self.drivingPlanner.trajectoryAngle = self.params.getProperty('Trajectory Angle Offset')
        self.drivingPlanner.kneeInPedal = self.params.getProperty('Pedal Foot Location')
        self.taskToShow = self.params.getProperty('Show Driving/Regrasp Tasks')
        self.drivingPlanner.applyProperties()
def onSteeringCommand(self, msg):
if msg.type == msg.TYPE_DRIVE_DELTA_STEERING:
self.drivingPlanner.steeringAngleDegrees = math.degrees(msg.steering_angle)
    def onStart(self):
        """Initialize the planner by syncing the wheel affordance location."""
        self.onUpdateWheelLocation()
        print('Driving Planner Ready')
def onUpdateWheelLocation(self):
f = om.findObjectByName('Steering Wheel').getChildFrame().transform
xyzquat = transformUtils.poseFromTransform(f)
xyzquat = np.concatenate(xyzquat)
self.drivingPlanner.updateWheelTransform(xyzquat)
    def onPlanSafe(self):
        """Plan a move to the safe driving posture."""
        self.drivingPlanner.planSafe()
def onPlanPreGrasp(self, depth=None):
self.drivingPlanner.planPreGrasp(depth=self.preGraspDepth, speed=self.speed, angle=self.preGraspAngle,
graspLocation=self.graspLocation, turnRadius=self.turnRadius)
    def onPlanTouch(self):
        """Plan moving the hand onto the wheel at the configured touch depth."""
        self._syncProperties()
        self.drivingPlanner.planTouch(depth=self.touchDepth, speed=self.speed)

    def onPlanRetract(self):
        """Plan retracting the hand away from the wheel."""
        self._syncProperties()
        self.drivingPlanner.planRetract(depth=self.preGraspDepth, speed=self.speed)

    def onPlanTurn(self):
        """Plan turning the steering wheel by the configured angle."""
        self._syncProperties()
        self.drivingPlanner.planTurn(angle=self.turnAngle, speed=self.speed)

    def onPlanSteeringWheelTurn(self):
        """Plan a continuous steering wheel turn (not currently exposed as a button)."""
        self._syncProperties()
        self.drivingPlanner.planSteeringWheelTurn(speed=self.speed, turnRadius=self.turnRadius, knotPoints=self.knotPoints, gazeTol=self.gazeTol)
    def onPropertyChanged(self, propertySet, propertyName):
        """React to panel property edits: resync, rebuild tasks, toggle timers."""
        taskToShowOld = self.taskToShow
        self._syncProperties()
        # rebuild the task tree when the task-set selector changed
        if not taskToShowOld == self.taskToShow:
            self.addTasks()

        if propertyName == 'Throttle Streaming':
            if self.params.getProperty(propertyName):
                self.drivingPlanner.throttleCommandTimer.start()
            else:
                self.drivingPlanner.throttleCommandTimer.stop()
        elif propertyName == 'Steering Streaming':
            if self.params.getProperty(propertyName):
                self.drivingPlanner.steeringCommandTimer.start()
            else:
                self.drivingPlanner.steeringCommandTimer.stop()
        elif propertyName == 'Show Trajectory':
            if self.params.getProperty(propertyName):
                # start the redraw timer and both camera overlay updaters
                self.timer.start()
                self.affordanceUpdater.timer.start()
                self.affordanceUpdaterLeft.timer.start()
            else:
                # tear down the overlay and remove the drawn trajectory
                self.timer.stop()
                self.affordanceUpdater.cleanUp()
                self.affordanceUpdaterLeft.cleanUp()
                self.affordanceUpdater.extraObjects = []
                self.affordanceUpdaterLeft.extraObjects = []
                om.removeFromObjectModel(om.findObjectByName('driving trajectory'))
    def onPlanBarRetract(self):
        """Plan retracting the right hand from the grab bar."""
        self.drivingPlanner.planBarRetract(depth=self.barGraspDepth, useLineConstraint=True)

    def onPlanBarGrasp(self):
        """Plan reaching the right hand onto the grab bar."""
        self.drivingPlanner.planBarGrasp(depth=self.barGraspDepth, useLineConstraint=True)
    def setParamsPreGrasp1(self):
        """First pre-grasp approach: stand-off depth of 0.22 m."""
        self.params.setProperty('PreGrasp/Retract Depth', 0.22)

    def setParamsPreGrasp2(self):
        """Second pre-grasp approach: closer stand-off of 0.12 m."""
        self.params.setProperty('PreGrasp/Retract Depth', 0.12)

    def setParamsWheelRetract(self):
        """Retract distance used when releasing the steering wheel."""
        self.params.setProperty('PreGrasp/Retract Depth', 0.3)

    def setParamsBarRetract(self):
        """Retract distance used when releasing the grab bar."""
        self.params.setProperty('Bar Grasp/Retract Depth', 0.3)

    def setParamsBarGrasp(self):
        """Grasp depth (slightly inside the bar surface) for the bar grab."""
        self.params.setProperty('Bar Grasp/Retract Depth', -0.015)

    def startSteering(self):
        """Enable streaming of steering commands."""
        self.params.setProperty('Steering Streaming', 1)

    def stopSteering(self):
        """Disable streaming of steering commands."""
        self.params.setProperty('Steering Streaming', 0)

    def stopStreaming(self):
        """Disable both steering and throttle streaming."""
        self.params.setProperty('Steering Streaming', 0)
        self.params.setProperty('Throttle Streaming', 0)
def addTasks(self):
self.taskTree.removeAllTasks()
if self.taskToShow == 0:
self.addIngressTasks()
elif self.taskToShow == 1:
self.addRegraspTasks()
elif self.addEgressTasks() == 2:
self.addEgressTasks()
else:
return
    def addIngressTasks(self):
        """Build the task tree for car ingress (prep, grasps, foot placement)."""
        # some helpers
        self.folder = None

        def addTask(task, parent=None):
            parent = parent or self.folder
            self.taskTree.onAddTask(task, copy=False, parent=parent)

        def addFunc(func, name, parent=None):
            addTask(rt.CallbackTask(callback=func, name=name), parent=parent)

        def addFolder(name, parent=None):
            self.folder = self.taskTree.addGroup(name, parent=parent)
            return self.folder

        # variant that relies on user prompts instead of committing the plan itself
        def addManipTaskMatlab(name, planFunc, userPrompt=False, parentFolder=None):
            prevFolder = self.folder
            addFolder(name, prevFolder)
            addFunc(planFunc, 'plan')
            addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve and commit manipulation plan.'))
            addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))

        def addManipTask(name, planFunc, userPrompt=False):
            prevFolder = self.folder
            addFolder(name, prevFolder)
            addFunc(planFunc, 'plan')
            if not userPrompt:
                addTask(rt.CheckPlanInfo(name='check manip plan info'))
            else:
                addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve manipulation plan.'))
            addFunc(dp.commitManipPlan, name='execute manip plan')
            addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))

        dp = self.drivingPlanner

        # preparation: user mode, pressure, streaming, entry posture
        prep = addFolder('Prep')
        addTask(rt.UserPromptTask(name="confirm user mode", message="Please go to User mode"))
        addTask(rt.UserPromptTask(name="Confirm pressure", message='Confirm that pressure is set for ingress (2000 psi)'))
        addTask(rt.UserPromptTask(name="start streaming", message="Please start streaming"))
        addManipTask('car entry posture', self.drivingPlanner.planCarEntryPose, userPrompt=True)
        self.folder = prep
        addTask(rt.SetNeckPitch(name='set neck position', angle=30))
        addFunc(self.drivingPlanner.captureLeftFootToRightFootTransform, 'capture lfoot to rfoot transform')
        addTask(rt.UserPromptTask(name="spawn polaris model", message="launch egress planner and spawn polaris model"))
        addFunc(self.onStart, 'update wheel location')

        # two-stage left-hand wheel grasp
        graspWheel = addFolder('Grasp Steering Wheel')
        addTask(rt.OpenHand(name='open left hand', side='Left'))
        addFunc(self.setParamsPreGrasp1, 'set params')
        addManipTask('Pre Grasp 1', self.onPlanPreGrasp, userPrompt=True)
        self.folder = graspWheel
        addTask(rt.UserPromptTask(name="check alignment", message="Please ask field team for hand location relative to wheel, adjust wheel affordance if necessary"))
        addFunc(self.setParamsPreGrasp2, 'set params')
        addManipTask('Pre Grasp 2', self.onPlanPreGrasp, userPrompt=True)
        self.folder = graspWheel
        addTask(rt.UserPromptTask(name="check alignment", message="Please make any manual adjustments if necessary"))
        addTask(rt.CloseHand(name='close left hand', side='Left'))
        addTask(rt.UserPromptTask(name="set true steering wheel angle", message="Set true steering wheel angle in spin box"))
        addFunc(self.drivingPlanner.setSteeringWheelAndWristGraspAngles, 'capture true wheel angle and current wrist angle')

        # right-hand bar grasp
        graspBar = addFolder('Grasp Bar')
        addTask(rt.OpenHand(name='open right hand', side='Right'))
        addFunc(self.setParamsBarGrasp, 'set params')
        addManipTask('Bar Grasp', self.onPlanBarGrasp, userPrompt=True)
        self.folder = graspBar
        addTask(rt.UserPromptTask(name="check alignment and depth", message="Please check alignment and depth, make any manual adjustments"))
        addTask(rt.CloseHand(name='close Right hand', side='Right'))

        # move the left foot onto the pedal
        footToDriving = addFolder('Foot to Driving Pose')
        addManipTask('Foot Up', self.drivingPlanner.planLegUp, userPrompt=True)
        self.folder = footToDriving
        addManipTask('Swing leg in', self.drivingPlanner.planLegSwingIn , userPrompt=True)
        self.folder = footToDriving
        addManipTask('Foot On Pedal', self.drivingPlanner.planLegPedal, userPrompt=True)

        driving = addFolder('Driving')
        addTask(rt.UserPromptTask(name="launch drivers", message="Please launch throttle and steering drivers"))
        addTask(rt.UserPromptTask(name="switch to regrasp tasks", message="Switch to regrasp task set"))
    def addEgressTasks(self):
        """Build the task tree for car egress (ungrasp, foot and arm moves)."""
        # some helpers
        self.folder = None

        def addTask(task, parent=None):
            parent = parent or self.folder
            self.taskTree.onAddTask(task, copy=False, parent=parent)

        def addFunc(func, name, parent=None):
            addTask(rt.CallbackTask(callback=func, name=name), parent=parent)

        def addFolder(name, parent=None):
            self.folder = self.taskTree.addGroup(name, parent=parent)
            return self.folder

        def addManipTask(name, planFunc, userPrompt=False):
            prevFolder = self.folder
            addFolder(name, prevFolder)
            addFunc(planFunc, 'plan')
            if not userPrompt:
                addTask(rt.CheckPlanInfo(name='check manip plan info'))
            else:
                addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve manipulation plan.'))
            addFunc(dp.commitManipPlan, name='execute manip plan')
            addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))

        dp = self.drivingPlanner

        addFolder('Stop throttle and steering')
        addFunc(self.stopStreaming, 'stop steering and throttle streaming')
        addTask(rt.UserPromptTask(name="Confirm steering and throttle streaming is off", message='Confirm steering and throttle streaming is off, move sliders and wheel to check'))

        # footToEgress = addFolder('Foot to Egress Pose')
        self.folder = None
        addManipTask('Foot to Egress Start', self.drivingPlanner.planLegEgressFull, userPrompt=True)
        # addManipTask('Foot Off Pedal', self.drivingPlanner.planLegAbovePedal, userPrompt=True)
        # self.folder = footToEgress
        # addManipTask('Swing leg out', self.drivingPlanner.planLegSwingOut , userPrompt=True)
        # self.folder = footToEgress
        # addManipTask('Foot Down', self.drivingPlanner.planLegEgressStart, userPrompt=True)
        addFunc(self.onUpdateWheelLocation, 'Update wheel location')

        # release the steering wheel
        ungraspWheel = addFolder('Ungrasp Steering Wheel')
        addTask(rt.UserPromptTask(name="Confirm pressure", message='Confirm that pressure is set for prep-for-egress (2200 psi)'))
        addTask(rt.OpenHand(name='open left hand', side='Left'))
        addTask(rt.UserPromptTask(name="confirm hand is open", message="Confirm the left hand has opened"))
        addFunc(self.setParamsWheelRetract, 'set params')
        addManipTask('Retract hand', self.onPlanRetract, userPrompt=True)
        self.folder = ungraspWheel
        addTask(rt.CloseHand(name='close left hand', side='Left'))

        # release the grab bar
        ungraspBar = addFolder('Ungrasp Bar')
        addTask(rt.OpenHand(name='open right hand', side='Right'))
        addTask(rt.UserPromptTask(name="confirm hand is open", message="Confirm the right hand has opened"))
        addFunc(self.setParamsBarRetract, 'set params')
        addManipTask('Retract hand', self.onPlanBarRetract, userPrompt=True)
        self.folder = ungraspBar
        addTask(rt.CloseHand(name='close Right hand', side='Right'))

        # armsToEgressStart = addFolder('Arms to Egress Position')
        self.folder = None
        addManipTask('Arms to Egress', self.drivingPlanner.planArmsEgress, userPrompt=True)
        # addManipTask('Arms To Egress Prep', self.drivingPlanner.planArmsEgressPrep, userPrompt=True)
        # self.folder = armsToEgressStart
        # addManipTask('Arms To Egress Start', self.drivingPlanner.planArmsEgressStart, userPrompt=True)

        prep = addFolder('Stop Streaming')
        addTask(rt.UserPromptTask(name='stop streaming base side', message='stop streaming base side'))
    def addRegraspTasks(self):
        """Build the task tree for re-grasping the steering wheel mid-drive."""
        self.folder = None

        def addTask(task, parent=None):
            parent = parent or self.folder
            self.taskTree.onAddTask(task, copy=False, parent=parent)

        def addFunc(func, name, parent=None):
            addTask(rt.CallbackTask(callback=func, name=name), parent=parent)

        def addFolder(name, parent=None):
            self.folder = self.taskTree.addGroup(name, parent=parent)
            return self.folder

        def addManipTask(name, planFunc, userPrompt=False):
            prevFolder = self.folder
            addFolder(name, prevFolder)
            addFunc(planFunc, 'plan')
            if not userPrompt:
                addTask(rt.CheckPlanInfo(name='check manip plan info'))
            else:
                addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve manipulation plan.'))
            addFunc(dp.commitManipPlan, name='execute manip plan')
            addTask(rt.UserPromptTask(name='wait for plan execution', message='Continue when plan finishes.'))

        dp = self.drivingPlanner

        regrasp = addFolder('Regrasp')
        addFunc(self.stopSteering, 'stop steering commands')
        addTask(rt.UserPromptTask(name="high pressure", message="set pump to 2400 psi"))
        addFunc(self.onUpdateWheelLocation, 'update wheel location')
        addFunc(self.drivingPlanner.captureHandPose, 'capture hand pose')
        addTask(rt.UserPromptTask(name="approve open left hand", message="Check ok to open left hand"))
        addTask(rt.OpenHand(name='open left hand', side='Left'))
        addTask(rt.UserPromptTask(name="confirm hand is open", message="Confirm the left hand has opened"))
        addFunc(self.setParamsWheelRetract, 'set params')
        addManipTask('Retract hand', self.onPlanRetract, userPrompt=True)
        self.folder = regrasp
        addManipTask('Plan Regrasp', self.drivingPlanner.planSteeringWheelReGrasp, userPrompt=True)
        self.folder = regrasp
        addTask(rt.UserPromptTask(name="approve close left hand", message="Check ok to close left hand"))
        addTask(rt.CloseHand(name='close left hand', side='Left'))
        # refresh grasp offsets so the wheel-angle estimate stays valid
        addFunc(self.drivingPlanner.updateGraspOffsets, 'update steering wheel grasp offsets')
        addTask(rt.UserPromptTask(name="driving pressure", message="set pump to 1500 psi"))
        addTask(rt.UserPromptTask(name="reset steering wheel", message="Set the steering wheel to approximate lwy angle"))
        addFunc(self.startSteering, 'start steering commands')
def updateAndDrawTrajectory(self):
if not self.params.getProperty('Show Trajectory') or om.findObjectByName('Steering Wheel') is None:
return None
steeringAngleDegrees = np.rad2deg(self.drivingPlanner.getSteeringWheelAngle())
leftTraj, rightTraj = self.drivingPlanner.computeDrivingTrajectories(steeringAngleDegrees, self.drivingPlanner.maxTurningRadius, self.drivingPlanner.trajSegments + 1)
d = DebugData()
for traj in [leftTraj, rightTraj]:
traj = self.drivingPlanner.transformDrivingTrajectory(traj)
numTrajPoints = len(traj)
for i in xrange(numTrajPoints):
rgb = [(numTrajPoints - i) / float(numTrajPoints), 1 - (numTrajPoints - i) / float(numTrajPoints), 1]
d.addSphere(traj[i], 0.05, rgb, resolution=12)
obj = vis.updatePolyData(d.getPolyData(), 'driving trajectory', colorByName='RGB255', parent='planning')
for updater in [self.affordanceUpdater, self.affordanceUpdaterLeft]:
updater.extraObjects = [obj]
return obj
|
patmarion/director
|
src/python/director/drivingplanner.py
|
Python
|
bsd-3-clause
| 69,117
|
[
"VTK"
] |
4f8f466808d21e6b04dae7be8140f3fccd15f348b28280d37a56c46af5065893
|
#
# QAPI command marshaller generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
# Michael Roth <mdroth@linux.vnet.ibm.com>
#
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
def type_visitor(name):
    """Return the C visitor function name for a QAPI type.

    A single-element list (e.g. ['int']) denotes an array type and maps to
    the corresponding List visitor.
    """
    # isinstance is the idiomatic type check (also accepts list subclasses)
    if isinstance(name, list):
        return 'visit_type_%sList' % name[0]
    return 'visit_type_%s' % name
def generate_decl_enum(name, members, genlist=True):
    """Generate the C prototype for a type's visitor function.

    `members` and `genlist` are unused but kept for signature compatibility.
    """
    return mcgen('''
void %(visitor)s(Visitor *m, %(name)s * obj, const char *name, Error **errp);
''',
                 visitor=type_visitor(name),
                 # bug fix: the template references %(name)s but the name
                 # keyword was never passed, causing a KeyError in mcgen
                 name=name)
def generate_command_decl(name, args, ret_type):
    """Generate the C prototype for a command's qmp_FOO() implementation."""
    pieces = []
    for argname, argtype, optional, structured in parse_args(args):
        argtype = c_type(argtype)
        # strings are passed in as const
        if argtype == "char *":
            argtype = "const char *"
        if optional:
            pieces.append("bool has_%s, " % c_var(argname))
        pieces.append("%s %s, " % (argtype, c_var(argname)))
    return mcgen('''
%(ret_type)s qmp_%(name)s(%(args)sError **errp);
''',
                 ret_type=c_type(ret_type), name=c_var(name),
                 args=''.join(pieces)).strip()
def gen_sync_call(name, args, ret_type, indent=0):
    """Generate the call into qmp_FOO() plus marshalling of its return value."""
    ret = ""
    arglist = ""
    retval = ""
    if ret_type:
        retval = "retval = "
    for argname, argtype, optional, structured in parse_args(args):
        if optional:
            arglist += "has_%s, " % c_var(argname)
        arglist += "%s, " % (c_var(argname))
    push_indent(indent)
    ret = mcgen('''
%(retval)sqmp_%(name)s(%(args)serrp);
''',
                name=c_var(name), args=arglist, retval=retval).rstrip()
    if ret_type:
        # bug fix: this template previously opened with '''' (a stray fourth
        # quote character), which injected a spurious "'" into the generated C
        ret += "\n" + mcgen('''
if (!error_is_set(errp)) {
    %(marshal_output_call)s
}
''',
                            marshal_output_call=gen_marshal_output_call(name, ret_type)).rstrip()
    pop_indent(indent)
    return ret.rstrip()
def gen_marshal_output_call(name, ret_type):
    """Return the C statement that marshals a command's return value, or ''."""
    if ret_type:
        return "qmp_marshal_output_%s(retval, ret, errp);" % c_var(name)
    return ""
def gen_visitor_output_containers_decl(ret_type):
    """Declare the output visitor locals needed when a command returns a value."""
    ret = ""
    # push/pop_indent bracket mcgen so the declarations land at function scope
    push_indent()
    if ret_type:
        ret += mcgen('''
QmpOutputVisitor *mo;
QapiDeallocVisitor *md;
Visitor *v;
''')
    pop_indent()

    return ret
def gen_visitor_input_containers_decl(args):
    """Declare the input visitor locals needed when a command takes arguments."""
    ret = ""
    # push/pop_indent bracket mcgen so the declarations land at function scope
    push_indent()
    if len(args) > 0:
        ret += mcgen('''
QmpInputVisitor *mi;
QapiDeallocVisitor *md;
Visitor *v;
''')
    pop_indent()

    return ret.rstrip()
def gen_visitor_input_vars_decl(args):
    """Declare one C local per command argument.

    Optional arguments additionally get a has_FOO flag initialized to false;
    pointer-typed arguments are NULL-initialized.
    """
    ret = ""
    push_indent()
    for argname, argtype, optional, structured in parse_args(args):
        if optional:
            ret += mcgen('''
bool has_%(argname)s = false;
''',
                         argname=c_var(argname))
        if c_type(argtype).endswith("*"):
            ret += mcgen('''
%(argtype)s %(argname)s = NULL;
''',
                         argname=c_var(argname), argtype=c_type(argtype))
        else:
            ret += mcgen('''
%(argtype)s %(argname)s;
''',
                         argname=c_var(argname), argtype=c_type(argtype))

    pop_indent()
    return ret.rstrip()
def gen_visitor_input_block(args, obj, dealloc=False):
    """Generate C code that visits (unmarshals) or frees command arguments.

    With dealloc=False a QMP input visitor is created over `obj` and each
    argument is visited; with dealloc=True a dealloc visitor frees them.
    Optional arguments are wrapped in visit_start/end_optional guards.
    """
    ret = ""
    if len(args) == 0:
        return ret

    push_indent()

    if dealloc:
        ret += mcgen('''
md = qapi_dealloc_visitor_new();
v = qapi_dealloc_get_visitor(md);
''')
    else:
        ret += mcgen('''
mi = qmp_input_visitor_new(%(obj)s);
v = qmp_input_get_visitor(mi);
''',
                     obj=obj)

    for argname, argtype, optional, structured in parse_args(args):
        if optional:
            ret += mcgen('''
visit_start_optional(v, &has_%(c_name)s, "%(name)s", errp);
if (has_%(c_name)s) {
''',
                         c_name=c_var(argname), name=argname)
            # indent the visit call inside the if (has_FOO) block
            push_indent()
        ret += mcgen('''
%(visitor)s(v, &%(c_name)s, "%(name)s", errp);
''',
                     c_name=c_var(argname), name=argname, argtype=argtype,
                     visitor=type_visitor(argtype))
        if optional:
            pop_indent()
            ret += mcgen('''
}
visit_end_optional(v, errp);
''')

    if dealloc:
        ret += mcgen('''
qapi_dealloc_visitor_cleanup(md);
''')
    else:
        ret += mcgen('''
qmp_input_visitor_cleanup(mi);
''')
    pop_indent()
    return ret.rstrip()
def gen_marshal_output(name, args, ret_type, middle_mode):
    """Generate the static C helper that converts a command's return value to
    a QObject, then frees it with the dealloc visitor; '' when no return."""
    if not ret_type:
        return ""

    ret = mcgen('''
static void qmp_marshal_output_%(c_name)s(%(c_ret_type)s ret_in, QObject **ret_out, Error **errp)
{
    QapiDeallocVisitor *md = qapi_dealloc_visitor_new();
    QmpOutputVisitor *mo = qmp_output_visitor_new();
    Visitor *v;

    v = qmp_output_get_visitor(mo);
    %(visitor)s(v, &ret_in, "unused", errp);
    if (!error_is_set(errp)) {
        *ret_out = qmp_output_get_qobject(mo);
    }
    qmp_output_visitor_cleanup(mo);
    v = qapi_dealloc_get_visitor(md);
    %(visitor)s(v, &ret_in, "unused", errp);
    qapi_dealloc_visitor_cleanup(md);
}
''',
                c_ret_type=c_type(ret_type), c_name=c_var(name),
                visitor=type_visitor(ret_type))

    return ret
def gen_marshal_input_decl(name, args, ret_type, middle_mode):
    """Return the C declaration for a command's marshalling entry point.

    Middle mode emits a monitor-callable signature; otherwise a static
    QMP dispatch signature.
    """
    c_name = c_var(name)
    if middle_mode:
        return 'int qmp_marshal_input_%s(Monitor *mon, const QDict *qdict, QObject **ret)' % c_name
    return 'static void qmp_marshal_input_%s(QDict *args, QObject **ret, Error **errp)' % c_name
def gen_marshal_input(name, args, ret_type, middle_mode):
    """Generate the complete C marshalling function body for one command."""
    hdr = gen_marshal_input_decl(name, args, ret_type, middle_mode)

    ret = mcgen('''
%(header)s
{
''',
                header=hdr)

    # middle mode: adapt the monitor entry point to the Error-based interface
    if middle_mode:
        ret += mcgen('''
    Error *local_err = NULL;
    Error **errp = &local_err;
    QDict *args = (QDict *)qdict;
''')

    if ret_type:
        if c_type(ret_type).endswith("*"):
            retval = "    %s retval = NULL;" % c_type(ret_type)
        else:
            retval = "    %s retval;" % c_type(ret_type)
        ret += mcgen('''
%(retval)s
''',
                     retval=retval)

    if len(args) > 0:
        # declare visitor containers and per-argument locals, then unmarshal
        ret += mcgen('''
%(visitor_input_containers_decl)s
%(visitor_input_vars_decl)s

%(visitor_input_block)s

''',
                     visitor_input_containers_decl=gen_visitor_input_containers_decl(args),
                     visitor_input_vars_decl=gen_visitor_input_vars_decl(args),
                     visitor_input_block=gen_visitor_input_block(args, "QOBJECT(args)"))
    else:
        ret += mcgen('''
    (void)args;
''')

    ret += mcgen('''
    if (error_is_set(errp)) {
        goto out;
    }
%(sync_call)s
''',
                 sync_call=gen_sync_call(name, args, ret_type, indent=4))
    ret += mcgen('''

out:
''')
    # free the unmarshalled arguments via the dealloc visitor
    ret += mcgen('''
%(visitor_input_block_cleanup)s
''',
                 visitor_input_block_cleanup=gen_visitor_input_block(args, None,
                                                                     dealloc=True))
    if middle_mode:
        ret += mcgen('''

    if (local_err) {
        qerror_report_err(local_err);
        error_free(local_err);
        return -1;
    }
    return 0;
''')
    else:
        ret += mcgen('''
    return;
''')

    ret += mcgen('''
}
''')

    return ret
def gen_registry(commands):
    """Generate the qmp_init_marshal() registration boilerplate for all commands."""
    registry=""
    push_indent()
    for cmd in commands:
        registry += mcgen('''
qmp_register_command("%(name)s", qmp_marshal_input_%(c_name)s);
''',
                          name=cmd['command'], c_name=c_var(cmd['command']))
    pop_indent()
    ret = mcgen('''
static void qmp_init_marshal(void)
{
%(registry)s
}

qapi_init(qmp_init_marshal);
''',
                registry=registry.rstrip())
    return ret
def gen_command_decl_prologue(header, guard, prefix=""):
    """Generate the file header and include guard for the commands header.

    NOTE(review): the 'header' kwarg passed to mcgen is unused by the template,
    and the guard is recomputed from `header` rather than using the `guard`
    parameter — presumably historical; confirm before relying on `guard`.
    """
    ret = mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */

/*
 * schema-defined QAPI function prototypes
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#ifndef %(guard)s
#define %(guard)s

#include "%(prefix)sqapi-types.h"
#include "error.h"

''',
                header=basename(header), guard=guardname(header), prefix=prefix)
    return ret
def gen_command_def_prologue(prefix="", proxy=False):
    """Return the C-file prologue for the generated dispatch code.

    Emits the license banner plus the includes the marshallers need.
    When `proxy` is False the commands header is included as well, so the
    generated marshallers can call the hand-written qmp_*() entry points.
    """
    ret = mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
 * schema-defined QMP->QAPI command dispatch
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 * Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */
#include "qemu-objects.h"
#include "qapi/qmp-core.h"
#include "qapi/qapi-visit-core.h"
#include "qapi/qmp-output-visitor.h"
#include "qapi/qmp-input-visitor.h"
#include "qapi/qapi-dealloc-visitor.h"
#include "%(prefix)sqapi-types.h"
#include "%(prefix)sqapi-visit.h"
''',
                prefix=prefix)
    if not proxy:
        # Only the non-proxy build links against the real command handlers.
        ret += '#include "%sqmp-commands.h"' % prefix
    return ret + "\n\n"
# ---------------------------------------------------------------------------
# Script entry code: parse command-line options, read the QAPI schema from
# stdin, and emit the marshalling .c / .h files.  (Python 2 syntax.)
# ---------------------------------------------------------------------------
try:
    opts, args = getopt.gnu_getopt(sys.argv[1:], "chp:o:m",
                                   ["source", "header", "prefix=",
                                    "output-dir=", "type=", "middle"])
except getopt.GetoptError, err:
    print str(err)
    sys.exit(1)
# Defaults for the output configuration.
output_dir = ""
prefix = ""
dispatch_type = "sync"
c_file = 'qmp-marshal.c'
h_file = 'qmp-commands.h'
middle_mode = False
do_c = False
do_h = False
for o, a in opts:
    if o in ("-p", "--prefix"):
        prefix = a
    elif o in ("-o", "--output-dir"):
        output_dir = a + "/"
    elif o in ("-t", "--type"):
        # NOTE(review): "-t" is matched here but absent from the short
        # option string "chp:o:m" above, so only "--type" can reach this
        # branch — confirm whether the short option was intended.
        dispatch_type = a
    elif o in ("-m", "--middle"):
        middle_mode = True
    elif o in ("-c", "--source"):
        do_c = True
    elif o in ("-h", "--header"):
        do_h = True
# With neither -c nor -h given, generate both files.
if not do_c and not do_h:
    do_c = True
    do_h = True
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
def maybe_open(really, name, opt):
    # Open the real file when this output was requested; otherwise return an
    # in-memory buffer so the generation code below needs no branching.
    if really:
        return open(name, opt)
    else:
        import StringIO
        return StringIO.StringIO()
try:
    os.makedirs(output_dir)
except os.error, e:
    # Ignore "already exists"; re-raise any other makedirs failure.
    if e.errno != errno.EEXIST:
        raise
exprs = parse_schema(sys.stdin)
# Keep only command definitions whose marshallers are not hand-written
# (expressions carrying 'gen' opt out of generation).
commands = filter(lambda expr: expr.has_key('command'), exprs)
commands = filter(lambda expr: not expr.has_key('gen'), commands)
if dispatch_type == "sync":
    fdecl = maybe_open(do_h, h_file, 'w')
    fdef = maybe_open(do_c, c_file, 'w')
    ret = gen_command_decl_prologue(header=basename(h_file), guard=guardname(h_file), prefix=prefix)
    fdecl.write(ret)
    ret = gen_command_def_prologue(prefix=prefix)
    fdef.write(ret)
    for cmd in commands:
        arglist = []
        ret_type = None
        if cmd.has_key('data'):
            arglist = cmd['data']
        if cmd.has_key('returns'):
            ret_type = cmd['returns']
        # Prototype into the header; marshallers into the source file.
        ret = generate_command_decl(cmd['command'], arglist, ret_type) + "\n"
        fdecl.write(ret)
        if ret_type:
            ret = gen_marshal_output(cmd['command'], arglist, ret_type, middle_mode) + "\n"
            fdef.write(ret)
        if middle_mode:
            fdecl.write('%s;\n' % gen_marshal_input_decl(cmd['command'], arglist, ret_type, middle_mode))
        ret = gen_marshal_input(cmd['command'], arglist, ret_type, middle_mode) + "\n"
        fdef.write(ret)
    # Close the include guard opened by the header prologue.
    fdecl.write("\n#endif\n");
    if not middle_mode:
        ret = gen_registry(commands)
        fdef.write(ret)
    fdef.flush()
    fdef.close()
    fdecl.flush()
    fdecl.close()
|
AVEx-6502/qemu-6502
|
scripts/qapi-commands.py
|
Python
|
gpl-2.0
| 11,725
|
[
"VisIt"
] |
9ef06d25e1e0841402fbba24ab7f2ecfd762bf08f8ed2cad206891b6a50cde71
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Trevor Stephens <trev.stephens@gmail.com>
# Licence: BSD 3 clause
import numpy as np
from . import _criterion
from . import _tree
from ..externals import six
def _color_brew(n):
    """Generate ``n`` visually distinct colors with evenly spaced hues.

    Parameters
    ----------
    n : int
        The number of colors required.

    Returns
    -------
    color_list : list, length n
        List of n [R, G, B] component lists, each component in 0..255.
    """
    # Fixed saturation & value; derive chroma and the value shift from them.
    saturation, value = 0.75, 0.9
    chroma = saturation * value
    shift = value - chroma

    color_list = []
    for hue in np.arange(25, 385, 360. / n).astype(int):
        hue_sector = hue / 60.
        x = chroma * (1 - abs((hue_sector % 2) - 1))
        # Base RGB for each 60-degree hue sector (same hue & chroma).
        sector_rgb = [(chroma, x, 0),
                      (x, chroma, 0),
                      (0, chroma, x),
                      (0, x, chroma),
                      (x, 0, chroma),
                      (chroma, 0, x),
                      (chroma, x, 0)]
        r, g, b = sector_rgb[int(hue_sector)]
        # Apply the value shift and scale to 8-bit components.
        color_list.append([int(255 * (r + shift)),
                           int(255 * (g + shift)),
                           int(255 * (b + shift))])
    return color_list
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
                    feature_names=None, class_names=None, label='all',
                    filled=False, leaves_parallel=False, impurity=True,
                    node_ids=False, proportion=False, rotate=False,
                    rounded=False, special_characters=False):
    """Export a decision tree in DOT format.

    This function generates a GraphViz representation of the decision tree,
    which is then written into `out_file`. Once exported, graphical renderings
    can be generated using, for example::

        $ dot -Tps tree.dot -o tree.ps      (PostScript format)
        $ dot -Tpng tree.dot -o tree.png    (PNG format)

    The sample counts that are shown are weighted with any sample_weights that
    might be present.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    decision_tree : decision tree classifier
        The decision tree to be exported to GraphViz.

    out_file : file object or string, optional (default="tree.dot")
        Handle or name of the output file.

    max_depth : int, optional (default=None)
        The maximum depth of the representation. If None, the tree is fully
        generated.

    feature_names : list of strings, optional (default=None)
        Names of each of the features.

    class_names : list of strings, bool or None, optional (default=None)
        Names of each of the target classes in ascending numerical order.
        Only relevant for classification and not supported for multi-output.
        If ``True``, shows a symbolic representation of the class name.

    label : {'all', 'root', 'none'}, optional (default='all')
        Whether to show informative labels for impurity, etc.
        Options include 'all' to show at every node, 'root' to show only at
        the top root node, or 'none' to not show at any node.

    filled : bool, optional (default=False)
        When set to ``True``, paint nodes to indicate majority class for
        classification, extremity of values for regression, or purity of node
        for multi-output.

    leaves_parallel : bool, optional (default=False)
        When set to ``True``, draw all leaf nodes at the bottom of the tree.

    impurity : bool, optional (default=True)
        When set to ``True``, show the impurity at each node.

    node_ids : bool, optional (default=False)
        When set to ``True``, show the ID number on each node.

    proportion : bool, optional (default=False)
        When set to ``True``, change the display of 'values' and/or 'samples'
        to be proportions and percentages respectively.

    rotate : bool, optional (default=False)
        When set to ``True``, orient tree left to right rather than top-down.

    rounded : bool, optional (default=False)
        When set to ``True``, draw node boxes with rounded corners and use
        Helvetica fonts instead of Times-Roman.

    special_characters : bool, optional (default=False)
        When set to ``False``, ignore special characters for PostScript
        compatibility.

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn import tree
    >>> clf = tree.DecisionTreeClassifier()
    >>> iris = load_iris()
    >>> clf = clf.fit(iris.data, iris.target)
    >>> tree.export_graphviz(clf,
    ...     out_file='tree.dot')                # doctest: +SKIP
    """

    def get_color(value):
        # Find the appropriate color & intensity for a node
        if colors['bounds'] is None:
            # Classification tree: intensity reflects the gap between the
            # two largest class proportions.
            color = list(colors['rgb'][np.argmax(value)])
            sorted_values = sorted(value, reverse=True)
            alpha = int(np.round(255 * (sorted_values[0] - sorted_values[1]) /
                                 (1 - sorted_values[1]), 0))
        else:
            # Regression tree or multi-output: intensity reflects position
            # of `value` within the precomputed bounds.
            color = list(colors['rgb'][0])
            alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
                                        (colors['bounds'][1] -
                                         colors['bounds'][0])), 0))
        # Return html color code in #RRGGBBAA format
        color.append(alpha)
        hex_codes = [str(i) for i in range(10)]
        hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
        color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
        return '#' + ''.join(color)

    def node_to_str(tree, node_id, criterion):
        # Generate the node content string
        if tree.n_outputs == 1:
            value = tree.value[node_id][0, :]
        else:
            value = tree.value[node_id]

        # Should labels be shown?
        labels = (label == 'root' and node_id == 0) or label == 'all'

        # PostScript compatibility for special characters
        if special_characters:
            characters = ['&#35;', '<SUB>', '</SUB>', '&le;', '<br/>', '>']
            node_string = '<'
        else:
            characters = ['#', '[', ']', '<=', '\\n', '"']
            node_string = '"'

        # Write node ID
        if node_ids:
            if labels:
                node_string += 'node '
            node_string += characters[0] + str(node_id) + characters[4]

        # Write decision criteria
        if tree.children_left[node_id] != _tree.TREE_LEAF:
            # Always write node decision criteria, except for leaves
            if feature_names is not None:
                feature = feature_names[tree.feature[node_id]]
            else:
                feature = "X%s%s%s" % (characters[1],
                                       tree.feature[node_id],
                                       characters[2])
            node_string += '%s %s %s%s' % (feature,
                                           characters[3],
                                           round(tree.threshold[node_id], 4),
                                           characters[4])

        # Write impurity
        if impurity:
            if isinstance(criterion, _criterion.FriedmanMSE):
                criterion = "friedman_mse"
            elif not isinstance(criterion, six.string_types):
                criterion = "impurity"
            if labels:
                node_string += '%s = ' % criterion
            node_string += (str(round(tree.impurity[node_id], 4)) +
                            characters[4])

        # Write node sample count
        if labels:
            node_string += 'samples = '
        if proportion:
            percent = (100. * tree.n_node_samples[node_id] /
                       float(tree.n_node_samples[0]))
            node_string += (str(round(percent, 1)) + '%' +
                            characters[4])
        else:
            node_string += (str(tree.n_node_samples[node_id]) +
                            characters[4])

        # Write node class distribution / regression value
        if proportion and tree.n_classes[0] != 1:
            # For classification this will show the proportion of samples
            value = value / tree.weighted_n_node_samples[node_id]
        if labels:
            node_string += 'value = '
        if tree.n_classes[0] == 1:
            # Regression
            value_text = np.around(value, 4)
        elif proportion:
            # Classification
            value_text = np.around(value, 2)
        elif np.all(np.equal(np.mod(value, 1), 0)):
            # Classification without floating-point weights
            value_text = value.astype(int)
        else:
            # Classification with floating-point weights
            value_text = np.around(value, 4)
        # Strip whitespace
        value_text = str(value_text.astype('S32')).replace("b'", "'")
        value_text = value_text.replace("' '", ", ").replace("'", "")
        if tree.n_classes[0] == 1 and tree.n_outputs == 1:
            value_text = value_text.replace("[", "").replace("]", "")
        value_text = value_text.replace("\n ", characters[4])
        node_string += value_text + characters[4]

        # Write node majority class
        if (class_names is not None and
                tree.n_classes[0] != 1 and
                tree.n_outputs == 1):
            # Only done for single-output classification trees
            if labels:
                node_string += 'class = '
            if class_names is not True:
                class_name = class_names[np.argmax(value)]
            else:
                class_name = "y%s%s%s" % (characters[1],
                                          np.argmax(value),
                                          characters[2])
            node_string += class_name

        # Clean up any trailing newlines
        if node_string[-2:] == '\\n':
            node_string = node_string[:-2]
        if node_string[-5:] == '<br/>':
            node_string = node_string[:-5]

        return node_string + characters[5]

    def recurse(tree, node_id, criterion, parent=None, depth=0):
        if node_id == _tree.TREE_LEAF:
            raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)

        left_child = tree.children_left[node_id]
        right_child = tree.children_right[node_id]

        # Add node with description
        if max_depth is None or depth <= max_depth:

            # Collect ranks for 'leaf' option in plot_options
            if left_child == _tree.TREE_LEAF:
                ranks['leaves'].append(str(node_id))
            elif str(depth) not in ranks:
                ranks[str(depth)] = [str(node_id)]
            else:
                ranks[str(depth)].append(str(node_id))

            out_file.write('%d [label=%s'
                           % (node_id,
                              node_to_str(tree, node_id, criterion)))

            if filled:
                # Fetch appropriate color for node
                if 'rgb' not in colors:
                    # Initialize colors and bounds if required
                    colors['rgb'] = _color_brew(tree.n_classes[0])
                    if tree.n_outputs != 1:
                        # Find max and min impurities for multi-output
                        colors['bounds'] = (np.min(-tree.impurity),
                                            np.max(-tree.impurity))
                    elif tree.n_classes[0] == 1:
                        # Find max and min values in leaf nodes for regression
                        colors['bounds'] = (np.min(tree.value),
                                            np.max(tree.value))
                if tree.n_outputs == 1:
                    node_val = (tree.value[node_id][0, :] /
                                tree.weighted_n_node_samples[node_id])
                    if tree.n_classes[0] == 1:
                        # Regression
                        node_val = tree.value[node_id][0, :]
                else:
                    # If multi-output color node by impurity
                    node_val = -tree.impurity[node_id]
                out_file.write(', fillcolor="%s"' % get_color(node_val))
            out_file.write('] ;\n')

            if parent is not None:
                # Add edge to parent
                out_file.write('%d -> %d' % (parent, node_id))
                if parent == 0:
                    # Draw True/False labels if parent is root node
                    angles = np.array([45, -45]) * ((rotate - .5) * -2)
                    out_file.write(' [labeldistance=2.5, labelangle=')
                    if node_id == 1:
                        out_file.write('%d, headlabel="True"]' % angles[0])
                    else:
                        out_file.write('%d, headlabel="False"]' % angles[1])
                out_file.write(' ;\n')

            if left_child != _tree.TREE_LEAF:
                recurse(tree, left_child, criterion=criterion, parent=node_id,
                        depth=depth + 1)
                recurse(tree, right_child, criterion=criterion, parent=node_id,
                        depth=depth + 1)

        else:
            ranks['leaves'].append(str(node_id))

            out_file.write('%d [label="(...)"' % node_id)
            if filled:
                # color cropped nodes grey
                out_file.write(', fillcolor="#C0C0C0"')
            # BUG FIX: this literal was previously written as
            # out_file.write('] ;\n' % node_id) — a %-format with no
            # conversion specifier, which raises "TypeError: not all
            # arguments converted during string formatting" whenever the
            # tree is truncated by max_depth.
            out_file.write('] ;\n')

            if parent is not None:
                # Add edge to parent
                out_file.write('%d -> %d ;\n' % (parent, node_id))

    own_file = False
    try:
        if isinstance(out_file, six.string_types):
            if six.PY3:
                out_file = open(out_file, "w", encoding="utf-8")
            else:
                out_file = open(out_file, "wb")
            own_file = True

        # The depth of each node for plotting with 'leaf' option
        ranks = {'leaves': []}
        # The colors to render each node with
        colors = {'bounds': None}

        out_file.write('digraph Tree {\n')

        # Specify node aesthetics
        out_file.write('node [shape=box')
        rounded_filled = []
        if filled:
            rounded_filled.append('filled')
        if rounded:
            rounded_filled.append('rounded')
        if len(rounded_filled) > 0:
            out_file.write(', style="%s", color="black"'
                           % ", ".join(rounded_filled))
        if rounded:
            out_file.write(', fontname=helvetica')
        out_file.write('] ;\n')

        # Specify graph & edge aesthetics
        if leaves_parallel:
            out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
        if rounded:
            out_file.write('edge [fontname=helvetica] ;\n')
        if rotate:
            out_file.write('rankdir=LR ;\n')

        # Now recurse the tree and add node & edge attributes
        if isinstance(decision_tree, _tree.Tree):
            recurse(decision_tree, 0, criterion="impurity")
        else:
            recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)

        # If required, draw leaf nodes at same depth as each other
        if leaves_parallel:
            for rank in sorted(ranks):
                out_file.write("{rank=same ; " +
                               "; ".join(r for r in ranks[rank]) + "} ;\n")
        out_file.write("}")

    finally:
        if own_file:
            out_file.close()
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/sklearn/tree/export.py
|
Python
|
mit
| 15,893
|
[
"Brian"
] |
04e67b23ec938c124123f8f014d45560b5e45b74a75dc4e49d69b597cb9a5359
|
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Utilities."""
import io
from collections.abc import Iterable, Mapping
from copy import deepcopy
def _to_camel_case(string):
    """Convert a snake_case string to CamelCase (strict).

    Note:
        This currently capitalizes the first word as well, which is not
        correct lower camelCase.
    """
    spaced = string.replace('_', ' ')
    return spaced.title().replace(' ', '')
def _is_iterable(obj):
    """Return True if ``obj`` is iterable but not a str, dict, or file."""
    # Inline the "bad type" check: strings/dicts/files are iterable but are
    # deliberately not treated as generic iterables here.
    if isinstance(obj, (str, dict, io.IOBase)):
        return False
    return isinstance(obj, Iterable)
def _bad_iterable_type(obj):
    """Return True for str, dict, or IO (file) objects."""
    excluded_types = (str, dict, io.IOBase)
    return isinstance(obj, excluded_types)
def dict_map(dict_, func):
    r"""Recursively apply ``func`` to every leaf value of a nested mapping.

    Args:
        dict\_ (dict): The nested mapping to map over.
        func (callable): One-argument callable applied to each non-mapping
            value.

    Returns:
        dict : A `dict` with the same (nested) keys as ``dict_`` and values
        transformed by ``func``.

    Note:
        This can be useful for handling dictionaries returned by
        `hoomd.logging.Logger.log`.
    """
    return {
        key: dict_map(value, func) if isinstance(value, Mapping) else func(value)
        for key, value in dict_.items()
    }
def dict_fold(dict_, func, init_value, use_keys=False):
    r"""Perform a recursive fold on a nested mapping's values or keys.

    A fold for an unnested mapping looks as follows.

    .. code-block:: python

        mapping = {'a': 0, 'b': 1, 'c': 2}
        accumulated_value = 0
        func = lambda x, y: x + y
        for value in mapping.values():
            accumulated_value = func(accumulated_value, value)

    Args:
        dict\_ (dict): The nested mapping to perform the fold on.
        func (callable): A two-argument callable used to fold over
            dictionary values, or keys if ``use_keys`` is set.
        init_value: An initial value to use for the fold.
        use_keys (bool, optional): If true use keys instead of values for the
            fold. Defaults to ``False``.

    Returns:
        The final value of the fold.
    """
    final_value = init_value
    for key, value in dict_.items():
        if isinstance(value, dict):
            # BUG FIX: propagate use_keys into nested mappings — previously
            # the recursive call omitted it, so nested levels always folded
            # values even when the caller asked for keys.
            final_value = dict_fold(value, func, final_value, use_keys)
        else:
            if use_keys:
                final_value = func(key, final_value)
            else:
                final_value = func(value, final_value)
    return final_value
def dict_flatten(dict_):
    r"""Flatten a nested mapping into a flat mapping keyed by tuples.

    Args:
        dict\_ (dict): The nested mapping to flatten.

    Returns:
        dict: The flattened mapping as a `dict`, with each key a tuple of
        the nested key path.

    Note:
        This can be useful for handling dictionaries returned by
        `hoomd.logging.Logger.log`.
    """
    # Iterative traversal (the recursive helper is inlined here): walk a
    # stack of (key-path prefix, sub-mapping) pairs, recording leaves.
    flat = {}
    pending = [((), dict_)]
    while pending:
        prefix, node = pending.pop()
        for key, value in node.items():
            path = prefix + (key,)
            if isinstance(value, dict):
                pending.append((path, value))
            else:
                flat[path] = value
    return flat
def _dict_flatten(value, key):
    # Recursive worker for dict_flatten. ``key`` is the tuple path walked so
    # far; it is None only for the initial, top-level call.
    if key is not None and not isinstance(value, dict):
        # Leaf reached: a single entry mapping the full path to the value.
        return {key: value}
    prefix = () if key is None else key
    flat = dict()
    for name, inner in value.items():
        flat.update(_dict_flatten(inner, prefix + (name,)))
    return flat
def dict_filter(dict_, filter_):
    r"""Perform a recursive filter on a nested mapping.

    Args:
        dict\_ (dict): The nested mapping to perform the filter on.
        filter\_ (callable): A one-argument predicate applied to each leaf
            value; entries for which it returns ``False`` are removed.

    Returns:
        dict : A `dict` with the same nesting as ``dict_``, minus filtered
        leaves and any sub-mappings left empty by the filtering.

    Note:
        This can be useful for handling dictionaries returned by
        `hoomd.logging.Logger.log`.
    """
    filtered = dict()
    for key, value in dict_.items():
        if isinstance(value, Mapping):
            nested = dict_filter(value, filter_)
            # Drop sub-mappings that filtered down to nothing.
            if nested:
                filtered[key] = nested
        elif filter_(value):
            filtered[key] = value
    return filtered
class _NamespaceDict:
    """A dict of nested dicts addressable by tuple (or string) namespaces."""

    def __init__(self, dict_=None):
        self._dict = dict() if dict_ is None else dict_

    def __len__(self):
        # Count leaf values across the whole nested structure.
        return dict_fold(self._dict, lambda _, count: count + 1, 0)

    def keys(self):
        raise NotImplementedError

    def _pop_namespace(self, namespace):
        # Split a namespace into (leaf name, parent namespace).
        return (namespace[-1], namespace[:-1])

    def _setitem(self, namespace, value):
        # Walk down to the parent mapping, creating sub-dicts as needed.
        leaf, parents = self._pop_namespace(namespace)
        node = self._dict
        for part in parents:
            if part not in node:
                node[part] = dict()
            node = node[part]
        node[leaf] = value

    def __setitem__(self, namespace, value):
        try:
            namespace = self.validate_namespace(namespace)
        except ValueError:
            raise KeyError("Expected a tuple or string key.")
        self._setitem(namespace, value)

    def __getitem__(self, namespace):
        return self._unsafe_getitem(namespace)

    def _unsafe_getitem(self, namespace):
        # Walk the namespace; any lookup failure is reported as KeyError.
        if isinstance(namespace, str):
            namespace = (namespace,)
        node = self._dict
        try:
            for part in namespace:
                node = node[part]
        except (TypeError, KeyError):
            raise KeyError("Namespace {} not in dictionary.".format(namespace))
        return node

    def __delitem__(self, namespace):
        """Does not check that key exists."""
        if isinstance(namespace, str):
            namespace = (namespace,)
        parent = self._unsafe_getitem(namespace[:-1])
        del parent[namespace[-1]]

    def __contains__(self, namespace):
        try:
            namespace = self.validate_namespace(namespace)
        except ValueError:
            return False
        node = self._dict
        # Traverse the dictionary hierarchy level by level.
        for part in namespace:
            try:
                if part not in node:
                    return False
                node = node[part]
            except (TypeError, AttributeError):
                return False
        return True

    def validate_namespace(self, namespace):
        """Return the namespace as a tuple; raise ValueError if invalid."""
        if isinstance(namespace, str):
            return (namespace,)
        if not isinstance(namespace, tuple):
            raise ValueError("Expected a string or tuple namespace.")
        return namespace
class _SafeNamespaceDict(_NamespaceDict):
    """A _NamespaceDict that refuses to overwrite existing namespaces."""

    def __setitem__(self, namespace, value):
        # Guard clause: existing entries must be deleted before replacement.
        if namespace in self:
            raise KeyError("Namespace {} is being used. Remove before "
                           "replacing.".format(namespace))
        super().__setitem__(namespace, value)

    def __getitem__(self, namespace):
        # Hand out copies so callers cannot mutate the stored structure.
        return deepcopy(super().__getitem__(namespace))
class GPUNotAvailableError(NotImplementedError):
    """Error for when a GPU specific feature was requested without a GPU."""
class _NoGPU:
    """Placeholder used in non-GPU HOOMD builds; any instantiation raises."""

    def __init__(self, *args, **kwargs):
        raise GPUNotAvailableError(
            "This build of HOOMD-blue does not support GPUs.")
|
joaander/hoomd-blue
|
hoomd/util.py
|
Python
|
bsd-3-clause
| 8,115
|
[
"HOOMD-blue"
] |
406371437ff2b7a8bdf446b204e8c801629d34f5540f8baabc4f5e361bdc26bd
|
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mr Smith in Google CP Solver.
From an IF Prolog example (http://www.ifcomputer.de/)
'''
The Smith family and their three children want to pay a visit but they
do not all have the time to do so. Following are few hints who will go
and who will not:
o If Mr Smith comes, his wife will come too.
o At least one of their two sons Matt and John will come.
o Either Mrs Smith or Tim will come, but not both.
o Either Tim and John will come, or neither will come.
o If Matt comes, then John and his father will
also come.
'''
The answer should be:
Mr_Smith_comes = 0
Mrs_Smith_comes = 0
Matt_comes = 0
John_comes = 1
Tim_comes = 1
Compare with the following models:
* ECLiPSe: http://www.hakank.org/eclipse/mr_smith.ecl
* SICStus Prolog: http://www.hakank.org/sicstus/mr_smith.pl
* Gecode: http://www.hakank.org/gecode/mr_smith.cpp
* MiniZinc: http://www.hakank.org/minizinc/mr_smith.mzn
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
def main():
  """Model and solve the 'Mr Smith' logic puzzle with the or-tools CP solver.

  Five 0/1 variables encode whether each family member comes; the puzzle's
  hints become linear constraints and every satisfying assignment is printed.
  (This script uses Python 2 print-statement syntax throughout.)
  """
  # Create the solver.
  solver = pywrapcp.Solver('Mr Smith problem')
  #
  # data
  #
  n = 5
  #
  # declare variables
  #
  # x[i] == 1 means person i comes; unpacked into named aliases below.
  x = [solver.IntVar(0, 1, 'x[%i]' % i) for i in range(n)]
  Mr_Smith, Mrs_Smith, Matt, John, Tim = x
  #
  # constraints
  #
  #
  # I've kept the MiniZinc constraints for clarity
  # and debugging.
  #
  # If Mr Smith comes then his wife will come too.
  # (Mr_Smith -> Mrs_Smith)
  solver.Add(Mr_Smith - Mrs_Smith <= 0)
  # At least one of their two sons Matt and John will come.
  # (Matt \/ John)
  solver.Add(Matt + John >= 1)
  # Either Mrs Smith or Tim will come but not both.
  # bool2int(Mrs_Smith) + bool2int(Tim) = 1 /\
  # (Mrs_Smith xor Tim)
  solver.Add(Mrs_Smith + Tim == 1)
  # Either Tim and John will come or neither will come.
  # (Tim = John)
  solver.Add(Tim == John)
  # If Matt comes /\ then John and his father will also come.
  # (Matt -> (John /\ Mr_Smith))
  solver.Add(Matt - (John * Mr_Smith) <= 0)
  #
  # solution and search
  #
  # Default variable/value ordering is sufficient for this tiny model.
  db = solver.Phase(x,
                    solver.INT_VAR_DEFAULT,
                    solver.INT_VALUE_DEFAULT)
  solver.NewSearch(db)
  # Enumerate and print every satisfying assignment.
  num_solutions = 0
  while solver.NextSolution():
    num_solutions += 1
    print 'x:', [x[i].Value() for i in range(n)]
    print
  print 'num_solutions:', num_solutions
  print 'failures:', solver.Failures()
  print 'branches:', solver.Branches()
  print 'WallTime:', solver.WallTime(), 'ms'
if __name__ == '__main__':
  main()
|
petesburgh/or-tools
|
examples/python/mr_smith.py
|
Python
|
apache-2.0
| 3,319
|
[
"VisIt"
] |
0fae9d07b268003cc73813a6712a7d10141a4bb1e4a28b4fb7b4371c614ef7c8
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Deep learning via word2vec's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_ [2]_.
The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality.
For a blog tutorial on gensim word2vec, with an interactive web app trained on GoogleNews, visit http://radimrehurek.com/2014/02/word2vec-tutorial/
**Make sure you have a C compiler before installing gensim, to use optimized (compiled) word2vec training**
(70x speedup compared to plain NumPy implementation [3]_).
Initialize a model with e.g.::
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = Word2Vec.load(fname) # you can continue training with the loaded model!
The model can also be instantiated from an existing file on disk in the word2vec C format::
>>> model = Word2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> model = Word2Vec.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
You can perform various syntactic/semantic NLP word tasks with the model. Some of them
are already built-in::
>>> model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
>>> model.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
>>> model.similarity('woman', 'man')
0.73723527
>>> model['computer'] # raw numpy vector of a word
array([-0.00449447, -0.00310097, 0.02421786, ...], dtype=float32)
and so on.
If you're finished training a model (=no more updates, only querying), you can do
>>> model.init_sims(replace=True)
to trim unneeded model memory = use (much) less RAM.
Note that there is a :mod:`gensim.models.phrases` module which lets you automatically
detect phrases longer than one word. Using phrases, you can learn a word2vec model
where "words" are actually multiword expressions, such as `new_york_times` or `financial_crisis`:
>>> bigram_transformer = gensim.models.Phrases(sentences)
>>> model = Word2Vec(bigram_transformer[sentences], size=100, ...)
.. [1] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations of Words and Phrases and their Compositionality.
In Proceedings of NIPS, 2013.
.. [3] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
from __future__ import division # py3 "true division"
import logging
import sys
import os
import heapq
from timeit import default_timer
from copy import deepcopy
from collections import defaultdict
import threading
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\
uint32, seterr, array, uint8, vstack, fromstring, sqrt, newaxis,\
ndarray, empty, sum as np_sum, prod, ones, repeat as np_repeat
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from six import iteritems, itervalues, string_types
from six.moves import xrange
from types import GeneratorType
logger = logging.getLogger("gensim.models.word2vec")
try:
    from gensim.models.word2vec_inner import train_sentence_sg, train_sentence_cbow, FAST_VERSION
except ImportError:
    # failed... fall back to plain numpy (20-80x slower training than the above)
    FAST_VERSION = -1

    def train_sentence_sg(model, sentence, alpha, work=None):
        """
        Update skip-gram model by training on a single sentence.

        The sentence is a list of string tokens, which are looked up in the model's
        vocab dictionary. Called internally from `Word2Vec.train()`.

        This is the non-optimized, Python version. If you have cython installed, gensim
        will use the optimized version from word2vec_inner instead.

        Returns the number of (subsampled) in-vocabulary words trained on.
        """
        # Keep only in-vocabulary words that survive the frequency subsampling cut.
        word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab and
                       model.vocab[w].sample_int > model.random.randint(2**32)]
        # BUG FIX: enumerate the *filtered* word_vocabs. The previous code
        # enumerated the raw `sentence`, which desynchronized `pos` from the
        # word_vocabs slice below and passed raw string positions as targets.
        for pos, word in enumerate(word_vocabs):
            reduced_window = model.random.randint(model.window)  # `b` in the original word2vec code
            # now go over all words from the (reduced) window, predicting each one in turn
            start = max(0, pos - model.window + reduced_window)
            for pos2, word2 in enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start):
                # don't train on the `word` itself
                # BUG FIX: the previous test was `pos2 == pos`, which trained a
                # word only against itself and skipped every real context pair.
                if pos2 != pos:
                    train_sg_pair(model, model.index2word[word.index], word2.index, alpha)
        return len(word_vocabs)

    def train_sentence_cbow(model, sentence, alpha, work=None, neu1=None):
        """
        Update CBOW model by training on a single sentence.

        The sentence is a list of string tokens, which are looked up in the model's
        vocab dictionary. Called internally from `Word2Vec.train()`.

        This is the non-optimized, Python version. If you have cython installed, gensim
        will use the optimized version from word2vec_inner instead.

        Returns the number of (subsampled) in-vocabulary words trained on.
        """
        word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab and
                       model.vocab[w].sample_int > model.random.randint(2**32)]
        for pos, word in enumerate(word_vocabs):
            reduced_window = model.random.randint(model.window)  # `b` in the original word2vec code
            start = max(0, pos - model.window + reduced_window)
            window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
            word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
            l1 = np_sum(model.syn0[word2_indices], axis=0)  # 1 x vector_size
            if word2_indices and model.cbow_mean:
                # Mean (rather than sum) of the context vectors.
                l1 /= len(word2_indices)
            train_cbow_pair(model, word, word2_indices, l1, alpha)
        return len(word_vocabs)
def train_sg_pair(model, word, context_index, alpha, learn_vectors=True, learn_hidden=True,
                  context_vectors=None, context_locks=None):
    """Train a single skip-gram (context -> target) pair.

    `word` is the target word string (looked up in model.vocab), and
    `context_index` indexes the input/projection vector.  Updates
    model.syn0 / syn1 / syn1neg in place (subject to the learn_* flags)
    and returns the accumulated input-layer error `neu1e` (or None if
    `word` is out of vocabulary).
    """
    if context_vectors is None:
        context_vectors = model.syn0
    if context_locks is None:
        context_locks = model.syn0_lockf
    # Silently skip out-of-vocabulary targets.
    if word not in model.vocab:
        return
    predict_word = model.vocab[word]  # target word (NN output)
    l1 = context_vectors[context_index]  # input word (NN input/projection layer)
    lock_factor = context_locks[context_index]
    neu1e = zeros(l1.shape)
    if model.hs:
        # work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
        l2a = deepcopy(model.syn1[predict_word.point])  # 2d matrix, codelen x layer1_size
        fa = 1.0 / (1.0 + exp(-dot(l1, l2a.T)))  # propagate hidden -> output
        ga = (1 - predict_word.code - fa) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1[predict_word.point] += outer(ga, l1)  # learn hidden -> output
        neu1e += dot(ga, l2a)  # save error
    if model.negative:
        # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
        word_indices = [predict_word.index]
        while len(word_indices) < model.negative + 1:
            w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
            if w != predict_word.index:
                word_indices.append(w)
        l2b = model.syn1neg[word_indices]  # 2d matrix, k+1 x layer1_size
        fb = 1. / (1. + exp(-dot(l1, l2b.T)))  # propagate hidden -> output
        gb = (model.neg_labels - fb) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1neg[word_indices] += outer(gb, l1)  # learn hidden -> output
        neu1e += dot(gb, l2b)  # save error
    if learn_vectors:
        l1 += neu1e * lock_factor  # learn input -> hidden (mutates model.syn0[word2.index], if that is l1)
    return neu1e
def train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True):
    """Train a single CBOW step: predict `word` (a Vocab object) from the
    pre-combined context projection `l1` (sum or mean of the context vectors
    at `input_word_indices`). Returns the input-layer error `neu1e`; when
    `learn_vectors` is True the context rows of model.syn0 are updated in place.
    """
    neu1e = zeros(l1.shape)  # accumulated error to distribute back to the context vectors
    if model.hs:
        l2a = model.syn1[word.point]  # 2d matrix, codelen x layer1_size
        fa = 1. / (1. + exp(-dot(l1, l2a.T)))  # propagate hidden -> output
        ga = (1. - word.code - fa) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1[word.point] += outer(ga, l1)  # learn hidden -> output
        neu1e += dot(ga, l2a)  # save error
    if model.negative:
        # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
        word_indices = [word.index]
        while len(word_indices) < model.negative + 1:
            w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
            if w != word.index:
                word_indices.append(w)
        l2b = model.syn1neg[word_indices]  # 2d matrix, k+1 x layer1_size
        fb = 1. / (1. + exp(-dot(l1, l2b.T)))  # propagate hidden -> output
        gb = (model.neg_labels - fb) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1neg[word_indices] += outer(gb, l1)  # learn hidden -> output
        neu1e += dot(gb, l2b)  # save error
    if learn_vectors:
        # learn input -> hidden, here for all words in the window separately
        # NOTE(review): fancy-indexed `+=` applies the update only once per
        # distinct index, so a word appearing twice in the window receives a
        # single (not doubled) update -- confirm this is intended (np.add.at
        # would accumulate duplicates).
        l = len(input_word_indices)
        model.syn0[input_word_indices] += np_repeat(neu1e, l).reshape(l, model.vector_size) * \
            model.syn0_lockf[input_word_indices][:, None]
    return neu1e
# could move this import up to where train_* is imported,
# but for now just do it separately in case there are unforeseen bugs in score_
try:
    from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow
except ImportError:
    def score_sentence_sg(model, sentence, work=None):
        """
        Obtain likelihood score for a single sentence in a fitted skip-gram representation.

        The sentence is a list of Vocab objects (or None, when the corresponding
        word is not in the vocabulary). Called internally from `Word2Vec.score()`.

        This is the non-optimized, Python version. If you have cython installed, gensim
        will use the optimized version from word2vec_inner instead.
        """
        log_prob_sentence = 0.0
        if model.negative:
            raise RuntimeError("scoring is only available for HS=True")
        for pos, word in enumerate(sentence):
            if word is None:
                continue  # OOV word in the input sentence => skip
            # now go over all words from the window, predicting each one in turn
            start = max(0, pos - model.window)
            for pos2, word2 in enumerate(sentence[start:(pos + model.window + 1)], start):
                # don't train on OOV words and on the `word` itself
                if word2 and not (pos2 == pos):
                    log_prob_sentence += score_sg_pair(model, word, word2)
        return log_prob_sentence

    def score_sentence_cbow(model, sentence, alpha, work=None, neu1=None):
        """
        Obtain likelihood score for a single sentence in a fitted CBOW representation.

        The sentence is a list of Vocab objects (or None, where the corresponding
        word is not in the vocabulary). Called internally from `Word2Vec.score()`.

        This is the non-optimized, Python version. If you have cython installed, gensim
        will use the optimized version from word2vec_inner instead.
        """
        # NOTE(review): `alpha` is never read here, and Word2Vec._score_job_words
        # calls this as (model, sentence, work, neu1), so `work` actually lands
        # in this `alpha` slot. Harmless today because neither parameter is used,
        # but the signature should probably match the cython version -- confirm.
        log_prob_sentence = 0.0
        if model.negative:
            raise RuntimeError("scoring is only available for HS=True")
        for pos, word in enumerate(sentence):
            if word is None:
                continue  # OOV word in the input sentence => skip
            start = max(0, pos - model.window)
            window_pos = enumerate(sentence[start:(pos + model.window + 1)], start)
            word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
            l1 = np_sum(model.syn0[word2_indices], axis=0)  # 1 x layer1_size
            if word2_indices and model.cbow_mean:
                l1 /= len(word2_indices)
            log_prob_sentence += score_cbow_pair(model, word, word2_indices, l1)
        return log_prob_sentence
def score_sg_pair(model, word, word2):
    """Return the hierarchical-softmax log-probability of predicting `word`
    from context word `word2`.

    `word.point` indexes the inner-tree rows of model.syn1 on the path to
    `word`; `word.code` holds the corresponding branch bits (0/1).
    """
    l1 = model.syn0[word2.index]
    # advanced indexing already yields a copy, so the old deepcopy was redundant
    l2a = model.syn1[word.point]  # 2d matrix, codelen x layer1_size
    # ch function: code bit 0 -> +1, code bit 1 -> -1.  The parentheses matter:
    # the previous `-1.0**word.code` parsed as -(1.0**code) == -1.0 for every bit.
    sgn = (-1.0)**word.code
    lprob = -log(1.0 + exp(-sgn * dot(l1, l2a.T)))
    return sum(lprob)
def score_cbow_pair(model, word, word2_indices, l1):
    """Return the hierarchical-softmax log-probability of predicting `word`
    from the combined context projection `l1`.

    `word2_indices` is unused here (kept for signature compatibility with the
    cython scorer). `word.point`/`word.code` describe the Huffman path.
    """
    l2a = model.syn1[word.point]  # 2d matrix, codelen x layer1_size
    # ch function: code bit 0 -> +1, code bit 1 -> -1.  The parentheses matter:
    # the previous `-1.0**word.code` parsed as -(1.0**code) == -1.0 for every bit.
    sgn = (-1.0)**word.code
    lprob = -log(1.0 + exp(-sgn * dot(l1, l2a.T)))
    return sum(lprob)
class Vocab(object):
    """A single vocabulary entry: per-word frequency/sampling statistics plus
    Huffman-tree bookkeeping (used both for word leaves and inner tree nodes).
    """

    def __init__(self, **kwargs):
        self.count = 0
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __lt__(self, other):
        # heapq orders entries by frequency when building the Huffman tree
        return self.count < other.count

    def __str__(self):
        public = ('%s:%r' % (name, value)
                  for name, value in sorted(self.__dict__.items())
                  if not name.startswith('_'))
        return "%s(%s)" % (self.__class__.__name__, ', '.join(public))
class Word2Vec(utils.SaveLoad):
"""
Class for training, using and evaluating neural networks described in https://code.google.com/p/word2vec/
The model can be stored/loaded via its `save()` and `load()` methods, or stored/loaded in a format
compatible with the original word2vec implementation via `save_word2vec_format()` and `load_word2vec_format()`.
"""
    def __init__(
            self, sentences=None, size=100, alpha=0.025, window=5, min_count=5,
            max_vocab_size=None, sample=0, seed=1, workers=1, min_alpha=0.0001,
            sg=1, hs=1, negative=0, cbow_mean=0, hashfxn=hash, iter=1, null_word=0):
        """
        Initialize the model from an iterable of `sentences`. Each sentence is a
        list of words (unicode strings) that will be used for training.

        The `sentences` iterable can be simply a list, but for larger corpora,
        consider an iterable that streams the sentences directly from disk/network.
        See :class:`BrownCorpus`, :class:`Text8Corpus` or :class:`LineSentence` in
        this module for such examples.

        If you don't supply `sentences`, the model is left uninitialized -- use if
        you plan to initialize it in some other way.

        `sg` defines the training algorithm. By default (`sg=1`), skip-gram is used.
        Otherwise, `cbow` is employed.

        `size` is the dimensionality of the feature vectors.

        `window` is the maximum distance between the current and predicted word within a sentence.

        `alpha` is the initial learning rate (will linearly drop to `min_alpha` as training progresses).

        `seed` = for the random number generator. Initial vectors for each
        word are seeded with a hash of the concatenation of word + str(seed).

        `min_count` = ignore all words with total frequency lower than this.

        `max_vocab_size` = limit RAM during vocabulary building; if there are more unique
        words than this, then prune the infrequent ones. Every 10 million word types
        need about 1GB of RAM. Set to `None` for no limit (default).

        `sample` = threshold for configuring which higher-frequency words are randomly downsampled;
        default is 0 (off), useful value is 1e-5.

        `workers` = use this many worker threads to train the model (=faster training with multicore machines).

        `hs` = if 1 (default), hierarchical softmax will be used for model training (else set to 0).

        `negative` = if > 0, negative sampling will be used, the int for negative
        specifies how many "noise words" should be drawn (usually between 5-20).

        `cbow_mean` = if 0 (default), use the sum of the context word vectors. If 1, use the mean.
        Only applies when cbow is used.

        `hashfxn` = hash function to use to randomly initialize weights, for increased
        training reproducibility. Default is Python's rudimentary built in hash function.

        `iter` = number of iterations (epochs) over the corpus.

        `null_word` = if nonzero, add a padding pseudo-word to the vocabulary
        (see `finalize_vocab()`); it is only ever used as input, never predicted.
        """
        self.vocab = {}  # mapping from a word (string) to a Vocab object
        self.index2word = []  # map from a word's matrix index (int) to word (string)
        self.sg = int(sg)
        self.cum_table = None  # for negative sampling
        self.vector_size = int(size)
        self.layer1_size = int(size)
        if size % 4 != 0:
            logger.warning("consider setting layer size to a multiple of 4 for greater performance")
        self.alpha = float(alpha)
        self.window = int(window)
        self.max_vocab_size = max_vocab_size
        self.seed = seed
        self.random = random.RandomState(seed)
        self.min_count = min_count
        self.sample = sample
        self.workers = workers
        self.min_alpha = min_alpha
        self.hs = hs
        self.negative = negative
        self.cbow_mean = int(cbow_mean)
        self.hashfxn = hashfxn
        self.iter = iter
        self.null_word = null_word
        self.train_count = 0  # number of completed train() calls
        self.total_train_time = 0  # cumulative wall-clock seconds spent in train()
        if sentences is not None:
            if isinstance(sentences, GeneratorType):
                raise TypeError("You can't pass a generator as the sentences argument. Try an iterator.")
            self.build_vocab(sentences)
            self.train(sentences)
def make_cum_table(self, power=0.75, domain=2**31 - 1):
"""
Create a cumulative-distribution table using stored vocabulary word counts for
drawing random words in the negative-sampling training routines.
To draw a word index, choose a random integer up to the maximum value in the
table (cum_table[-1]), then finding that integer's sorted insertion point
(as if by bisect_left or ndarray.searchsorted()). That insertion point is the
drawn index, coming up in proportion equal to the increment at that slot.
Called internally from 'build_vocab()'.
"""
vocab_size = len(self.index2word)
self.cum_table = zeros(vocab_size, dtype=uint32)
# compute sum of all power (Z in paper)
train_words_pow = float(sum([self.vocab[word].count**power for word in self.vocab]))
cumulative = 0.0
for word_index in range(vocab_size):
cumulative += self.vocab[self.index2word[word_index]].count**power / train_words_pow
self.cum_table[word_index] = round(cumulative * domain)
if len(self.cum_table) > 0:
assert self.cum_table[-1] == domain
    def create_binary_tree(self):
        """
        Create a binary Huffman tree using stored vocabulary word counts. Frequent words
        will have shorter binary codes. Called internally from `build_vocab()`.
        """
        logger.info("constructing a huffman tree from %i words" % len(self.vocab))

        # build the huffman tree: repeatedly merge the two least-frequent nodes;
        # inner (merged) nodes get indices >= len(vocab) to distinguish them from leaves
        heap = list(itervalues(self.vocab))
        heapq.heapify(heap)
        for i in xrange(len(self.vocab) - 1):
            min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
            heapq.heappush(heap, Vocab(count=min1.count + min2.count, index=i + len(self.vocab), left=min1, right=min2))

        # recurse over the tree, assigning a binary code to each vocabulary word
        if heap:
            # iterative DFS from the root; `codes` = branch bits so far (uint8),
            # `points` = inner-node ids so far (uint32, offset by len(vocab))
            max_depth, stack = 0, [(heap[0], [], [])]
            while stack:
                node, codes, points = stack.pop()
                if node.index < len(self.vocab):
                    # leaf node => store its path from the root
                    node.code, node.point = codes, points
                    max_depth = max(len(codes), max_depth)
                else:
                    # inner node => continue recursion
                    points = array(list(points) + [node.index - len(self.vocab)], dtype=uint32)
                    stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))
                    stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))

            logger.info("built huffman tree with maximum node depth %i" % max_depth)
def build_vocab(self, sentences, keep_raw_vocab=False):
"""
Build vocabulary from a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
"""
self.scan_vocab(sentences) # initial survey
self.scale_vocab(keep_raw_vocab) # trim by min_count & precalculate downsampling
self.finalize_vocab() # build tables & arrays
def scan_vocab(self, sentences, progress_per=10000):
"""Do an initial scan of all words appearing in sentences."""
logger.info("collecting all words and their counts")
sentence_no = -1
total_words = 0
min_reduce = 1
vocab = defaultdict(int)
for sentence_no, sentence in enumerate(sentences):
if sentence_no % progress_per == 0:
logger.info("PROGRESS: at sentence #%i, processed %i words, keeping %i word types",
sentence_no, sum(itervalues(vocab)) + total_words, len(vocab))
for word in sentence:
vocab[word] += 1
if self.max_vocab_size and len(vocab) > self.max_vocab_size:
total_words += utils.prune_vocab(vocab, min_reduce)
min_reduce += 1
total_words += sum(itervalues(vocab))
logger.info("collected %i word types from a corpus of %i words and %i sentences",
len(vocab), total_words, sentence_no + 1)
self.corpus_count = sentence_no + 1
self.raw_vocab = vocab
    def scale_vocab(self, min_count=None, sample=None, dry_run=False, keep_raw_vocab=False):
        """
        Apply vocabulary settings for `min_count` (discarding less-frequent words)
        and `sample` (controlling the downsampling of more-frequent words).

        Calling with `dry_run=True` will only simulate the provided settings and
        report the size of the retained vocabulary, effective corpus length, and
        estimated memory requirements. Results are both printed via logging and
        returned as a dict.

        Delete the raw vocabulary after the scaling is done to free up RAM,
        unless `keep_raw_vocab` is set.
        """
        # NOTE(review): `or` treats explicit falsy arguments (0, 0.0) as "not
        # given" and falls back to the stored defaults -- confirm intended.
        min_count = min_count or self.min_count
        sample = sample or self.sample

        # Discard words less-frequent than min_count
        if not dry_run:
            self.index2word = []
            # make stored settings match these applied settings
            self.min_count = min_count
            self.sample = sample
            self.vocab = {}
        drop_unique, drop_total, retain_total, original_total = 0, 0, 0, 0
        retain_words = []
        for word, v in iteritems(self.raw_vocab):
            if v >= min_count:
                retain_words.append(word)
                retain_total += v
                original_total += v
                if not dry_run:
                    # index assigned in retention order
                    self.vocab[word] = Vocab(count=v, index=len(self.index2word))
                    self.index2word.append(word)
            else:
                drop_unique += 1
                drop_total += v
                original_total += v
        logger.info("min_count=%d retains %i unique words (drops %i)"
                    % (min_count, len(retain_words), drop_unique))
        logger.info("min_count leaves %i word corpus (%i%% of original %i)"
                    % (retain_total, retain_total * 100 / max(original_total, 1), original_total))

        # Precalculate each vocabulary item's threshold for sampling
        if not sample:
            # no words downsampled
            threshold_count = retain_total
        elif sample < 1.0:
            # traditional meaning: set parameter as proportion of total
            threshold_count = sample * retain_total
        else:
            # new shorthand: sample >= 1 means downsample all words with higher count than sample
            threshold_count = int(sample * (3 + sqrt(5)) / 2)

        downsample_total, downsample_unique = 0, 0
        for w in retain_words:
            v = self.raw_vocab[w]
            # survival probability for this word's occurrences (may exceed 1, clamped below)
            word_probability = (sqrt(v / threshold_count) + 1) * (threshold_count / v)
            if word_probability < 1.0:
                downsample_unique += 1
                downsample_total += word_probability * v
            else:
                word_probability = 1.0
                downsample_total += v
            if not dry_run:
                # stored as a uint32-scale threshold for fast comparison against random draws
                self.vocab[w].sample_int = int(round(word_probability * 2**32))

        if not dry_run and not keep_raw_vocab:
            logger.info("deleting the raw counts dictionary of %i items", len(self.raw_vocab))
            self.raw_vocab = defaultdict(int)

        logger.info("sample=%g downsamples %i most-common words", sample, downsample_unique)
        logger.info("downsampling leaves estimated %i word corpus (%.1f%% of prior %i)",
                    downsample_total, downsample_total * 100.0 / max(retain_total, 1), retain_total)

        # return from each step: words-affected, resulting-corpus-size
        report_values = {'drop_unique': drop_unique, 'retain_total': retain_total,
                         'downsample_unique': downsample_unique, 'downsample_total': int(downsample_total)}

        # print extra memory estimates
        report_values['memory'] = self.estimate_memory(vocab_size=len(retain_words))

        return report_values
def finalize_vocab(self):
"""Build tables and model weights based on final vocabulary settings."""
if not self.index2word:
self.scale_vocab()
if self.hs:
# add info about each word's Huffman encoding
self.create_binary_tree()
if self.negative:
# build the table for drawing random words (for negative sampling)
self.make_cum_table()
if self.null_word:
# create null pseudo-word for padding when using concatenative L1 (run-of-words)
# this word is only ever input – never predicted – so count, huffman-point, etc doesn't matter
word, v = '\0', Vocab(count=1, sample_int=0)
v.index = len(self.vocab)
self.index2word.append(word)
self.vocab[word] = v
# set initial input/projection and hidden weights
self.reset_weights()
def reset_from(self, other_model):
"""
Borrow shareable pre-built structures (like vocab) from the other_model. Useful
if testing multiple models in parallel on the same corpus.
"""
self.vocab = other_model.vocab
self.index2word = other_model.index2word
self.cum_table = other_model.cum_table
self.corpus_count = other_model.corpus_count
self.reset_weights()
def _do_train_job(self, job, alpha, inits):
work, neu1 = inits
tally = 0
for sentence in job:
if self.sg:
tally += train_sentence_sg(self, sentence, alpha, work)
else:
tally += train_sentence_cbow(self, sentence, alpha, work, neu1)
return tally
    def train(self, sentences, total_words=None, word_count=0, chunksize=100, queue_factor=2, report_delay=1):
        """
        Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
        Each sentence must be a list of unicode strings.

        Producer/consumer design: this thread chunks `sentences` into jobs and
        feeds a bounded queue; `self.workers` daemon threads pull jobs, train,
        and report per-job word tallies back on a progress queue.
        Returns the total number of effective words processed.
        """
        if FAST_VERSION < 0:
            import warnings
            warnings.warn("C extension not loaded for Word2Vec, training will be slow. "
                          "Install a C compiler and reinstall gensim for fast training.")
        self.neg_labels = []
        if self.negative > 0:
            # precompute negative labels optimization for pure-python training
            self.neg_labels = zeros(self.negative + 1)
            self.neg_labels[0] = 1.
        logger.info("training model with %i workers on %i vocabulary and %i features, "
                    "using sg=%s hs=%s sample=%s and negative=%s"
                    % (self.workers, len(self.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative))
        if not self.vocab:
            raise RuntimeError("you must first build vocabulary before training the model")
        if not hasattr(self, 'syn0'):
            raise RuntimeError("you must first finalize vocabulary before training the model")
        if self.iter > 1:
            # wrap the corpus so each epoch re-reads it
            sentences = utils.RepeatCorpusNTimes(sentences, self.iter)

        def worker_init():
            # per-thread private work memory
            work = matutils.zeros_aligned(self.layer1_size, dtype=REAL)  # per-thread private work memory
            neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
            return (work, neu1)

        def worker_one_job(job, inits):
            # returns False when the (None, 0) poison pill is received
            items, alpha = job
            if items is None:  # signal to finish
                return False
            # train & return tally
            job_words = self._do_train_job(items, alpha, inits)
            progress_queue.put(job_words)  # report progress
            return True

        def worker_loop():
            """Train the model, lifting lists of sentences from the jobs queue."""
            init = worker_init()
            while True:
                job = job_queue.get()
                if not worker_one_job(job, init):
                    break

        start, next_report = default_timer(), 1.0
        # effective corpus size: each word's raw count scaled by its expected
        # downsampling survival (sample_int / 2**32), times the epoch count
        total_words = total_words or int(sum(v.count * (v.sample_int/2**32) for v in itervalues(self.vocab)) *
                                         self.iter)
        # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
        if self.workers > 0:
            job_queue = Queue(maxsize=queue_factor * self.workers)
        else:
            # no worker threads: jobs are processed synchronously as they are "put"
            job_queue = FakeJobQueue(worker_init, worker_one_job)
        progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)

        workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)]
        for thread in workers:
            thread.daemon = True  # make interrupting the process with ctrl+c easier
            thread.start()

        pushed_words = 0  # estimate of words dispatched so far, drives the alpha decay
        push_done = False
        done_jobs = 0
        next_alpha = self.alpha
        jobs_source = enumerate(utils.grouper(sentences, chunksize))
        # fill jobs queue with (sentence, alpha) job tuples
        while True:
            try:
                job_no, items = next(jobs_source)
                logger.debug("putting job #%i in the queue", job_no)
                job_queue.put((items, next_alpha))
                # update the learning rate before every job
                pushed_words += round((chunksize / (self.corpus_count * self.iter)) * total_words)
                next_alpha = self.alpha - (self.alpha - self.min_alpha) * (pushed_words / total_words)
            except StopIteration:
                # NOTE(review): if `sentences` is empty, `job_no` is unbound
                # here and this raises NameError -- confirm an empty corpus is
                # impossible by this point.
                logger.info("reached end of input; waiting to finish %i outstanding jobs" % (job_no-done_jobs+1))
                for _ in xrange(self.workers):
                    job_queue.put((None, 0))  # give the workers heads up that they can finish -- no more work!
                push_done = True
            try:
                # drain whatever progress is available; block only once all jobs are pushed
                while done_jobs < (job_no+1):
                    word_count += progress_queue.get(push_done)  # only block after all jobs pushed
                    done_jobs += 1
                    elapsed = default_timer() - start
                    if elapsed >= next_report:
                        est_alpha = self.alpha - (self.alpha - self.min_alpha) * (word_count / total_words)
                        logger.info("PROGRESS: at %.2f%% words, alpha %.05f, %.0f words/s",
                                    100.0 * word_count / total_words, est_alpha, word_count / elapsed)
                        next_report = elapsed + report_delay  # don't flood log, wait report_delay seconds
                else:
                    # loop ended by job count; really done
                    # NOTE(review): this while/else can also fire when workers
                    # keep pace with the producer before the input is exhausted
                    # (push_done still False) -- it looks like the condition
                    # should also require push_done; confirm against upstream.
                    break
            except Empty:
                pass  # already out of loop; continue to next push
        elapsed = default_timer() - start
        logger.info("training on %i words took %.1fs, %.0f words/s" %
                    (word_count, elapsed, word_count / elapsed if elapsed else 0.0))
        self.train_count += 1
        self.total_train_time += elapsed
        self.clear_sims()
        return word_count
    def _score_job_words(self, sentence, work, neu1):
        """Score one sentence with the scorer matching the training mode (sg vs cbow)."""
        if self.sg:
            return score_sentence_sg(self, sentence, work)
        else:
            # NOTE(review): the pure-python fallback score_sentence_cbow has
            # signature (model, sentence, alpha, work, neu1), so `work` lands
            # in its `alpha` slot here. Currently harmless because neither
            # parameter is read, but confirm against the cython signature.
            return score_sentence_cbow(self, sentence, work, neu1)
# basics copied from the train() function
    def score(self, sentences, total_sentences=None, chunksize=100):
        """
        Score the log probability for a sequence of sentences (can be a once-only generator stream).
        Each sentence must be a list of unicode strings.

        This does not change the fitted model in any way (see Word2Vec.train() for that).

        See the article by Taddy [1] for examples of how to use such scores in document classification.

        .. [1] Taddy, Matt. Document Classification by Inversion of Distributed Language Representations, in Proceedings of the 2015 Conference of the Association of Computational Linguistics.
        """
        if FAST_VERSION < 0:
            import warnings
            warnings.warn("C extension compilation failed, scoring will be slow. "
                          "Install a C compiler and reinstall gensim for fastness.")
        logger.info("scoring sentences with %i workers on %i vocabulary and %i features, "
                    "using sg=%s hs=%s sample=%s and negative=%s"
                    % (self.workers, len(self.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative))
        if not self.vocab:
            raise RuntimeError("you must first build vocabulary before scoring new data")
        if not self.hs:
            raise RuntimeError("we have only implemented score for hs")
        # single-element lists so the worker closures can mutate shared state
        start, next_report = time.time(), [1.0]
        # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
        jobs = Queue(maxsize=2 * self.workers)
        lock = threading.Lock()  # for shared state (scores, log reports...)
        total_sentences = total_sentences or int(1e9)
        sentence_scores = matutils.zeros_aligned(total_sentences, dtype=REAL)
        sentence_count = [0]

        def worker_score():
            """score the enumerated sentences, lifting lists of sentences from the jobs queue."""
            work = zeros(1, dtype=REAL)  # for sg hs, we actually only need one memory loc (running sum)
            neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
            while True:
                job = jobs.get()
                if job is None:  # data finished, exit
                    break
                ns = 0
                for (id, sentence) in job:
                    # `id` is the sentence's ordinal from enumerate (shadows the builtin)
                    sentence_scores[id] = self._score_job_words(sentence, work, neu1)
                    ns += 1
                with lock:
                    sentence_count[0] += ns
                    elapsed = time.time() - start
                    if elapsed >= next_report[0]:
                        logger.info("PROGRESS: at %i sentences, %.0f sentences/s"
                                    % (sentence_count[0], sentence_count[0] / elapsed if elapsed else 0.0))
                        next_report[0] = elapsed + 1.0  # wait at least a second between progress reports

        workers = [threading.Thread(target=worker_score) for _ in xrange(self.workers)]
        for thread in workers:
            thread.daemon = True  # make interrupting the process with ctrl+c easier
            thread.start()

        # convert input strings to Vocab objects and start filling the jobs queue
        for job_no, job in enumerate(utils.grouper(enumerate(self._prepare_items(sentences)), chunksize)):
            logger.debug("putting job #%i in the queue, qsize=%i" % (job_no, jobs.qsize()))
            jobs.put(job)

        logger.info("reached the end of input; waiting to finish %i outstanding jobs" % jobs.qsize())
        for _ in xrange(self.workers):
            jobs.put(None)  # give the workers heads up that they can finish -- no more work!
        for thread in workers:
            thread.join()

        elapsed = time.time() - start
        logger.info("scoring %i sentences took %.1fs, %.0f sentences/s"
                    % (sentence_count[0], elapsed, sentence_count[0] / elapsed if elapsed else 0.0))
        self.syn0norm = None  # invalidate any cached normalized vectors
        return sentence_scores[:sentence_count[0]]
    def clear_sims(self):
        """Discard the cached normalized-vector matrix; init_sims() rebuilds it on demand."""
        self.syn0norm = None
def reset_weights(self):
"""Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
logger.info("resetting layer weights")
self.syn0 = empty((len(self.vocab), self.vector_size), dtype=REAL)
# randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once
for i in xrange(len(self.vocab)):
# construct deterministic seed from word AND seed argument
self.syn0[i] = self.seeded_vector(self.index2word[i] + str(self.seed))
if self.hs:
self.syn1 = zeros((len(self.vocab), self.layer1_size), dtype=REAL)
if self.negative:
self.syn1neg = zeros((len(self.vocab), self.layer1_size), dtype=REAL)
self.syn0norm = None
self.syn0_lockf = ones(len(self.vocab), dtype=REAL) # zeros suppress learning
def seeded_vector(self, seed_string):
"""Create one 'random' vector (but deterministic by seed_string)"""
# Note: built-in hash() may vary by Python version or even (in Py3.x) per launch
once = random.RandomState(uint32(self.hashfxn(seed_string)))
return (once.rand(self.vector_size) - 0.5) / self.vector_size
def save_word2vec_format(self, fname, fvocab=None, binary=False):
"""
Store the input-hidden weight matrix in the same format used by the original
C word2vec-tool, for compatibility.
"""
if fvocab is not None:
logger.info("Storing vocabulary in %s" % (fvocab))
with utils.smart_open(fvocab, 'wb') as vout:
for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
vout.write(utils.to_utf8("%s %s\n" % (word, vocab.count)))
logger.info("storing %sx%s projection weights into %s" % (len(self.vocab), self.vector_size, fname))
assert (len(self.vocab), self.vector_size) == self.syn0.shape
with utils.smart_open(fname, 'wb') as fout:
fout.write(utils.to_utf8("%s %s\n" % self.syn0.shape))
# store in sorted order: most frequent words at the top
for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
row = self.syn0[vocab.index]
if binary:
fout.write(utils.to_utf8(word) + b" " + row.tostring())
else:
fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
    @classmethod
    def load_word2vec_format(cls, fname, fvocab=None, binary=False, norm_only=True, encoding='utf8'):
        """
        Load the input-hidden weight matrix from the original C word2vec-tool format.

        Note that the information stored in the file is incomplete (the binary tree is missing),
        so while you can query for word similarity etc., you cannot continue training
        with a model loaded this way.

        `binary` is a boolean indicating whether the data is in binary word2vec format.
        `norm_only` is a boolean indicating whether to only store normalised word2vec vectors in memory.
        Word counts are read from `fvocab` filename, if set (this is the file generated
        by `-save-vocab` flag of the original C tool).

        If you trained the C model using non-utf8 encoding for words, specify that
        encoding in `encoding`.
        """
        counts = None
        if fvocab is not None:
            # optional sidecar vocabulary file: "word count" per line
            logger.info("loading word counts from %s" % (fvocab))
            counts = {}
            with utils.smart_open(fvocab) as fin:
                for line in fin:
                    word, count = utils.to_unicode(line).strip().split()
                    counts[word] = int(count)
        logger.info("loading projection weights from %s" % (fname))
        with utils.smart_open(fname) as fin:
            header = utils.to_unicode(fin.readline(), encoding=encoding)
            vocab_size, vector_size = map(int, header.split())  # throws for invalid file format
            result = Word2Vec(size=vector_size)
            result.syn0 = zeros((vocab_size, vector_size), dtype=REAL)
            if binary:
                binary_len = dtype(REAL).itemsize * vector_size
                for line_no in xrange(vocab_size):
                    # mixed text and binary: read text first, then binary
                    word = []
                    while True:
                        ch = fin.read(1)
                        if ch == b' ':
                            break
                        if ch != b'\n':  # ignore newlines in front of words (some binary files have)
                            word.append(ch)
                    word = utils.to_unicode(b''.join(word), encoding=encoding)
                    if counts is None:
                        # no vocab file: fabricate rank-based counts so frequency sorting still works
                        result.vocab[word] = Vocab(index=line_no, count=vocab_size - line_no)
                    elif word in counts:
                        result.vocab[word] = Vocab(index=line_no, count=counts[word])
                    else:
                        logger.warning("vocabulary file is incomplete")
                        result.vocab[word] = Vocab(index=line_no, count=None)
                    result.index2word.append(word)
                    result.syn0[line_no] = fromstring(fin.read(binary_len), dtype=REAL)
            else:
                # text format: one "word v1 v2 ... vN" line per vocabulary entry
                for line_no, line in enumerate(fin):
                    parts = utils.to_unicode(line.rstrip(), encoding=encoding).split(" ")
                    if len(parts) != vector_size + 1:
                        raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no))
                    word, weights = parts[0], list(map(REAL, parts[1:]))
                    if counts is None:
                        result.vocab[word] = Vocab(index=line_no, count=vocab_size - line_no)
                    elif word in counts:
                        result.vocab[word] = Vocab(index=line_no, count=counts[word])
                    else:
                        logger.warning("vocabulary file is incomplete")
                        result.vocab[word] = Vocab(index=line_no, count=None)
                    result.index2word.append(word)
                    result.syn0[line_no] = weights
        logger.info("loaded %s matrix from %s" % (result.syn0.shape, fname))
        result.init_sims(norm_only)
        return result
    def intersect_word2vec_format(self, fname, binary=False, encoding='utf8'):
        """
        Merge the input-hidden weight matrix from the original C word2vec-tool format
        given, where it intersects with the current vocabulary. (No words are added to the
        existing vocabulary, but intersecting words adopt the file's weights, and
        non-intersecting words are left alone.)

        `binary` is a boolean indicating whether the data is in binary word2vec format.
        """
        counts = None  # unused; kept for symmetry with load_word2vec_format
        overlap_count = 0
        logger.info("loading projection weights from %s" % (fname))
        with utils.smart_open(fname) as fin:
            header = utils.to_unicode(fin.readline(), encoding=encoding)
            vocab_size, vector_size = map(int, header.split())  # throws for invalid file format
            if not vector_size == self.vector_size:
                raise ValueError("incompatible vector size %d in file %s" % (vector_size, fname))
                # TOCONSIDER: maybe mismatched vectors still useful enough to merge (truncating/padding)?
            if binary:
                binary_len = dtype(REAL).itemsize * vector_size
                for line_no in xrange(vocab_size):
                    # mixed text and binary: read text first, then binary
                    word = []
                    while True:
                        ch = fin.read(1)
                        if ch == b' ':
                            break
                        if ch != b'\n':  # ignore newlines in front of words (some binary files have)
                            word.append(ch)
                    word = utils.to_unicode(b''.join(word), encoding=encoding)
                    weights = fromstring(fin.read(binary_len), dtype=REAL)
                    if word in self.vocab:
                        overlap_count += 1
                        self.syn0[self.vocab[word].index] = weights
                        self.syn0_lockf[self.vocab[word].index] = 0.0  # lock it
            else:
                for line_no, line in enumerate(fin):
                    parts = utils.to_unicode(line.rstrip(), encoding=encoding).split(" ")
                    if len(parts) != vector_size + 1:
                        raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no))
                    word, weights = parts[0], list(map(REAL, parts[1:]))
                    if word in self.vocab:
                        overlap_count += 1
                        self.syn0[self.vocab[word].index] = weights
                        # NOTE(review): unlike the binary branch, the merged
                        # word's syn0_lockf is NOT zeroed here, so it remains
                        # trainable -- confirm whether text-format merges
                        # should also be locked.
        logger.info("merged %d vectors into %s matrix from %s" % (overlap_count, self.syn0.shape, fname))
def most_similar(self, positive=[], negative=[], topn=10):
"""
Find the top-N most similar words. Positive words contribute positively towards the
similarity, negative words negatively.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given words and the vectors for each word in the model.
The method corresponds to the `word-analogy` and `distance` scripts in the original
word2vec implementation.
If topn is False, most_similar returns the vector of similarity scores.
Example::
>>> trained_model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
"""
self.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
positive = [positive]
# add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
positive = [(word, 1.0) if isinstance(word, string_types + (ndarray,))
else word for word in positive]
negative = [(word, -1.0) if isinstance(word, string_types + (ndarray,))
else word for word in negative]
# compute the weighted average of all words
all_words, mean = set(), []
for word, weight in positive + negative:
if isinstance(word, ndarray):
mean.append(weight * word)
elif word in self.vocab:
mean.append(weight * self.syn0norm[self.vocab[word].index])
all_words.add(self.vocab[word].index)
else:
raise KeyError("word '%s' not in vocabulary" % word)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
dists = dot(self.syn0norm, mean)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
def most_similar_cosmul(self, positive=None, negative=None, topn=10):
    """
    Find the top-N most similar words, using the multiplicative combination objective
    proposed by Omer Levy and Yoav Goldberg in [4]_. Positive words still contribute
    positively towards the similarity, negative words negatively, but with less
    susceptibility to one large distance dominating the calculation.

    In the common analogy-solving case, of two positive and one negative examples,
    this method is equivalent to the "3CosMul" objective (equation (4)) of Levy and Goldberg.

    Additional positive or negative examples contribute to the numerator or denominator,
    respectively - a potentially sensible but untested extension of the method. (With
    a single positive example, rankings will be the same as in the default most_similar.)

    Example::

      >>> trained_model.most_similar_cosmul(positive=['baghdad', 'england'], negative=['london'])
      [(u'iraq', 0.8488819003105164), ...]

    .. [4] Omer Levy and Yoav Goldberg. Linguistic Regularities in Sparse and Explicit Word Representations, 2014.
    """
    # avoid mutable default arguments (a shared list would leak state across calls)
    positive = [] if positive is None else positive
    negative = [] if negative is None else negative
    self.init_sims()
    if isinstance(positive, string_types) and not negative:
        # allow calls like most_similar_cosmul('dog'), as a shorthand for most_similar_cosmul(['dog'])
        positive = [positive]
    all_words = set()

    def word_vec(word):
        # resolve a word (or pre-computed vector) to a normalized vector,
        # recording vocabulary indexes so input words can be excluded from results
        if isinstance(word, ndarray):
            return word
        elif word in self.vocab:
            all_words.add(self.vocab[word].index)
            return self.syn0norm[self.vocab[word].index]
        else:
            raise KeyError("word '%s' not in vocabulary" % word)

    positive = [word_vec(word) for word in positive]
    negative = [word_vec(word) for word in negative]
    if not positive:
        raise ValueError("cannot compute similarity with no input")
    # equation (4) of Levy & Goldberg "Linguistic Regularities...",
    # with distances shifted to [0,1] per footnote (7)
    pos_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in positive]
    neg_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in negative]
    # small epsilon avoids division by zero when a negative distance hits 0
    dists = prod(pos_dists, axis=0) / (prod(neg_dists, axis=0) + 0.000001)
    if not topn:
        return dists
    best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
    # ignore (don't return) words from the input
    result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
    return result[:topn]
def doesnt_match(self, words):
    """
    Which word from the given list doesn't go with the others?

    Out-of-vocabulary words are silently dropped; raises ValueError if no
    known words remain.

    Example::

      >>> trained_model.doesnt_match("breakfast cereal dinner lunch".split())
      'cereal'
    """
    self.init_sims()

    words = [word for word in words if word in self.vocab]  # filter out OOV words
    logger.debug("using words %s", words)
    if not words:
        raise ValueError("cannot select a word from an empty list")
    # pass a list, not a generator: numpy.vstack no longer accepts generators
    vectors = vstack([self.syn0norm[self.vocab[word].index] for word in words]).astype(REAL)
    mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
    dists = dot(vectors, mean)
    # the word least similar to the mean of all words is the odd one out
    return sorted(zip(dists, words))[0][1]
def __getitem__(self, word):
    """
    Return a word's representations in vector space, as a 1D numpy array.

    Raises KeyError (from the vocab lookup) if `word` is unknown.

    Example::

      >>> trained_model['woman']
      array([ -1.40128313e-02, ...]

    """
    # index into the raw (un-normalized) embedding matrix
    return self.syn0[self.vocab[word].index]
def __contains__(self, word):
    # membership is defined purely by vocabulary presence
    return word in self.vocab
def similarity(self, w1, w2):
    """
    Compute cosine similarity between two words.

    Raises KeyError if either word is not in the vocabulary.

    Example::

      >>> trained_model.similarity('woman', 'man')
      0.73723527

      >>> trained_model.similarity('woman', 'woman')
      1.0

    """
    # cosine of the angle between the two (unit-normalized) word vectors
    return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))
def n_similarity(self, ws1, ws2):
    """
    Compute cosine similarity between two sets of words.

    Each set is represented by the mean of its word vectors; the result is
    the cosine between the two (unit-normalized) means. Raises KeyError if
    any word is missing from the vocabulary.

    Example::

      >>> trained_model.n_similarity(['sushi', 'shop'], ['japanese', 'restaurant'])
      0.61540466561049689

      >>> trained_model.n_similarity(['restaurant', 'japanese'], ['japanese', 'restaurant'])
      1.0000000000000004

      >>> trained_model.n_similarity(['sushi'], ['restaurant']) == trained_model.similarity('sushi', 'restaurant')
      True

    """
    v1 = [self[word] for word in ws1]
    v2 = [self[word] for word in ws2]
    return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
def init_sims(self, replace=False):
    """
    Precompute L2-normalized vectors.

    If `replace` is set, forget the original vectors and only keep the normalized
    ones = saves lots of memory!

    Note that you **cannot continue training** after doing a replace. The model becomes
    effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.
    """
    # recompute only when missing, or unconditionally when replacing in-place
    if getattr(self, 'syn0norm', None) is None or replace:
        logger.info("precomputing L2-norms of word weight vectors")
        if replace:
            # normalize each row of syn0 in place, then alias syn0norm to it
            for i in xrange(self.syn0.shape[0]):
                self.syn0[i, :] /= sqrt((self.syn0[i, :] ** 2).sum(-1))
            self.syn0norm = self.syn0
            if hasattr(self, 'syn1'):
                # hidden layer weights are useless once syn0 is destroyed; free them
                del self.syn1
        else:
            # keep syn0 intact and store the normalized copy separately
            self.syn0norm = (self.syn0 / sqrt((self.syn0 ** 2).sum(-1))[..., newaxis]).astype(REAL)
def estimate_memory(self, vocab_size=None):
    """Estimate required memory for a model using current settings and provided vocabulary size.

    Returns a dict with per-structure byte estimates plus a 'total' key.
    """
    vocab_size = vocab_size or len(self.vocab)
    # rough per-entry overhead of the vocab dict; hierarchical softmax stores more per word
    per_entry = 700 if self.hs else 500
    vec_bytes = dtype(REAL).itemsize
    report = {
        'vocab': vocab_size * per_entry,
        'syn0': vocab_size * self.vector_size * vec_bytes,
    }
    if self.hs:
        report['syn1'] = vocab_size * self.layer1_size * vec_bytes
    if self.negative:
        report['syn1neg'] = vocab_size * self.layer1_size * vec_bytes
    report['total'] = sum(report.values())
    logger.info("estimated required memory for %i words and %i dimensions: %i bytes",
                vocab_size, self.vector_size, report['total'])
    return report
@staticmethod
def log_accuracy(section):
    """Log the hit rate of one evaluation section.

    `section` is a dict with keys 'section' (name), 'correct' and
    'incorrect' (lists of answered questions). Sections with no answered
    questions are skipped silently.
    """
    correct, incorrect = len(section['correct']), len(section['incorrect'])
    if correct + incorrect > 0:
        # pass arguments lazily so formatting only happens when INFO is enabled
        logger.info("%s: %.1f%% (%i/%i)",
                    section['section'], 100.0 * correct / (correct + incorrect),
                    correct, correct + incorrect)
def accuracy(self, questions, restrict_vocab=30000, most_similar=most_similar):
    """
    Compute accuracy of the model. `questions` is a filename where lines are
    4-tuples of words, split into sections by ": SECTION NAME" lines.
    See https://code.google.com/p/word2vec/source/browse/trunk/questions-words.txt for an example.

    The accuracy is reported (=printed to log and returned as a list) for each
    section separately, plus there's one aggregate summary at the end.

    Use `restrict_vocab` to ignore all questions containing a word whose frequency
    is not in the top-N most frequent words (default top 30,000).

    This method corresponds to the `compute-accuracy` script of the original C word2vec.
    """
    ok_vocab = dict(sorted(iteritems(self.vocab),
                           key=lambda item: -item[1].count)[:restrict_vocab])
    ok_index = set(v.index for v in itervalues(ok_vocab))

    sections, section = [], None
    for line_no, line in enumerate(utils.smart_open(questions)):
        # TODO: use level3 BLAS (=evaluate multiple questions at once), for speed
        line = utils.to_unicode(line)
        if line.startswith(': '):
            # a new section starts => store the old section
            if section:
                sections.append(section)
                self.log_accuracy(section)
            section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}
        else:
            if not section:
                raise ValueError("missing section header before line #%i in %s" % (line_no, questions))
            try:
                a, b, c, expected = [word.lower() for word in line.split()]  # TODO assumes vocabulary preprocessing uses lowercase, too...
            except ValueError:
                # wrong number of tokens on the line; without this `continue` the
                # stale values from the previous question would be re-evaluated
                logger.info("skipping invalid line #%i in %s" % (line_no, questions))
                continue
            if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
                logger.debug("skipping line #%i with OOV words: %s" % (line_no, line.strip()))
                continue

            ignore = set(self.vocab[v].index for v in [a, b, c])  # indexes of words to ignore
            predicted = None
            # find the most likely prediction, ignoring OOV words and input words
            sims = most_similar(self, positive=[b, c], negative=[a], topn=False)
            for index in matutils.argsort(sims, reverse=True):
                if index in ok_index and index not in ignore:
                    predicted = self.index2word[index]
                    if predicted != expected:
                        logger.debug("%s: expected %s, predicted %s", line.strip(), expected, predicted)
                    break
            if predicted == expected:
                section['correct'].append((a, b, c, expected))
            else:
                section['incorrect'].append((a, b, c, expected))
    if section:
        # store the last section, too
        sections.append(section)
        self.log_accuracy(section)

    total = {
        'section': 'total',
        'correct': sum((s['correct'] for s in sections), []),
        'incorrect': sum((s['incorrect'] for s in sections), []),
    }
    self.log_accuracy(total)
    sections.append(total)
    return sections
def __str__(self):
    # short summary: class name, vocabulary size, vector dimensionality and learning rate
    return "%s(vocab=%s, size=%s, alpha=%s)" % (self.__class__.__name__, len(self.index2word), self.vector_size, self.alpha)
def save(self, *args, **kwargs):
    # don't bother storing the cached normalized vectors, recalculable table
    # (caller may still override by passing an explicit `ignore` kwarg)
    kwargs['ignore'] = kwargs.get('ignore', ['syn0norm', 'table', 'cum_table'])
    super(Word2Vec, self).save(*args, **kwargs)
save.__doc__ = utils.SaveLoad.save.__doc__
@classmethod
def load(cls, *args, **kwargs):
    """Load a previously saved model, migrating legacy attributes on the fly."""
    model = super(Word2Vec, cls).load(*args, **kwargs)
    # update older models
    if hasattr(model, 'table'):
        delattr(model, 'table')  # discard in favor of cum_table
    if model.negative:
        model.make_cum_table()  # rebuild cum_table from vocabulary
    return model
class FakeJobQueue(object):
    """Stand-in for a Queue: performs the work_loop equivalent synchronously,
    in the calling thread, instead of handing jobs to workers."""

    def __init__(self, init_fn, job_fn):
        # run the per-worker initialization once, up front, and cache its result
        self.inits = init_fn()
        self.job_fn = job_fn

    def put(self, job):
        # "enqueuing" simply executes the job immediately with the cached state
        self.job_fn(job, self.inits)
class BrownCorpus(object):
    """Iterate over sentences from the Brown corpus (part of NLTK data)."""

    def __init__(self, dirname):
        self.dirname = dirname

    def __iter__(self):
        for base in os.listdir(self.dirname):
            path = os.path.join(self.dirname, base)
            if not os.path.isfile(path):
                continue
            for raw in utils.smart_open(path):
                raw = utils.to_unicode(raw)
                # each file line is a single sentence in the Brown corpus;
                # each token has the form WORD/POS_TAG
                tagged = [tok.split('/') for tok in raw.split() if len(tok.split('/')) == 2]
                # ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
                sentence = ["%s/%s" % (word.lower(), tag[:2]) for word, tag in tagged if tag[:2].isalpha()]
                if not sentence:  # don't bother sending out empty sentences
                    continue
                yield sentence
class Text8Corpus(object):
    """Iterate over sentences from the "text8" corpus, unzipped from http://mattmahoney.net/dc/text8.zip ."""
    def __init__(self, fname, max_sentence_length=1000):
        # fname: path to the (possibly compressed) corpus file
        # max_sentence_length: number of tokens per yielded "sentence"
        self.fname = fname
        self.max_sentence_length = max_sentence_length

    def __iter__(self):
        # the entire corpus is one gigantic line -- there are no sentence marks at all
        # so just split the sequence of tokens arbitrarily: 1 sentence = 1000 tokens
        sentence, rest = [], b''
        with utils.smart_open(self.fname) as fin:
            while True:
                text = rest + fin.read(8192)  # avoid loading the entire file (=1 line) into RAM
                if text == rest:  # EOF: read() returned nothing new
                    sentence.extend(rest.split())  # return the last chunk of words, too (may be shorter/longer)
                    if sentence:
                        yield sentence
                    break
                last_token = text.rfind(b' ')  # last token may have been split in two... keep for next iteration
                # split off complete tokens; carry the trailing partial token in `rest`
                words, rest = (utils.to_unicode(text[:last_token]).split(),
                               text[last_token:].strip()) if last_token >= 0 else ([], text)
                sentence.extend(words)
                # flush every full max_sentence_length-sized chunk collected so far
                while len(sentence) >= self.max_sentence_length:
                    yield sentence[:self.max_sentence_length]
                    sentence = sentence[self.max_sentence_length:]
class LineSentence(object):
    """Simple format: one sentence = one line; words already preprocessed and separated by whitespace."""

    def __init__(self, source, max_sentence_length=10000):
        """
        `source` can be either a string or a file object.

        Very long lines are broken into chunks of at most `max_sentence_length`
        tokens each.

        Example::

            sentences = LineSentence('myfile.txt')

        Or for compressed files::

            sentences = LineSentence('compressed_text.txt.bz2')
            sentences = LineSentence('compressed_text.txt.gz')

        """
        self.source = source
        self.max_sentence_length = max_sentence_length

    def _chunks(self, line):
        """Tokenize one raw line and yield max_sentence_length-sized chunks (PRIVATE)."""
        tokens = utils.to_unicode(line).split()
        for i in range(0, len(tokens), self.max_sentence_length):
            yield tokens[i:(i + self.max_sentence_length)]

    def __iter__(self):
        """Iterate through the lines in the source."""
        try:
            # Assume it is a file-like object and try treating it as such
            # Things that don't have seek will trigger an exception
            self.source.seek(0)
            for line in self.source:
                for chunk in self._chunks(line):
                    yield chunk
        except AttributeError:
            # If it didn't work like a file, use it as a string filename
            with utils.smart_open(self.source) as fin:
                for line in fin:
                    for chunk in self._chunks(line):
                        yield chunk
# Example: ./word2vec.py ~/workspace/word2vec/text8 ~/workspace/word2vec/questions-words.txt ./text8
if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s', level=logging.INFO)
    logging.info("running %s", " ".join(sys.argv))
    logging.info("using optimization %s", FAST_VERSION)

    # check and process cmdline input
    program = os.path.basename(sys.argv[0])
    if len(sys.argv) < 2:
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    infile = sys.argv[1]

    from gensim.models.word2vec import Word2Vec  # avoid referencing __main__ in pickle
    seterr(all='raise')  # don't ignore numpy errors

    # model = Word2Vec(LineSentence(infile), size=200, min_count=5, workers=4)
    model = Word2Vec(Text8Corpus(infile), size=200, min_count=5, workers=1)

    if len(sys.argv) > 3:
        outfile = sys.argv[3]
        model.save(outfile + '.model')
        model.save_word2vec_format(outfile + '.model.bin', binary=True)
        model.save_word2vec_format(outfile + '.model.txt', binary=False)

    if len(sys.argv) > 2:
        # evaluate on the analogy question file (was reading sys.argv[2] again,
        # leaving `questions_file` unused)
        questions_file = sys.argv[2]
        model.accuracy(questions_file)

    logging.info("finished running %s", program)
|
vvw/gensim
|
gensim/models/word2vec.py
|
Python
|
gpl-3.0
| 66,200
|
[
"VisIt"
] |
c41e074070927a4de121356ffc2e8701d849f3a25411cae9c3fcc7d6fc007b66
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
# grid dimensions of each finite-difference block (problem height = n0*n1)
n0=15
n1=15
output = False   # print matrices as text
display = True   # display matrices graphically
worldSize = El.mpi.WorldSize()
worldRank = El.mpi.WorldRank()
# Place two 2D finite-difference matrices next to each other
# and make the last column dense
def ConcatFD2D(N0,N1):
    """Build the (N0*N1) x (2*N0*N1) complex distributed matrix consisting of
    two 2D finite-difference stencils side by side, with a dense last column."""
    A = El.DistMatrix(El.zTag)
    height = N0*N1
    width = 2*N0*N1
    El.Zeros(A,height,width)
    localHeight = A.LocalHeight()
    # at most 11 updates per local row: two 5-point stencils + the dense column
    A.Reserve(11*localHeight)
    for iLoc in xrange(localHeight):
        i = A.GlobalRow(iLoc)
        x0 = i % N0
        x1 = i / N0  # integer (floor) division under Python 2: grid row index
        iRel = i + N0*N1  # column offset into the second stencil block
        A.Update( i, i, El.ComplexDouble(1,1) )
        A.Update( i, iRel, El.ComplexDouble(20,2) )
        if x0 > 0:
            A.Update( i, i-1, El.ComplexDouble(-1,3) )
            A.Update( i, iRel-1, El.ComplexDouble(-17,4) )
        if x0+1 < N0:
            A.Update( i, i+1, El.ComplexDouble(2,5) )
            A.Update( i, iRel+1, El.ComplexDouble(-20,6) )
        if x1 > 0:
            A.Update( i, i-N0, El.ComplexDouble(-30,7) )
            A.Update( i, iRel-N0, El.ComplexDouble(-3,8) )
        if x1+1 < N1:
            A.Update( i, i+N0, El.ComplexDouble(4,9) )
            A.Update( i, iRel+N0, El.ComplexDouble(3,10) )

        # The dense last column. Use true division: under Python 2,
        # -10/height floor-divides to -1 instead of the intended
        # small-magnitude value -10/height.
        A.Update( i, width-1, El.ComplexDouble(-10.0/height) )

    return A
# Assemble the operator and a right-hand side of all ones
A = ConcatFD2D(n0,n1)
b = El.DistMatrix(El.zTag)
#El.Gaussian( b, n0*n1, 1 )
El.Ones( b, n0*n1, 1 )
if display:
    El.Display( A, "A" )
    El.Display( b, "b" )
if output:
    El.Print( A, "A" )
    El.Print( b, "b" )

# Basis Pursuit control: solve min ||x||_1 subject to A x = b via an IPM
ctrl = El.BPCtrl_z()
ctrl.ipmCtrl.mehrotraCtrl.minTol = 1e-5
ctrl.ipmCtrl.mehrotraCtrl.targetTol = 1e-8
ctrl.ipmCtrl.mehrotraCtrl.time = True
ctrl.ipmCtrl.mehrotraCtrl.progress = True
ctrl.ipmCtrl.mehrotraCtrl.solveCtrl.progress = True

# Time the solve (NOTE: this example is Python 2 -- print statements below)
startBP = El.mpi.Time()
x = El.BP( A, b, ctrl )
endBP = El.mpi.Time()
if worldRank == 0:
    print "BP time:", endBP-startBP, "seconds"
if display:
    El.Display( x, "x" )
if output:
    El.Print( x, "x" )

# Report the l1 norm of the solution and the residual e = b - A x
xOneNorm = El.EntrywiseNorm( x, 1 )
e = El.DistMatrix(El.zTag)
El.Copy( b, e )
El.Gemv( El.NORMAL, El.ComplexDouble(-1), A, x, El.ComplexDouble(1), e )
if display:
    El.Display( e, "e" )
if output:
    El.Print( e, "e" )
eTwoNorm = El.Nrm2( e )
if worldRank == 0:
    print "|| x ||_1 =", xOneNorm
    print "|| A x - b ||_2 =", eTwoNorm

# Require the user to press a button before the figures are closed
El.Finalize()
if worldSize == 1:
    raw_input('Press Enter to exit')
|
mcopik/Elemental
|
examples/interface/BPComplexDense.py
|
Python
|
bsd-3-clause
| 2,569
|
[
"Gaussian"
] |
6b28c9fcd2f537decb9d0deb51cbd7628fc31827d97d9c899300491876f72621
|
#!/usr/bin/env python
# Copyright 2017-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: James D. McClain
# Mario Motta
# Yang Gao
# Qiming Sun <osirpt.sun@gmail.com>
# Jason Yu
# Alec White
#
from functools import reduce
import numpy as np
import h5py
from pyscf import lib
from pyscf.lib import logger
from pyscf.pbc import scf
from pyscf.cc import uccsd
from pyscf.pbc.lib import kpts_helper
from pyscf.pbc.lib.kpts_helper import gamma_point
from pyscf.lib.parameters import LOOSE_ZERO_TOL, LARGE_DENOM # noqa
from pyscf.pbc.mp.kump2 import (get_frozen_mask, get_nocc, get_nmo,
padded_mo_coeff, padding_k_idx) # noqa
from pyscf.pbc.cc import kintermediates_uhf
from pyscf import __config__
einsum = lib.einsum
# --- list2array
def mo_c_list_to_array(mo_coeff):
    """Pack per-spin lists of k-point MO coefficient matrices into per-spin
    complex ndarrays of shape (nkpts, nao, nao)."""
    packed = []
    for spin in range(2):  # alpha, beta
        coeffs = mo_coeff[spin]
        nkpts = len(coeffs)
        nao = coeffs[0].shape[0]
        stacked = np.zeros((nkpts, nao, nao), dtype=complex)
        for k in range(nkpts):
            stacked[k, :, :] = coeffs[k][:, :]
        packed.append(stacked)
    return packed
def convert_mo_coeff(mo_coeff):
    """Return mo_coeff as per-spin ndarrays, converting from the list-of-lists
    layout when needed; array-like input is passed through unchanged."""
    if not isinstance(mo_coeff[0], list):
        return mo_coeff
    return mo_c_list_to_array(mo_coeff)
def update_amps(cc, t1, t2, eris):
    """Perform one iteration of the k-point UCCSD amplitude equations.

    Args:
        cc: KUCCSD-like object providing nkpts, level_shift, stdout/verbose
            and khelper.kconserv.
        t1: tuple (t1a, t1b) of alpha/beta single-excitation amplitudes.
        t2: tuple (t2aa, t2ab, t2bb) of double-excitation amplitudes.
        eris: integral container supplying fock, mo_energy and the
            two-electron integral blocks referenced below.

    Returns:
        ((Ht1a, Ht1b), (Ht2aa, Ht2ab, Ht2bb)) -- the new amplitudes, with the
        orbital-energy denominators already divided out.
    """
    time0 = logger.process_clock(), logger.perf_counter()
    log = logger.Logger(cc.stdout, cc.verbose)

    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2

    # accumulators for the updated amplitudes
    Ht1a = np.zeros_like(t1a)
    Ht1b = np.zeros_like(t1b)
    Ht2aa = np.zeros_like(t2aa)
    Ht2ab = np.zeros_like(t2ab)
    Ht2bb = np.zeros_like(t2bb)

    nkpts, nocca, nvira = t1a.shape
    noccb, nvirb = t1b.shape[1:]

    #fvv_ = eris.fock[0][:,nocca:,nocca:]
    #fVV_ = eris.fock[1][:,noccb:,noccb:]
    #foo_ = eris.fock[0][:,:nocca,:nocca]
    #fOO_ = eris.fock[1][:,:noccb,:noccb]
    fov_ = eris.fock[0][:,:nocca,nocca:]
    fOV_ = eris.fock[1][:,:noccb,noccb:]

    # Get location of padded elements in occupied and virtual space
    nonzero_padding_alpha, nonzero_padding_beta = padding_k_idx(cc, kind="split")
    nonzero_opadding_alpha, nonzero_vpadding_alpha = nonzero_padding_alpha
    nonzero_opadding_beta, nonzero_vpadding_beta = nonzero_padding_beta

    # occupied / (level-shifted) virtual orbital energies per k-point
    mo_ea_o = [e[:nocca] for e in eris.mo_energy[0]]
    mo_eb_o = [e[:noccb] for e in eris.mo_energy[1]]
    mo_ea_v = [e[nocca:] + cc.level_shift for e in eris.mo_energy[0]]
    mo_eb_v = [e[noccb:] + cc.level_shift for e in eris.mo_energy[1]]

    Fvv_, FVV_ = kintermediates_uhf.cc_Fvv(cc, t1, t2, eris)
    Foo_, FOO_ = kintermediates_uhf.cc_Foo(cc, t1, t2, eris)
    Fov_, FOV_ = kintermediates_uhf.cc_Fov(cc, t1, t2, eris)

    # Move energy terms to the other side
    for k in range(nkpts):
        Fvv_[k][np.diag_indices(nvira)] -= mo_ea_v[k]
        FVV_[k][np.diag_indices(nvirb)] -= mo_eb_v[k]
        Foo_[k][np.diag_indices(nocca)] -= mo_ea_o[k]
        FOO_[k][np.diag_indices(noccb)] -= mo_eb_o[k]

    # Get the momentum conservation array
    kconserv = cc.khelper.kconserv

    # T1 equation
    P = kintermediates_uhf.kconserv_mat(cc.nkpts, cc.khelper.kconserv)
    Ht1a += fov_.conj()
    Ht1b += fOV_.conj()
    Ht1a += einsum('xyximae,yme->xia', t2aa, Fov_)
    Ht1a += einsum('xyximae,yme->xia', t2ab, FOV_)
    Ht1b += einsum('xyximae,yme->xia', t2bb, FOV_)
    Ht1b += einsum('yxymiea,yme->xia', t2ab, Fov_)
    Ht1a -= einsum('xyzmnae, xzymine->zia', t2aa, eris.ooov)
    Ht1a -= einsum('xyzmNaE, xzymiNE->zia', t2ab, eris.ooOV)
    #Ht1a -= einsum('xyzmnae,xzymine,xyzw->zia', t2aa, eris.ooov, P)
    #Ht1a -= einsum('xyzmNaE,xzymiNE,xyzw->zia', t2ab, eris.ooOV, P)
    Ht1b -= einsum('xyzmnae, xzymine->zia', t2bb, eris.OOOV)
    #Ht1b -= einsum('xyzmnae,xzymine,xyzw->zia', t2bb, eris.OOOV, P)
    Ht1b -= einsum('yxwnmea,xzymine,xyzw->zia', t2ab, eris.OOov, P)

    for ka in range(nkpts):
        Ht1a[ka] += einsum('ie,ae->ia', t1a[ka], Fvv_[ka])
        Ht1b[ka] += einsum('ie,ae->ia', t1b[ka], FVV_[ka])
        Ht1a[ka] -= einsum('ma,mi->ia', t1a[ka], Foo_[ka])
        Ht1b[ka] -= einsum('ma,mi->ia', t1b[ka], FOO_[ka])
        for km in range(nkpts):
            # ka == ki; km == kf == km
            # <ma||if> = [mi|af] - [mf|ai]
            #         => [mi|af] - [fm|ia]
            Ht1a[ka] += einsum('mf,aimf->ia', t1a[km], eris.voov[ka, ka, km])
            Ht1a[ka] -= einsum('mf,miaf->ia', t1a[km], eris.oovv[km, ka, ka])
            Ht1a[ka] += einsum('MF,aiMF->ia', t1b[km], eris.voOV[ka, ka, km])
            # miaf - mfai => miaf - fmia
            Ht1b[ka] += einsum('MF,AIMF->IA', t1b[km], eris.VOOV[ka, ka, km])
            Ht1b[ka] -= einsum('MF,MIAF->IA', t1b[km], eris.OOVV[km, ka, ka])
            Ht1b[ka] += einsum('mf,fmIA->IA', t1a[km], eris.voOV[km, km, ka].conj())
            for kf in range(nkpts):
                ki = ka
                ke = kconserv[ki, kf, km]
                Ht1a[ka] += einsum('imef,fmea->ia', t2aa[ki,km,ke], eris.vovv[kf,km,ke].conj())
                Ht1a[ka] += einsum('iMeF,FMea->ia', t2ab[ki,km,ke], eris.VOvv[kf,km,ke].conj())
                Ht1b[ka] += einsum('IMEF,FMEA->IA', t2bb[ki,km,ke], eris.VOVV[kf,km,ke].conj())
                Ht1b[ka] += einsum('mIfE,fmEA->IA', t2ab[km,ki,kf], eris.voVV[kf,km,ke].conj())

    for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
        kb = kconserv[ki, ka, kj]
        # Fvv equation
        Ftmpa_kb = Fvv_[kb] - 0.5 * einsum('mb,me->be', t1a[kb], Fov_[kb])
        Ftmpb_kb = FVV_[kb] - 0.5 * einsum('MB,ME->BE', t1b[kb], FOV_[kb])

        Ftmpa_ka = Fvv_[ka] - 0.5 * einsum('mb,me->be', t1a[ka], Fov_[ka])
        Ftmpb_ka = FVV_[ka] - 0.5 * einsum('MB,ME->BE', t1b[ka], FOV_[ka])

        tmp = einsum('ijae,be->ijab', t2aa[ki, kj, ka], Ftmpa_kb)
        Ht2aa[ki, kj, ka] += tmp

        tmp = einsum('IJAE,BE->IJAB', t2bb[ki, kj, ka], Ftmpb_kb)
        Ht2bb[ki, kj, ka] += tmp

        tmp = einsum('iJaE,BE->iJaB', t2ab[ki, kj, ka], Ftmpb_kb)
        Ht2ab[ki, kj, ka] += tmp

        tmp = einsum('iJeB,ae->iJaB', t2ab[ki, kj, ka], Ftmpa_ka)
        Ht2ab[ki, kj, ka] += tmp

        #P(ab)
        tmp = einsum('ijbe,ae->ijab', t2aa[ki, kj, kb], Ftmpa_ka)
        Ht2aa[ki, kj, ka] -= tmp

        tmp = einsum('IJBE,AE->IJAB', t2bb[ki, kj, kb], Ftmpb_ka)
        Ht2bb[ki, kj, ka] -= tmp

        # Foo equation
        Ftmpa_kj = Foo_[kj] + 0.5 * einsum('je,me->mj', t1a[kj], Fov_[kj])
        Ftmpb_kj = FOO_[kj] + 0.5 * einsum('JE,ME->MJ', t1b[kj], FOV_[kj])

        Ftmpa_ki = Foo_[ki] + 0.5 * einsum('je,me->mj', t1a[ki], Fov_[ki])
        Ftmpb_ki = FOO_[ki] + 0.5 * einsum('JE,ME->MJ', t1b[ki], FOV_[ki])

        tmp = einsum('imab,mj->ijab', t2aa[ki, kj, ka], Ftmpa_kj)
        Ht2aa[ki, kj, ka] -= tmp

        tmp = einsum('IMAB,MJ->IJAB', t2bb[ki, kj, ka], Ftmpb_kj)
        Ht2bb[ki, kj, ka] -= tmp

        tmp = einsum('iMaB,MJ->iJaB', t2ab[ki, kj, ka], Ftmpb_kj)
        Ht2ab[ki, kj, ka] -= tmp

        tmp = einsum('mJaB,mi->iJaB', t2ab[ki, kj, ka], Ftmpa_ki)
        Ht2ab[ki, kj, ka] -= tmp

        #P(ij)
        tmp = einsum('jmab,mi->ijab', t2aa[kj, ki, ka], Ftmpa_ki)
        Ht2aa[ki, kj, ka] += tmp

        tmp = einsum('JMAB,MI->IJAB', t2bb[kj, ki, ka], Ftmpb_ki)
        Ht2bb[ki, kj, ka] += tmp

    # T2 equation
    eris_ovov = np.asarray(eris.ovov)
    eris_OVOV = np.asarray(eris.OVOV)
    eris_ovOV = np.asarray(eris.ovOV)

    Ht2aa += (eris_ovov.transpose(0,2,1,3,5,4,6) - eris_ovov.transpose(2,0,1,5,3,4,6)).conj()
    Ht2bb += (eris_OVOV.transpose(0,2,1,3,5,4,6) - eris_OVOV.transpose(2,0,1,5,3,4,6)).conj()
    Ht2ab += eris_ovOV.transpose(0,2,1,3,5,4,6).conj()

    tauaa, tauab, taubb = kintermediates_uhf.make_tau(cc, t2, t1, t1)
    Woooo, WooOO, WOOOO = kintermediates_uhf.cc_Woooo(cc, t1, t2, eris)

    # Add the contributions from Wvvvv
    for km, ki, kn in kpts_helper.loop_kkk(nkpts):
        kj = kconserv[km,ki,kn]
        Woooo[km,ki,kn] += .5 * einsum('xmenf, xijef->minj', eris_ovov[km,:,kn], tauaa[ki,kj])
        WOOOO[km,ki,kn] += .5 * einsum('xMENF, xIJEF->MINJ', eris_OVOV[km,:,kn], taubb[ki,kj])
        WooOO[km,ki,kn] += .5 * einsum('xmeNF, xiJeF->miNJ', eris_ovOV[km,:,kn], tauab[ki,kj])

    for km, ki, kn in kpts_helper.loop_kkk(nkpts):
        kj = kconserv[km,ki,kn]
        Ht2aa[ki,kj,:] += einsum('minj,wmnab->wijab', Woooo[km,ki,kn], tauaa[km,kn]) * .5
        Ht2bb[ki,kj,:] += einsum('MINJ,wMNAB->wIJAB', WOOOO[km,ki,kn], taubb[km,kn]) * .5
        Ht2ab[ki,kj,:] += einsum('miNJ,wmNaB->wiJaB', WooOO[km,ki,kn], tauab[km,kn])

    add_vvvv_(cc, (Ht2aa, Ht2ab, Ht2bb), t1, t2, eris)

    Wovvo, WovVO, WOVvo, WOVVO, WoVVo, WOvvO = \
            kintermediates_uhf.cc_Wovvo(cc, t1, t2, eris)

    #:Ht2ab += einsum('xwzimae,wvumeBJ,xwzv,wuvy->xyziJaB', t2aa, WovVO, P, P)
    #:Ht2ab += einsum('xwziMaE,wvuMEBJ,xwzv,wuvy->xyziJaB', t2ab, WOVVO, P, P)
    #:Ht2ab -= einsum('xie,zma,uwzBJme,zuwx,xyzu->xyziJaB', t1a, t1a, eris.VOov, P, P)
    for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
        kv = kconserv[kx, kz, kw]
        for ku in range(nkpts):
            ky = kconserv[kw, kv, ku]
            Ht2ab[kx, ky, kz] += lib.einsum('imae,mebj->ijab', t2aa[kx,kw,kz], WovVO[kw,kv,ku])
            Ht2ab[kx, ky, kz] += lib.einsum('imae,mebj->ijab', t2ab[kx,kw,kz], WOVVO[kw,kv,ku])

    #for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
    #    kx = kconserv[kz,kw,ku]
    #    ky = kconserv[kz,kx,ku]
    #    continue
    #    Ht2ab[kx, ky, kz] -= lib.einsum('ie, ma, emjb->ijab', t1a[kx], t1a[kz], eris.voOV[kx,kz,kw].conj())
    Ht2ab -= einsum('xie, yma, xyzemjb->xzyijab', t1a, t1a, eris.voOV[:].conj())

    #:Ht2ab += einsum('wxvmIeA,wvumebj,xwzv,wuvy->yxujIbA', t2ab, Wovvo, P, P)
    #:Ht2ab += einsum('wxvMIEA,wvuMEbj,xwzv,wuvy->yxujIbA', t2bb, WOVvo, P, P)
    #:Ht2ab -= einsum('xIE,zMA,uwzbjME,zuwx,xyzu->yxujIbA', t1b, t1b, eris.voOV, P, P)
    #for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
    #    kv = kconserv[kx, kz, kw]
    #    for ku in range(nkpts):
    #        ky = kconserv[kw, kv, ku]
    #        #Ht2ab[ky,kx,ku] += lib.einsum('miea, mebj-> jiba', t2ab[kw,kx,kv], Wovvo[kw,kv,ku])
    #        #Ht2ab[ky,kx,ku] += lib.einsum('miea, mebj-> jiba', t2bb[kw,kx,kv], WOVvo[kw,kv,ku])
    for km, ke, kb in kpts_helper.loop_kkk(nkpts):
        kj = kconserv[km, ke, kb]
        Ht2ab[kj,:,kb] += einsum('xmiea, mebj->xjiba', t2ab[km,:,ke], Wovvo[km,ke,kb])
        Ht2ab[kj,:,kb] += einsum('xmiea, mebj->xjiba', t2bb[km,:,ke], WOVvo[km,ke,kb])

    for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
        kx = kconserv[kz, kw, ku]
        ky = kconserv[kz, kx, ku]
        Ht2ab[ky,kx,ku] -= lib.einsum('ie, ma, bjme->jiba', t1b[kx], t1b[kz], eris.voOV[ku,kw,kz])

    #:Ht2ab += einsum('xwviMeA,wvuMebJ,xwzv,wuvy->xyuiJbA', t2ab, WOvvO, P, P)
    #:Ht2ab -= einsum('xie,zMA,zwuMJbe,zuwx,xyzu->xyuiJbA', t1a, t1b, eris.OOvv, P, P)
    #for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
    #    kv = kconserv[kx, kz, kw]
    #    for ku in range(nkpts):
    #        ky = kconserv[kw, kv, ku]
    #        Ht2ab[kx,ky,ku] += lib.einsum('imea,mebj->ijba', t2ab[kx,kw,kv],WOvvO[kw,kv,ku])
    for km, ke, kb in kpts_helper.loop_kkk(nkpts):
        kj = kconserv[km, ke, kb]
        Ht2ab[:,kj,kb] += einsum('ximea, mebj->xijba', t2ab[:,km,ke], WOvvO[km,ke,kb])

    for kz,ku,kw in kpts_helper.loop_kkk(nkpts):
        kx = kconserv[kz, kw, ku]
        ky = kconserv[kz, kx, ku]
        Ht2ab[kx,ky,ku] -= lib.einsum('ie, ma, mjbe->ijba', t1a[kx], t1b[kz], eris.OOvv[kz, kw, ku])

    #:Ht2ab += einsum('wxzmIaE,wvumEBj,xwzv,wuvy->yxzjIaB', t2ab, WoVVo, P, P)
    #:Ht2ab -= einsum('xIE,zma,zwumjBE,zuwx,xyzu->yxzjIaB', t1b, t1a, eris.ooVV, P, P)
    for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
        kv = kconserv[kx, kz, kw]
        for ku in range(nkpts):
            ky = kconserv[kw, kv, ku]
            Ht2ab[ky, kx, kz] += lib.einsum('miae,mebj->jiab', t2ab[kw,kx,kz], WoVVo[kw,kv,ku])

    for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
        kx = kconserv[kz,kw,ku]
        ky = kconserv[kz,kx,ku]
        Ht2ab[ky,kx,kz] -= lib.einsum('ie, ma, mjbe->jiab', t1b[kx], t1a[kz], eris.ooVV[kz,kw,ku])

    #:u2aa = einsum('xwzimae,wvumebj,xwzv,wuvy->xyzijab', t2aa, Wovvo, P, P)
    #:u2aa += einsum('xwziMaE,wvuMEbj,xwzv,wuvy->xyzijab', t2ab, WOVvo, P, P)
    #Left this in to keep proper shape, need to replace later
    u2aa = np.zeros_like(t2aa)
    for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
        kv = kconserv[kx, kz, kw]
        for ku in range(nkpts):
            ky = kconserv[kw, kv, ku]
            u2aa[kx,ky,kz] += lib.einsum('imae, mebj->ijab', t2aa[kx,kw,kz], Wovvo[kw,kv,ku])
            u2aa[kx,ky,kz] += lib.einsum('imae, mebj->ijab', t2ab[kx,kw,kz], WOVvo[kw,kv,ku])

    #:u2aa += einsum('xie,zma,zwumjbe,zuwx,xyzu->xyzijab', t1a, t1a, eris.oovv, P, P)
    #:u2aa -= einsum('xie,zma,uwzbjme,zuwx,xyzu->xyzijab', t1a, t1a, eris.voov, P, P)
    for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
        kx = kconserv[kz,kw,ku]
        ky = kconserv[kz,kx,ku]
        u2aa[kx,ky,kz] += lib.einsum('ie,ma,mjbe->ijab',t1a[kx],t1a[kz],eris.oovv[kz,kw,ku])
        u2aa[kx,ky,kz] -= lib.einsum('ie,ma,bjme->ijab',t1a[kx],t1a[kz],eris.voov[ku,kw,kz])

    #:u2aa += np.einsum('xie,uyzbjae,uzyx->xyzijab', t1a, eris.vovv, P)
    #:u2aa -= np.einsum('zma,xzyimjb->xyzijab', t1a, eris.ooov.conj())
    for ky, kx, ku in kpts_helper.loop_kkk(nkpts):
        kz = kconserv[ky, ku, kx]
        u2aa[kx, ky, kz] += lib.einsum('ie, bjae->ijab', t1a[kx], eris.vovv[ku,ky,kz])
        u2aa[kx, ky, kz] -= lib.einsum('ma, imjb->ijab', t1a[kz], eris.ooov[kx,kz,ky].conj())

    # antisymmetrize over (i<->j, a<->b) before accumulating
    u2aa = u2aa - u2aa.transpose(1,0,2,4,3,5,6)
    u2aa = u2aa - einsum('xyzijab,xyzu->xyuijba', u2aa, P)
    Ht2aa += u2aa

    #:u2bb = einsum('xwzimae,wvumebj,xwzv,wuvy->xyzijab', t2bb, WOVVO, P, P)
    #:u2bb += einsum('wxvMiEa,wvuMEbj,xwzv,wuvy->xyzijab', t2ab, WovVO, P, P)
    #:u2bb += einsum('xie,zma,zwumjbe,zuwx,xyzu->xyzijab', t1b, t1b, eris.OOVV, P, P)
    #:u2bb -= einsum('xie,zma,uwzbjme,zuwx,xyzu->xyzijab', t1b, t1b, eris.VOOV, P, P)
    u2bb = np.zeros_like(t2bb)
    for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
        kv = kconserv[kx, kz, kw]
        for ku in range(nkpts):
            ky = kconserv[kw,kv, ku]
            u2bb[kx, ky, kz] += lib.einsum('imae,mebj->ijab', t2bb[kx,kw,kz], WOVVO[kw,kv,ku])
            u2bb[kx, ky, kz] += lib.einsum('miea, mebj-> ijab', t2ab[kw,kx,kv],WovVO[kw,kv,ku])

    for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
        kx = kconserv[kz, kw, ku]
        ky = kconserv[kz, kx, ku]
        u2bb[kx, ky, kz] += lib.einsum('ie, ma, mjbe->ijab',t1b[kx],t1b[kz],eris.OOVV[kz,kw,ku])
        u2bb[kx, ky, kz] -= lib.einsum('ie, ma, bjme->ijab', t1b[kx], t1b[kz],eris.VOOV[ku,kw,kz])

    #:u2bb += np.einsum('xie,uzybjae,uzyx->xyzijab', t1b, eris.VOVV, P)
    #:u2bb -= np.einsum('zma,xzyimjb->xyzijab', t1b, eris.OOOV.conj())
    for ky, kx, ku in kpts_helper.loop_kkk(nkpts):
        kz = kconserv[ky, ku, kx]
        u2bb[kx,ky,kz] += lib.einsum('ie,bjae->ijab', t1b[kx], eris.VOVV[ku,ky,kz])

    #for kx, kz, ky in kpts_helper.loop_kkk(nkpts):
    #    u2bb[kx,ky,kz] -= lib.einsum('ma, imjb-> ijab', t1b[kz], eris.OOOV[kx,kz,ky].conj())
    u2bb -= einsum('zma, xzyimjb->xyzijab', t1b, eris.OOOV[:].conj())

    # antisymmetrize over (i<->j, a<->b) before accumulating
    u2bb = u2bb - u2bb.transpose(1,0,2,4,3,5,6)
    u2bb = u2bb - einsum('xyzijab,xyzu->xyuijba', u2bb, P)
    Ht2bb += u2bb

    #:Ht2ab += np.einsum('xie,uyzBJae,uzyx->xyziJaB', t1a, eris.VOvv, P)
    #:Ht2ab += np.einsum('yJE,zxuaiBE,zuxy->xyziJaB', t1b, eris.voVV, P)
    #:Ht2ab -= np.einsum('zma,xzyimjb->xyzijab', t1a, eris.ooOV.conj())
    #:Ht2ab -= np.einsum('umb,yuxjmia,xyuz->xyzijab', t1b, eris.OOov.conj(), P)
    for ky, kx, ku in kpts_helper.loop_kkk(nkpts):
        kz = kconserv[ky,ku,kx]
        Ht2ab[kx,ky,kz] += lib.einsum('ie, bjae-> ijab', t1a[kx], eris.VOvv[ku,ky,kz])
        Ht2ab[kx,ky,kz] += lib.einsum('je, aibe-> ijab', t1b[ky], eris.voVV[kz,kx,ku])

    #for kx, kz, ky in kpts_helper.loop_kkk(nkpts):
    #    Ht2ab[kx,ky,kz] -= lib.einsum('ma, imjb->ijab', t1a[kz], eris.ooOV[kx,kz,ky].conj())
    Ht2ab -= einsum('zma, xzyimjb->xyzijab', t1a, eris.ooOV[:].conj())

    for kx, ky, ku in kpts_helper.loop_kkk(nkpts):
        kz = kconserv[kx, ku, ky]
        Ht2ab[kx,ky,kz] -= lib.einsum('mb,jmia->ijab',t1b[ku],eris.OOov[ky,ku,kx].conj())

    # build orbital-energy denominators, masking padded (fictitious) orbitals
    # with LARGE_DENOM so they do not contribute
    eia = []
    eIA = []
    for ki in range(nkpts):
        tmp_alpha = []
        tmp_beta = []
        for ka in range(nkpts):
            tmp_eia = LARGE_DENOM * np.ones((nocca, nvira), dtype=eris.mo_energy[0][0].dtype)
            tmp_eIA = LARGE_DENOM * np.ones((noccb, nvirb), dtype=eris.mo_energy[0][0].dtype)
            n0_ovp_ia = np.ix_(nonzero_opadding_alpha[ki], nonzero_vpadding_alpha[ka])
            n0_ovp_IA = np.ix_(nonzero_opadding_beta[ki], nonzero_vpadding_beta[ka])

            tmp_eia[n0_ovp_ia] = (mo_ea_o[ki][:,None] - mo_ea_v[ka])[n0_ovp_ia]
            tmp_eIA[n0_ovp_IA] = (mo_eb_o[ki][:,None] - mo_eb_v[ka])[n0_ovp_IA]
            tmp_alpha.append(tmp_eia)
            tmp_beta.append(tmp_eIA)
        eia.append(tmp_alpha)
        eIA.append(tmp_beta)

    for ki in range(nkpts):
        ka = ki
        # Remove zero/padded elements from denominator
        Ht1a[ki] /= eia[ki][ka]
        Ht1b[ki] /= eIA[ki][ka]

    for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
        kb = kconserv[ki, ka, kj]
        eijab = eia[ki][ka][:,None,:,None] + eia[kj][kb][:,None,:]
        Ht2aa[ki,kj,ka] /= eijab

        eijab = eia[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]
        Ht2ab[ki,kj,ka] /= eijab

        eijab = eIA[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]
        Ht2bb[ki,kj,ka] /= eijab

    time0 = log.timer_debug1('update t1 t2', *time0)
    return (Ht1a, Ht1b), (Ht2aa, Ht2ab, Ht2bb)
def get_normt_diff(cc, t1, t2, t1new, t2new):
    """Root-sum-square change of all amplitude blocks between iterations.

    Treats the (t1a, t1b, t2aa, t2ab, t2bb) blocks as one concatenated
    vector and returns the Euclidean norm of (new - old).  ``cc`` is
    accepted for interface compatibility but is not used.
    """
    sq_sum = 0.0
    for old_amp, new_amp in zip(t1 + t2, t1new + t2new):
        sq_sum += np.linalg.norm(new_amp - old_amp) ** 2
    return sq_sum ** .5
def energy(cc, t1, t2, eris):
    """K-point UCCSD correlation energy from the t1, t2 amplitudes.

    Sums the singles term (f_ov . t1) and the doubles term built from
    tau = t2 + t1*t1, then divides by the number of k-points.  Returns the
    real part; a warning is logged when a significant imaginary component
    remains.
    """
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    kka, noa, nva = t1a.shape
    kkb, nob, nvb = t1b.shape
    # Alpha and beta amplitudes must be sampled on the same k-point mesh.
    assert(kka == kkb)
    nkpts = kka
    # Singles contribution: sum_k f_ia t_ia for both spin channels.
    s = 0.0 + 0j
    fa, fb = eris.fock
    for ki in range(nkpts):
        s += einsum('ia,ia', fa[ki, :noa, noa:], t1a[ki, :, :])
        s += einsum('ia,ia', fb[ki, :nob, nob:], t1b[ki, :, :])
    # Disconnected t1*t1 products.  Only the ka == ki block is filled below,
    # since two t1 amplitudes carry no net momentum transfer.
    t1t1aa = np.zeros(shape=t2aa.shape, dtype=t2aa.dtype)
    t1t1ab = np.zeros(shape=t2ab.shape, dtype=t2ab.dtype)
    t1t1bb = np.zeros(shape=t2bb.shape, dtype=t2bb.dtype)
    for ki in range(nkpts):
        ka = ki
        for kj in range(nkpts):
            t1t1aa[ki, kj, ka, :, :, :, :] = einsum('ia,jb->ijab', t1a[ki, :, :], t1a[kj, :, :])
            t1t1ab[ki, kj, ka, :, :, :, :] = einsum('ia,jb->ijab', t1a[ki, :, :], t1b[kj, :, :])
            t1t1bb[ki, kj, ka, :, :, :, :] = einsum('ia,jb->ijab', t1b[ki, :, :], t1b[kj, :, :])
    # tau = t2 + t1*t1.  NOTE(review): t1t1 holds only the direct i->a, j->b
    # product; the factor 2 on the same-spin blocks appears to compensate for
    # the antisymmetrized pair of contractions below — confirm against the
    # molecular uccsd energy expression.
    tauaa = t2aa + 2*t1t1aa
    tauab = t2ab + t1t1ab
    taubb = t2bb + 2*t1t1bb
    # Doubles contribution; x,y,z label k-points in the 7-index einsums.
    d = 0.0 + 0.j
    d += 0.25*(einsum('xzyiajb,xyzijab->',eris.ovov,tauaa) -
               einsum('yzxjaib,xyzijab->',eris.ovov,tauaa))
    d += einsum('xzyiajb,xyzijab->',eris.ovOV,tauab)
    d += 0.25*(einsum('xzyiajb,xyzijab->',eris.OVOV,taubb) -
               einsum('yzxjaib,xyzijab->',eris.OVOV,taubb))
    e = s + d
    e /= nkpts
    if abs(e.imag) > 1e-4:
        logger.warn(cc, 'Non-zero imaginary part found in KCCSD energy %s', e)
    return e.real
#def get_nocc(cc, per_kpoint=False):
# '''See also function get_nocc in pyscf/pbc/mp2/kmp2.py'''
# if cc._nocc is not None:
# return cc._nocc
#
# assert(cc.frozen == 0)
#
# if isinstance(cc.frozen, (int, np.integer)):
# nocca = [(np.count_nonzero(cc.mo_occ[0][k] > 0) - cc.frozen) for k in range(cc.nkpts)]
# noccb = [(np.count_nonzero(cc.mo_occ[1][k] > 0) - cc.frozen) for k in range(cc.nkpts)]
#
# else:
# raise NotImplementedError
#
# if not per_kpoint:
# nocca = np.amax(nocca)
# noccb = np.amax(noccb)
# return nocca, noccb
#
#def get_nmo(cc, per_kpoint=False):
# '''See also function get_nmo in pyscf/pbc/mp2/kmp2.py'''
# if cc._nmo is not None:
# return cc._nmo
#
# assert(cc.frozen == 0)
#
# if isinstance(cc.frozen, (int, np.integer)):
# nmoa = [(cc.mo_occ[0][k].size - cc.frozen) for k in range(cc.nkpts)]
# nmob = [(cc.mo_occ[1][k].size - cc.frozen) for k in range(cc.nkpts)]
#
# else:
# raise NotImplementedError
#
# if not per_kpoint:
# nmoa = np.amax(nmoa)
# nmob = np.amax(nmob)
# return nmoa, nmob
#
#def get_frozen_mask(cc):
# '''See also get_frozen_mask function in pyscf/pbc/mp2/kmp2.py'''
#
# moidxa = [np.ones(x.size, dtype=np.bool) for x in cc.mo_occ[0]]
# moidxb = [np.ones(x.size, dtype=np.bool) for x in cc.mo_occ[1]]
# assert(cc.frozen == 0)
#
# if isinstance(cc.frozen, (int, np.integer)):
# for idx in moidxa:
# idx[:cc.frozen] = False
# for idx in moidxb:
# idx[:cc.frozen] = False
# else:
# raise NotImplementedError
#
#    return moidxa, moidxb
def amplitudes_to_vector(t1, t2):
    """Flatten the (t1a, t1b) and (t2aa, t2ab, t2bb) blocks into one 1D vector."""
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    return np.concatenate([t1a.ravel(), t1b.ravel(),
                           t2aa.ravel(), t2ab.ravel(), t2bb.ravel()])
def vector_to_amplitudes(vec, nmo, nocc, nkpts=1):
    """Unpack a flat amplitude vector into ((t1a, t1b), (t2aa, t2ab, t2bb)).

    Inverse of :func:`amplitudes_to_vector`.  ``nmo`` and ``nocc`` are
    (alpha, beta) pairs; the returned arrays are views into ``vec``.
    """
    nocca, noccb = nocc
    nmoa, nmob = nmo
    nvira, nvirb = nmoa - nocca, nmob - noccb
    # Target shapes in packing order: t1a, t1b, t2aa, t2ab, t2bb.
    shapes = ((nkpts, nocca, nvira),
              (nkpts, noccb, nvirb),
              (nkpts, nkpts, nkpts, nocca, nocca, nvira, nvira),
              (nkpts, nkpts, nkpts, nocca, noccb, nvira, nvirb),
              (nkpts, nkpts, nkpts, noccb, noccb, nvirb, nvirb))
    blocks = []
    offset = 0
    for shape in shapes:
        size = int(np.prod(shape))
        blocks.append(vec[offset:offset+size].reshape(shape))
        offset += size
    t1a, t1b, t2aa, t2ab, t2bb = blocks
    return (t1a, t1b), (t2aa, t2ab, t2bb)
def add_vvvv_(cc, Ht2, t1, t2, eris):
    """Add the W(vvvv)-ladder contractions to Ht2 in place.

    Contracts virtual-virtual intermediates with tau = t2 + t1*t1 and
    accumulates into (Ht2aa, Ht2ab, Ht2bb).  When ``cc.direct`` is set and
    density-fitted three-center integrals (``eris.Lpv``/``eris.LPV``) are
    available, the W intermediates are assembled on the fly per (ka, kc, kb)
    triple; otherwise the precomputed half-transformed intermediates from
    ``kintermediates_uhf.cc_Wvvvv_half`` are used.  Returns the updated Ht2
    tuple (the same arrays, modified in place).
    """
    nocca, noccb = cc.nocc
    nmoa, nmob = cc.nmo
    nkpts = cc.nkpts
    kconserv = cc.khelper.kconserv
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    Ht2aa, Ht2ab, Ht2bb = Ht2
    if cc.direct and getattr(eris, 'Lpv', None) is not None:
        def get_Wvvvv(ka, kc, kb):
            # Build W(ac|bd) for the aa, ab and bb spin blocks from the
            # DF 3-center integrals, dressing the virtual index with t1
            # (Lbd/LBD) and subtracting the occupied-exchange t1 term.
            kd = kconserv[ka,kc,kb]
            Lpv = eris.Lpv
            LPV = eris.LPV
            Lbd = (Lpv[kb,kd][:,nocca:] -
                   lib.einsum('Lkd,kb->Lbd', Lpv[kb,kd][:,:nocca], t1a[kb]))
            Wvvvv = lib.einsum('Lac,Lbd->acbd', Lpv[ka,kc][:,nocca:], Lbd)
            kcbd = lib.einsum('Lkc,Lbd->kcbd', Lpv[ka,kc][:,:nocca],
                              Lpv[kb,kd][:,nocca:])
            Wvvvv -= lib.einsum('kcbd,ka->acbd', kcbd, t1a[ka])
            LBD = (LPV[kb,kd][:,noccb:] -
                   lib.einsum('Lkd,kb->Lbd', LPV[kb,kd][:,:noccb], t1b[kb]))
            WvvVV = lib.einsum('Lac,Lbd->acbd', Lpv[ka,kc][:,nocca:], LBD)
            kcbd = lib.einsum('Lkc,Lbd->kcbd', Lpv[ka,kc][:,:nocca],
                              LPV[kb,kd][:,noccb:])
            WvvVV -= lib.einsum('kcbd,ka->acbd', kcbd, t1a[ka])
            WVVVV = lib.einsum('Lac,Lbd->acbd', LPV[ka,kc][:,noccb:], LBD)
            kcbd = lib.einsum('Lkc,Lbd->kcbd', LPV[ka,kc][:,:noccb],
                              LPV[kb,kd][:,noccb:])
            WVVVV -= lib.einsum('kcbd,ka->acbd', kcbd, t1b[ka])
            # 1/nkpts factor from the k-point averaged integral convention.
            Wvvvv *= (1./nkpts)
            WvvVV *= (1./nkpts)
            WVVVV *= (1./nkpts)
            return Wvvvv, WvvVV, WVVVV
    else:
        # Precomputed half-transformed intermediates; get_Wvvvv just slices.
        _Wvvvv, _WvvVV, _WVVVV = kintermediates_uhf.cc_Wvvvv_half(cc, t1, t2, eris)
        def get_Wvvvv(ka, kc, kb):
            return _Wvvvv[ka,kc,kb], _WvvVV[ka,kc,kb], _WVVVV[ka,kc,kb]
    #:Ht2aa += np.einsum('xyuijef,zuwaebf,xyuv,zwuv->xyzijab', tauaa, _Wvvvv-_Wvvvv.transpose(2,1,0,5,4,3,6), P, P) * .5
    #:Ht2bb += np.einsum('xyuijef,zuwaebf,xyuv,zwuv->xyzijab', taubb, _WVVVV-_WVVVV.transpose(2,1,0,5,4,3,6), P, P) * .5
    #:Ht2ab += np.einsum('xyuiJeF,zuwaeBF,xyuv,zwuv->xyziJaB', tauab, _WvvVV, P, P)
    for ka, kb, kc in kpts_helper.loop_kkk(nkpts):
        kd = kconserv[ka,kc,kb]
        Wvvvv, WvvVV, WVVVV = get_Wvvvv(ka, kc, kb)
        for ki in range(nkpts):
            kj = kconserv[ka,ki,kb]
            # tau = t2 plus the disconnected t1*t1 pieces; the t1*t1 terms
            # only contribute at the momentum-matching (kc, kd) blocks.
            tauaa = t2aa[ki,kj,kc].copy()
            tauab = t2ab[ki,kj,kc].copy()
            taubb = t2bb[ki,kj,kc].copy()
            if ki == kc and kj == kd:
                tauaa += einsum('ic,jd->ijcd', t1a[ki], t1a[kj])
                tauab += einsum('ic,jd->ijcd', t1a[ki], t1b[kj])
                taubb += einsum('ic,jd->ijcd', t1b[ki], t1b[kj])
            if ki == kd and kj == kc:
                # Exchange part of the antisymmetrized same-spin tau.
                tauaa -= einsum('id,jc->ijcd', t1a[ki], t1a[kj])
                taubb -= einsum('id,jc->ijcd', t1b[ki], t1b[kj])
            # Same-spin blocks: accumulate the direct term at ka and the
            # antisymmetrized counterpart at kb.
            tmp = lib.einsum('acbd,ijcd->ijab', Wvvvv, tauaa) * .5
            Ht2aa[ki,kj,ka] += tmp
            Ht2aa[ki,kj,kb] -= tmp.transpose(0,1,3,2)
            tmp = lib.einsum('acbd,ijcd->ijab', WVVVV, taubb) * .5
            Ht2bb[ki,kj,ka] += tmp
            Ht2bb[ki,kj,kb] -= tmp.transpose(0,1,3,2)
            Ht2ab[ki,kj,ka] += lib.einsum('acbd,ijcd->ijab', WvvVV, tauab)
        Wvvvv = WvvVV = WVVVV = None
    _Wvvvv = _WvvVV = _WVVVV = None
    # Contractions below are merged to Woooo intermediates
    # tauaa, tauab, taubb = kintermediates_uhf.make_tau(cc, t2, t1, t1)
    # P = kintermediates_uhf.kconserv_mat(cc.nkpts, cc.khelper.kconserv)
    # minj = np.einsum('xwymenf,uvwijef,xywz,uvwz->xuyminj', eris.ovov, tauaa, P, P)
    # MINJ = np.einsum('xwymenf,uvwijef,xywz,uvwz->xuyminj', eris.OVOV, taubb, P, P)
    # miNJ = np.einsum('xwymeNF,uvwiJeF,xywz,uvwz->xuymiNJ', eris.ovOV, tauab, P, P)
    # Ht2aa += np.einsum('xuyminj,xywmnab,xyuv->uvwijab', minj, tauaa, P) * .25
    # Ht2bb += np.einsum('xuyminj,xywmnab,xyuv->uvwijab', MINJ, taubb, P) * .25
    # Ht2ab += np.einsum('xuymiNJ,xywmNaB,xyuv->uvwiJaB', miNJ, tauab, P) * .5
    return (Ht2aa, Ht2ab, Ht2bb)
class KUCCSD(uccsd.UCCSD):
    """Unrestricted CCSD with k-point sampling.

    Subclass of the molecular :class:`uccsd.UCCSD` that adds k-point
    bookkeeping (``kpts``, ``khelper``) and k-point aware amplitude
    packing, initial guess and ERI construction.
    """
    # Maximum DIIS subspace size (overridable via pyscf __config__).
    max_space = getattr(__config__, 'pbc_cc_kccsd_uhf_KUCCSD_max_space', 20)
    def __init__(self, mf, frozen=None, mo_coeff=None, mo_occ=None):
        """Initialize from a k-point UHF mean-field object ``mf``."""
        assert(isinstance(mf, scf.khf.KSCF))
        uccsd.UCCSD.__init__(self, mf, frozen, mo_coeff, mo_occ)
        self.kpts = mf.kpts
        self.mo_energy = mf.mo_energy
        self.khelper = kpts_helper.KptsHelper(mf.cell, self.kpts)
        self.direct = True  # If possible, use GDF to compute Wvvvv on-the-fly
        keys = set(['kpts', 'mo_energy', 'khelper', 'max_space', 'direct'])
        self._keys = self._keys.union(keys)
    @property
    def nkpts(self):
        """Number of sampled k-points."""
        return len(self.kpts)
    # Bind module-level helpers as methods.  NOTE(review): get_nocc/get_nmo/
    # get_frozen_mask are module-level names defined outside this chunk
    # (the commented-out versions above are not the active definitions).
    get_normt_diff = get_normt_diff
    get_nocc = get_nocc
    get_nmo = get_nmo
    get_frozen_mask = get_frozen_mask
    update_amps = update_amps
    energy = energy
    def dump_flags(self, verbose=None):
        """Log the calculation settings (delegates to the molecular UCCSD)."""
        return uccsd.UCCSD.dump_flags(self, verbose)
    def ao2mo(self, mo_coeff=None):
        """Build the MO-basis ERI container.

        Chooses incore storage when memory allows, the density-fitted path
        when ``self.direct`` is set with a plain GDF object (MDF and 2D
        systems are excluded), and the outcore (HDF5) path otherwise.
        """
        from pyscf.pbc.df.df import GDF
        cell = self._scf.cell
        nkpts = self.nkpts
        nmoa, nmob = self.nmo
        # Rough incore memory estimate in MB for the full 7-index ERIs.
        mem_incore = nkpts**3 * (nmoa**4 + nmob**4) * 8 / 1e6
        mem_now = lib.current_memory()[0]
        if (mem_incore + mem_now < self.max_memory) or self.mol.incore_anyway:
            return _make_eris_incore(self, mo_coeff)
        elif (self.direct and type(self._scf.with_df) is GDF
              and cell.dimension != 2):
            # DFKCCSD does not support MDF
            return _make_df_eris(self, mo_coeff)
        else:
            return _make_eris_outcore(self, mo_coeff)
    def init_amps(self, eris):
        """MP2 initial guess for the amplitudes.

        t1 is initialized to zero; t2 is set to the first-order amplitudes
        <ia|jb>*/D with padded orbitals excluded from the denominators.
        Returns ``(emp2, t1, t2)`` and stores the MP2 energy in
        ``self.emp2``.
        """
        time0 = logger.process_clock(), logger.perf_counter()
        nocca, noccb = self.nocc
        nmoa, nmob = self.nmo
        nvira, nvirb = nmoa - nocca, nmob - noccb
        nkpts = self.nkpts
        t1a = np.zeros((nkpts, nocca, nvira), dtype=np.complex128)
        t1b = np.zeros((nkpts, noccb, nvirb), dtype=np.complex128)
        t1 = (t1a, t1b)
        t2aa = np.zeros((nkpts, nkpts, nkpts, nocca, nocca, nvira, nvira), dtype=np.complex128)
        t2ab = np.zeros((nkpts, nkpts, nkpts, nocca, noccb, nvira, nvirb), dtype=np.complex128)
        t2bb = np.zeros((nkpts, nkpts, nkpts, noccb, noccb, nvirb, nvirb), dtype=np.complex128)
        mo_ea_o = [e[:nocca] for e in eris.mo_energy[0]]
        mo_eb_o = [e[:noccb] for e in eris.mo_energy[1]]
        mo_ea_v = [e[nocca:] for e in eris.mo_energy[0]]
        mo_eb_v = [e[noccb:] for e in eris.mo_energy[1]]
        # Get location of padded elements in occupied and virtual space
        nonzero_padding_alpha, nonzero_padding_beta = padding_k_idx(self, kind="split")
        nonzero_opadding_alpha, nonzero_vpadding_alpha = nonzero_padding_alpha
        nonzero_opadding_beta, nonzero_vpadding_beta = nonzero_padding_beta
        eia = []
        eIA = []
        # Create denominators, ignoring padded elements
        for ki in range(nkpts):
            tmp_alpha = []
            tmp_beta = []
            for ka in range(nkpts):
                # LARGE_DENOM keeps padded entries from contributing.
                tmp_eia = LARGE_DENOM * np.ones((nocca, nvira), dtype=eris.mo_energy[0][0].dtype)
                tmp_eIA = LARGE_DENOM * np.ones((noccb, nvirb), dtype=eris.mo_energy[0][0].dtype)
                n0_ovp_ia = np.ix_(nonzero_opadding_alpha[ki], nonzero_vpadding_alpha[ka])
                n0_ovp_IA = np.ix_(nonzero_opadding_beta[ki], nonzero_vpadding_beta[ka])
                tmp_eia[n0_ovp_ia] = (mo_ea_o[ki][:,None] - mo_ea_v[ka])[n0_ovp_ia]
                tmp_eIA[n0_ovp_IA] = (mo_eb_o[ki][:,None] - mo_eb_v[ka])[n0_ovp_IA]
                tmp_alpha.append(tmp_eia)
                tmp_beta.append(tmp_eIA)
            eia.append(tmp_alpha)
            eIA.append(tmp_beta)
        kconserv = kpts_helper.get_kconserv(self._scf.cell, self.kpts)
        for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
            kb = kconserv[ki, ka, kj]
            # Four-index denominators D_ijab from the two-index ones.
            Daa = eia[ki][ka][:,None,:,None] + eia[kj][kb][:,None,:]
            Dab = eia[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]
            Dbb = eIA[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]
            # First-order t2; same-spin blocks are antisymmetrized.
            t2aa[ki,kj,ka] = eris.ovov[ki,ka,kj].conj().transpose((0,2,1,3)) / Daa
            t2aa[ki,kj,ka]-= eris.ovov[kj,ka,ki].conj().transpose((2,0,1,3)) / Daa
            t2ab[ki,kj,ka] = eris.ovOV[ki,ka,kj].conj().transpose((0,2,1,3)) / Dab
            t2bb[ki,kj,ka] = eris.OVOV[ki,ka,kj].conj().transpose((0,2,1,3)) / Dbb
            t2bb[ki,kj,ka]-= eris.OVOV[kj,ka,ki].conj().transpose((2,0,1,3)) / Dbb
        t2 = (t2aa,t2ab,t2bb)
        # MP2 energy from the first-order doubles.
        d = 0.0 + 0.j
        d += 0.25*(einsum('xzyiajb,xyzijab->',eris.ovov,t2aa) -
                   einsum('yzxjaib,xyzijab->',eris.ovov,t2aa))
        d += einsum('xzyiajb,xyzijab->',eris.ovOV,t2ab)
        d += 0.25*(einsum('xzyiajb,xyzijab->',eris.OVOV,t2bb) -
                   einsum('yzxjaib,xyzijab->',eris.OVOV,t2bb))
        self.emp2 = d/nkpts
        logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2.real)
        logger.timer(self, 'init mp2', *time0)
        return self.emp2, t1, t2
    def amplitudes_to_vector(self, t1, t2):
        """Pack (t1, t2) amplitude blocks into a single 1D vector."""
        return amplitudes_to_vector(t1, t2)
    def vector_to_amplitudes(self, vec, nmo=None, nocc=None, nkpts=None):
        """Unpack a flat vector into (t1, t2) amplitude blocks."""
        if nocc is None: nocc = self.nocc
        if nmo is None: nmo = self.nmo
        if nkpts is None: nkpts = self.nkpts
        return vector_to_amplitudes(vec, nmo, nocc, nkpts)
UCCSD = KUCCSD  # Backward-compatible alias for code importing UCCSD from this module
#######################################
#
# _ERIS.
#
# Note the two electron integrals are stored in different orders from
# kccsd_rhf._ERIS. Integrals (ab|cd) are stored as [ka,kb,kc,a,b,c,d] here
# while the order is [ka,kc,kb,a,c,b,d] in kccsd_rhf._ERIS
#
# TODO: use the same convention as kccsd_rhf
#
def _make_eris_incore(cc, mo_coeff=None):
    """Build the full MO-basis ERI container in memory.

    Allocates every spin/occupancy block as an in-core numpy array, fills
    the occupied-index blocks via :func:`_kuccsd_eris_common_`, and computes
    the all-virtual blocks (vvvv, VVVV, vvVV) with the density-fitting
    object's ``ao2mo_7d``.
    """
    eris = uccsd._ChemistsERIs()
    if mo_coeff is None:
        mo_coeff = cc.mo_coeff
    mo_coeff = convert_mo_coeff(mo_coeff)  # FIXME: Remove me!
    # Pad orbitals so every k-point has the same number of MOs.
    mo_coeff = padded_mo_coeff(cc, mo_coeff)
    eris.mo_coeff = mo_coeff
    eris.nocc = cc.nocc
    nkpts = cc.nkpts
    nocca, noccb = cc.nocc
    nmoa, nmob = cc.nmo
    nvira, nvirb = nmoa - nocca, nmob - noccb
    # Real integrals only at the Gamma point; complex otherwise.
    if gamma_point(cc.kpts):
        dtype = np.double
    else:
        dtype = np.complex128
    dtype = np.result_type(dtype, *mo_coeff[0])
    eris.oooo = np.empty((nkpts,nkpts,nkpts,nocca,nocca,nocca,nocca), dtype=dtype)
    eris.ooov = np.empty((nkpts,nkpts,nkpts,nocca,nocca,nocca,nvira), dtype=dtype)
    eris.oovv = np.empty((nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira), dtype=dtype)
    eris.ovov = np.empty((nkpts,nkpts,nkpts,nocca,nvira,nocca,nvira), dtype=dtype)
    eris.voov = np.empty((nkpts,nkpts,nkpts,nvira,nocca,nocca,nvira), dtype=dtype)
    eris.vovv = np.empty((nkpts,nkpts,nkpts,nvira,nocca,nvira,nvira), dtype=dtype)
    eris.OOOO = np.empty((nkpts,nkpts,nkpts,noccb,noccb,noccb,noccb), dtype=dtype)
    eris.OOOV = np.empty((nkpts,nkpts,nkpts,noccb,noccb,noccb,nvirb), dtype=dtype)
    eris.OOVV = np.empty((nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb), dtype=dtype)
    eris.OVOV = np.empty((nkpts,nkpts,nkpts,noccb,nvirb,noccb,nvirb), dtype=dtype)
    eris.VOOV = np.empty((nkpts,nkpts,nkpts,nvirb,noccb,noccb,nvirb), dtype=dtype)
    eris.VOVV = np.empty((nkpts,nkpts,nkpts,nvirb,noccb,nvirb,nvirb), dtype=dtype)
    eris.ooOO = np.empty((nkpts,nkpts,nkpts,nocca,nocca,noccb,noccb), dtype=dtype)
    eris.ooOV = np.empty((nkpts,nkpts,nkpts,nocca,nocca,noccb,nvirb), dtype=dtype)
    eris.ooVV = np.empty((nkpts,nkpts,nkpts,nocca,nocca,nvirb,nvirb), dtype=dtype)
    eris.ovOV = np.empty((nkpts,nkpts,nkpts,nocca,nvira,noccb,nvirb), dtype=dtype)
    eris.voOV = np.empty((nkpts,nkpts,nkpts,nvira,nocca,noccb,nvirb), dtype=dtype)
    eris.voVV = np.empty((nkpts,nkpts,nkpts,nvira,nocca,nvirb,nvirb), dtype=dtype)
    # OOoo is not stored; presumably recoverable from ooOO — see the
    # commented-out assignment in _kuccsd_eris_common_.
    eris.OOoo = None
    eris.OOov = np.empty((nkpts,nkpts,nkpts,noccb,noccb,nocca,nvira), dtype=dtype)
    eris.OOvv = np.empty((nkpts,nkpts,nkpts,noccb,noccb,nvira,nvira), dtype=dtype)
    eris.OVov = np.empty((nkpts,nkpts,nkpts,noccb,nvirb,nocca,nvira), dtype=dtype)
    eris.VOov = np.empty((nkpts,nkpts,nkpts,nvirb,noccb,nocca,nvira), dtype=dtype)
    eris.VOvv = np.empty((nkpts,nkpts,nkpts,nvirb,noccb,nvira,nvira), dtype=dtype)
    # Fill fock, mo_energy and all occupied-index blocks.
    _kuccsd_eris_common_(cc, eris)
    thisdf = cc._scf.with_df
    orbva = np.asarray(mo_coeff[0][:,:,nocca:], order='C')
    orbvb = np.asarray(mo_coeff[1][:,:,noccb:], order='C')
    # All-virtual blocks, with the 1/nkpts integral normalization.
    eris.vvvv = thisdf.ao2mo_7d(orbva, factor=1./nkpts)
    eris.VVVV = thisdf.ao2mo_7d(orbvb, factor=1./nkpts)
    eris.vvVV = thisdf.ao2mo_7d([orbva,orbva,orbvb,orbvb], factor=1./nkpts)
    return eris
def _kuccsd_eris_common_(cc, eris, buf=None):
    """Fill the shared (occupied-index) parts of an ERI container in place.

    Computes the Fock matrices, HF energy and Madelung-corrected orbital
    energies, then runs four 7-index AO->MO transformations — one per spin
    combination with at least one occupied first index — and scatters the
    slices into the pre-allocated ``eris`` blocks.  If ``buf`` is an
    ``h5py.Group``, the transformation scratch lives on disk in ``buf``;
    otherwise it is held in memory.  Returns ``eris``.
    """
    from pyscf.pbc import tools
    from pyscf.pbc.cc.ccsd import _adjust_occ
    #if not (cc.frozen is None or cc.frozen == 0):
    #    raise NotImplementedError('cc.frozen = %s' % str(cc.frozen))
    cput0 = (logger.process_clock(), logger.perf_counter())
    log = logger.new_logger(cc)
    cell = cc._scf.cell
    thisdf = cc._scf.with_df
    kpts = cc.kpts
    nkpts = cc.nkpts
    mo_coeff = eris.mo_coeff
    nocca, noccb = eris.nocc
    nmoa, nmob = cc.nmo
    mo_a, mo_b = mo_coeff
    # Re-make our fock MO matrix elements from density and fock AO
    dm = cc._scf.make_rdm1(cc.mo_coeff, cc.mo_occ)
    hcore = cc._scf.get_hcore()
    # Build the potential without the exxdiv correction.
    with lib.temporary_env(cc._scf, exxdiv=None):
        vhf = cc._scf.get_veff(cell, dm)
    focka = [reduce(np.dot, (mo.conj().T, hcore[k]+vhf[0][k], mo))
             for k, mo in enumerate(mo_a)]
    fockb = [reduce(np.dot, (mo.conj().T, hcore[k]+vhf[1][k], mo))
             for k, mo in enumerate(mo_b)]
    eris.fock = (np.asarray(focka), np.asarray(fockb))
    eris.e_hf = cc._scf.energy_tot(dm=dm, vhf=vhf)
    # Shift occupied orbital energies by -madelung (finite-size correction).
    madelung = tools.madelung(cell, kpts)
    mo_ea = [focka[k].diagonal().real for k in range(nkpts)]
    mo_eb = [fockb[k].diagonal().real for k in range(nkpts)]
    mo_ea = [_adjust_occ(e, nocca, -madelung) for e in mo_ea]
    mo_eb = [_adjust_occ(e, noccb, -madelung) for e in mo_eb]
    eris.mo_energy = (mo_ea, mo_eb)
    orboa = np.asarray(mo_coeff[0][:,:,:nocca], order='C')
    orbob = np.asarray(mo_coeff[1][:,:,:noccb], order='C')
    #orbva = np.asarray(mo_coeff[0][:,:,nocca:], order='C')
    #orbvb = np.asarray(mo_coeff[1][:,:,noccb:], order='C')
    dtype = np.result_type(*focka).char
    # The momentum conservation array
    kconserv = cc.khelper.kconserv
    # --- alpha-alpha (o p | p p) transformation ---
    out = None
    if isinstance(buf, h5py.Group):
        out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,nocca,nmoa,nmoa,nmoa), dtype)
    oppp = thisdf.ao2mo_7d([orboa,mo_coeff[0],mo_coeff[0],mo_coeff[0]], kpts,
                           factor=1./nkpts, out=out)
    for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
        ks = kconserv[kp,kq,kr]
        tmp = np.asarray(oppp[kp,kq,kr])
        eris.oooo[kp,kq,kr] = tmp[:nocca,:nocca,:nocca,:nocca]
        eris.ooov[kp,kq,kr] = tmp[:nocca,:nocca,:nocca,nocca:]
        eris.oovv[kp,kq,kr] = tmp[:nocca,:nocca,nocca:,nocca:]
        eris.ovov[kp,kq,kr] = tmp[:nocca,nocca:,:nocca,nocca:]
        # voov/vovv are recovered by conjugation + transposition, stored at
        # the momentum-conserving permuted k-indices (kq,kp,ks).
        eris.voov[kq,kp,ks] = tmp[:nocca,nocca:,nocca:,:nocca].conj().transpose(1,0,3,2)
        eris.vovv[kq,kp,ks] = tmp[:nocca,nocca:,nocca:,nocca:].conj().transpose(1,0,3,2)
    oppp = None
    # --- beta-beta (O P | P P) transformation ---
    if isinstance(buf, h5py.Group):
        del(buf['tmp'])
        out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,noccb,nmob,nmob,nmob), dtype)
    oppp = thisdf.ao2mo_7d([orbob,mo_coeff[1],mo_coeff[1],mo_coeff[1]], kpts,
                           factor=1./nkpts, out=out)
    for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
        ks = kconserv[kp,kq,kr]
        tmp = np.asarray(oppp[kp,kq,kr])
        eris.OOOO[kp,kq,kr] = tmp[:noccb,:noccb,:noccb,:noccb]
        eris.OOOV[kp,kq,kr] = tmp[:noccb,:noccb,:noccb,noccb:]
        eris.OOVV[kp,kq,kr] = tmp[:noccb,:noccb,noccb:,noccb:]
        eris.OVOV[kp,kq,kr] = tmp[:noccb,noccb:,:noccb,noccb:]
        eris.VOOV[kq,kp,ks] = tmp[:noccb,noccb:,noccb:,:noccb].conj().transpose(1,0,3,2)
        eris.VOVV[kq,kp,ks] = tmp[:noccb,noccb:,noccb:,noccb:].conj().transpose(1,0,3,2)
    oppp = None
    # --- mixed-spin (o p | P P) transformation ---
    if isinstance(buf, h5py.Group):
        del(buf['tmp'])
        out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,nocca,nmoa,nmob,nmob), dtype)
    oppp = thisdf.ao2mo_7d([orboa,mo_coeff[0],mo_coeff[1],mo_coeff[1]], kpts,
                           factor=1./nkpts, out=out)
    for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
        ks = kconserv[kp,kq,kr]
        tmp = np.asarray(oppp[kp,kq,kr])
        eris.ooOO[kp,kq,kr] = tmp[:nocca,:nocca,:noccb,:noccb]
        eris.ooOV[kp,kq,kr] = tmp[:nocca,:nocca,:noccb,noccb:]
        eris.ooVV[kp,kq,kr] = tmp[:nocca,:nocca,noccb:,noccb:]
        eris.ovOV[kp,kq,kr] = tmp[:nocca,nocca:,:noccb,noccb:]
        eris.voOV[kq,kp,ks] = tmp[:nocca,nocca:,noccb:,:noccb].conj().transpose(1,0,3,2)
        eris.voVV[kq,kp,ks] = tmp[:nocca,nocca:,noccb:,noccb:].conj().transpose(1,0,3,2)
    oppp = None
    # --- mixed-spin (O P | p p) transformation ---
    if isinstance(buf, h5py.Group):
        del(buf['tmp'])
        out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,noccb,nmob,nmoa,nmoa), dtype)
    oppp = thisdf.ao2mo_7d([orbob,mo_coeff[1],mo_coeff[0],mo_coeff[0]], kpts,
                           factor=1./nkpts, out=out)
    for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
        ks = kconserv[kp,kq,kr]
        tmp = np.asarray(oppp[kp,kq,kr])
        #eris.OOoo[kp,kq,kr] = tmp[:noccb,:noccb,:nocca,:nocca]
        eris.OOov[kp,kq,kr] = tmp[:noccb,:noccb,:nocca,nocca:]
        eris.OOvv[kp,kq,kr] = tmp[:noccb,:noccb,nocca:,nocca:]
        eris.OVov[kp,kq,kr] = tmp[:noccb,noccb:,:nocca,nocca:]
        eris.VOov[kq,kp,ks] = tmp[:noccb,noccb:,nocca:,:nocca].conj().transpose(1,0,3,2)
        eris.VOvv[kq,kp,ks] = tmp[:noccb,noccb:,nocca:,nocca:].conj().transpose(1,0,3,2)
    oppp = None
    log.timer('CCSD integral transformation', *cput0)
    return eris
def _make_eris_outcore(cc, mo_coeff=None):
    """Build the MO-basis ERI container backed by an HDF5 temporary file.

    Same layout as :func:`_make_eris_incore`, but every block is an HDF5
    dataset in ``eris.feri`` and the shared transformation uses a second
    temporary file as scratch, keeping peak memory low.
    """
    eris = uccsd._ChemistsERIs()
    if mo_coeff is None:
        mo_coeff = cc.mo_coeff
    mo_coeff = convert_mo_coeff(mo_coeff)  # FIXME: Remove me!
    mo_coeff = padded_mo_coeff(cc, mo_coeff)
    eris.mo_coeff = mo_coeff
    eris.nocc = cc.nocc
    nkpts = cc.nkpts
    nocca, noccb = cc.nocc
    nmoa, nmob = cc.nmo
    nvira, nvirb = nmoa - nocca, nmob - noccb
    # Real storage only at the Gamma point; complex otherwise.
    if gamma_point(cc.kpts):
        dtype = np.double
    else:
        dtype = np.complex128
    dtype = np.result_type(dtype, *mo_coeff[0]).char
    eris.feri = feri = lib.H5TmpFile()
    eris.oooo = feri.create_dataset('oooo', (nkpts,nkpts,nkpts,nocca,nocca,nocca,nocca), dtype)
    eris.ooov = feri.create_dataset('ooov', (nkpts,nkpts,nkpts,nocca,nocca,nocca,nvira), dtype)
    eris.oovv = feri.create_dataset('oovv', (nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira), dtype)
    eris.ovov = feri.create_dataset('ovov', (nkpts,nkpts,nkpts,nocca,nvira,nocca,nvira), dtype)
    eris.voov = feri.create_dataset('voov', (nkpts,nkpts,nkpts,nvira,nocca,nocca,nvira), dtype)
    eris.vovv = feri.create_dataset('vovv', (nkpts,nkpts,nkpts,nvira,nocca,nvira,nvira), dtype)
    eris.vvvv = feri.create_dataset('vvvv', (nkpts,nkpts,nkpts,nvira,nvira,nvira,nvira), dtype)
    eris.OOOO = feri.create_dataset('OOOO', (nkpts,nkpts,nkpts,noccb,noccb,noccb,noccb), dtype)
    eris.OOOV = feri.create_dataset('OOOV', (nkpts,nkpts,nkpts,noccb,noccb,noccb,nvirb), dtype)
    eris.OOVV = feri.create_dataset('OOVV', (nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb), dtype)
    eris.OVOV = feri.create_dataset('OVOV', (nkpts,nkpts,nkpts,noccb,nvirb,noccb,nvirb), dtype)
    eris.VOOV = feri.create_dataset('VOOV', (nkpts,nkpts,nkpts,nvirb,noccb,noccb,nvirb), dtype)
    eris.VOVV = feri.create_dataset('VOVV', (nkpts,nkpts,nkpts,nvirb,noccb,nvirb,nvirb), dtype)
    eris.VVVV = feri.create_dataset('VVVV', (nkpts,nkpts,nkpts,nvirb,nvirb,nvirb,nvirb), dtype)
    eris.ooOO = feri.create_dataset('ooOO', (nkpts,nkpts,nkpts,nocca,nocca,noccb,noccb), dtype)
    eris.ooOV = feri.create_dataset('ooOV', (nkpts,nkpts,nkpts,nocca,nocca,noccb,nvirb), dtype)
    eris.ooVV = feri.create_dataset('ooVV', (nkpts,nkpts,nkpts,nocca,nocca,nvirb,nvirb), dtype)
    eris.ovOV = feri.create_dataset('ovOV', (nkpts,nkpts,nkpts,nocca,nvira,noccb,nvirb), dtype)
    eris.voOV = feri.create_dataset('voOV', (nkpts,nkpts,nkpts,nvira,nocca,noccb,nvirb), dtype)
    eris.voVV = feri.create_dataset('voVV', (nkpts,nkpts,nkpts,nvira,nocca,nvirb,nvirb), dtype)
    eris.vvVV = feri.create_dataset('vvVV', (nkpts,nkpts,nkpts,nvira,nvira,nvirb,nvirb), dtype)
    # OOoo / VVvv with beta indices first are not stored.
    eris.OOoo = None
    eris.OOov = feri.create_dataset('OOov', (nkpts,nkpts,nkpts,noccb,noccb,nocca,nvira), dtype)
    eris.OOvv = feri.create_dataset('OOvv', (nkpts,nkpts,nkpts,noccb,noccb,nvira,nvira), dtype)
    eris.OVov = feri.create_dataset('OVov', (nkpts,nkpts,nkpts,noccb,nvirb,nocca,nvira), dtype)
    eris.VOov = feri.create_dataset('VOov', (nkpts,nkpts,nkpts,nvirb,noccb,nocca,nvira), dtype)
    eris.VOvv = feri.create_dataset('VOvv', (nkpts,nkpts,nkpts,nvirb,noccb,nvira,nvira), dtype)
    eris.VVvv = None
    # Fill the occupied-index blocks using an on-disk scratch file.
    fswap = lib.H5TmpFile()
    _kuccsd_eris_common_(cc, eris, fswap)
    fswap = None
    thisdf = cc._scf.with_df
    orbva = np.asarray(mo_coeff[0][:,:,nocca:], order='C')
    orbvb = np.asarray(mo_coeff[1][:,:,noccb:], order='C')
    # All-virtual blocks written directly into the HDF5 datasets.
    thisdf.ao2mo_7d(orbva, cc.kpts, factor=1./nkpts, out=eris.vvvv)
    thisdf.ao2mo_7d(orbvb, cc.kpts, factor=1./nkpts, out=eris.VVVV)
    thisdf.ao2mo_7d([orbva,orbva,orbvb,orbvb], cc.kpts, factor=1./nkpts, out=eris.vvVV)
    return eris
def _make_df_eris(cc, mo_coeff=None):
    """Build a density-fitted ERI container for the direct Wvvvv path.

    Like :func:`_make_eris_outcore` for the occupied-index blocks, but all
    four-virtual blocks (vvvv, VVVV, vvVV) are left as ``None``; instead the
    three-center DF integrals ``eris.Lpv``/``eris.LPV`` (one (naux, nmo, nvir)
    array per (ki, kj) pair) are stored so Wvvvv can be assembled on the fly
    in :func:`add_vvvv_`.  2D cells are not supported.
    """
    from pyscf.pbc.df import df
    from pyscf.ao2mo import _ao2mo
    cell = cc._scf.cell
    if cell.dimension == 2:
        raise NotImplementedError
    eris = uccsd._ChemistsERIs()
    if mo_coeff is None:
        mo_coeff = cc.mo_coeff
    mo_coeff = padded_mo_coeff(cc, mo_coeff)
    eris.mo_coeff = mo_coeff
    eris.nocc = cc.nocc
    thisdf = cc._scf.with_df
    kpts = cc.kpts
    nkpts = cc.nkpts
    nocca, noccb = cc.nocc
    nmoa, nmob = cc.nmo
    nvira, nvirb = nmoa - nocca, nmob - noccb
    #if getattr(thisdf, 'auxcell', None):
    #    naux = thisdf.auxcell.nao_nr()
    #else:
    #    naux = thisdf.get_naoaux()
    nao = cell.nao_nr()
    mo_kpts_a, mo_kpts_b = eris.mo_coeff
    if gamma_point(kpts):
        dtype = np.double
    else:
        dtype = np.complex128
    dtype = np.result_type(dtype, *mo_kpts_a)
    eris.feri = feri = lib.H5TmpFile()
    eris.oooo = feri.create_dataset('oooo', (nkpts,nkpts,nkpts,nocca,nocca,nocca,nocca), dtype)
    eris.ooov = feri.create_dataset('ooov', (nkpts,nkpts,nkpts,nocca,nocca,nocca,nvira), dtype)
    eris.oovv = feri.create_dataset('oovv', (nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira), dtype)
    eris.ovov = feri.create_dataset('ovov', (nkpts,nkpts,nkpts,nocca,nvira,nocca,nvira), dtype)
    eris.voov = feri.create_dataset('voov', (nkpts,nkpts,nkpts,nvira,nocca,nocca,nvira), dtype)
    eris.vovv = feri.create_dataset('vovv', (nkpts,nkpts,nkpts,nvira,nocca,nvira,nvira), dtype)
    eris.vvvv = None
    eris.OOOO = feri.create_dataset('OOOO', (nkpts,nkpts,nkpts,noccb,noccb,noccb,noccb), dtype)
    eris.OOOV = feri.create_dataset('OOOV', (nkpts,nkpts,nkpts,noccb,noccb,noccb,nvirb), dtype)
    eris.OOVV = feri.create_dataset('OOVV', (nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb), dtype)
    eris.OVOV = feri.create_dataset('OVOV', (nkpts,nkpts,nkpts,noccb,nvirb,noccb,nvirb), dtype)
    eris.VOOV = feri.create_dataset('VOOV', (nkpts,nkpts,nkpts,nvirb,noccb,noccb,nvirb), dtype)
    eris.VOVV = feri.create_dataset('VOVV', (nkpts,nkpts,nkpts,nvirb,noccb,nvirb,nvirb), dtype)
    eris.VVVV = None
    eris.ooOO = feri.create_dataset('ooOO', (nkpts,nkpts,nkpts,nocca,nocca,noccb,noccb), dtype)
    eris.ooOV = feri.create_dataset('ooOV', (nkpts,nkpts,nkpts,nocca,nocca,noccb,nvirb), dtype)
    eris.ooVV = feri.create_dataset('ooVV', (nkpts,nkpts,nkpts,nocca,nocca,nvirb,nvirb), dtype)
    eris.ovOV = feri.create_dataset('ovOV', (nkpts,nkpts,nkpts,nocca,nvira,noccb,nvirb), dtype)
    eris.voOV = feri.create_dataset('voOV', (nkpts,nkpts,nkpts,nvira,nocca,noccb,nvirb), dtype)
    eris.voVV = feri.create_dataset('voVV', (nkpts,nkpts,nkpts,nvira,nocca,nvirb,nvirb), dtype)
    eris.vvVV = None
    eris.OOoo = None
    eris.OOov = feri.create_dataset('OOov', (nkpts,nkpts,nkpts,noccb,noccb,nocca,nvira), dtype)
    eris.OOvv = feri.create_dataset('OOvv', (nkpts,nkpts,nkpts,noccb,noccb,nvira,nvira), dtype)
    eris.OVov = feri.create_dataset('OVov', (nkpts,nkpts,nkpts,noccb,nvirb,nocca,nvira), dtype)
    eris.VOov = feri.create_dataset('VOov', (nkpts,nkpts,nkpts,nvirb,noccb,nocca,nvira), dtype)
    eris.VOvv = feri.create_dataset('VOvv', (nkpts,nkpts,nkpts,nvirb,noccb,nvira,nvira), dtype)
    eris.VVvv = None
    # Fill the occupied-index blocks using an on-disk scratch file.
    fswap = lib.H5TmpFile()
    _kuccsd_eris_common_(cc, eris, fswap)
    fswap = None
    # Half-transform the stored 3-center integrals j3c to (L, p, v) per
    # (ki, kj) k-point pair for both spins.
    eris.Lpv = Lpv = np.empty((nkpts,nkpts), dtype=object)
    eris.LPV = LPV = np.empty((nkpts,nkpts), dtype=object)
    with h5py.File(thisdf._cderi, 'r') as f:
        kptij_lst = f['j3c-kptij'][:]
        tao = []
        ao_loc = None
        for ki, kpti in enumerate(kpts):
            for kj, kptj in enumerate(kpts):
                kpti_kptj = np.array((kpti,kptj))
                Lpq = np.asarray(df._getitem(f, 'j3c', kpti_kptj, kptij_lst))
                # Stack [all MOs at ki | virtuals at kj] so one transform
                # yields the (p, v) block directly.
                mo_a = np.hstack((mo_kpts_a[ki], mo_kpts_a[kj][:,nocca:]))
                mo_b = np.hstack((mo_kpts_b[ki], mo_kpts_b[kj][:,noccb:]))
                mo_a = np.asarray(mo_a, dtype=dtype, order='F')
                mo_b = np.asarray(mo_b, dtype=dtype, order='F')
                if dtype == np.double:
                    outa = _ao2mo.nr_e2(Lpq, mo_a, (0, nmoa, nmoa, nmoa+nvira), aosym='s2')
                    outb = _ao2mo.nr_e2(Lpq, mo_b, (0, nmob, nmob, nmob+nvirb), aosym='s2')
                else:
                    #Note: Lpq.shape[0] != naux if linear dependency is found in auxbasis
                    if Lpq[0].size != nao**2:  # aosym = 's2'
                        Lpq = lib.unpack_tril(Lpq).astype(np.complex128)
                    outa = _ao2mo.r_e2(Lpq, mo_a, (0, nmoa, nmoa, nmoa+nvira), tao, ao_loc)
                    outb = _ao2mo.r_e2(Lpq, mo_b, (0, nmob, nmob, nmob+nvirb), tao, ao_loc)
                Lpv[ki,kj] = outa.reshape(-1,nmoa,nvira)
                LPV[ki,kj] = outb.reshape(-1,nmob,nvirb)
    return eris
# Attach this solver to KUHF so that mean-field objects expose mf.CCSD(...).
scf.kuhf.KUHF.CCSD = lib.class_as_method(KUCCSD)
if __name__ == '__main__':
from pyscf.pbc import gto
from pyscf import lo
cell = gto.Cell()
cell.atom='''
He 0.000000000000 0.000000000000 0.000000000000
He 1.685068664391 1.685068664391 1.685068664391
'''
#cell.basis = [[0, (1., 1.)], [1, (.5, 1.)]]
cell.basis = [[0, (1., 1.)], [0, (.5, 1.)]]
cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
cell.unit = 'B'
cell.mesh = [13]*3
cell.build()
np.random.seed(2)
    # Running HF and CCSD with 1x1x3 Monkhorst-Pack k-point mesh
kmf = scf.KUHF(cell, kpts=cell.make_kpts([1,1,3]), exxdiv=None)
nmo = cell.nao_nr()
kmf.mo_occ = np.zeros((2,3,nmo))
kmf.mo_occ[0,:,:3] = 1
kmf.mo_occ[1,:,:1] = 1
kmf.mo_energy = np.arange(nmo) + np.random.random((2,3,nmo)) * .3
kmf.mo_energy[kmf.mo_occ == 0] += 2
mo = (np.random.random((2,3,nmo,nmo)) +
np.random.random((2,3,nmo,nmo))*1j - .5-.5j)
s = kmf.get_ovlp()
kmf.mo_coeff = np.empty_like(mo)
nkpts = len(kmf.kpts)
for k in range(nkpts):
kmf.mo_coeff[0,k] = lo.orth.vec_lowdin(mo[0,k], s[k])
kmf.mo_coeff[1,k] = lo.orth.vec_lowdin(mo[1,k], s[k])
def rand_t1_t2(mycc):
nkpts = mycc.nkpts
nocca, noccb = mycc.nocc
nmoa, nmob = mycc.nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
np.random.seed(1)
t1a = (np.random.random((nkpts,nocca,nvira)) +
np.random.random((nkpts,nocca,nvira))*1j - .5-.5j)
t1b = (np.random.random((nkpts,noccb,nvirb)) +
np.random.random((nkpts,noccb,nvirb))*1j - .5-.5j)
t2aa = (np.random.random((nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira)) +
np.random.random((nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira))*1j - .5-.5j)
kconserv = kpts_helper.get_kconserv(kmf.cell, kmf.kpts)
t2aa = t2aa - t2aa.transpose(1,0,2,4,3,5,6)
tmp = t2aa.copy()
for ki, kj, kk in kpts_helper.loop_kkk(nkpts):
kl = kconserv[ki, kk, kj]
t2aa[ki,kj,kk] = t2aa[ki,kj,kk] - tmp[ki,kj,kl].transpose(0,1,3,2)
t2ab = (np.random.random((nkpts,nkpts,nkpts,nocca,noccb,nvira,nvirb)) +
np.random.random((nkpts,nkpts,nkpts,nocca,noccb,nvira,nvirb))*1j - .5-.5j)
t2bb = (np.random.random((nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb)) +
np.random.random((nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb))*1j - .5-.5j)
t2bb = t2bb - t2bb.transpose(1,0,2,4,3,5,6)
tmp = t2bb.copy()
for ki, kj, kk in kpts_helper.loop_kkk(nkpts):
kl = kconserv[ki, kk, kj]
t2bb[ki,kj,kk] = t2bb[ki,kj,kk] - tmp[ki,kj,kl].transpose(0,1,3,2)
t1 = (t1a, t1b)
t2 = (t2aa, t2ab, t2bb)
return t1, t2
mycc = KUCCSD(kmf)
eris = mycc.ao2mo()
t1, t2 = rand_t1_t2(mycc)
Ht1, Ht2 = mycc.update_amps(t1, t2, eris)
print(lib.finger(Ht1[0]) - (2.2677885702176339-2.5150764056992041j))
print(lib.finger(Ht1[1]) - (-51.643438947846086+526.58026126100458j))
print(lib.finger(Ht2[0]) - (-29.490813482748258-8.7509143690136018j))
print(lib.finger(Ht2[1]) - (2256.0440056839416-193.16480896707569j))
print(lib.finger(Ht2[2]) - (-250.59447681063182-397.57189085666982j))
kmf.mo_occ[:] = 0
kmf.mo_occ[:,:,:2] = 1
mycc = KUCCSD(kmf)
eris = mycc.ao2mo()
t1, t2 = rand_t1_t2(mycc)
Ht1, Ht2 = mycc.update_amps(t1, t2, eris)
print(lib.finger(Ht1[0]) - (5.4622516572705662+1.990046725028729j))
print(lib.finger(Ht1[1]) - (4.8801120611799043-5.9940463787453488j))
print(lib.finger(Ht2[0]) - (-192.38864512375193+305.14191018543983j))
print(lib.finger(Ht2[1]) - (23085.044505825954-11527.802302550244j))
print(lib.finger(Ht2[2]) - (115.57932548288559-40.888597453928604j))
from pyscf.pbc.cc import kccsd
kgcc = kccsd.GCCSD(scf.addons.convert_to_ghf(kmf))
kccsd_eris = kccsd._make_eris_incore(kgcc, kgcc._scf.mo_coeff)
r1 = kgcc.spatial2spin(t1)
r2 = kgcc.spatial2spin(t2)
ge = kccsd.energy(kgcc, r1, r2, kccsd_eris)
r1, r2 = kgcc.update_amps(r1, r2, kccsd_eris)
ue = energy(mycc, t1, t2, eris)
print(abs(ge - ue))
print(abs(r1 - kgcc.spatial2spin(Ht1)).max())
print(abs(r2 - kgcc.spatial2spin(Ht2)).max())
kmf = kmf.density_fit(auxbasis=[[0, (1., 1.)]])
mycc = KUCCSD(kmf)
eris = _make_df_eris(mycc, mycc.mo_coeff)
t1, t2 = rand_t1_t2(mycc)
Ht1, Ht2 = mycc.update_amps(t1, t2, eris)
print(lib.finger(Ht1[0]) - (6.9341372555790013+0.87313546297025901j))
print(lib.finger(Ht1[1]) - (6.7538005829391992-0.95702422534126796j))
print(lib.finger(Ht2[0]) - (-509.24544842179876+448.00925776269855j))
print(lib.finger(Ht2[1]) - (107.5960392010511+40.869216223808067j) )
print(lib.finger(Ht2[2]) - (-196.75910296082139+218.53005038057515j))
kgcc = kccsd.GCCSD(scf.addons.convert_to_ghf(kmf))
kccsd_eris = kccsd._make_eris_incore(kgcc, kgcc._scf.mo_coeff)
r1 = kgcc.spatial2spin(t1)
r2 = kgcc.spatial2spin(t2)
ge = kccsd.energy(kgcc, r1, r2, kccsd_eris)
r1, r2 = kgcc.update_amps(r1, r2, kccsd_eris)
print(abs(r1 - kgcc.spatial2spin(Ht1)).max())
print(abs(r2 - kgcc.spatial2spin(Ht2)).max())
print(all([abs(lib.finger(eris.oooo) - (-0.18290712163391809-0.13839081039521306j) )<1e-8,
abs(lib.finger(eris.ooOO) - (-0.084752145202964035-0.28496525042110676j) )<1e-8,
#abs(lib.finger(eris.OOoo) - (0.43054922768629345-0.27990237216969871j) )<1e-8,
abs(lib.finger(eris.OOOO) - (-0.2941475969103261-0.047247498899840978j) )<1e-8,
abs(lib.finger(eris.ooov) - (0.23381463349517045-0.11703340936984277j) )<1e-8,
abs(lib.finger(eris.ooOV) - (-0.052655392703214066+0.69533309442418556j) )<1e-8,
abs(lib.finger(eris.OOov) - (-0.2111361247200903+0.85087916975274647j) )<1e-8,
abs(lib.finger(eris.OOOV) - (-0.36995992208047412-0.18887278030885621j) )<1e-8,
abs(lib.finger(eris.oovv) - (0.21107397525051516+0.0048714991438174871j) )<1e-8,
abs(lib.finger(eris.ooVV) - (-0.076411225687065987+0.11080438166425896j) )<1e-8,
abs(lib.finger(eris.OOvv) - (-0.17880337626095003-0.24174716216954206j) )<1e-8,
abs(lib.finger(eris.OOVV) - (0.059186286356424908+0.68433866387500164j) )<1e-8,
abs(lib.finger(eris.ovov) - (0.15402983765151051+0.064359681685222214j) )<1e-8,
abs(lib.finger(eris.ovOV) - (-0.10697649196044598+0.30351249676253234j) )<1e-8,
#abs(lib.finger(eris.OVov) - (-0.17619329728836752-0.56585020976035816j) )<1e-8,
abs(lib.finger(eris.OVOV) - (-0.63963235318492118+0.69863219317718828j) )<1e-8,
abs(lib.finger(eris.voov) - (-0.24137641647339092+0.18676684336011531j) )<1e-8,
abs(lib.finger(eris.voOV) - (0.19257709151227204+0.38929027819406414j) )<1e-8,
#abs(lib.finger(eris.VOov) - (0.07632606729926053-0.70350947950650355j) )<1e-8,
abs(lib.finger(eris.VOOV) - (-0.47970203195500816+0.46735207193861927j) )<1e-8,
abs(lib.finger(eris.vovv) - (-0.1342049915673903-0.23391327821719513j) )<1e-8,
abs(lib.finger(eris.voVV) - (-0.28989635223866056+0.9644368822688475j) )<1e-8,
abs(lib.finger(eris.VOvv) - (-0.32428269235420271+0.0029847254383674748j))<1e-8,
abs(lib.finger(eris.VOVV) - (0.45031779746222456-0.36858577475752041j) )<1e-8]))
eris = _make_eris_outcore(mycc, mycc.mo_coeff)
print(all([abs(lib.finger(eris.oooo) - (-0.18290712163391809-0.13839081039521306j) )<1e-8,
abs(lib.finger(eris.ooOO) - (-0.084752145202964035-0.28496525042110676j) )<1e-8,
#abs(lib.finger(eris.OOoo) - (0.43054922768629345-0.27990237216969871j) )<1e-8,
abs(lib.finger(eris.OOOO) - (-0.2941475969103261-0.047247498899840978j) )<1e-8,
abs(lib.finger(eris.ooov) - (0.23381463349517045-0.11703340936984277j) )<1e-8,
abs(lib.finger(eris.ooOV) - (-0.052655392703214066+0.69533309442418556j) )<1e-8,
abs(lib.finger(eris.OOov) - (-0.2111361247200903+0.85087916975274647j) )<1e-8,
abs(lib.finger(eris.OOOV) - (-0.36995992208047412-0.18887278030885621j) )<1e-8,
abs(lib.finger(eris.oovv) - (0.21107397525051516+0.0048714991438174871j) )<1e-8,
abs(lib.finger(eris.ooVV) - (-0.076411225687065987+0.11080438166425896j) )<1e-8,
abs(lib.finger(eris.OOvv) - (-0.17880337626095003-0.24174716216954206j) )<1e-8,
abs(lib.finger(eris.OOVV) - (0.059186286356424908+0.68433866387500164j) )<1e-8,
abs(lib.finger(eris.ovov) - (0.15402983765151051+0.064359681685222214j) )<1e-8,
abs(lib.finger(eris.ovOV) - (-0.10697649196044598+0.30351249676253234j) )<1e-8,
#abs(lib.finger(eris.OVov) - (-0.17619329728836752-0.56585020976035816j) )<1e-8,
abs(lib.finger(eris.OVOV) - (-0.63963235318492118+0.69863219317718828j) )<1e-8,
abs(lib.finger(eris.voov) - (-0.24137641647339092+0.18676684336011531j) )<1e-8,
abs(lib.finger(eris.voOV) - (0.19257709151227204+0.38929027819406414j) )<1e-8,
#abs(lib.finger(eris.VOov) - (0.07632606729926053-0.70350947950650355j) )<1e-8,
abs(lib.finger(eris.VOOV) - (-0.47970203195500816+0.46735207193861927j) )<1e-8,
abs(lib.finger(eris.vovv) - (-0.1342049915673903-0.23391327821719513j) )<1e-8,
abs(lib.finger(eris.voVV) - (-0.28989635223866056+0.9644368822688475j) )<1e-8,
abs(lib.finger(eris.VOvv) - (-0.32428269235420271+0.0029847254383674748j))<1e-8,
abs(lib.finger(eris.VOVV) - (0.45031779746222456-0.36858577475752041j) )<1e-8,
abs(lib.finger(eris.vvvv) - (-0.080512851258903173-0.2868384266725581j) )<1e-8,
abs(lib.finger(eris.vvVV) - (-0.5137063762484736+1.1036785801263898j) )<1e-8,
#abs(lib.finger(eris.VVvv) - (0.16468487082491939+0.25730725586992997j) )<1e-8,
abs(lib.finger(eris.VVVV) - (-0.56714875196802295+0.058636785679170501j) )<1e-8]))
|
sunqm/pyscf
|
pyscf/pbc/cc/kccsd_uhf.py
|
Python
|
apache-2.0
| 59,554
|
[
"PySCF"
] |
7b6e496c0a496783998630a4051d1ef0e670073481a2404aa53a7de2b1648c30
|
'''
Description
To adapt Brian T's regression stats code
to our new plotting interface, we revise
his _NeStatsHelper function to deliver a table
string that we can write to a GUI object.
'''
from __future__ import division
from __future__ import print_function
#created by Brian Trethewey
#
#neGrapher is primary interface for graphing data
#neStats is primary interface for creating statistics output file for a dataset
from future import standard_library
standard_library.install_aliases()
from builtins import object
from past.utils import old_div
__filename__ = "pgregressionstats.py"
__date__ = "20171016"
__author__ = "Ted Cosart<ted.cosart@umontana.edu>"
import os
from numpy import mean, median, isnan
from scipy import stats
'''
Now using revised versions of the viz files
from Brian T, so that slopeConfidence
is now slope_confidence:
'''
#from agestrucne.asnviz.LineRegress import slopeConfidence
from agestrucne.asnviz.LineRegress import slope_confidence, alpha_test, calculate_s_score
from agestrucne.pgneestimationtablefilemanager import NeEstimationTableFileManager
class PGRegressionStats( object ):
    '''
    Adapts Brian Trethewey's regression statistics code (see his def
    Viz.LineRegress._neStatsHelper) to deliver the statistics as a
    string table suitable for writing to a GUI text object.

    The data table is a dict keyed to line (data-series) names, each
    value a list of (x, y) number pairs to be regressed.
    '''
    def __init__( self,
                    dltup_table=None,
                    f_confidence_alpha=0.05,
                    v_significant_value=0 ):
        '''
        param dltup_table: dict of lists of (x, y) tuples, keyed to line
            names (see setTsvFileManagerDictAsTable).
        param f_confidence_alpha: alpha used for the slope confidence
            interval and the per-slope significance test.
        param v_significant_value: slope value against which each
            regression slope is compared for significance.
        '''
        self.__data_table=dltup_table
        self.__confidence_alpha=f_confidence_alpha
        self.__significant_value=v_significant_value
        return
    #end __init__

    def __get_source_file_name_from_file_manager_key( self, s_field_values ):
        '''
        This def assumes that the first field, as delimited in the
        concatenated field values delivered by the
        NeEstimationTableFileManager def getDictDataLinesKeyedToColnames
        (delimiter currently double underscore, __), is the source
        genepop file name with full path, which we reduce to the file
        name only.
        '''
        IDX_FILE_NAME=0
        FIELDDELIM=NeEstimationTableFileManager.DELIM_GROUPED_FIELD_NAMES
        ls_values=s_field_values.split( FIELDDELIM )
        s_file_with_path=ls_values[ IDX_FILE_NAME ]
        s_file_name=os.path.basename( s_file_with_path )
        return s_file_name
    #end __get_source_file_name_from_file_manager_key

    def __data_is_sufficient_for_regression(self):
        '''
        Returns True only if we have a non-empty data table and every
        line has at least the two points needed to fit a regression.
        '''
        b_return_value=False
        if self.__data_table is not None:
            if len( self.__data_table ) > 0:
                b_all_lines_have_sufficient_data=True
                for s_key in self.__data_table:
                    if len( self.__data_table[ s_key ] ) < 2:
                        b_all_lines_have_sufficient_data=False
                        break
                    #end if length under two
                #end for each key
                if b_all_lines_have_sufficient_data==True:
                    b_return_value=True
                #end if all lines had enough data
            #end if there is at least one line
        #end if there is a data table
        return b_return_value
    #end __data_is_sufficient_for_regression

    def __get_stats_as_string( self, b_use_file_name_only_for_column_1=True ):
        '''
        Most of this code is adapted from Brian's def
        Viz.LineRegress._neStatsHelper (including his 2018_03_17 p-value
        and 2018_04_11 significance-count revisions).  Returns a summary
        section followed by a tab-delimited per-line table of slope,
        intercept, confidence interval, and p-value, or "Insufficient
        data." when no regression can be computed.
        '''
        PRECISION=3
        STATS_TABLE_DELIM="\t"
        '''
        2018_04_11. Using the bullet character (u'\u2022') causes some
        OS python installs to throw the unicode-to-ascii error, but not
        in others, so we use an asterisk instead.
        '''
        BULLET="*"
        BULLET_INDENTED=" " + BULLET + " "
        s_return=None
        b_data_is_sufficient=self.__data_is_sufficient_for_regression()
        if b_data_is_sufficient:
            table=self.__data_table
            confPercent = (1.0 - self.__confidence_alpha)*100.0
            tableString="-----Table of per-line values-----\n"
            tableString+=STATS_TABLE_DELIM.join( [ "Source",
                                "Slope",
                                "Intercept" ,
                                "CI("+str(confPercent)+"%)",
                                "P value",
                                "\n" ] )
            slopeVctr = []
            confidenceVctr = []
            alpha_vctr=[]
            s_score_vctr=[]
            Uncountable = 0
            negativeCount=0
            zeroCount=0
            positiveCount=0
            #Sort keys so the table rows have a deterministic order.
            ls_keys_sorted=sorted( list(table.keys()) )
            for recordKey in ls_keys_sorted:
                record = table[recordKey]
                s_file_name=recordKey
                if b_use_file_name_only_for_column_1:
                    s_file_name=self.__get_source_file_name_from_file_manager_key( recordKey )
                #end if not b_use_all_key_fields
                v_return_slope_conf=slope_confidence( self.__confidence_alpha,record)
                slope=None; intercept=None; confidence=None
                ls_vals_for_table=None
                if type( v_return_slope_conf ) == tuple:
                    #We assume we got numbers if a tuple was returned.
                    slope, intercept, confidence = v_return_slope_conf
                    #Perform alpha test (2018_03_17, from Brian T's
                    #revised _neStatsHelper).
                    alpha_result = alpha_test(self.__significant_value, record)
                    if alpha_result == 0:
                        alpha_vctr.append(0)
                    elif slope > 0:
                        alpha_vctr.append(1)
                    else:
                        alpha_vctr.append(-1)
                    #Get std dev estimate.
                    s_val = calculate_s_score(record)
                    s_score_vctr.append(s_val)
                    t_star = old_div(slope,s_val)
                    #Calculate p value, DF = num points - 2.
                    p_score = stats.t.sf(t_star,len(record)-2)
                    #Count significance from CDF(p-value)
                    #(2018_04_11, from Brian T's revisions).
                    alpha_check = 1-(abs(p_score-0.5)*2)
                    if self.__confidence_alpha > alpha_check:
                        if slope > 0:
                            positiveCount+=1
                        else:
                            negativeCount+=1
                    else:
                        zeroCount+=1
                    #end if confidence alpha > alpha check
                    '''
                    Note that the "float" cast was needed (at least in
                    py3), else negative numbers don't get rounded.
                    '''
                    ls_rounded_confidence=[ str( round( float( v_val ) , PRECISION ) ) \
                                    for v_val in confidence ]
                    s_rounded_confidence=", ".join( ls_rounded_confidence )
                    s_rounded_confidence="(" + s_rounded_confidence + ")"
                    ls_vals_for_table=[ s_file_name,
                                str( round( slope, PRECISION ) ),
                                str( round( intercept, PRECISION ) ) ,
                                s_rounded_confidence,
                                str( round( p_score, PRECISION) ),
                                "\n" ]
                    if isnan(slope):
                        Uncountable +=1
                    else:
                        slopeVctr.append(slope)
                        confidenceVctr.append(confidence)
                    #end if isnan
                else:
                    ls_vals_for_table=[ s_file_name, "NA", "NA", "NA", "NA","\n" ]
                #end if slope_confidence returned a tuple, else could not compute
                tableString+=STATS_TABLE_DELIM.join( ls_vals_for_table )
            #end for each recordKey
            if len( slopeVctr ) == 0:
                s_return= "Insufficient data."
            else:
                maxSlope = max(slopeVctr)
                minSlope = min(slopeVctr)
                meanSlope = mean(slopeVctr)
                medSlope = median(slopeVctr)
                #2018_03_17. New quantity from Brian T's new version.
                averageSscore=mean( s_score_vctr )
                s_stats_string = "Max Regression Slope: "+str( round( maxSlope, PRECISION ) )+"\n"
                s_stats_string +="Min Regression Slope: "+str( round( minSlope, PRECISION ) )+"\n"
                s_stats_string +="Mean Regression Slope: " +str( round( meanSlope, PRECISION ) )+"\n"
                s_stats_string +="Median Regression Slope: "+str( round( medSlope, PRECISION ) )+"\n"
                '''
                Fix:  round the mean variance estimate to PRECISION
                decimal places as is done for all the other quantities.
                The original call omitted the PRECISION argument and so
                rounded the value to an integer.
                '''
                s_stats_string += "Mean Variance Estimate:"+str( round( averageSscore, PRECISION ) ) +"\n"
                s_stats_string +="\n"
                s_stats_string +="Comparison to a slope of "+str( round( self.__significant_value, PRECISION ) ) \
                            + " at alpha = " \
                            + str( round( self.__confidence_alpha, PRECISION ) )+"\n"
                s_stats_string +=BULLET_INDENTED \
                            + "Positive Slopes: "+str( round( positiveCount, PRECISION ) )\
                            + "\n" + BULLET_INDENTED \
                            + "Neutral Slopes: "+str( round( zeroCount, PRECISION ) ) \
                            + "\n" + BULLET_INDENTED \
                            + "Negative Slopes: "+str( round( negativeCount, PRECISION ) ) \
                            + "\n" + BULLET_INDENTED \
                            + "Non-Number Slopes: "+str( round( Uncountable, PRECISION ) )
                s_stats_string +="\n\n"
                s_stats_string +=tableString
                s_return=s_stats_string
            #end if no slopes were calc'd, else if slopes calc'd
        else:
            s_return= "Insufficient data."
        #end if our table has sufficient data, else not
        return s_return
    #end __get_stats_as_string

    def setTsvFileManagerDictAsTable( self,
                        dls_dict_of_xy_lists_by_line_name ):
        '''
        Allows clients to pass in the dictionary associated with the
        NeEstimationTableFileManager def
        getDictDataLinesKeyedToColnames.  Converts its delimited
        "x<delim>y" string entries into (float, float) tuples and sets
        the table attribute, self.__data_table.
        '''
        DELIMITER=NeEstimationTableFileManager.DELIM_TABLE
        def convert_to_duple_numeric_pair( s_xy_val_string ):
            #Helper inner def: one "x<delim>y" string to (float, float).
            ls_vals=s_xy_val_string.split( DELIMITER )
            v_x=float( ls_vals[0] )
            v_y=float( ls_vals[1] )
            return ( v_x, v_y )
        #end convert_to_duple_numeric_pair
        dltup_data_converted_for_regression={}
        for s_line_name in dls_dict_of_xy_lists_by_line_name:
            ls_xy_strings=dls_dict_of_xy_lists_by_line_name[ s_line_name ]
            ls_xy_tuples=[ convert_to_duple_numeric_pair( s_xy ) \
                            for s_xy in ls_xy_strings ]
            dltup_data_converted_for_regression[ s_line_name ]=ls_xy_tuples
        #end for each line name
        self.__data_table=dltup_data_converted_for_regression
        return
    #end setTsvFileManagerDictAsTable

    def getStatsTableAsString( self, b_use_file_name_only_for_column_1=True ):
        '''
        Public accessor for the statistics table string.  Returns "No
        data available" if no data table has been set.
        '''
        s_table="No data available"
        if self.__data_table is not None:
            s_table=self.__get_stats_as_string( b_use_file_name_only_for_column_1 )
        #end if we have table data
        return s_table
    #end getStatsTableAsString

    '''
    2018_04_13. We've added an alpha text box to the
    PGNeEstimationRegressplotInterface class, and we want to be able to
    update this class attribute when the user changes the alpha value.
    '''
    @property
    def confidence_alpha( self ):
        return self.__confidence_alpha
    #end property confidence_alpha

    @confidence_alpha.setter
    def confidence_alpha( self, f_value ):
        self.__confidence_alpha=f_value
        return
    #end setter confidence_alpha
#end class PGRegressionStats
if __name__ == "__main__":
    # This module only provides the PGRegressionStats class; there is
    # no command-line behavior.
    pass
#end if main
|
popgengui/negui
|
agestrucne/pgregressionstats.py
|
Python
|
agpl-3.0
| 11,240
|
[
"Brian"
] |
12c5059f81f1a805f40d30e7149978699068b460968956c65cb3976e8ef93ac3
|
"""
usage: scramble.py [egg_name]
With no arguments, scrambles all eggs necessary according to the
settings in universe_wsgi.ini.
egg_name - Scramble only this egg (as defined in eggs.ini) or 'all'
for all eggs (even those not required by your settings).
"""
# NOTE(review): Python 2 only -- uses the print statement and relies on
# py2 filter() returning a list.
import os, sys, logging
# Log everything at DEBUG (numeric level 10) and above to stdout.
root = logging.getLogger()
root.setLevel( 10 )
root.addHandler( logging.StreamHandler( sys.stdout ) )
# Make the Galaxy 'lib' directory importable, relative to this script.
lib = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "..", "lib" ) )
sys.path.append( lib )
from galaxy.eggs import Crate, GalaxyConfig
c = Crate()
c.parse()
galaxy_config = GalaxyConfig()
names = []
if len( sys.argv ) == 1:
    # No argument: consider every egg defined in eggs.ini.
    names = c.get_names()
elif sys.argv[1] == 'all':
    # 'all': per the usage text this should mean every egg; here the
    # always-conditional set is used -- NOTE(review): confirm this
    # branch matches the intent of the module docstring.
    names = galaxy_config.always_conditional
else:
    # Scramble a specific egg
    egg = c.get( sys.argv[1] )
    if egg is None:
        print "error: %s not in eggs.ini" % sys.argv[1]
        sys.exit( 1 )
    egg.scramble()
    sys.exit( 0 )
# Skip eggs whose conditional requirement is not enabled in the config.
ignore = filter( lambda x: not galaxy_config.check_conditional( x ), list( names ) )
c.scramble( ignore=ignore )
|
volpino/Yeps-EURAC
|
scripts/scramble.py
|
Python
|
mit
| 1,050
|
[
"Galaxy"
] |
8af5e0a8df0427bbd38ecf3449ff3ed4c251f4592bc0a0eb920ac61f566de15a
|
# Copyright (c) 2008-2009 Participatory Culture Foundation
# See LICENSE for details.
from datetime import datetime, timedelta
from django.core import mail
from django.conf import settings
from django.contrib.auth.models import User
from channelguide.channels.models import Channel
from channelguide.user_profile.models import UserProfile
from channelguide.notes.models import ChannelNote, ModeratorPost
from channelguide.testframework import TestCase
from channelguide.notes.utils import get_note_info
class NotesTest(TestCase):
    """Tests creation and visibility of notes attached to a channel."""
    def setUp(self):
        TestCase.setUp(self)
        self.channel_owner = self.make_user('foobar')
        self.channel_owner.email = 'someone@somewhere.net'
        self.channel_owner.save()
        self.channel = self.make_channel(self.channel_owner)
        self.non_owner = self.make_user('tony')
        self.moderator = self.make_user('greg', group='cg_moderator')
        # NOTE(review): both notes are authored by the channel owner;
        # the name 'moderator_note' presumably reflects intended usage
        # only -- confirm against the views under test.
        self.moderator_note = self.make_note('foo')
        self.owner_note = self.make_note('bar')
        self.channel.notes.add(self.moderator_note)
        self.channel.notes.add(self.owner_note)
    def make_note(self, body):
        # Build a note authored by the channel owner.
        return ChannelNote(user=self.channel_owner, body=body)
    def assertSameNote(self, note1, note2):
        # Notes are compared by title only.
        self.assertEquals(note1.title, note2.title)
    def assertSameNoteList(self, notelist1, notelist2):
        # Element-wise comparison; lengths must match too.
        self.assertEquals(len(notelist1), len(notelist2))
        for note1, note2 in zip(notelist1, notelist2):
            self.assertSameNote(note1, note2)
    def test_add_note(self):
        # Both notes added in setUp should be stored, in insertion order.
        notes = ChannelNote.objects.all()
        self.assertEquals(len(notes), 2)
        self.assertSameNote(notes[0], self.moderator_note)
        self.assertSameNote(notes[1], self.owner_note)
    def check_non_owner_notes(self, notes):
        # Users unrelated to the channel see no notes at all.
        self.assertEquals(notes, [])
    def check_owner_notes(self, notes):
        self.assertSameNoteList(notes, [self.owner_note, self.moderator_note])
    def test_get_note_info(self):
        self.check_non_owner_notes(
            get_note_info(self.channel, self.non_owner))
        self.check_owner_notes(
            get_note_info(self.channel, self.channel_owner))
        self.check_owner_notes(
            get_note_info(self.channel, self.moderator))
    def test_channel_page(self):
        """
        On the channel edit page, if the user is not the owner or no user is
        logged in, no notes should be displayed. If the user is the owner,
        display the notes for the owner. If the user is a moderator, display
        all the notes.
        """
        channel_path = "/channels/edit/%d" % self.channel.id
        page = self.get_page(channel_path, self.channel_owner)
        self.check_owner_notes(page.context[0]['notes'])
        page = self.get_page(channel_path, self.moderator)
        self.check_owner_notes(page.context[0]['notes'])
class NotesPageTestBase(TestCase):
    """Shared fixture for the notes-page tests: a channel owner with an
    e-mail address, a moderator, a super-moderator, and an unrelated
    user."""

    def setUp(self):
        TestCase.setUp(self)
        self.user = self.make_user('fred')
        self.moderator = self.make_user('greg', group='cg_moderator')
        self.supermod = self.make_user('henrietta',
                                       group=['cg_supermoderator',
                                              'cg_moderator'])
        self.random_user = self.make_user("jane")
        # The owner needs an address so note notifications can reach them.
        self.user.email = 'someone@somewhere.net'
        self.user.save()
        self.channel = self.make_channel(self.user)

    def make_note_post_data(self):
        # Minimal POST payload for creating a note on self.channel.
        return {
            'channel-id': self.channel.id,
            'body': 'test body',
        }
class NotesPageTest(NotesPageTestBase):
    """Tests for the /notes/new view: who may add notes and when e-mail
    notifications go out."""

    def test_add_note(self):
        """A note posted by the owner is stored without e-mail; a
        moderator's note also e-mails the owner."""
        self.login(self.user)
        post_data = self.make_note_post_data()
        self.post_data("/notes/new", post_data)
        self.check_note_count(1)
        self.assertEquals(self.channel.notes.all()[0].body, post_data['body'])
        self.assertEquals(len(mail.outbox), 0)
        self.login(self.moderator)
        self.post_data("/notes/new", post_data)
        self.check_note_count(2)
        self.assertEquals(len(mail.outbox), 1)

    def add_note_to_channel(self):
        """Attach a note directly (bypassing the view) and return it."""
        start_count = self.get_note_count()
        # BUG FIX: the original used positional arguments,
        # ChannelNote(self.user, 'test'), which Django maps onto the
        # model's fields in declaration order (starting with the pk),
        # not onto (user, body).  Use keyword arguments, matching
        # NotesTest.make_note above.
        note = ChannelNote(user=self.user, body='test')
        self.channel.notes.add(note)
        self.check_note_count(start_count + 1)
        return note

    def get_note_count(self):
        return self.channel.notes.count()

    def check_note_count(self, correct_count):
        self.assertEquals(self.get_note_count(), correct_count)

    def check_can_add(self, user, can_add):
        """Assert whether *user* (None = anonymous) may add a note."""
        if user is not None:
            self.login(user)
        start_count = self.get_note_count()
        page = self.post_data("/notes/new", self.make_note_post_data())
        if can_add:
            self.check_note_count(start_count + 1)
        else:
            self.assertLoginRedirect(page)

    def test_add_auth(self):
        self.check_can_add(None, False)
        self.check_can_add(self.random_user, False)
        self.check_can_add(self.user, True)
        self.check_can_add(self.moderator, True)
        self.check_can_add(self.supermod, True)

    def check_can_email(self, user, can_email):
        """Assert whether a note posted by *user* triggers an e-mail."""
        start_count = len(mail.outbox)
        if user is not None:
            self.login(user)
        self.post_data("/notes/new", self.make_note_post_data())
        if can_email:
            self.assertEquals(len(mail.outbox), start_count + 1)
        else:
            self.assertEquals(len(mail.outbox), start_count)

    def test_email_from(self):
        self.login(self.moderator)
        self.post_data("/notes/new", self.make_note_post_data())
        self.assertEquals(mail.outbox[0].from_email, settings.EMAIL_FROM)

    def test_channel_link(self):
        # Notification e-mails must link back to the channel.
        self.login(self.moderator)
        self.post_data("/notes/new", self.make_note_post_data())
        self.assert_(self.channel.get_absolute_url() in
                     mail.outbox[0].body)

    def test_email_auth(self):
        self.check_can_email(None, False)
        self.check_can_email(self.random_user, False)
        self.check_can_email(self.user, False)
        self.check_can_email(self.moderator, True)
        self.check_can_email(self.supermod, True)
class ModeratorPostTest(TestCase):
    """Tests for the moderator message board: viewing, posting,
    deleting, and the e-mail notifications posts can trigger."""
    def setUp(self):
        TestCase.setUp(self)
        self.user = self.make_user('user')
        self.mod = self.make_user('mod', group='cg_moderator')
        self.supermod = self.make_user('supermod', group=['cg_moderator',
            'cg_supermoderator'])
        self.new_post_data = {
            'title': 'test title',
            'body': 'test body',
        }
        # Same post payload, but requesting e-mail notification.
        self.new_post_data_email = self.new_post_data.copy()
        self.new_post_data_email['send-email'] = 1
    def add_post(self):
        # Create a board post directly, bypassing the view.
        ModeratorPost(user=self.mod, title='test', body='test').save()
    def get_post_count(self):
        return ModeratorPost.objects.count()
    def test_board(self):
        # A super-moderator's post shows up on the moderator board.
        self.post_data('/notes/new-moderator-post', self.new_post_data,
                login_as=self.supermod)
        response = self.get_page('/notes/moderator-board', login_as=self.mod)
        self.assertEquals(response.status_code, 200)
        page = response.context['page']
        self.assertEquals(page.object_list.count(), 1)
        self.assertEquals(page.object_list[0].title,
                self.new_post_data['title'])
        self.assertEquals(page.object_list[0].body,
                self.new_post_data['body'])
    def test_view_auth(self):
        # Only moderators (and above) may view the board.
        self.assertLoginRedirect("/notes/moderator-board")
        self.assertLoginRedirect("/notes/moderator-board", self.user)
        self.assertCanAccess("/notes/moderator-board", self.mod)
        self.assertCanAccess("/notes/moderator-board", self.supermod)
    def check_add_auth(self, user, can_add):
        # Assert whether *user* (None = anonymous) may add a board post.
        start_count = self.get_post_count()
        page = self.post_data('/notes/new-moderator-post', self.new_post_data,
                login_as=user)
        if can_add:
            self.assertEqual(self.get_post_count(), start_count + 1)
        else:
            self.assertLoginRedirect(page)
            self.assertEqual(self.get_post_count(), start_count)
    def test_add(self):
        self.check_add_auth(None, False)
        self.check_add_auth(self.user, False)
        self.check_add_auth(self.mod, True)
        self.check_add_auth(self.supermod, True)
    def check_delete_auth(self, user, can_delete):
        # Assert whether *user* may delete an existing board post.
        start_count = self.get_post_count()
        post = ModeratorPost.objects.all()[0]
        page = self.post_data('/notes/post-%d' % post.id,
                {'action' : 'delete'}, login_as=user)
        if can_delete:
            self.assertEqual(self.get_post_count(), start_count - 1)
        else:
            self.assertLoginRedirect(page)
            self.assertEqual(self.get_post_count(), start_count)
    def test_delete(self):
        # Only super-moderators may delete posts.
        for i in range(5):
            self.add_post()
        self.check_delete_auth(None, False)
        self.check_delete_auth(self.user, False)
        self.check_delete_auth(self.mod, False)
        self.check_delete_auth(self.supermod, True)
    def moderators(self):
        # All users with board-post permission who have not disabled
        # moderator-board e-mail.
        return [user for user in User.objects.exclude(
            userprofile__moderator_board_email=UserProfile.NO_EMAIL)
                if user.has_perm('notes.add_moderatorpost')]
    def check_email_auth(self, user, can_email):
        # Assert whether a 'send-email' post by *user* mails every
        # eligible moderator, from the poster's own address.
        start_count = len(mail.outbox)
        self.post_data('/notes/new-moderator-post',
                self.new_post_data_email, login_as=user)
        if can_email:
            self.assertEqual(len(mail.outbox), start_count +
                    len(self.moderators()))
            self.assertEquals(mail.outbox[-1].from_email, user.email)
        else:
            self.assertEqual(len(mail.outbox), start_count)
    def test_email(self):
        self.check_email_auth(None, False)
        self.check_email_auth(self.user, False)
        self.check_email_auth(self.mod, True)
        self.check_email_auth(self.supermod, True)
    def test_email_without_email(self):
        # A poster with no address falls back to the site's from-address.
        self.supermod.email = ''
        self.supermod.save()
        self.post_data('/notes/new-moderator-post',
                self.new_post_data_email, login_as=self.supermod)
        self.assertEquals(mail.outbox[-1].from_email, settings.EMAIL_FROM)
    def test_to_lines(self):
        # Each eligible moderator is mailed exactly once (no duplicates).
        self.post_data('/notes/new-moderator-post',
                self.new_post_data_email, login_as=self.supermod)
        sent_emails = set()
        for e in mail.outbox:
            for recipient in e.recipients():
                if recipient in sent_emails:
                    raise AssertionError("Duplicate to")
                sent_emails.add(recipient)
        mod_emails = [mod.email for mod in self.moderators()]
        self.assertSameSet(sent_emails, mod_emails)
class WaitingForReplyTest(NotesPageTestBase):
    """A user note flags a channel as waiting for a reply, moderator
    activity clears the flag, and the moderation queue lists channels
    oldest-first."""

    def make_user_post(self):
        # Post a note to self.channel as the ordinary channel owner.
        self.login(self.user)
        self.post_data("/notes/new", self.make_note_post_data())

    def _reload_channel(self):
        # Fetch a fresh copy of the channel from the database.
        return Channel.objects.get(pk=self.channel.pk)

    def test_waiting_for_reply(self):
        self.assertEquals(self.channel.waiting_for_reply_date, None)
        self.make_user_post()
        refreshed = self._reload_channel()
        self.assertNotEqual(refreshed.waiting_for_reply_date, None)
        elapsed = datetime.now() - refreshed.waiting_for_reply_date
        self.assert_(elapsed < timedelta(seconds=1))

    def test_waiting_for_reply_moderator_post(self):
        self.make_user_post()
        self.login(self.moderator)
        self.post_data("/notes/new", self.make_note_post_data())
        self.assertEquals(self._reload_channel().waiting_for_reply_date, None)

    def test_waiting_for_reply_moderator_unflag(self):
        self.make_user_post()
        payload = {'action': 'mark-replied'}
        channel_url = '/channels/%d' % self.channel.id
        # A non-moderator may not clear the flag...
        response = self.post_data(channel_url, payload)
        self.assertLoginRedirect(response)
        self.assertNotEqual(self._reload_channel().waiting_for_reply_date,
                            None)
        # ...but a moderator may.
        self.login(self.moderator)
        self.post_data(channel_url, payload)
        self.assertEqual(self._reload_channel().waiting_for_reply_date, None)

    def test_waiting_for_reply_order(self):
        ages = [timedelta(days=0), timedelta(days=1), timedelta(days=2)]
        channels = []
        for age in ages:
            channel = self.make_channel(self.user)
            channel.waiting_for_reply_date = datetime.now() - age
            channel.save()
            channels.append(channel)
        self.login(self.moderator)
        page = self.get_page('/moderate/waiting')
        listed_ids = [c.id for c in page.context[0]['page'].object_list]
        # Channels waiting longest come first.
        self.assertEquals(listed_ids,
                          [channels[2].id, channels[1].id, channels[0].id])
class EmailDisableTest(TestCase):
    """Moderators can opt out of (or limit) moderator-board e-mail."""

    def setUp(self):
        TestCase.setUp(self)
        # Three moderators who will get different e-mail preferences.
        self.jane = self.make_user("jane", group='cg_moderator')
        self.bob = self.make_user("bob", group='cg_moderator')
        self.brian = self.make_user("brian", group='cg_moderator')
        # A plain user who owns a channel.
        self.stacy = self.make_user("stacy")
        self.channel = self.make_channel(self.stacy)

    def check_email_list(self, *recipients):
        # The set of addresses actually mailed must match *recipients*.
        expected = [person.email for person in recipients]
        self.assertSameSet(self.email_recipients(), expected)

    def test_disable_moderater_post_emails(self):
        preferences = ((self.jane, UserProfile.NO_EMAIL),
                       (self.bob, UserProfile.SOME_EMAIL),
                       (self.brian, UserProfile.ALL_EMAIL))
        for moderator, setting in preferences:
            profile = moderator.get_profile()
            profile.moderator_board_email = setting
            profile.save()
        note = ModeratorPost(user=self.bob,
                             title='hi',
                             body='body')
        # send_email(True): everyone except NO_EMAIL moderators is mailed.
        note.send_email(True)
        self.check_email_list(self.bob, self.brian)
        mail.outbox = []
        # send_email(False): only ALL_EMAIL moderators are mailed.
        note.send_email(False)
        self.check_email_list(self.brian)
|
kmshi/miroguide
|
channelguide/notes/tests.py
|
Python
|
agpl-3.0
| 14,380
|
[
"Brian"
] |
43fd3d5c16b50b4d7d00e4b15055c92073a6ddcba09161f92a0463b7bec0efaf
|
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Written (W) 2013 Heiko Strathmann
"""
from kameleon_mcmc.distribution.Gaussian import Gaussian
from kameleon_mcmc.experiments.SingleChainExperiment import SingleChainExperiment
from kameleon_mcmc.gp.GPData import GPData
from kameleon_mcmc.gp.mcmc.PseudoMarginalHyperparameterDistribution import \
PseudoMarginalHyperparameterDistribution
from kameleon_mcmc.kernel.GaussianKernel import GaussianKernel
from kameleon_mcmc.mcmc.MCMCChain import MCMCChain
from kameleon_mcmc.mcmc.MCMCParams import MCMCParams
from kameleon_mcmc.mcmc.output.PlottingOutput import PlottingOutput
from kameleon_mcmc.mcmc.output.StatisticsOutput import StatisticsOutput
from kameleon_mcmc.mcmc.samplers.AdaptiveMetropolisLearnScale import \
AdaptiveMetropolisLearnScale
from kameleon_mcmc.mcmc.samplers.KameleonWindowLearnScale import KameleonWindowLearnScale
from kameleon_mcmc.mcmc.samplers.StandardMetropolis import StandardMetropolis
from matplotlib.pyplot import plot
from numpy.lib.twodim_base import eye
from numpy.ma.core import mean, std, ones, shape
from numpy.ma.extras import vstack, hstack
import os
import sys
if __name__ == '__main__':
    # NOTE(review): Python 2 script (print statement at the end).
    # sample data: a circle-shaped and a rectangle-shaped class
    data_circle, labels_circle=GPData.sample_circle_data(n=40, seed_init=1)
    data_rect, labels_rect=GPData.sample_rectangle_data(n=60, seed_init=1)
    # combine
    data=vstack((data_circle, data_rect))
    labels=hstack((labels_circle, labels_rect))
    dim=shape(data)[1]
    # normalise data (zero mean, unit variance per feature)
    data-=mean(data, 0)
    data/=std(data,0)
    # plot the two classes (figure is built but not shown here)
    idx_a=labels>0
    idx_b=labels<0
    plot(data[idx_a,0], data[idx_a,1],"ro")
    plot(data[idx_b,0], data[idx_b,1],"bo")
    # prior on theta and posterior target estimate
    theta_prior=Gaussian(mu=0*ones(dim), Sigma=eye(dim)*5)
    target=PseudoMarginalHyperparameterDistribution(data, labels, \
                                                    n_importance=100, prior=theta_prior, \
                                                    ridge=1e-3)
    # create sampler; alternatives kept below for easy switching
    burnin=10000
    num_iterations=burnin+300000
    kernel = GaussianKernel(sigma=35.0)
    sampler=KameleonWindowLearnScale(target, kernel, stop_adapt=burnin)
#    sampler=AdaptiveMetropolisLearnScale(target)
#    sampler=StandardMetropolis(target)
    start=0.0*ones(target.dimension)
    params = MCMCParams(start=start, num_iterations=num_iterations, burnin=burnin)
    # create MCMC chain with periodic statistics output
    chain=MCMCChain(sampler, params)
    chain.append_mcmc_output(StatisticsOutput(print_from=0, lag=100))
#    chain.append_mcmc_output(PlottingOutput(plot_from=0, lag=500))
    # create experiment instance to store results; the experiment
    # directory is derived from this script's file name
    experiment_dir = str(os.path.abspath(sys.argv[0])).split(os.sep)[-1].split(".")[0] + os.sep
    experiment = SingleChainExperiment(chain, experiment_dir)
    experiment.run()
    # report the median-heuristic kernel width of the produced samples
    sigma=GaussianKernel.get_sigma_median_heuristic(experiment.mcmc_chain.samples.T)
    print "median kernel width", sigma
|
karlnapf/kameleon-mcmc
|
kameleon_mcmc/gp/scripts/circle_rect_hybrid_gaussian_ard.py
|
Python
|
bsd-2-clause
| 3,207
|
[
"Gaussian"
] |
351c11575f48e262acfe34a0ae9845283f25a88ccf5752e4241f12f7da5f291a
|
from gpaw.utilities import unpack
from ase.units import Hartree
import cPickle
import numpy as np
from gpaw.mpi import world, rank, send_string, receive_string, broadcast_string
from gpaw.utilities.blas import gemm
from gpaw.utilities.timing import Timer
from gpaw.utilities.lapack import inverse_general
import copy
import _gpaw
def tw(mat, filename):
    """Pickle *mat* to *filename* (binary mode, pickle protocol 2).

    Fixed to use a with-statement (and the open() builtin rather than
    the deprecated file() constructor) so the handle is closed even if
    cPickle.dump raises.
    """
    with open(filename, 'wb') as fd:
        cPickle.dump(mat, fd, 2)
def tr(filename):
    """Load and return a pickled object from *filename*.

    BUG FIX: open in binary mode ('rb'); the original text mode 'r'
    corrupts pickle data on platforms that translate line endings
    (e.g. Windows).  Also uses a with-statement so the handle is
    closed even if cPickle.load raises.
    """
    with open(filename, 'rb') as fd:
        return cPickle.load(fd)
def write(filename, name, data, dimension, dtype=float):
    """Write *data* as array *name* with the given *dimension* sizes to
    a gpaw tar file; only the MPI master rank performs the write."""
    import gpaw.io.tar as io
    if world.rank == 0:
        writer = io.Writer(filename)
        axis_names = ()
        for axis, size in enumerate(dimension):
            writer.dimension(str(axis), size)
            axis_names += (str(axis),)
        writer.add(name, axis_names, dtype=dtype)
        writer.fill(data)
        writer.close()
def fermidistribution(energy, kt):
    """Fermi-Dirac occupation with the Fermi level pinned at zero."""
    boltzmann_factor = np.exp(energy / kt)
    return 1.0 / (1.0 + boltzmann_factor)
def zeroTFermi(energy):
    """Zero-temperature Fermi occupation: a step function that is 1 for
    real part <= 0 (below the zero Fermi level) and 0 above it."""
    return 0. if np.real(energy) > 0 else 1.
def get_tri_type(mat):
    """Classify *mat* as lower ('L') or upper ('U') triangular.

    Compares the total magnitude below vs. above the main diagonal;
    the diagonal itself contributes equally to both sums and cancels
    out of the comparison.  Warns when the difference is below a small
    tolerance (the matrix is effectively not triangular).
    """
    tol = 1e-10
    amat = abs(mat)
    dim = amat.shape[-1]
    lower_sum = 0.
    upper_sum = 0.
    for i in range(dim):
        # BUG FIX: the original loop iterated over ``i`` but indexed
        # the diagonals with an undefined name ``j`` (NameError); it
        # also shadowed the builtin ``sum``.
        lower_sum += np.trace(amat, -i)
        upper_sum += np.trace(amat, i)
    diff = lower_sum - upper_sum
    if diff >= 0:
        ans = 'L'
    else:
        ans = 'U'
    if abs(diff) < tol:
        print('Warning: can not define the triangular matrix')
    return ans
def tri2full(M, UL='L'):
    """Symmetrize a triangular matrix in place.

    UL='L': copy the conjugated lower triangle into the upper triangle;
    UL='U': the reverse.  Afterwards M equals its own adjoint (M=M^d).
    """
    n = len(M)
    if UL == 'L':
        for row in range(n - 1):
            M[row, row:] = np.conj(M[row:, row])
    elif UL == 'U':
        for row in range(n - 1):
            M[row:, row] = np.conj(M[row, row:])
def dagger(matrix):
    """Hermitian conjugate (conjugate transpose) of *matrix*."""
    return matrix.T.conj()
def get_matrix_index(ind1, ind2=None):
    """Build index grids for fancy (two-axis) matrix indexing.

    With only *ind1*: return an (n1, n1) array whose rows repeat ind1,
    suitable for symmetric block indexing via ``mtx[ind.T, ind]``.
    With *ind2*: return the pair of index arrays addressing the
    (n1, n2) sub-block.
    """
    # BUG FIX: `ind2 == None` performs an elementwise comparison when
    # ind2 is a numpy array and then fails (or misbehaves) in boolean
    # context; an identity test is required.
    if ind2 is None:
        dim1 = len(ind1)
        return np.resize(ind1, (dim1, dim1))
    dim1 = len(ind1)
    dim2 = len(ind2)
    return np.resize(ind1, (dim2, dim1)).T, np.resize(ind2, (dim1, dim2))
def aa1d(a, d=2):
    """Average *a* over all axes except *d*, returning the 1-D profile
    along axis *d*."""
    n = a.shape[d]
    profile = np.array([np.sum(np.take(a, [k], axis=d)) for k in range(n)])
    return profile * n / np.prod(a.shape)
def aa2d(a, d=0):
    """Average *a* over axis *d*, returning the remaining plane
    (array average in two dimensions)."""
    axis_len = a.shape[d]
    return np.sum(a, axis=d) / axis_len
def k2r_hs(h_skmm, s_kmm, ibzk_kc, weight_k, R_c=(0,0,0), magnet=None):
    """Fourier-transform k-space Hamiltonian/overlap matrices to the
    real-space cell displaced by *R_c*.

    h_skmm: (nspins, nk, nbf, nbf) Hamiltonian or None.
    s_kmm:  (nk, nbf, nbf) overlap or None.
    ibzk_kc, weight_k: scaled k-points and their weights.
    Returns (h_smm, s_mm), or just the one that was requested.
    *magnet* is kept for interface compatibility; the code that used
    it is gone (it was commented out) and it is currently ignored.
    """
    phase_k = np.dot(2 * np.pi * ibzk_kc, R_c)
    c_k = np.exp(1.0j * phase_k) * weight_k
    c_k.shape = (len(ibzk_kc), 1, 1)
    # BUG FIX: `!= None` triggers an elementwise comparison for numpy
    # arrays (ambiguous truth value in modern numpy); identity tests
    # are required here.
    if h_skmm is not None:
        nbf = h_skmm.shape[-1]
        nspins = len(h_skmm)
        h_smm = np.empty((nspins, nbf, nbf), complex)
        for s in range(nspins):
            h_smm[s] = np.sum(h_skmm[s] * c_k, axis=0)
    if s_kmm is not None:
        nbf = s_kmm.shape[-1]
        s_mm = np.empty((nbf, nbf), complex)
        s_mm[:] = np.sum(s_kmm * c_k, axis=0)
    if h_skmm is not None and s_kmm is not None:
        return h_smm, s_mm
    elif h_skmm is None:
        return s_mm
    return h_smm
def r2k_hs(h_srmm, s_rmm, R_vector, kvector=(0,0,0), magnet=None):
    """Inverse transform of k2r_hs: sum real-space cell matrices at the
    displacements *R_vector* into the k-space matrix at *kvector*.

    h_srmm: (nspins, nR, nbf, nbf) Hamiltonian or None.
    s_rmm:  (nR, nbf, nbf) overlap or None.
    Returns (h_smm, s_mm), or just the one that was requested.
    *magnet* is kept for interface compatibility and currently ignored
    (the code using it was commented out).
    """
    phase_k = np.dot(2 * np.pi * R_vector, kvector)
    c_k = np.exp(-1.0j * phase_k)
    c_k.shape = (len(R_vector), 1, 1)
    # BUG FIX: `!= None` elementwise-compares numpy arrays; use `is`.
    if h_srmm is not None:
        nbf = h_srmm.shape[-1]
        nspins = len(h_srmm)
        h_smm = np.empty((nspins, nbf, nbf), complex)
        for s in range(nspins):
            h_smm[s] = np.sum(h_srmm[s] * c_k, axis=0)
    if s_rmm is not None:
        nbf = s_rmm.shape[-1]
        s_mm = np.empty((nbf, nbf), complex)
        s_mm[:] = np.sum(s_rmm * c_k, axis=0)
    if h_srmm is not None and s_rmm is not None:
        return h_smm, s_mm
    elif h_srmm is None:
        return s_mm
    return h_smm
def collect_lead_mat(lead_hsd, lead_couple_hsd, s, pk, flag='S'):
    """Collect the principal-layer and coupling matrices of all leads.

    *flag* selects the operator: 'S' (overlap), 'H' (Hamiltonian at
    spin *s*), anything else -> 'D' (density matrix at spin *s*);
    *pk* is the perpendicular k-point index.

    Returns (diag_h, upc_h, dwnc_h): per-lead on-layer blocks (deep
    copies) and the two coupling blocks recovered from the sparse
    coupling matrices.
    """
    diag_h = []
    upc_h = []
    dwnc_h = []
    for i, hsd, c_hsd in zip(range(len(lead_hsd)), lead_hsd, lead_couple_hsd):
        if flag == 'S':
            band_mat, cp_mat = hsd.S[pk], c_hsd.S[pk]
        elif flag == 'H':
            band_mat, cp_mat = hsd.H[s][pk], c_hsd.H[s][pk]
        else:
            band_mat, cp_mat = hsd.D[s][pk], c_hsd.D[s][pk]
        diag_h.append(copy.deepcopy(band_mat))
        # 'c'/'n' presumably recover the conjugated and normal coupling
        # directions (upper/lower off-diagonal blocks) -- confirm
        # against gpaw.transport.sparse_matrix.
        upc_h.append(cp_mat.recover('c'))
        dwnc_h.append(cp_mat.recover('n'))
    return diag_h, upc_h, dwnc_h
def get_hs(atoms):
    """Calculate the Hamiltonian and overlap matrix.

    Extracts the LCAO H and S matrices from the converged calculator
    attached to *atoms*.  Returns (H_sqMM, S_qMM); H is converted to eV
    and both matrices are expanded from triangular to full hermitian
    storage, then broadcast over the grid communicator.
    """
    calc = atoms.calc
    wfs = calc.wfs
    wfs.gd.comm.broadcast(wfs.S_qMM, 0)
    # Ef is only needed by the (commented-out) Fermi-level shift below.
    Ef = calc.get_fermi_level()
    eigensolver = wfs.eigensolver
    ham = calc.hamiltonian
    S_qMM = wfs.S_qMM.copy()
    for S_MM in S_qMM:
        tri2full(S_MM)
    H_sqMM = np.empty((wfs.nspins,) + S_qMM.shape, wfs.dtype)
    for kpt in wfs.kpt_u:
        H_MM = eigensolver.calculate_hamiltonian_matrix(ham, wfs, kpt)
        tri2full(H_MM)
        # Convert from Hartree (atomic units) to eV.
        H_MM *= Hartree
        #H_MM -= Ef * S_qMM[kpt.q]
        H_sqMM[kpt.s, kpt.q] = H_MM
    wfs.gd.comm.broadcast(H_sqMM, 0)
    return H_sqMM, S_qMM
def substract_pk(d, npk, ntk, kpts, k_mm, hors='s', position=[0, 0, 0], magnet=None):
    """Contract the transport-direction k-points out of *k_mm*.

    For each of the *npk* perpendicular k-points, the *ntk* transport
    k-points along direction *d* are Fourier-summed (via k2r_hs) into
    the real-space cell selected by *position*.  hors='h' treats k_mm
    as a spin-resolved Hamiltonian-like array, hors='s' as a spinless
    overlap-like array.

    Note: the mutable default for *position* is harmless here because
    it is only read, never modified.
    """
    weight = np.array([1.0 / ntk] * ntk )
    if hors not in 'hs':
        raise KeyError('hors should be h or s!')
    if hors == 'h':
        dim = k_mm.shape[:]
        # drop the transport k-point axis: (ns, npk*ntk, ...) -> (ns, npk, ...)
        dim = (dim[0],) + (dim[1] // ntk,) + dim[2:]
        pk_mm = np.empty(dim, k_mm.dtype)
        dim = (dim[0],) + (ntk,) + dim[2:]
        tk_mm = np.empty(dim, k_mm.dtype)
    elif hors == 's':
        dim = k_mm.shape[:]
        dim = (dim[0] // ntk,) + dim[1:]
        pk_mm = np.empty(dim, k_mm.dtype)
        dim = (ntk,) + dim[1:]
        tk_mm = np.empty(dim, k_mm.dtype)
    tkpts = pick_out_tkpts(d, npk, ntk, kpts)
    for i in range(npk):
        n = i * ntk
        # gather the ntk transport k-points belonging to this
        # perpendicular k-point, then Fourier-sum them
        for j in range(ntk):
            if hors == 'h':
                tk_mm[:, j] = np.copy(k_mm[:, n + j])
            elif hors == 's':
                tk_mm[j] = np.copy(k_mm[n + j])
        if hors == 'h':
            pk_mm[:, i] = k2r_hs(tk_mm, None, tkpts, weight, position, magnet)
        elif hors == 's':
            pk_mm[i] = k2r_hs(None, tk_mm, tkpts, weight, position, magnet)
    return pk_mm
def pick_out_tkpts(d, npk, ntk, kpts):
    """Return the first *ntk* k-points projected onto transport
    direction *d* (the other two components are zeroed).  *npk* is
    unused but kept for interface compatibility."""
    tkpts = np.zeros([ntk, 3])
    tkpts[:, d] = kpts[:ntk, d]
    return tkpts
def count_tkpts_num(d, kpts):
    """Count distinct k-point components along direction *d*, merging
    values closer than an absolute tolerance of 1e-6."""
    tol = 1e-6
    distinct = [kpts[0]]
    for cand in kpts:
        if not any(abs(cand[d] - ref[d]) < tol for ref in distinct):
            distinct.append(cand)
    return len(distinct)
def dot(a, b, transa='n'):
    """Matrix-multiply the 2-D arrays *a* and *b* via BLAS gemm.

    Mixed real/complex operands are promoted to complex so gemm sees
    matching dtypes; *transa* ('n'/'t'/'c') is forwarded to gemm.
    Note the gemm argument order: gemm(alpha, b, a, beta, out, transa)
    computes out = a . b here.
    """
    assert len(a.shape) == 2 and a.shape[1] == b.shape[0]
    dtype = complex
    if a.dtype == complex and b.dtype == complex:
        c = a
        d = b
    elif a.dtype == float and b.dtype == complex:
        c = np.array(a, complex)
        d = b
    elif a.dtype == complex and b.dtype == float:
        d = np.array(b, complex)
        c = a
    else:
        # both real: keep the computation in float
        dtype = float
        c = a
        d = b
    e = np.zeros([c.shape[0], d.shape[1]], dtype)
    gemm(1.0, np.ascontiguousarray(d), np.ascontiguousarray(c), 0.0, e, transa)
    return e
def gcd(m, n):
    """Greatest common divisor via the Euclidean algorithm."""
    while n != 0:
        m, n = n, m % n
    return m
def plot_diag(mtx, ind=1):
    """Plot the diagonal of the (square) matrix *mtx* with pylab.

    NOTE(review): `pick` is not defined anywhere in this module --
    presumably a helper that selects a component (real/imag/abs?) of
    the diagonal according to *ind*; confirm its origin before using
    this function.
    """
    import pylab
    dim = mtx.shape
    if len(dim) != 2:
        print 'Warning! check the dimenstion of the matrix'
    if dim[0] != dim[1]:
        print 'Warinng! check if the matrix is square'
    diag_element = np.diag(mtx)
    y_data = pick(diag_element, ind)
    x_data = range(len(y_data))
    pylab.plot(x_data, y_data,'b-o')
    pylab.show()
def get_atom_indices(subatoms, setups):
    """Return the LCAO basis-function indices belonging to the atoms
    listed in *subatoms*.  *setups* supplies the number of orbitals
    (``.nao``) carried by each atom."""
    nao_per_atom = [setup.nao for setup in setups]
    index = []
    for atom in subatoms:
        offset = int(np.sum(np.array(nao_per_atom[:atom], int)))
        index.extend(offset + n for n in range(nao_per_atom[atom]))
    return np.array(index, int)
def mp_distribution(e, kt, n=1):
    """Methfessel-Paxton smearing function of order *n* at energy *e*
    with broadening *kt* (Fermi level fixed at zero)."""
    x = e / kt
    result = 0.5 * error_function(x)
    gauss = np.exp(-x**2)
    for order in range(1, n + 1):
        result += coff_function(order) * hermite_poly(2 * order - 1, x) * gauss
    return result
def coff_function(n):
    """Methfessel-Paxton expansion coefficient
    A_n = (-1)^n / (n! * 4^n * sqrt(pi)).

    Uses np.prod instead of np.product, which was removed in NumPy 2.0;
    np.prod over the empty range gives 1.0, i.e. 0! == 1.
    """
    return (-1)**n / (np.prod(np.arange(1, n + 1)) * 4.** n * np.sqrt(np.pi))
def hermite_poly(n, x):
    """Physicists' Hermite polynomial H_n(x) from the recurrence
    H_n = 2x H_{n-1} - 2(n-1) H_{n-2}.

    Rewritten iteratively: the original double recursion re-evaluated
    each lower order exponentially many times (O(2^n) calls).
    """
    if n == 0:
        return 1
    h_prev, h_cur = 1, 2 * x
    for k in range(2, n + 1):
        h_prev, h_cur = h_cur, 2 * x * h_cur - 2 * (k - 1) * h_prev
    return h_cur
def error_function(x):
    """Complementary error function erfc(x), via the rational Chebyshev
    fit of Numerical Recipes' ``erfcc`` (absolute error ~1.2e-7)."""
    z = abs(x)
    t = 1. / (1. + 0.5 * z)
    # Horner evaluation, innermost coefficient first (same order as
    # the original nested expression).
    coeffs = (0.17087277, -0.82215223, 1.48851587, -1.13520398,
              0.27886807, -0.18628806, 0.09678418, 0.37409196,
              1.00002368)
    poly = 0.0
    for c in coeffs:
        poly = t * (c + poly)
    r = t * np.exp(-z * z - 1.26551223 + poly)
    # erfc(-x) == 2 - erfc(x)
    if x >= 0.:
        return r
    return 2. - r
def sum_by_unit(x, unit):
    """Coarse-grain *x* by averaging consecutive blocks of length
    *unit*.  As in the original, the first entry is overwritten with
    the second and the last with the second-to-last (endpoint
    smoothing), and the final partial block is never computed directly.
    """
    npts = x.shape[0]
    ncoarse = int(np.ceil(npts / unit))
    out = np.empty([ncoarse], dtype=x.dtype)
    for block in range(ncoarse - 1):
        out[block] = np.sum(x[block * unit: (block + 1) * unit]) / unit
    out[0] = out[1]
    out[-1] = out[-2]
    return out
def diag_cell(cell):
    """Return the diagonal of a 3x3 cell matrix, or *cell* unchanged
    when it is already a 1-D vector of lattice constants."""
    if cell.ndim == 2:
        return np.diag(cell)
    return cell
def get_pk_hsd(d, ntk, kpts, hl_skmm, sl_kmm, dl_skmm, txt=None,
               dtype=complex, direction=0, magnet=None):
    """Build principal-layer and coupling H/S/D matrices for a lead.

    Contracts the *ntk* transport k-points along direction *d* out of
    the k-space Hamiltonian (hl_skmm), overlap (sl_kmm) and density
    matrix (dl_skmm): on-layer matrices at displacement 0 and coupling
    matrices at displacement +/-1 (chosen by *direction*).  A
    displacement-2 overlap is computed only as a sanity check that the
    principal layer is large enough.  Returns
    (h_pl, s_pl, d_pl*ntk, h_cpl, s_cpl, d_cpl*ntk).
    """
    npk = len(kpts) // ntk
    position = [0, 0, 0]
    hl_spkmm = substract_pk(d, npk, ntk, kpts, hl_skmm, hors='h', magnet=magnet)
    dl_spkmm = substract_pk(d, npk, ntk, kpts, dl_skmm, hors='h', magnet=magnet)
    sl_pkmm = substract_pk(d, npk, ntk, kpts, sl_kmm, hors='s', magnet=magnet)
    # coupling to the neighbouring principal layer: +1 or -1 cell
    if direction==0:
        position[d] = 1
    else:
        position[d] = -1
    hl_spkcmm = substract_pk(d, npk, ntk, kpts, hl_skmm, 'h', position, magnet=magnet)
    dl_spkcmm = substract_pk(d, npk, ntk, kpts, dl_skmm, 'h', position, magnet=magnet)
    sl_pkcmm = substract_pk(d, npk, ntk, kpts, sl_kmm, 's', position, magnet=magnet)
    # second-neighbour overlap should vanish if the layer is big enough
    tol = 1e-6
    position[d] = 2.0
    s_test = substract_pk(d, npk, ntk, kpts, sl_kmm, 's', position, magnet=magnet)
    matmax = np.max(abs(s_test))
    if matmax > tol:
        if txt != None:
            txt('Warning*: the principle layer should be larger, \
matmax=%f' % matmax)
        else:
            print 'Warning*: the principle layer should be larger, \
matmax=%f' % matmax
    if dtype == float:
        hl_spkmm = np.real(hl_spkmm).copy()
        sl_pkmm = np.real(sl_pkmm).copy()
        dl_spkmm = np.real(dl_spkmm).copy()
        hl_spkcmm = np.real(hl_spkcmm).copy()
        sl_pkcmm = np.real(sl_pkcmm).copy()
        dl_spkcmm = np.real(dl_spkcmm).copy()
    return hl_spkmm, sl_pkmm, dl_spkmm * ntk, hl_spkcmm, \
                                          sl_pkcmm, dl_spkcmm * ntk
def get_lcao_density_matrix(calc):
    """Assemble the LCAO density matrix d_skmm from the k-point list of
    a converged calculator.

    Returns an array of shape (nspins_local, nk, nao, nao) in the
    wavefunction dtype.
    """
    wfs = calc.wfs
    kpts = wfs.ibzk_qc
    nq = len(kpts)
    my_ns = len(wfs.kpt_u) // nq
    nao = wfs.setups.nao
    # calculate_density_matrix involves gemm and doesn't work well with empty()
    d_skmm = np.zeros([my_ns, nq, nao, nao], wfs.dtype)
    for kpt in wfs.kpt_u:
        if my_ns == 1:
            # spin index is implicit when only one spin is local
            wfs.calculate_density_matrix(kpt.f_n, kpt.C_nM, d_skmm[0, kpt.q])
        else:
            wfs.calculate_density_matrix(kpt.f_n, kpt.C_nM, d_skmm[kpt.s, kpt.q])
    return d_skmm
def collect_atomic_matrices(asp, setups, ns, comm, rank_a):
    """Gather per-atom (packed) matrices onto every rank.

    For atoms not stored locally (asp.get(a) is None) an empty buffer
    of the packed size ni*(ni+1)/2 is allocated and filled by a
    broadcast from the owning rank rank_a[a].  Returns the full list,
    one entry per atom.
    """
    all_asp = []
    for a, setup in enumerate(setups):
        sp = asp.get(a)
        if sp is None:
            ni = setup.ni
            # packed (triangular) storage of a symmetric ni x ni matrix
            sp = np.empty((ns, ni * (ni + 1) // 2))
        if comm.size > 1:
            comm.broadcast(sp, rank_a[a])
        all_asp.append(sp)
    return all_asp
def distribute_atomic_matrices(all_asp, asp, setups):
    """Copy the globally collected atomic matrices in *all_asp* into
    the (possibly distributed) container *asp*, but only for the atoms
    that are present locally (asp.get(a) is not None)."""
    for a, _setup in enumerate(setups):
        if asp.get(a) is not None:
            asp[a] = all_asp[a]
def collect_and_distribute_atomic_matrices(D_ap, setups, setups0, rank_a, comm, keys):
    """Redistribute packed atomic matrices between two setups layouts.

    *keys* lists the atom indices whose matrices are locally available
    in D_ap (in the same order).  Matrices for missing atoms are
    received by broadcast from rank_a[a]; the result is re-indexed for
    *setups0* and returned as a list aligned with *keys*.
    """
    gD_ap = []
    D_ap0 = [None] * len(keys)
    for a, setup in enumerate(setups):
        if a not in keys:
            ni = setup.ni
            # placeholder buffer, filled by the broadcast below
            sp = np.empty((ni * (ni + 1) // 2))
        else:
            sp = D_ap[keys.index(a)]
        if comm.size > 1:
            comm.broadcast(sp, rank_a[a])
        gD_ap.append(sp)
    for a in range(len(setups0)):
        if a in keys:
            D_ap0[keys.index(a)] = gD_ap[a]
    return D_ap0
def generate_selfenergy_database(atoms, ntk, filename, direction=0, kt=0.1,
                                 bias=[-3,3], depth=3, comm=None):
    """Precompute lead self-energies on a contour energy path and dump
    each one to its own pickle file named '<pk>_<nid>'.

    *atoms* must carry a converged LCAO calculator for the lead; *ntk*
    is the number of transport k-points.  *filename* is currently
    unused (kept for interface compatibility).
    """
    from gpaw.transport.sparse_matrix import Banded_Sparse_HSD, CP_Sparse_HSD, Se_Sparse_Matrix
    from gpaw.transport.selfenergy import LeadSelfEnergy
    from gpaw.transport.contour import Contour
    hl_skmm, sl_kmm = get_hs(atoms)
    dl_skmm = get_lcao_density_matrix(atoms.calc)
    fermi = atoms.calc.get_fermi_level()
    wfs = atoms.calc.wfs
    hl_spkmm, sl_pkmm, dl_spkmm, \
    hl_spkcmm, sl_pkcmm, dl_spkcmm = get_pk_hsd(2, ntk,
                                                wfs.ibzk_qc,
                                                hl_skmm, sl_kmm, dl_skmm,
                                                None, wfs.dtype,
                                                direction=direction)
    # BUG FIX: integer (floor) division is required here -- plain `/`
    # yields a float under Python 3, breaking the range() loops below.
    # This also matches get_pk_hsd, which already uses `//`.
    my_npk = len(wfs.ibzk_qc) // ntk
    my_nspins = len(wfs.kpt_u) // (my_npk * ntk)
    lead_hsd = Banded_Sparse_HSD(wfs.dtype, my_nspins, my_npk)
    lead_couple_hsd = CP_Sparse_HSD(wfs.dtype, my_nspins, my_npk)
    for pk in range(my_npk):
        lead_hsd.reset(0, pk, sl_pkmm[pk], 'S', init=True)
        lead_couple_hsd.reset(0, pk, sl_pkcmm[pk], 'S', init=True)
        for s in range(my_nspins):
            lead_hsd.reset(s, pk, hl_spkmm[s, pk], 'H', init=True)
            lead_hsd.reset(s, pk, dl_spkmm[s, pk], 'D', init=True)
            lead_couple_hsd.reset(s, pk, hl_spkcmm[s, pk], 'H', init=True)
            lead_couple_hsd.reset(s, pk, dl_spkcmm[s, pk], 'D', init=True)
    lead_se = LeadSelfEnergy(lead_hsd, lead_couple_hsd)
    contour = Contour(kt, [fermi] * 2, bias, depth, comm=comm)
    path = contour.get_plot_path(ex=True)
    for nid, energy in zip(path.my_nids, path.my_energies):
        for kpt in wfs.kpt_u:
            if kpt.q % ntk == 0:
                flag = str(kpt.k // ntk) + '_' + str(nid)
                lead_se.s = kpt.s
                lead_se.pk = kpt.q // ntk
                data = lead_se(energy)
                # BUG FIX: binary pickle must be written in 'wb' mode
                # (was file(flag, 'w'); `file` is also Python-2-only).
                with open(flag, 'wb') as fd:
                    cPickle.dump(data, fd, 2)
def test_selfenergy_interpolation(atoms, ntk, filename, begin, end, base, scale, direction=0):
from gpaw.transport.sparse_matrix import Banded_Sparse_HSD, CP_Sparse_HSD, Se_Sparse_Matrix
from gpaw.transport.selfenergy import LeadSelfEnergy
from gpaw.transport.contour import Contour
hl_skmm, sl_kmm = get_hs(atoms)
dl_skmm = get_lcao_density_matrix(atoms.calc)
fermi = atoms.calc.get_fermi_level()
wfs = atoms.calc.wfs
hl_spkmm, sl_pkmm, dl_spkmm, \
hl_spkcmm, sl_pkcmm, dl_spkcmm = get_pk_hsd(2, ntk,
wfs.ibzk_qc,
hl_skmm, sl_kmm, dl_skmm,
None, wfs.dtype,
direction=direction)
my_npk = len(wfs.ibzk_qc) / ntk
my_nspins = len(wfs.kpt_u) / ( my_npk * ntk)
lead_hsd = Banded_Sparse_HSD(wfs.dtype, my_nspins, my_npk)
lead_couple_hsd = CP_Sparse_HSD(wfs.dtype, my_nspins, my_npk)
for pk in range(my_npk):
lead_hsd.reset(0, pk, sl_pkmm[pk], 'S', init=True)
lead_couple_hsd.reset(0, pk, sl_pkcmm[pk], 'S', init=True)
for s in range(my_nspins):
lead_hsd.reset(s, pk, hl_spkmm[s, pk], 'H', init=True)
lead_hsd.reset(s, pk, dl_spkmm[s, pk], 'D', init=True)
lead_couple_hsd.reset(s, pk, hl_spkcmm[s, pk], 'H', init=True)
lead_couple_hsd.reset(s, pk, dl_spkcmm[s, pk], 'D', init=True)
lead_se = LeadSelfEnergy(lead_hsd, lead_couple_hsd)
begin += fermi
end += fermi
ee = np.linspace(begin, end, base)
cmp_ee = np.linspace(begin, end, base * scale)
se = []
cmp_se = []
from scipy import interpolate
for e in ee:
se.append(lead_se(e).recover())
se = np.array(se)
ne, ny, nz= se.shape
nie = len(cmp_ee)
data = np.zeros([nie, ny, nz], se.dtype)
for yy in range(ny):
for zz in range(nz):
ydata = se[:, yy, zz]
f = interpolate.interp1d(ee, ydata)
data[:, yy, zz] = f(cmp_ee)
inter_se_linear = data
for e in cmp_ee:
cmp_se.append(lead_se(e).recover())
fd = file(filename, 'w')
cPickle.dump((cmp_se, inter_se_linear, ee, cmp_ee), fd, 2)
fd.close()
for i,e in enumerate(cmp_ee):
print e, np.max(abs(cmp_se[i] - inter_se_linear[i])), 'linear', np.max(abs(cmp_se[i]))
def path_selfenergy(atoms, ntk, filename, begin, end, num= 257, direction=0):
    """Compute lead self-energies on *num* energies in [begin, end]
    (relative to the Fermi level) and pickle (se, ee) to
    '<filename>_<rank>'."""
    from gpaw.transport.sparse_matrix import Banded_Sparse_HSD, CP_Sparse_HSD, Se_Sparse_Matrix
    from gpaw.transport.selfenergy import LeadSelfEnergy
    from gpaw.transport.contour import Contour
    hl_skmm, sl_kmm = get_hs(atoms)
    dl_skmm = get_lcao_density_matrix(atoms.calc)
    fermi = atoms.calc.get_fermi_level()
    wfs = atoms.calc.wfs
    hl_spkmm, sl_pkmm, dl_spkmm, \
    hl_spkcmm, sl_pkcmm, dl_spkcmm = get_pk_hsd(2, ntk,
                                                wfs.ibzk_qc,
                                                hl_skmm, sl_kmm, dl_skmm,
                                                None, wfs.dtype,
                                                direction=direction)
    # BUG FIX: floor division (was `/`, float under Python 3; `//` also
    # matches get_pk_hsd).
    my_npk = len(wfs.ibzk_qc) // ntk
    my_nspins = len(wfs.kpt_u) // (my_npk * ntk)
    lead_hsd = Banded_Sparse_HSD(wfs.dtype, my_nspins, my_npk)
    lead_couple_hsd = CP_Sparse_HSD(wfs.dtype, my_nspins, my_npk)
    for pk in range(my_npk):
        lead_hsd.reset(0, pk, sl_pkmm[pk], 'S', init=True)
        lead_couple_hsd.reset(0, pk, sl_pkcmm[pk], 'S', init=True)
        for s in range(my_nspins):
            lead_hsd.reset(s, pk, hl_spkmm[s, pk], 'H', init=True)
            lead_hsd.reset(s, pk, dl_spkmm[s, pk], 'D', init=True)
            lead_couple_hsd.reset(s, pk, hl_spkcmm[s, pk], 'H', init=True)
            lead_couple_hsd.reset(s, pk, dl_spkcmm[s, pk], 'D', init=True)
    lead_se = LeadSelfEnergy(lead_hsd, lead_couple_hsd)
    begin += fermi
    end += fermi
    ee = np.linspace(begin, end, num)
    se = []
    for e in ee:
        se.append(lead_se(e).recover())
    se = np.array(se)
    # BUG FIX: binary pickle needs 'wb' (was file(..., 'w')).
    with open(filename + '_' + str(world.rank), 'wb') as fd:
        cPickle.dump((se, ee), fd, 2)
def sort_atoms(atoms):
    """Sort *atoms* in place by position: z first, then y, then x.

    fuzzy_sort groups coordinates within a tolerance so that tiny
    numerical differences do not change the ordering; the group ranks
    are combined into a single sort key with decreasing weight.
    Positions, atomic numbers and (if present) magnetic moments are
    permuted consistently.
    """
    pos = atoms.positions.copy()
    ind2 = fuzzy_sort(pos[:,2])
    ind1 = fuzzy_sort(pos[:,1])
    ind0 = fuzzy_sort(pos[:,0])
    # z dominates, then y, then x (assumes fewer than 1e6 groups each)
    tmp = ind2 * 1e12 + ind1 * 1e6 + ind0
    indices = np.argsort(tmp)
    atoms.positions = atoms.positions[indices]
    atoms.numbers = atoms.numbers[indices]
    if 'magmoms' in atoms.arrays:
        atoms.arrays['magmoms'] = atoms.arrays['magmoms'][indices]
def fuzzy_sort(seq0, tol=1e-6):
    """Rank the entries of *seq0*, giving equal rank to values that lie
    within *tol* of each other.

    Returns an array (same dtype as seq0) of group ranks: 0 for the
    smallest group of values, 1 for the next, and so on.
    """
    work = seq0.copy()
    ranks = np.zeros_like(work)
    assigned = []
    group = 0
    while len(assigned) < len(work):
        lowest = work[np.argmin(work)]
        # all entries within tol of the current minimum form one group
        members = [j for j, diff in enumerate(work - lowest)
                   if abs(diff) < tol]
        work[members] = 1e19  # exclude from further minima searches
        ranks[members] = group
        assigned += members
        group += 1
    return ranks
def cubicing(atoms):
    """Double *atoms* along the second lattice vector to turn a
    [a,0,0],[a/2,b,0],[0,0,c] cell into an orthogonal (cubic-style)
    cell.  Modifies and returns *atoms* (atom count doubles)."""
    cell = atoms._cell
    positions = atoms.positions
    print 'cubicing only ok to [a,0,0][a/2, b, 0],[0,0,c] type '
    tol = 1e-6
    # only proceed when the cell really has the assumed shape
    if abs(cell[1,0]*2 - cell[0,0]) < tol:
        print 'ok, possible to get a cubic structure'
        natoms = len(positions)
        new_pos = np.empty([natoms * 2, 3])
        # original atoms plus a copy shifted by the second cell vector
        for pos, i in zip(positions, range(natoms)):
            new_pos[i] = pos
            new_pos[i + natoms] = pos + cell[1]
        atoms += atoms
        atoms.positions = new_pos
        sort_atoms(atoms)
        # orthogonalize: drop the shear component, double the length
        cell[1, 0] = 0
        cell[1, 1] *= 2
        atoms.set_cell(cell)
    return atoms
class P_info:
    """Scratch record used by PutP while it collects the three basis
    indices of one p shell; N counts how many have been seen so far."""
    def __init__(self):
        # basis indices of the px / py / pz functions
        self.x = self.y = self.z = 0
        # sign conventions (currently always +1, kept for reference)
        self.Pxsign = self.Pysign = self.Pzsign = 1
        # number of p functions registered so far
        self.N = 0
class D_info:
    """Scratch record used by PutD while it collects the five basis
    indices of one d shell; N counts how many have been seen so far."""
    def __init__(self):
        # basis indices of the xy / xz / yz / x2-y2 / 3z2-r2 functions
        self.xy = self.xz = self.yz = 0
        self.x2y2 = self.z2r2 = 0
        # number of d functions registered so far
        self.N = 0
def egodic(nums):
    """Return all permutations of the list *nums* as the rows of a
    float array of shape (len(nums)!, len(nums)).

    Fixes: np.prod replaces np.product (removed in NumPy 2.0) and the
    result array no longer shadows the builtin ``all``.
    """
    if len(nums) == 1:
        return np.array(nums)
    n_rows = np.prod(np.arange(1, len(nums) + 1))  # len(nums)!
    n_cols = len(nums)
    table = np.zeros([n_rows, n_cols])
    for i, n in enumerate(nums):
        sub = np.prod(np.arange(1, len(nums)))  # (len(nums)-1)!
        table[i * sub: (i + 1) * sub, 0] = n
        rest = nums[:]
        rest.remove(n)
        # recurse on the remaining elements for the other columns
        table[i * sub: (i + 1) * sub, 1:] = egodic(rest)
    return table
#PPP = egodic(range(3))
def PutP(index, X, P, T):
    """Register p-orbital basis index *index* in the accumulator *P*;
    once all three p functions of the shell have been seen, write the
    3x3 rotation *X* into the transformation matrix *T* at their
    indices and reset *P*.

    The ordering N==2 -> x, N==0 -> y, N==1 -> z encodes the LCAO
    storage order of the p functions (py, pz, px) -- presumably the
    gpaw convention; confirm against the setups ordering.
    """
    if P.N == 2:
        P.x = index
    if P.N == 0:
        P.y = index
    if P.N == 1:
        P.z = index
    P.N += 1
    if P.N == 3:
        bs = np.array([P.x, P.y, P.z])
        #c = np.array([P.Pxsign, P.Pysign, P.Pzsign])
        #c = np.resize(c, [3, 3])
        #cf = c / c.T
        #ind = np.resize(bs, [3, 3])
        ind = get_matrix_index(bs)
        # scatter the 3x3 rotation into the full matrix
        T[ind.T, ind] = X
        #T[ind.T, ind] = X * cf
        P.__init__()
#DDD = egodic(range(5))
def PutD(index, X, D, T):
    """Register d-orbital basis index *index* in the accumulator *D*;
    once all five d functions of the shell have been seen, build the
    5x5 real-d-orbital rotation induced by the 3x3 rotation *X* and
    write it into *T* at their indices, then reset *D*.

    Each real d orbital is represented as a symmetric 3x3 Cartesian
    tensor; rotating the tensor (X M X^T) and projecting back onto the
    d basis gives the matrix elements below.  The N-order mapping
    (0->xy, 3->xz, 1->yz, 4->x2y2, 2->z2r2) encodes the LCAO storage
    order of the d functions -- confirm against the setups ordering.
    """
    if D.N == 0:
        D.xy = index
    if D.N == 3:
        D.xz = index
    if D.N == 1:
        D.yz = index
    if D.N == 4:
        D.x2y2 = index
    if D.N == 2:
        D.z2r2 = index
    D.N += 1
    if D.N == 5:
        sqrt = np.sqrt
        # Cartesian tensor representations of the real d orbitals,
        # each rotated as D2 = X . D . X^T below.
        Dxy = np.array([[0, 1, 0],
                        [1, 0, 0],
                        [0, 0, 0]])
        D2xy = np.dot(X, Dxy)
        D2xy = np.dot(D2xy, X.T)
        Dxz = np.array([[0, 0, 1],
                        [0, 0, 0],
                        [1, 0, 0]])
        D2xz = np.dot(X, Dxz)
        D2xz = np.dot(D2xz, X.T)
        Dyz = np.array([[0, 0, 0],
                        [0, 0, 1],
                        [0, 1, 0]])
        D2yz = np.dot(X, Dyz)
        D2yz = np.dot(D2yz, X.T)
        Dx2y2 = np.array([[1, 0 , 0],
                          [0, -1, 0],
                          [0, 0, 0]])
        D2x2y2 = np.dot(X, Dx2y2)
        D2x2y2 = np.dot(D2x2y2, X.T)
        Dz2r2 = np.array([[-1, 0, 0],
                          [0, -1, 0],
                          [0, 0, 2]]) / sqrt(3)
        D2z2r2 = np.dot(X, Dz2r2)
        D2z2r2 = np.dot(D2z2r2, X.T)
        # project each rotated tensor back onto the five d components
        T[D.xy, D.xy] = D2xy[0, 1]
        T[D.xz, D.xy] = D2xy[0, 2]
        T[D.yz, D.xy] = D2xy[1, 2]
        T[D.x2y2, D.xy] = (D2xy[0, 0] - D2xy[1, 1]) / 2
        T[D.z2r2, D.xy] = sqrt(3) / 2 * D2xy[2, 2]
        T[D.xy, D.xz] = D2xz[0, 1]
        T[D.xz, D.xz] = D2xz[0, 2]
        T[D.yz, D.xz] = D2xz[1, 2]
        T[D.x2y2, D.xz] = (D2xz[0, 0] - D2xz[1, 1]) / 2
        T[D.z2r2, D.xz] = sqrt(3) / 2 * D2xz[2,2];
        T[D.xy , D.yz] = D2yz[0, 1]
        T[D.xz , D.yz] = D2yz[0, 2]
        T[D.yz , D.yz] = D2yz[1, 2]
        T[D.x2y2, D.yz] = (D2yz[0, 0] - D2yz[1, 1]) / 2
        T[D.z2r2, D.yz] = sqrt(3) / 2 * D2yz[2, 2]
        T[D.xy , D.x2y2] = D2x2y2[0, 1]
        T[D.xz , D.x2y2] = D2x2y2[0, 2]
        T[D.yz , D.x2y2] = D2x2y2[1, 2]
        T[D.x2y2, D.x2y2] = (D2x2y2[0, 0] - D2x2y2[1, 1]) / 2
        T[D.z2r2, D.x2y2] = sqrt(3) / 2 * D2x2y2[2, 2]
        T[D.xy, D.z2r2] = D2z2r2[0, 1]
        T[D.xz, D.z2r2] = D2z2r2[0, 2]
        T[D.yz, D.z2r2] = D2z2r2[1, 2]
        T[D.x2y2, D.z2r2] = (D2z2r2[0, 0] - D2z2r2[1, 1]) / 2
        T[D.z2r2, D.z2r2] = sqrt(3) / 2 * D2z2r2[2, 2]
        D.__init__()
def orbital_matrix_rotate_transformation(X, orbital_indices):
    """Build the nao x nao rotation matrix T for an LCAO basis under
    the 3x3 Cartesian rotation *X*.

    orbital_indices[:, 1] gives the angular momentum of each basis
    function: s orbitals are invariant, p and d shells are rotated via
    PutP/PutD as their full shells are encountered.
    """
    nb = orbital_indices.shape[0]
    assert len(X) == 3
    T = np.zeros([nb, nb])
    P = P_info()
    D = D_info()
    for i in range(nb):
        if orbital_indices[i, 1] == 0:
            T[i, i] = 1  # s orbitals are rotation invariant
        elif orbital_indices[i, 1] == 1:
            PutP(i, X, P, T)
        elif orbital_indices[i, 1] == 2:
            PutD(i, X, D, T)
        else:
            # BUG FIX: was `raise NotImplementError(...)`, which is a
            # NameError at raise time (misspelled exception class).
            raise NotImplementedError('unknown shell name')
    return T
def normalize(r):
    """Return *r* scaled to unit Euclidean length."""
    return r / np.sqrt((r * r).sum())
def vector_to_paramid(r):
    """Build three points forming a 'pyramid' around the direction *r*.

    Two unit vectors r1, r2 orthogonal to r (and to each other --
    presumably; confirm the a2/b2/c2 branches) are constructed from
    whichever component of r is nonzero, and three reference points
    R1..R3 around r are returned as a (3, 3) array.  Used by
    transform_3d to turn a single direction into a full 3-D frame.

    Raises RuntimeError for the zero vector.
    """
    r = normalize(r)
    x, y, z = r
    # pick the construction according to the first nonzero component
    if z!=0:
        a1, b1, c1 = 0, 1, -y/z
        a2, b2, c2 = 1, -x*y/(y**2 + z**2), -x*z/(y**2 + z**2)
    elif y!=0:
        a1, b1, c1 = 1, -x/y, 0
        a2, b2, c2 = -x*z/(y**2 + x**2), -y*z/(y**2 + x**2), 1
    elif x!=0:
        a1, b1, c1 = -z/x, 0, 1
        a2, b2, c2 = -y*z/(x**2 + z**2), 1, -x*y/(x**2 + z**2)
    else:
        raise RuntimeError('The input vector is zero!')
    r1 = np.array([a1, b1, c1])
    r2 = np.array([a2, b2, c2])
    r1 = normalize(r1)
    r2 = normalize(r2)
    R1 = r + r1
    R2 = r - r1 / 2. + r2 / 2.
    R3 = r - r1 / 2. - r2 / 2.
    return np.array([R1, R2, R3])
def transform_3d(rs1, rs2):
    """Find the 3x3 linear map X taking point set rs1 onto rs2.

    With 2 points per set, the single direction is expanded into a
    3-point frame via vector_to_paramid; with 4 points, the three
    difference vectors from the first point are used directly.
    Solves X . RS1^T = RS2^T.
    """
    assert rs1.shape == rs2.shape
    if rs1.shape[0] == 2:
        r1 = rs1[1] - rs1[0]
        r2 = rs2[1] - rs2[0]
        RS1 = vector_to_paramid(r1)
        RS2 = vector_to_paramid(r2)
    elif rs1.shape[0] == 4:
        RS1 = rs1[1:] - rs1[0]
        RS2 = rs2[1:] - rs2[0]
    else:
        raise RuntimeError('Transform atoms indices wrong!')
    X = np.dot(RS2.T, np.linalg.inv(RS1.T))
    return X
def interpolate_2d(mat):
    """Interpolate a 2-D array onto a grid twice as fine using gpaw's
    real-space Transformer (order-3 interpolation).

    The 2-D data is replicated along a dummy first axis of length 10
    so that the 3-D grid machinery can be used; the first fine-grid
    plane is returned.
    """
    from gpaw.grid_descriptor import GridDescriptor
    from gpaw.transformers import Transformer
    nn = 10
    N_c = np.zeros([3], dtype=int)
    N_c[1:] = mat.shape[:2]
    N_c[0] = nn
    bmat = np.resize(mat, N_c)
    gd = GridDescriptor(N_c, N_c)
    finegd = GridDescriptor(N_c * 2, N_c)
    interpolator = Transformer(gd, finegd, 3)
    fine_bmat = finegd.zeros()
    interpolator.apply(bmat, fine_bmat)
    return fine_bmat[0]
def interpolate_array(array, gd, h, di='+'):
    """Spline-interpolate *array* along the z axis onto spacing *h*.

    *array* is 3-D (nx, ny, nz) or 4-D with a leading spin axis;
    *gd* is the target grid descriptor.  di='+' anchors coordinates at
    the left edge, anything else at the right edge (negative indices).
    When *h* is coarser than the source spacing the data is duplicated
    along z so the new grid stays inside the spline's domain.  Returns
    the original array unchanged if scipy is unavailable.
    """
    try:
        from scipy import interpolate
        ip = True
    except ImportError:
        ip = False
    if not ip:
        return array
    dim = len(array.shape)
    assert dim == 3 or dim == 4
    spin_relate = dim == 4
    if h <= gd.h_cv[2, 2]:
        # target spacing is finer: stay within the original z range
        if di == '+':
            x = np.arange(gd.N_c[2]) * gd.h_cv[2, 2]
            xnew = np.arange(gd.N_c[2]) * h
        else:
            x = np.arange(-gd.N_c[2], 0) * gd.h_cv[2, 2]
            xnew = np.arange(-gd.N_c[2], 0) * h
    else:
        # target spacing is coarser: use a doubled source range
        if di == '+':
            x = np.arange(gd.N_c[2] * 2) * gd.h_cv[2, 2]
            xnew = np.arange(gd.N_c[2]) * h
        else:
            x = np.arange(-gd.N_c[2] * 2, 0) * gd.h_cv[2, 2]
            xnew = np.arange(-gd.N_c[2], 0) * h
    # flatten all leading axes so each z line is interpolated once
    if spin_relate:
        ns, nx, ny, nz = array.shape
        array.shape = (ns * nx * ny, nz)
        new_array = gd.zeros(ns, global_array=True)
        new_array.shape = (ns * nx * ny, nz)
    else:
        nx, ny, nz = array.shape
        array.shape = (nx * ny, nz)
        new_array = gd.zeros(global_array=True)
        new_array.shape = (nx * ny, nz)
    if h > gd.h_cv[2, 2]:
        # duplicate data along z to cover the doubled x range above
        array = np.append(array, array, 1)
    for i, line in enumerate(array):
        tck = interpolate.splrep(x, line, s=0)
        new_array[i] = interpolate.splev(xnew, tck, der=0)
    if spin_relate:
        #array.shape = (ns, nx, ny, nz)
        new_array.shape = (ns, nx, ny, nz)
    else:
        #array.shape = (nx, ny, nz)
        new_array.shape = (nx, ny, nz)
    return new_array
def eig_states_norm(orbital, s_mm):
    #normalize orbital to satisfy orbital.T.conj()*SM*orbital=unit
    # Gram-Schmidt-like in-place orthogonalization in the metric s_mm,
    # followed by normalization; the statement order matters (each
    # column is corrected against the already-processed ones).
    norm_error = 1e-10
    ortho_error = 1e-8
    nstates = orbital.shape[1]
    d_mm = np.dot(orbital.T.conj(), s_mm)
    d_mm = np.dot(d_mm, orbital)
    for i in range(1, nstates):
        for j in range(i):
            # project out component of state j from state i when their
            # overlap is above the tolerance
            if abs(d_mm[j ,i]) > ortho_error:
                orbital[:, i] -= orbital[:, j] * d_mm[j, i] / d_mm[j ,j]
    # recompute the overlap after orthogonalization, then normalize
    d_mm = np.dot(orbital.T.conj(), s_mm)
    d_mm = np.dot(d_mm, orbital)
    for i in range(nstates):
        orbital[:, i] /= np.sqrt(d_mm[i, i])
    if orbital.shape[-1] == 0:
        error = 0
    else:
        # residual deviation from the identity overlap, per state
        error = np.max(np.dot(np.dot(orbital.T.conj(), s_mm), orbital) -
                  np.eye(nstates)) / nstates
    if abs(error) > norm_error:
        print 'Warning! Normalization error %f' % error
    return orbital
def shtm(l):
    """Unitary matrix converting complex spherical harmonics of
    angular momentum *l* (ordered m = -l ... +l) to real harmonics,
    with the Condon-Shortley phase convention."""
    n = 2 * l + 1
    u = np.zeros([n, n], complex)
    inv_sqrt2 = 1. / np.sqrt(2)
    for row in range(n):
        mirror = row + 2 * (l - row)  # index of the -m partner
        if row < l:
            # m < 0 rows combine with their +|m| partners
            u[row, row] = 1.j * inv_sqrt2
            u[row, mirror] = -1.j * (-1.)**(l - row) * inv_sqrt2
        elif row == l:
            # m == 0 is unchanged
            u[row, row] = 1.
        else:
            # m > 0 rows
            u[row, row] = (-1.)**(row - l) * inv_sqrt2
            u[row, mirror] = inv_sqrt2
    return u.T
def construct_spherical_transformation_matrix(l_list):
    """Block-diagonal complex-to-real spherical-harmonic transformation
    for an LCAO basis whose shells carry the angular momenta in
    *l_list* (one shtm block per shell, in order)."""
    nao = int(np.sum([2 * l + 1 for l in l_list]))
    full = np.zeros([nao, nao], complex)
    offset = 0
    for l in l_list:
        width = 2 * l + 1
        full[offset:offset + width, offset:offset + width] = shtm(l)
        offset += width
    return full
def aml(ss, l, direction):
    """Apply the angular-momentum operator l_x (direction 0), l_y (1)
    or l_z (2) to the rows of *ss* in the complex spherical-harmonic
    basis (rows ordered m = -l ... +l), using ladder-operator matrix
    elements for l_x and l_y."""
    out = np.zeros(ss.shape, complex)
    n = 2 * l + 1
    for row in range(n):
        m = row - l
        if direction in (0, 1):
            raise_amp = np.sqrt(l * (l + 1.) - m * (m + 1.))
            lower_amp = np.sqrt(l * (l + 1.) - m * (m - 1.))
            if direction == 0:
                # l_x = (l+ + l-) / 2
                c_up, c_dn = 0.5 * raise_amp, 0.5 * lower_amp
            else:
                # l_y = (l+ - l-) / 2i
                c_up, c_dn = -0.5j * raise_amp, 0.5j * lower_amp
            if m + 1 <= l:
                out[row] += c_up * ss[row + 1]
            if m - 1 >= -l:
                out[row] += c_dn * ss[row - 1]
        elif direction == 2:
            # l_z is diagonal with eigenvalue m
            out[row] = m * ss[row]
        else:
            raise RuntimeError('unknown direction %d' % direction)
    return out
def angular_momentum_slice(overlap_slice, l, direction):
    """Given an overlap slice <l m | l_i m'> for one fixed shell l_i
    (shape (nao, 2*l_i + 1), m increasing), return the corresponding
    angular-momentum matrix slice <l m | l_dir | l_i m'> by applying
    aml row by row."""
    nao, width = overlap_slice.shape
    result = np.zeros([nao, width], complex)
    for row in range(nao):
        result[row] = aml(overlap_slice[row], l, direction)
    return result
def cut_grids_side(array, gd, gd0):
    #abstract the grid value from a including-buffer-layer calculation
    #the vaccum buffer layer is fixed on the right side
    #Assume the buffer system has the same domain spliting with the original one
    # Spline-resamples the z axis of the distributed array from grid
    # *gd* (with buffer) onto the smaller grid *gd0*; only the master
    # rank computes, then the result is redistributed.
    from scipy import interpolate
    global_array = gd.collect(array)
    nx, ny, nz = gd.N_c
    if gd.comm.rank == 0:
        global_array.shape = (nx * ny, nz)
    new_array = gd0.zeros()
    global_new_array = gd0.collect(new_array)
    # physical z coordinates of source and destination grid points
    x = np.arange(gd.N_c[2]) * gd.h_cv[2, 2]
    xnew = np.arange(gd0.N_c[2]) * gd0.h_cv[2, 2]
    nz0 = gd0.N_c[2]
    if gd0.comm.rank == 0:
        global_new_array.shape = (nx * ny, nz0)
        for i, line in enumerate(global_array):
            tck = interpolate.splrep(x, line, s=0)
            global_new_array[i] = interpolate.splev(xnew, tck, der=0)
        global_new_array.shape = (nx, ny, nz0)
    gd0.distribute(global_new_array, new_array)
    return new_array
def save_bias_data_file(Lead1, Lead2, Device):
    """Glue the effective potentials of both leads onto the device
    potential (aligned to the device Fermi level) and pickle the
    result, with the collected atomic Hamiltonian corrections, to
    'bias_data1'."""
    import pickle
    ham = Device.calc.hamiltonian
    density = Device.calc.density
    hamL = Lead1.calc.hamiltonian
    hamR = Lead2.calc.hamiltonian
    Ef = Device.calc.get_fermi_level()
    Ef_L = Lead1.calc.get_fermi_level()
    Ef_R = Lead2.calc.get_fermi_level()
    vt_sG = ham.gd.collect(ham.vt_sG)
    vt_sG_L = hamL.gd.collect(hamL.vt_sG)
    vt_sG_R = hamR.gd.collect(hamR.vt_sG)
    # shift lead potentials so their Fermi levels line up with the
    # device Fermi level (converted from eV to Hartree)
    vt_sG_L += (Ef - Ef_L) / Hartree
    vt_sG_R += (Ef - Ef_R) / Hartree
    # stack along z: lead1 | device | lead2
    vt_sG = np.append(vt_sG_L, vt_sG, axis=3)
    vt_sG = np.append(vt_sG, vt_sG_R, axis=3)
    dH_asp = collect_atomic_matrices(ham.dH_asp, ham.setups,
                                     ham.nspins, ham.gd.comm,
                                     density.rank_a)
    # BUG FIX: `file()` is a Python-2-only builtin; use open() with a
    # with-block so the handle is closed even if dump() raises.
    with open('bias_data1', 'wb') as fd:
        pickle.dump(([0.0, 0, 0], vt_sG, dH_asp), fd, 2)
def find(condition, flag=0):
    """Return the indices where *condition* is true.

    flag == 1: return the single matching index as a plain Python int
    (exactly one match is assumed; anything else raises).  Otherwise
    return the full index array.
    """
    if flag == 1:  # return an int
        # BUG FIX: np.int was removed in NumPy 1.24; .item() extracts
        # the single index as a builtin int (and raises if the match
        # is not unique, like the original conversion did).
        return np.nonzero(condition)[0].item()
    else:  # return an array
        return np.nonzero(condition)[0]
def gather_ndarray_list(data, comm):
    #data is a numpy array, maybe has different shape in different cpus
    #this function gather them to the all_data in master, all_data is
    # a list with the lenth world.size, all_data[i] = data {on i}
    # Shapes are exchanged first via an allreduce so the master can
    # allocate correctly-sized receive buffers.  Only rank 0 returns a
    # populated list; other ranks return an empty one.
    all_data = []
    dim = len(data.shape)
    shape_array = np.zeros([comm.size, dim], int)
    shape_array[comm.rank] = data.shape
    comm.sum(shape_array)
    if comm.rank == 0:
        all_data.append(data)
        for i in range(1, comm.size):
            tmp = np.zeros(shape_array[i], dtype=data.dtype)
            comm.receive(tmp, i, 546)
            all_data.append(tmp[:])
    else:
        comm.ssend(data, 0, 546)
    return all_data
def gather_ndarray_dict(data, comm, broadcast=False):
    #data is dict of a numpy array, maybe has different shape in different cpus
    #this function gather them to the all_data in master, all_data is
    # a dict with the lenth world.size
    # Metadata (ndim, owning rank, dtype index) is allreduced in *info*
    # so the master can allocate receive buffers; names travel as
    # strings via send_string/receive_string.  With broadcast=True the
    # gathered dict is redistributed to every rank afterwards.
    all_data = {}
    data_len = np.zeros([comm.size], int)
    data_len[comm.rank] = len(data)
    comm.sum(data_len)
    info = np.zeros([np.sum(data_len), 3], int)
    dtypes = [int, float, complex]
    for i, name in enumerate(data):
        base = np.sum(data_len[:comm.rank])
        info[base + i, 0] = len(data[name].shape)
        info[base + i, 1] = comm.rank
        info[base + i, 2] = dtypes.index(data[name].dtype)
    comm.sum(info)
    if comm.rank == 0:
        for name in data:
            all_data[name] = data[name]
        for i in range(1, comm.size):
            base = np.sum(data_len[:i])
            for j in range(data_len[i]):
                shape = np.zeros([info[base + j, 0]], int)
                dtype = dtypes[info[base + j, 2]]
                name = receive_string(i, comm)
                comm.receive(shape, i, 123)
                tmp = np.zeros(shape, dtype)
                comm.receive(tmp, i, 546)
                all_data[name] = tmp
    else:
        for name in data:
            send_string(name, 0, comm)
            shape = np.array(data[name].shape, int)
            comm.ssend(shape, 0, 123)
            comm.ssend(data[name], 0, 546)
    if broadcast:
        num = np.zeros([1], int)
        if comm.rank == 0:
            num[0] = len(all_data)
            comm.broadcast(num, 0)
            for name in all_data:
                broadcast_string(name, 0, comm)
                shape = np.array(all_data[name].shape, int)
                comm.broadcast(shape, 0)
                comm.broadcast(all_data[name], 0)
        else:
            comm.broadcast(num, 0)
            # NOTE(review): `range(num)` iterates over a length-1 array
            # and `info[i, 0]` indexes by receive order rather than the
            # global info layout -- presumably works because rank 0
            # broadcasts shapes explicitly, but verify both before
            # relying on the broadcast=True path.
            for i in range(num):
                name = broadcast_string(None, 0, comm)
                shape = np.zeros([info[i, 0]], int)
                dtype = dtypes[info[i, 2]]
                comm.broadcast(shape, 0)
                tmp = np.zeros(shape, dtype)
                comm.broadcast(tmp, 0)
                all_data[name] = tmp
    return all_data
|
robwarm/gpaw-symm
|
gpaw/transport/tools.py
|
Python
|
gpl-3.0
| 37,300
|
[
"ASE",
"GPAW"
] |
6ee265ad7b12f873ec5bb4b379a7303b68dae70337ee8c1d7b12bee40e325f9b
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
import os
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca import OrcaContext
from zoo.orca.learn.pytorch import Estimator
from zoo.orca.learn.metrics import Accuracy
from zoo.orca.learn.trigger import EveryEpoch
resource_path = os.path.join(os.path.split(__file__)[0], "../../../resources")
class TestEstimatorForSaveAndLoad(TestCase):
    """Integration test: an Orca PyTorch Estimator trained on MNIST can
    save a checkpoint and restore exactly those parameters later."""
    def setUp(self):
        """ setup any state tied to the execution of the given method in a
        class.  setup_method is invoked for every test method of a class.
        """
        self.sc = init_orca_context(cores=4)
    def tearDown(self):
        """ teardown any state that was previously setup with a setup_method
        call.
        """
        stop_orca_context()
    def test_bigdl_pytorch_estimator_save_and_load(self):
        # Minimal two-layer MLP classifier for MNIST.
        class Network(nn.Module):
            def __init__(self):
                super(Network, self).__init__()
                self.fc1 = nn.Linear(28 * 28, 500)
                self.fc2 = nn.Linear(500, 10)
            def forward(self, x):
                x = x.view(-1, 28 * 28)
                x = F.relu(self.fc1(x))
                x = self.fc2(x)
                return F.log_softmax(x, dim=1)
        model = Network()
        model.train()
        criterion = nn.NLLLoss()
        adam = torch.optim.Adam(model.parameters(), 0.001)
        # NOTE: `dir` shadows the builtin; it is only a local path here.
        dir = "./dataset"
        batch_size = 320
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST(dir, train=True, download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307,), (0.3081,))
                           ])),
            batch_size=batch_size, shuffle=True)
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST(dir, train=False,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307,), (0.3081,))
                           ])),
            batch_size=batch_size, shuffle=False)
        # epoch 1
        est = Estimator.from_torch(model=model, optimizer=adam, loss=criterion,
                                   metrics=[Accuracy()])
        est.fit(data=train_loader, epochs=1, validation_data=test_loader,
                checkpoint_trigger=EveryEpoch())
        paras1 = list(est.get_model().named_parameters())
        est.save("model_epoch_1")
        # epoch 2
        est.fit(data=train_loader, epochs=2, validation_data=test_loader,
                checkpoint_trigger=EveryEpoch())
        paras2 = list(est.get_model().named_parameters())
        est.load("model_epoch_1")
        paras3 = list(est.get_model().named_parameters())
        load_success = 0
        # load must move the weights away from the epoch-2 state ...
        for i in range(len(paras2)):
            name2, para2 = paras2[i]
            name3, para3 = paras3[i]
            if not torch.all(torch.eq(para2, para3)):
                load_success = 1
                break
        if not load_success:
            raise Exception("Load failed. Parameters did not change after loading.")
        # ... and back to exactly the saved epoch-1 state
        for i in range(len(paras1)):
            name1, para1 = paras1[i]
            name3, para3 = paras3[i]
            if not torch.all(torch.eq(para1, para3)):
                raise Exception("After reloading the model," + name1 + "does not match.")
        print("pass")
|
intel-analytics/analytics-zoo
|
pyzoo/test/zoo/orca/learn/jep/test_pytorch_estimator_for_save_load.py
|
Python
|
apache-2.0
| 4,255
|
[
"ORCA"
] |
1ff1f86b3d4095a34d590c60e67de71aa8baee328b9aa954a45feaee52f82c85
|
# Copyright 1999-2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# Patches by Mike Poidinger to support multiple databases.
# Updated by Peter Cock in 2007 to do a better job on BLAST 2.2.15
"""Code for calling standalone BLAST and parsing plain text output (OBSOLETE).
Rather than parsing the human readable plain text BLAST output (which seems to
change with every update to BLAST), we and the NCBI recommend you parse the
XML output instead. The plain text parser in this module still works at the
time of writing, but is considered obsolete and updating it to cope with the
latest versions of BLAST is not a priority for us.
This module also provides code to work with the "legacy" standalone version of
NCBI BLAST, tools blastall, rpsblast and blastpgp via three helper functions of
the same name. These functions are very limited for dealing with the output as
files rather than handles, for which the wrappers in Bio.Blast.Applications are
preferred. Furthermore, the NCBI themselves regard these command line tools as
"legacy", and encourage using the new BLAST+ tools instead. Biopython has
wrappers for these under Bio.Blast.Applications (see the tutorial).
Classes:
LowQualityBlastError Exception that indicates low quality query sequences.
BlastParser Parses output from blast.
BlastErrorParser Parses output and tries to diagnose possible errors.
PSIBlastParser Parses output from psi-blast.
Iterator Iterates over a file of blast results.
_Scanner Scans output from standalone BLAST.
_BlastConsumer Consumes output from blast.
_PSIBlastConsumer Consumes output from psi-blast.
_HeaderConsumer Consumes header information.
_DescriptionConsumer Consumes description information.
_AlignmentConsumer Consumes alignment information.
_HSPConsumer Consumes hsp information.
_DatabaseReportConsumer Consumes database report information.
_ParametersConsumer Consumes parameters information.
Functions:
blastall Execute blastall (OBSOLETE).
blastpgp Execute blastpgp (OBSOLETE).
rpsblast Execute rpsblast (OBSOLETE).
For calling the BLAST command line tools, we encourage you to use the
command line wrappers in Bio.Blast.Applications - the three functions
blastall, blastpgp and rpsblast are considered to be obsolete now, and
are likely to be deprecated and then removed in future releases.
"""
import warnings
warnings.warn("The plain text parser in this module still works at the time of writing, but is considered obsolete and updating it to cope with the latest versions of BLAST is not a priority for us.", PendingDeprecationWarning)
import os
import re
from Bio import File
from Bio.ParserSupport import *
from Bio.Blast import Record
from Bio.Application import _escape_filename
class LowQualityBlastError(Exception):
    """Error caused by running a low quality sequence through BLAST.

    When low quality sequences (like GenBank entries containing only
    stretches of a single nucleotide) are BLASTed, they will result in
    BLAST generating an error and not being able to perform the BLAST
    search.  This error should be raised for the BLAST reports produced
    in this case.
    """
    pass
class ShortQueryBlastError(Exception):
    """Error caused by running a short query sequence through BLAST.

    If the query sequence is too short, BLAST outputs warnings and errors:
    Searching[blastall] WARNING: [000.000] AT1G08320: SetUpBlastSearch failed.
    [blastall] ERROR: [000.000] AT1G08320: Blast:
    [blastall] ERROR: [000.000] AT1G08320: Blast: Query must be at least wordsize
    done

    This exception is raised when that condition is detected.
    """
    pass
class _Scanner:
    """Scan BLAST output from blastall or blastpgp.

    Tested with blastall and blastpgp v2.0.10, v2.0.11

    Methods:
    feed     Feed data into the scanner.
    """
    def feed(self, handle, consumer):
        """S.feed(handle, consumer)

        Feed in a BLAST report for scanning.  handle is a file-like
        object that contains the BLAST report.  consumer is a Consumer
        object that will receive events as the report is scanned.
        """
        if isinstance(handle, File.UndoHandle):
            uhandle = handle
        else:
            uhandle = File.UndoHandle(handle)
        # Try to fast-forward to the beginning of the blast report.
        read_and_call_until(uhandle, consumer.noevent, contains='BLAST')
        # Now scan the BLAST report.
        self._scan_header(uhandle, consumer)
        self._scan_rounds(uhandle, consumer)
        self._scan_database_report(uhandle, consumer)
        self._scan_parameters(uhandle, consumer)

    def _scan_header(self, uhandle, consumer):
        """Scan the report header: version, reference(s), query and database."""
        # BLASTP 2.0.10 [Aug-26-1999]
        #
        #
        # Reference: Altschul, Stephen F., Thomas L. Madden, Alejandro A. Schaf
        # Jinghui Zhang, Zheng Zhang, Webb Miller, and David J. Lipman (1997),
        # "Gapped BLAST and PSI-BLAST: a new generation of protein database sea
        # programs", Nucleic Acids Res. 25:3389-3402.
        #
        # Query= test
        # (140 letters)
        #
        # Database: sdqib40-1.35.seg.fa
        # 1323 sequences; 223,339 total letters
        #
        # ========================================================
        # This next example is from the online version of Blast,
        # note there are TWO references, an RID line, and also
        # the database is BEFORE the query line.
        # Note the possible use of non-ASCII in the author names.
        # ========================================================
        #
        # BLASTP 2.2.15 [Oct-15-2006]
        # Reference: Altschul, Stephen F., Thomas L. Madden, Alejandro A. Sch??ffer,
        # Jinghui Zhang, Zheng Zhang, Webb Miller, and David J. Lipman
        # (1997), "Gapped BLAST and PSI-BLAST: a new generation of
        # protein database search programs", Nucleic Acids Res. 25:3389-3402.
        #
        # Reference: Sch??ffer, Alejandro A., L. Aravind, Thomas L. Madden, Sergei
        # Shavirin, John L. Spouge, Yuri I. Wolf, Eugene V. Koonin, and
        # Stephen F. Altschul (2001), "Improving the accuracy of PSI-BLAST
        # protein database searches with composition-based statistics
        # and other refinements", Nucleic Acids Res. 29:2994-3005.
        #
        # RID: 1166022616-19998-65316425856.BLASTQ1
        #
        #
        # Database: All non-redundant GenBank CDS
        # translations+PDB+SwissProt+PIR+PRF excluding environmental samples
        # 4,254,166 sequences; 1,462,033,012 total letters
        # Query= gi:16127998
        # Length=428
        #
        consumer.start_header()
        read_and_call(uhandle, consumer.version, contains='BLAST')
        read_and_call_while(uhandle, consumer.noevent, blank=1)
        # There might be a <pre> line, for qblast output.
        attempt_read_and_call(uhandle, consumer.noevent, start="<pre>")
        # Read the reference(s)
        while attempt_read_and_call(uhandle,
                                    consumer.reference, start='Reference'):
            # References are normally multiline terminated by a blank line
            # (or, based on the old code, the RID line)
            while 1:
                line = uhandle.readline()
                if is_blank_line(line):
                    consumer.noevent(line)
                    break
                elif line.startswith("RID"):
                    break
                else:
                    # More of the reference
                    consumer.reference(line)
        # Deal with the optional RID: ...
        read_and_call_while(uhandle, consumer.noevent, blank=1)
        attempt_read_and_call(uhandle, consumer.reference, start="RID:")
        read_and_call_while(uhandle, consumer.noevent, blank=1)
        # blastpgp may have a reference for compositional score matrix
        # adjustment (see Bug 2502):
        if attempt_read_and_call(
            uhandle, consumer.reference, start="Reference"):
            read_and_call_until(uhandle, consumer.reference, blank=1)
            read_and_call_while(uhandle, consumer.noevent, blank=1)
        # blastpgp has a Reference for composition-based statistics.
        if attempt_read_and_call(
            uhandle, consumer.reference, start="Reference"):
            read_and_call_until(uhandle, consumer.reference, blank=1)
            read_and_call_while(uhandle, consumer.noevent, blank=1)
        line = uhandle.peekline()
        assert line.strip() != ""
        assert not line.startswith("RID:")
        if line.startswith("Query="):
            # This is an old style query then database...
            # Read the Query lines and the following blank line.
            read_and_call(uhandle, consumer.query_info, start='Query=')
            read_and_call_until(uhandle, consumer.query_info, blank=1)
            read_and_call_while(uhandle, consumer.noevent, blank=1)
            # Read the database lines and the following blank line.
            read_and_call_until(uhandle, consumer.database_info, end='total letters')
            read_and_call(uhandle, consumer.database_info, contains='sequences')
            read_and_call_while(uhandle, consumer.noevent, blank=1)
        elif line.startswith("Database:"):
            # This is a new style database then query...
            read_and_call_until(uhandle, consumer.database_info, end='total letters')
            read_and_call(uhandle, consumer.database_info, contains='sequences')
            read_and_call_while(uhandle, consumer.noevent, blank=1)
            # Read the Query lines and the following blank line.
            # Or, on BLAST 2.2.22+ there is no blank line - need to spot
            # the "... Score E" line instead.
            read_and_call(uhandle, consumer.query_info, start='Query=')
            #read_and_call_until(uhandle, consumer.query_info, blank=1)
            while True:
                line = uhandle.peekline()
                if not line.strip() : break
                if "Score E" in line : break
                # It is more of the query (and its length)
                read_and_call(uhandle, consumer.query_info)
            read_and_call_while(uhandle, consumer.noevent, blank=1)
        else:
            raise ValueError("Invalid header?")
        consumer.end_header()

    def _scan_rounds(self, uhandle, consumer):
        """Scan one or more rounds of descriptions and alignments."""
        # Scan a bunch of rounds.
        # Each round begins with either a "Searching......" line
        # or a 'Score E' line followed by descriptions and alignments.
        # The email server doesn't give the "Searching....." line.
        # If there is no 'Searching.....' line then you'll first see a
        # 'Results from round' line
        while not self._eof(uhandle):
            line = safe_peekline(uhandle)
            if (not line.startswith('Searching') and
                not line.startswith('Results from round') and
                re.search(r"Score +E", line) is None and
                line.find('No hits found') == -1):
                break
            self._scan_descriptions(uhandle, consumer)
            self._scan_alignments(uhandle, consumer)

    def _scan_descriptions(self, uhandle, consumer):
        """Scan the one-line description section (may be absent or empty)."""
        # Searching..................................................done
        # Results from round 2
        #
        #
        # Sc
        # Sequences producing significant alignments: (b
        # Sequences used in model and found again:
        #
        # d1tde_2 3.4.1.4.4 (119-244) Thioredoxin reductase [Escherichia ...
        # d1tcob_ 1.31.1.5.16 Calcineurin regulatory subunit (B-chain) [B...
        # d1symb_ 1.31.1.2.2 Calcyclin (S100) [RAT (RATTUS NORVEGICUS)]
        #
        # Sequences not found previously or not previously below threshold:
        #
        # d1osa__ 1.31.1.5.11 Calmodulin [Paramecium tetraurelia]
        # d1aoza3 2.5.1.3.3 (339-552) Ascorbate oxidase [zucchini (Cucurb...
        #
        # If PSI-BLAST, may also have:
        #
        # CONVERGED!
        consumer.start_descriptions()
        # Read 'Searching'
        # This line seems to be missing in BLASTN 2.1.2 (others?)
        attempt_read_and_call(uhandle, consumer.noevent, start='Searching')
        # blastpgp 2.0.10 from NCBI 9/19/99 for Solaris sometimes crashes here.
        # If this happens, the handle will yield no more information.
        if not uhandle.peekline():
            raise ValueError("Unexpected end of blast report. " + \
                  "Looks suspiciously like a PSI-BLAST crash.")
        # BLASTN 2.2.3 sometimes spews a bunch of warnings and errors here:
        # Searching[blastall] WARNING: [000.000] AT1G08320: SetUpBlastSearch
        # [blastall] ERROR: [000.000] AT1G08320: Blast:
        # [blastall] ERROR: [000.000] AT1G08320: Blast: Query must be at leas
        # done
        # Reported by David Weisman.
        # Check for these error lines and ignore them for now.  Let
        # the BlastErrorParser deal with them.
        line = uhandle.peekline()
        if line.find("ERROR:") != -1 or line.startswith("done"):
            read_and_call_while(uhandle, consumer.noevent, contains="ERROR:")
            read_and_call(uhandle, consumer.noevent, start="done")
        # Check to see if this is PSI-BLAST.
        # If it is, the 'Searching' line will be followed by:
        # (version 2.0.10)
        #     Searching.............................
        #     Results from round 2
        # or (version 2.0.11)
        #     Searching.............................
        #
        #
        #     Results from round 2
        # Skip a bunch of blank lines.
        read_and_call_while(uhandle, consumer.noevent, blank=1)
        # Check for the results line if it's there.
        if attempt_read_and_call(uhandle, consumer.round, start='Results'):
            read_and_call_while(uhandle, consumer.noevent, blank=1)
        # Three things can happen here:
        # 1. line contains 'Score E'
        # 2. line contains "No hits found"
        # 3. no descriptions
        # The first one begins a bunch of descriptions.  The last two
        # indicates that no descriptions follow, and we should go straight
        # to the alignments.
        if not attempt_read_and_call(
            uhandle, consumer.description_header,
            has_re=re.compile(r'Score +E')):
            # Either case 2 or 3.  Look for "No hits found".
            attempt_read_and_call(uhandle, consumer.no_hits,
                                  contains='No hits found')
            try:
                read_and_call_while(uhandle, consumer.noevent, blank=1)
            except ValueError, err:
                if str(err) != "Unexpected end of stream." : raise err
            consumer.end_descriptions()
            # Stop processing.
            return
        # Read the score header lines
        read_and_call(uhandle, consumer.description_header,
                      start='Sequences producing')
        # If PSI-BLAST, read the 'Sequences used in model' line.
        attempt_read_and_call(uhandle, consumer.model_sequences,
                              start='Sequences used in model')
        read_and_call_while(uhandle, consumer.noevent, blank=1)
        # In BLAT, rather than a "No hits found" line, we just
        # get no descriptions (and no alignments). This can be
        # spotted because the next line is the database block:
        if safe_peekline(uhandle).startswith(" Database:"):
            consumer.end_descriptions()
            # Stop processing.
            return
        # Read the descriptions and the following blank lines, making
        # sure that there are descriptions.
        if not uhandle.peekline().startswith('Sequences not found'):
            read_and_call_until(uhandle, consumer.description, blank=1)
            read_and_call_while(uhandle, consumer.noevent, blank=1)
        # If PSI-BLAST, read the 'Sequences not found' line followed
        # by more descriptions.  However, I need to watch out for the
        # case where there were no sequences not found previously, in
        # which case there will be no more descriptions.
        if attempt_read_and_call(uhandle, consumer.nonmodel_sequences,
                                 start='Sequences not found'):
            # Read the descriptions and the following blank lines.
            read_and_call_while(uhandle, consumer.noevent, blank=1)
            l = safe_peekline(uhandle)
            # Brad -- added check for QUERY. On some PSI-BLAST outputs
            # there will be a 'Sequences not found' line followed by no
            # descriptions. Check for this case since the first thing you'll
            # get is a blank line and then 'QUERY'
            if not l.startswith('CONVERGED') and l[0] != '>' \
                   and not l.startswith('QUERY'):
                read_and_call_until(uhandle, consumer.description, blank=1)
                read_and_call_while(uhandle, consumer.noevent, blank=1)
        attempt_read_and_call(uhandle, consumer.converged, start='CONVERGED')
        read_and_call_while(uhandle, consumer.noevent, blank=1)
        consumer.end_descriptions()

    def _scan_alignments(self, uhandle, consumer):
        """Dispatch to pairwise or master-slave alignment scanning."""
        if self._eof(uhandle) : return
        # qblast inserts a helpful line here.
        attempt_read_and_call(uhandle, consumer.noevent, start="ALIGNMENTS")
        # First, check to see if I'm at the database report.
        line = safe_peekline(uhandle)
        if not line:
            # EOF
            return
        elif line.startswith(' Database') or line.startswith("Lambda"):
            return
        elif line[0] == '>':
            # XXX make a better check here between pairwise and masterslave
            self._scan_pairwise_alignments(uhandle, consumer)
        else:
            # XXX put in a check to make sure I'm in a masterslave alignment
            self._scan_masterslave_alignment(uhandle, consumer)

    def _scan_pairwise_alignments(self, uhandle, consumer):
        """Scan consecutive pairwise alignments until a non-title line."""
        while not self._eof(uhandle):
            line = safe_peekline(uhandle)
            if line[0] != '>':
                break
            self._scan_one_pairwise_alignment(uhandle, consumer)

    def _scan_one_pairwise_alignment(self, uhandle, consumer):
        """Scan one title header plus all of its score/alignment (HSP) pairs."""
        if self._eof(uhandle) : return
        consumer.start_alignment()
        self._scan_alignment_header(uhandle, consumer)
        # Scan a bunch of score/alignment pairs.
        while 1:
            if self._eof(uhandle):
                # Shouldn't have issued that _scan_alignment_header event...
                break
            line = safe_peekline(uhandle)
            if not line.startswith(' Score'):
                break
            self._scan_hsp(uhandle, consumer)
        consumer.end_alignment()

    def _scan_alignment_header(self, uhandle, consumer):
        """Scan the multiline '>' title down to (and including) the Length line."""
        # >d1rip__ 2.24.7.1.1 Ribosomal S17 protein [Bacillus
        # stearothermophilus]
        # Length = 81
        #
        # Or, more recently with different white space:
        #
        # >gi|15799684|ref|NP_285696.1| threonine synthase ...
        # gi|15829258|ref|NP_308031.1| threonine synthase
        # ...
        # Length=428
        read_and_call(uhandle, consumer.title, start='>')
        while 1:
            line = safe_readline(uhandle)
            if line.lstrip().startswith('Length =') \
               or line.lstrip().startswith('Length='):
                consumer.length(line)
                break
            elif is_blank_line(line):
                # Check to make sure I haven't missed the Length line
                raise ValueError("I missed the Length in an alignment header")
            consumer.title(line)
        # Older versions of BLAST will have a line with some spaces.
        # Version 2.0.14 (maybe 2.0.13?) and above print a true blank line.
        if not attempt_read_and_call(uhandle, consumer.noevent,
                                     start=' '):
            read_and_call(uhandle, consumer.noevent, blank=1)

    def _scan_hsp(self, uhandle, consumer):
        """Scan one HSP: its score header then its alignment lines."""
        consumer.start_hsp()
        self._scan_hsp_header(uhandle, consumer)
        self._scan_hsp_alignment(uhandle, consumer)
        consumer.end_hsp()

    def _scan_hsp_header(self, uhandle, consumer):
        """Scan the Score/Identities (and optional Strand/Frame) lines."""
        # Score = 22.7 bits (47), Expect = 2.5
        # Identities = 10/36 (27%), Positives = 18/36 (49%)
        # Strand = Plus / Plus
        # Frame = +3
        #
        read_and_call(uhandle, consumer.score, start=' Score')
        read_and_call(uhandle, consumer.identities, start=' Identities')
        # BLASTN
        attempt_read_and_call(uhandle, consumer.strand, start = ' Strand')
        # BLASTX, TBLASTN, TBLASTX
        attempt_read_and_call(uhandle, consumer.frame, start = ' Frame')
        read_and_call(uhandle, consumer.noevent, blank=1)

    def _scan_hsp_alignment(self, uhandle, consumer):
        """Scan the Query/match/Sbjct triplets making up the alignment."""
        # Query: 11 GRGVSACA-------TCDGFFYRNQKVAVIGGGNTAVEEALYLSNIASEVHLIHRRDGF
        # GRGVS+ TC Y + + V GGG+ + EE L + I R+
        # Sbjct: 12 GRGVSSVVRRCIHKPTCKE--YAVKIIDVTGGGSFSAEEVQELREATLKEVDILRKVSG
        #
        # Query: 64 AEKILIKR 71
        # I +K
        # Sbjct: 70 PNIIQLKD 77
        #
        while 1:
            # Blastn adds an extra line filled with spaces before Query
            attempt_read_and_call(uhandle, consumer.noevent, start=' ')
            read_and_call(uhandle, consumer.query, start='Query')
            read_and_call(uhandle, consumer.align, start=' ')
            read_and_call(uhandle, consumer.sbjct, start='Sbjct')
            try:
                read_and_call_while(uhandle, consumer.noevent, blank=1)
            except ValueError, err:
                if str(err) != "Unexpected end of stream." : raise err
                # End of File (well, it looks like it with recent versions
                # of BLAST for multiple queries after the Iterator class
                # has broken up the whole file into chunks).
                break
            line = safe_peekline(uhandle)
            # Alignment continues if I see a 'Query' or the spaces for Blastn.
            if not (line.startswith('Query') or line.startswith(' ')):
                break

    def _scan_masterslave_alignment(self, uhandle, consumer):
        """Scan a master-slave (multiple) alignment block."""
        consumer.start_alignment()
        while 1:
            line = safe_readline(uhandle)
            # Check to see whether I'm finished reading the alignment.
            # This is indicated by 1) database section, 2) next psi-blast
            # round, which can also be a 'Results from round' if no
            # searching line is present
            # patch by chapmanb
            if line.startswith('Searching') or \
               line.startswith('Results from round'):
                uhandle.saveline(line)
                break
            elif line.startswith(' Database'):
                uhandle.saveline(line)
                break
            elif is_blank_line(line):
                consumer.noevent(line)
            else:
                consumer.multalign(line)
        read_and_call_while(uhandle, consumer.noevent, blank=1)
        consumer.end_alignment()

    def _eof(self, uhandle):
        """Return true if the handle is exhausted (treats the scanner's
        "Unexpected end of stream." ValueError as end-of-file)."""
        try:
            line = safe_peekline(uhandle)
        except ValueError, err:
            if str(err) != "Unexpected end of stream." : raise err
            line = ""
        return not line

    def _scan_database_report(self, uhandle, consumer):
        """Scan the database report section (database stats, Lambda/K/H)."""
        # Database: sdqib40-1.35.seg.fa
        # Posted date: Nov 1, 1999 4:25 PM
        # Number of letters in database: 223,339
        # Number of sequences in database: 1323
        #
        # Lambda K H
        # 0.322 0.133 0.369
        #
        # Gapped
        # Lambda K H
        # 0.270 0.0470 0.230
        #
        ##########################################
        # Or, more recently Blast 2.2.15 gives less blank lines
        ##########################################
        # Database: All non-redundant GenBank CDS translations+PDB+SwissProt+PIR+PRF excluding
        # environmental samples
        # Posted date: Dec 12, 2006 5:51 PM
        # Number of letters in database: 667,088,753
        # Number of sequences in database: 2,094,974
        # Lambda K H
        # 0.319 0.136 0.395
        # Gapped
        # Lambda K H
        # 0.267 0.0410 0.140
        if self._eof(uhandle) : return
        consumer.start_database_report()
        # Subset of the database(s) listed below
        # Number of letters searched: 562,618,960
        # Number of sequences searched: 228,924
        if attempt_read_and_call(uhandle, consumer.noevent, start=" Subset"):
            read_and_call(uhandle, consumer.noevent, contains="letters")
            read_and_call(uhandle, consumer.noevent, contains="sequences")
            read_and_call(uhandle, consumer.noevent, start=" ")
        # Sameet Mehta reported seeing output from BLASTN 2.2.9 that
        # was missing the "Database" stanza completely.
        while attempt_read_and_call(uhandle, consumer.database,
                                    start=' Database'):
            # BLAT output ends abruptly here, without any of the other
            # information.  Check to see if this is the case.  If so,
            # then end the database report here gracefully.
            if not uhandle.peekline().strip() \
               or uhandle.peekline().startswith("BLAST"):
                consumer.end_database_report()
                return
            # Database can span multiple lines.
            read_and_call_until(uhandle, consumer.database, start=' Posted')
            read_and_call(uhandle, consumer.posted_date, start=' Posted')
            read_and_call(uhandle, consumer.num_letters_in_database,
                          start=' Number of letters')
            read_and_call(uhandle, consumer.num_sequences_in_database,
                          start=' Number of sequences')
            # There may not be a line starting with spaces...
            attempt_read_and_call(uhandle, consumer.noevent, start=' ')
            line = safe_readline(uhandle)
            uhandle.saveline(line)
            if line.find('Lambda') != -1:
                break
        read_and_call(uhandle, consumer.noevent, start='Lambda')
        read_and_call(uhandle, consumer.ka_params)
        # This blank line is optional:
        attempt_read_and_call(uhandle, consumer.noevent, blank=1)
        # not BLASTP
        attempt_read_and_call(uhandle, consumer.gapped, start='Gapped')
        # not TBLASTX
        if attempt_read_and_call(uhandle, consumer.noevent, start='Lambda'):
            read_and_call(uhandle, consumer.ka_params_gap)
        # Blast 2.2.4 can sometimes skip the whole parameter section.
        # Thus, I need to be careful not to read past the end of the
        # file.
        try:
            read_and_call_while(uhandle, consumer.noevent, blank=1)
        except ValueError, x:
            if str(x) != "Unexpected end of stream.":
                raise
        consumer.end_database_report()

    def _scan_parameters(self, uhandle, consumer):
        """Scan the trailing parameters section (matrix, penalties, cutoffs)."""
        # Matrix: BLOSUM62
        # Gap Penalties: Existence: 11, Extension: 1
        # Number of Hits to DB: 50604
        # Number of Sequences: 1323
        # Number of extensions: 1526
        # Number of successful extensions: 6
        # Number of sequences better than 10.0: 5
        # Number of HSP's better than 10.0 without gapping: 5
        # Number of HSP's successfully gapped in prelim test: 0
        # Number of HSP's that attempted gapping in prelim test: 1
        # Number of HSP's gapped (non-prelim): 5
        # length of query: 140
        # length of database: 223,339
        # effective HSP length: 39
        # effective length of query: 101
        # effective length of database: 171,742
        # effective search space: 17345942
        # effective search space used: 17345942
        # T: 11
        # A: 40
        # X1: 16 ( 7.4 bits)
        # X2: 38 (14.8 bits)
        # X3: 64 (24.9 bits)
        # S1: 41 (21.9 bits)
        # S2: 42 (20.8 bits)
        ##########################################
        # Or, more recently Blast(x) 2.2.15 gives
        ##########################################
        # Matrix: BLOSUM62
        # Gap Penalties: Existence: 11, Extension: 1
        # Number of Sequences: 4535438
        # Number of Hits to DB: 2,588,844,100
        # Number of extensions: 60427286
        # Number of successful extensions: 126433
        # Number of sequences better than 2.0: 30
        # Number of HSP's gapped: 126387
        # Number of HSP's successfully gapped: 35
        # Length of query: 291
        # Length of database: 1,573,298,872
        # Length adjustment: 130
        # Effective length of query: 161
        # Effective length of database: 983,691,932
        # Effective search space: 158374401052
        # Effective search space used: 158374401052
        # Neighboring words threshold: 12
        # Window for multiple hits: 40
        # X1: 16 ( 7.3 bits)
        # X2: 38 (14.6 bits)
        # X3: 64 (24.7 bits)
        # S1: 41 (21.7 bits)
        # S2: 32 (16.9 bits)

        # Blast 2.2.4 can sometimes skip the whole parameter section.
        # BLAT also skips the whole parameter section.
        # Thus, check to make sure that the parameter section really
        # exists.
        if not uhandle.peekline().strip():
            return
        # BLASTN 2.2.9 looks like it reverses the "Number of Hits" and
        # "Number of Sequences" lines.
        consumer.start_parameters()
        # Matrix line may be missing in BLASTN 2.2.9
        attempt_read_and_call(uhandle, consumer.matrix, start='Matrix')
        # not TBLASTX
        attempt_read_and_call(uhandle, consumer.gap_penalties, start='Gap')
        attempt_read_and_call(uhandle, consumer.num_sequences,
                              start='Number of Sequences')
        attempt_read_and_call(uhandle, consumer.num_hits,
                              start='Number of Hits')
        attempt_read_and_call(uhandle, consumer.num_sequences,
                              start='Number of Sequences')
        attempt_read_and_call(uhandle, consumer.num_extends,
                              start='Number of extensions')
        attempt_read_and_call(uhandle, consumer.num_good_extends,
                              start='Number of successful')
        attempt_read_and_call(uhandle, consumer.num_seqs_better_e,
                              start='Number of sequences')
        # not BLASTN, TBLASTX
        if attempt_read_and_call(uhandle, consumer.hsps_no_gap,
                                 start="Number of HSP's better"):
            # BLASTN 2.2.9
            if attempt_read_and_call(uhandle, consumer.noevent,
                                     start="Number of HSP's gapped:"):
                read_and_call(uhandle, consumer.noevent,
                              start="Number of HSP's successfully")
                # This is omitted in 2.2.15
                attempt_read_and_call(uhandle, consumer.noevent,
                                      start="Number of extra gapped extensions")
            else:
                read_and_call(uhandle, consumer.hsps_prelim_gapped,
                              start="Number of HSP's successfully")
                read_and_call(uhandle, consumer.hsps_prelim_gap_attempted,
                              start="Number of HSP's that")
                read_and_call(uhandle, consumer.hsps_gapped,
                              start="Number of HSP's gapped")
        # e.g. BLASTX 2.2.15 where the "better" line is missing
        elif attempt_read_and_call(uhandle, consumer.noevent,
                                   start="Number of HSP's gapped"):
            read_and_call(uhandle, consumer.noevent,
                          start="Number of HSP's successfully")
        # not in blastx 2.2.1
        attempt_read_and_call(uhandle, consumer.query_length,
                              has_re=re.compile(r"[Ll]ength of query"))
        # Not in BLASTX 2.2.22+
        attempt_read_and_call(uhandle, consumer.database_length,
                              has_re=re.compile(r"[Ll]ength of \s*[Dd]atabase"))
        # BLASTN 2.2.9
        attempt_read_and_call(uhandle, consumer.noevent,
                              start="Length adjustment")
        attempt_read_and_call(uhandle, consumer.effective_hsp_length,
                              start='effective HSP')
        # Not in blastx 2.2.1
        attempt_read_and_call(
            uhandle, consumer.effective_query_length,
            has_re=re.compile(r'[Ee]ffective length of query'))
        # This is not in BLASTP 2.2.15
        attempt_read_and_call(
            uhandle, consumer.effective_database_length,
            has_re=re.compile(r'[Ee]ffective length of \s*[Dd]atabase'))
        # Not in blastx 2.2.1, added a ':' to distinguish between
        # this and the 'effective search space used' line
        attempt_read_and_call(
            uhandle, consumer.effective_search_space,
            has_re=re.compile(r'[Ee]ffective search space:'))
        # Does not appear in BLASTP 2.0.5
        attempt_read_and_call(
            uhandle, consumer.effective_search_space_used,
            has_re=re.compile(r'[Ee]ffective search space used'))
        # BLASTX, TBLASTN, TBLASTX
        attempt_read_and_call(uhandle, consumer.frameshift, start='frameshift')
        # not in BLASTN 2.2.9
        attempt_read_and_call(uhandle, consumer.threshold, start='T')
        # In BLASTX 2.2.15 replaced by: "Neighboring words threshold: 12"
        attempt_read_and_call(uhandle, consumer.threshold, start='Neighboring words threshold')
        # not in BLASTX 2.2.15
        attempt_read_and_call(uhandle, consumer.window_size, start='A')
        # get this instead: "Window for multiple hits: 40"
        attempt_read_and_call(uhandle, consumer.window_size, start='Window for multiple hits')
        # not in BLASTX 2.2.22+
        attempt_read_and_call(uhandle, consumer.dropoff_1st_pass, start='X1')
        # not TBLASTN
        attempt_read_and_call(uhandle, consumer.gap_x_dropoff, start='X2')
        # not BLASTN, TBLASTX
        attempt_read_and_call(uhandle, consumer.gap_x_dropoff_final,
                              start='X3')
        # not TBLASTN
        attempt_read_and_call(uhandle, consumer.gap_trigger, start='S1')
        # not in blastx 2.2.1
        # first we make sure we have additional lines to work with, if
        # not then the file is done and we don't have a final S2
        if not is_blank_line(uhandle.peekline(), allow_spaces=1):
            read_and_call(uhandle, consumer.blast_cutoff, start='S2')
        consumer.end_parameters()
class BlastParser(AbstractParser):
    """Parses plain text BLAST output into a Record.Blast object."""

    def __init__(self):
        """Initialise the scanner/consumer pair used for parsing."""
        self._scanner = _Scanner()
        self._consumer = _BlastConsumer()

    def parse(self, handle):
        """Parse the BLAST report in handle, returning a Record.Blast."""
        self._scanner.feed(handle, self._consumer)
        return self._consumer.data
class PSIBlastParser(AbstractParser):
    """Parses plain text PSI-BLAST output into a Record.PSIBlast object."""

    def __init__(self):
        """Initialise the scanner/consumer pair used for parsing."""
        self._scanner = _Scanner()
        self._consumer = _PSIBlastConsumer()

    def parse(self, handle):
        """Parse the PSI-BLAST report in handle, returning a Record.PSIBlast."""
        self._scanner.feed(handle, self._consumer)
        return self._consumer.data
class _HeaderConsumer:
    """Consume header events from the scanner, building a Record.Header."""

    def start_header(self):
        """Begin a fresh header record."""
        self._header = Record.Header()

    def version(self, line):
        """Handle the version line, e.g. 'BLASTP 2.0.10 [Aug-26-1999]'."""
        fields = line.split()
        self._header.application = fields[0]
        self._header.version = fields[1]
        if len(fields) > 2:
            # Strip the surrounding brackets from the date.  The date is
            # missing entirely in the new C++ output from blastx 2.2.22+,
            # which just gives "BLASTX 2.2.22+\n".
            self._header.date = fields[2][1:-1]

    def reference(self, line):
        """Accumulate the (possibly multiline) Reference text."""
        if line.startswith('Reference: '):
            self._header.reference = line[11:]
        else:
            self._header.reference += line

    def query_info(self, line):
        """Handle the query name, its continuation lines, and its length."""
        if line.startswith('Query= '):
            # First line of the query block.
            self._header.query = line[7:].lstrip()
        elif line.startswith('Length='):
            # New style way to give the query length in BLAST 2.2.22+
            # (the C++ code).
            self._header.query_letters = _safe_int(line[7:].strip())
        elif not line.startswith(' '):
            # Unindented line: continuation of the query description.
            self._header.query += line
        else:
            # Hope it is the old style way to give the query length:
            letters, = _re_search(
                r"([0-9,]+) letters", line,
                "I could not find the number of letters in line\n%s" % line)
            self._header.query_letters = _safe_int(letters)

    def database_info(self, line):
        """Handle the database name (possibly multiline) and its statistics."""
        line = line.rstrip()
        if line.startswith('Database: '):
            self._header.database = line[10:]
        elif line.endswith('total letters'):
            # Statistics line, e.g. "1323 sequences; 223,339 total letters".
            sequences, letters = _re_search(
                r"([0-9,]+) sequences; ([0-9,-]+) total letters", line,
                "I could not find the sequences and letters in line\n%s" %line)
            self._header.database_sequences = _safe_int(sequences)
            self._header.database_letters = _safe_int(letters)
        else:
            # Continuation of a multi line database description; join the
            # pieces with a single space.
            if self._header.database:
                self._header.database += " " + line.strip()
            else:
                self._header.database = line.strip()

    def end_header(self):
        """Finish the header, trimming trailing newlines."""
        self._header.reference = self._header.reference.rstrip()
        self._header.query = self._header.query.rstrip()
class _DescriptionConsumer:
    """Consume description events, collecting Record.Description objects."""

    def start_descriptions(self):
        """Reset all state for a new description section."""
        self._descriptions = []
        self._model_sequences = []
        self._nonmodel_sequences = []
        self._converged = 0
        self._type = None
        self._roundnum = None
        # Does the description line contain an N value?
        self.__has_n = 0

    def description_header(self, line):
        """Note whether the column header ends with an 'N' column."""
        if line.startswith('Sequences producing'):
            if line.split()[-1] == 'N':
                self.__has_n = 1

    def description(self, line):
        """Parse one description line into the appropriate bucket."""
        parsed = self._parse(line)
        if self._type == 'model':
            self._model_sequences.append(parsed)
        elif self._type == 'nonmodel':
            self._nonmodel_sequences.append(parsed)
        else:
            self._descriptions.append(parsed)

    def model_sequences(self, line):
        """Subsequent descriptions belong to the PSI-BLAST model set."""
        self._type = 'model'

    def nonmodel_sequences(self, line):
        """Subsequent descriptions are new (non-model) PSI-BLAST hits."""
        self._type = 'nonmodel'

    def converged(self, line):
        """Record that PSI-BLAST reported convergence."""
        self._converged = 1

    def no_hits(self, line):
        """Nothing to record for a 'No hits found' line."""
        pass

    def round(self, line):
        """Record the PSI-BLAST round number."""
        if not line.startswith('Results from round'):
            raise ValueError("I didn't understand the round line\n%s" % line)
        self._roundnum = _safe_int(line[18:].strip())

    def end_descriptions(self):
        """Nothing to finalise."""
        pass

    def _parse(self, description_line):
        """Split one description line into title, score, e-value (and N)."""
        line = description_line  # for convenience
        dh = Record.Description()
        # I need to separate the score and p-value from the title.
        # sp|P21297|FLBT_CAUCR FLBT PROTEIN [snip] 284 7e-77
        # sp|P21297|FLBT_CAUCR FLBT PROTEIN [snip] 284 7e-77 1
        # special cases to handle:
        # - title must be preserved exactly (including whitespaces)
        # - score could be equal to e-value (not likely, but what if??)
        # - sometimes there's an "N" score of '1'.
        cols = line.split()
        if len(cols) < 3:
            raise ValueError(
                "Line does not appear to contain description:\n%s" % line)
        # Walk back from the end of the line so that the title (which may
        # itself contain whitespace) is preserved exactly.
        if self.__has_n:
            i = line.rfind(cols[-1])        # find start of N
            i = line.rfind(cols[-2], 0, i)  # find start of p-value
            i = line.rfind(cols[-3], 0, i)  # find start of score
            dh.title = line[:i].rstrip()
            dh.score = _safe_int(cols[-3])
            dh.e = _safe_float(cols[-2])
            dh.num_alignments = _safe_int(cols[-1])
        else:
            i = line.rfind(cols[-1])        # find start of p-value
            i = line.rfind(cols[-2], 0, i)  # find start of score
            dh.title = line[:i].rstrip()
            dh.score = _safe_int(cols[-2])
            dh.e = _safe_float(cols[-1])
            dh.num_alignments = _safe_int(1)
        return dh
class _AlignmentConsumer:
    # This is a little bit tricky. An alignment can either be a
    # pairwise alignment or a multiple alignment. Since it's difficult
    # to know a-priori which one the blast record will contain, I'm going
    # to make one class that can parse both of them.
    def start_alignment(self):
        """Create fresh Alignment and MultipleAlignment records to fill in."""
        self._alignment = Record.Alignment()
        self._multiple_alignment = Record.MultipleAlignment()
    def title(self, line):
        """Accumulate the (possibly wrapped over several lines) hit title."""
        if self._alignment.title:
            # Continuation line: rejoin with a single space.
            self._alignment.title += " "
        self._alignment.title += line.strip()
    def length(self, line):
        """Parse the subject length line, e.g. "Length = 81" or "Length=428"."""
        #e.g. "Length = 81" or more recently, "Length=428"
        parts = line.replace(" ","").split("=")
        assert len(parts)==2, "Unrecognised format length line"
        self._alignment.length = parts[1]
        self._alignment.length = _safe_int(self._alignment.length)
    def multalign(self, line):
        """Parse one row of a PSI-BLAST style multiple alignment block.

        On the first row (the QUERY line) the fixed column layout of the
        block is measured and cached on self; subsequent rows are sliced
        using those cached offsets.
        """
        # Standalone version uses 'QUERY', while WWW version uses blast_tmp.
        if line.startswith('QUERY') or line.startswith('blast_tmp'):
            # If this is the first line of the multiple alignment,
            # then I need to figure out how the line is formatted.
            # Format of line is:
            # QUERY 1   acttg...gccagaggtggtttattcagtctccataagagaggggacaaacg 60
            try:
                name, start, seq, end = line.split()
            except ValueError:
                raise ValueError("I do not understand the line\n%s" % line)
            # Record the character offsets/widths of each column so the
            # remaining rows of this block can be sliced identically.
            self._start_index = line.index(start, len(name))
            self._seq_index = line.index(seq,
                                         self._start_index+len(start))
            # subtract 1 for the space
            self._name_length = self._start_index - 1
            self._start_length = self._seq_index - self._start_index - 1
            self._seq_length = line.rfind(end) - self._seq_index - 1
            #self._seq_index = line.index(seq)
            ## subtract 1 for the space
            #self._seq_length = line.rfind(end) - self._seq_index - 1
            #self._start_index = line.index(start)
            #self._start_length = self._seq_index - self._start_index - 1
            #self._name_length = self._start_index
        # Extract the information from the line
        name = line[:self._name_length]
        name = name.rstrip()
        start = line[self._start_index:self._start_index+self._start_length]
        start = start.rstrip()
        if start:
            start = _safe_int(start)
        end = line[self._seq_index+self._seq_length:].rstrip()
        if end:
            end = _safe_int(end)
        seq = line[self._seq_index:self._seq_index+self._seq_length].rstrip()
        # right pad the sequence with spaces if necessary
        if len(seq) < self._seq_length:
            seq = seq + ' '*(self._seq_length-len(seq))
        # I need to make sure the sequence is aligned correctly with the query.
        # First, I will find the length of the query.  Then, if necessary,
        # I will pad my current sequence with spaces so that they will line
        # up correctly.
        # Two possible things can happen:
        # QUERY
        # 504
        #
        # QUERY
        # 403
        #
        # Sequence 504 will need padding at the end.  Since I won't know
        # this until the end of the alignment, this will be handled in
        # end_alignment.
        # Sequence 403 will need padding before being added to the alignment.
        align = self._multiple_alignment.alignment  # for convenience
        align.append((name, start, seq, end))
        # This is old code that tried to line up all the sequences
        # in a multiple alignment by using the sequence title's as
        # identifiers.  The problem with this is that BLAST assigns
        # different HSP's from the same sequence the same id.  Thus,
        # in one alignment block, there may be multiple sequences with
        # the same id.  I'm not sure how to handle this, so I'm not
        # going to.
        # # If the sequence is the query, then just add it.
        # if name == 'QUERY':
        #     if len(align) == 0:
        #         align.append((name, start, seq))
        #     else:
        #         aname, astart, aseq = align[0]
        #         if name != aname:
        #             raise ValueError, "Query is not the first sequence"
        #         aseq = aseq + seq
        #         align[0] = aname, astart, aseq
        # else:
        #     if len(align) == 0:
        #         raise ValueError, "I could not find the query sequence"
        #     qname, qstart, qseq = align[0]
        #
        #     # Now find my sequence in the multiple alignment.
        #     for i in range(1, len(align)):
        #         aname, astart, aseq = align[i]
        #         if name == aname:
        #             index = i
        #             break
        #     else:
        #         # If I couldn't find it, then add a new one.
        #         align.append((None, None, None))
        #         index = len(align)-1
        #         # Make sure to left-pad it.
        #         aname, astart, aseq = name, start, ' '*(len(qseq)-len(seq))
        #
        #     if len(qseq) != len(aseq) + len(seq):
        #         # If my sequences are shorter than the query sequence,
        #         # then I will need to pad some spaces to make them line up.
        #         # Since I've already right padded seq, that means aseq
        #         # must be too short.
        #         aseq = aseq + ' '*(len(qseq)-len(aseq)-len(seq))
        #     aseq = aseq + seq
        #     if astart is None:
        #         astart = start
        #     align[index] = aname, astart, aseq
    def end_alignment(self):
        """Finalize the alignment and drop the per-block layout offsets."""
        # Remove trailing newlines
        if self._alignment:
            self._alignment.title = self._alignment.title.rstrip()
        # This code is also obsolete.  See note above.
        # If there's a multiple alignment, I will need to make sure
        # all the sequences are aligned.  That is, I may need to
        # right-pad the sequences.
        # if self._multiple_alignment is not None:
        #     align = self._multiple_alignment.alignment
        #     seqlen = None
        #     for i in range(len(align)):
        #         name, start, seq = align[i]
        #         if seqlen is None:
        #             seqlen = len(seq)
        #         else:
        #             if len(seq) < seqlen:
        #                 seq = seq + ' '*(seqlen - len(seq))
        #                 align[i] = name, start, seq
        #             elif len(seq) > seqlen:
        #                 raise ValueError, \
        #                       "Sequence %s is longer than the query" % name
        # Clean up some variables, if they exist.
        # The offsets only exist if a multiple alignment block was seen;
        # AttributeError on the first del means none were set, so skip all.
        try:
            del self._seq_index
            del self._seq_length
            del self._start_index
            del self._start_length
            del self._name_length
        except AttributeError:
            pass
class _HSPConsumer:
    """Consumer that fills in a Record.HSP from a plain-text BLAST report.

    Parses the "Score =", "Identities =", "Strand =", "Frame =" lines and
    the Query/match/Sbjct rows of one high-scoring segment pair (HSP).
    """
    def start_hsp(self):
        self._hsp = Record.HSP()

    def score(self, line):
        """Parse bit score, raw score, number of alignments and E-value."""
        self._hsp.bits, self._hsp.score = _re_search(
            r"Score =\s*([0-9.e+]+) bits \(([0-9]+)\)", line,
            "I could not find the score in line\n%s" % line)
        self._hsp.score = _safe_float(self._hsp.score)
        self._hsp.bits = _safe_float(self._hsp.bits)

        # The number in Expect(N) is optional; default to one alignment.
        x, y = _re_search(
            r"Expect\(?(\d*)\)? = +([0-9.e\-|\+]+)", line,
            "I could not find the expect in line\n%s" % line)
        if x:
            self._hsp.num_alignments = _safe_int(x)
        else:
            self._hsp.num_alignments = 1
        self._hsp.expect = _safe_float(y)

    def identities(self, line):
        """Parse Identities plus the optional Positives and Gaps counts."""
        x, y = _re_search(
            r"Identities = (\d+)\/(\d+)", line,
            "I could not find the identities in line\n%s" % line)
        self._hsp.identities = _safe_int(x), _safe_int(y)
        self._hsp.align_length = _safe_int(y)

        if line.find('Positives') != -1:
            x, y = _re_search(
                r"Positives = (\d+)\/(\d+)", line,
                "I could not find the positives in line\n%s" % line)
            self._hsp.positives = _safe_int(x), _safe_int(y)
            # All three ratios share the same denominator (alignment length).
            assert self._hsp.align_length == _safe_int(y)

        if line.find('Gaps') != -1:
            x, y = _re_search(
                r"Gaps = (\d+)\/(\d+)", line,
                "I could not find the gaps in line\n%s" % line)
            self._hsp.gaps = _safe_int(x), _safe_int(y)
            assert self._hsp.align_length == _safe_int(y)

    def strand(self, line):
        """Parse e.g. "Strand = Plus / Minus" into a two-tuple."""
        self._hsp.strand = _re_search(
            r"Strand = (\w+) / (\w+)", line,
            "I could not find the strand in line\n%s" % line)

    def frame(self, line):
        """Parse the reading frame(s); one- and two-frame forms both occur."""
        # Frame can be in formats:
        # Frame = +1
        # Frame = +2 / +2
        if line.find('/') != -1:
            self._hsp.frame = _re_search(
                r"Frame = ([-+][123]) / ([-+][123])", line,
                "I could not find the frame in line\n%s" % line)
        else:
            self._hsp.frame = _re_search(
                r"Frame = ([-+][123])", line,
                "I could not find the frame in line\n%s" % line)

    # Match a space, if one is available.  Masahir Ishikawa found a
    # case where there's no space between the start and the sequence:
    # Query: 100tt 101
    # line below modified by Yair Benita, Sep 2004
    # Note that the colon is not always present. 2006
    _query_re = re.compile(r"Query(:?) \s*(\d+)\s*(.+) (\d+)")

    def query(self, line):
        """Parse a Query row; also record where the sequence column starts."""
        m = self._query_re.search(line)
        if m is None:
            raise ValueError("I could not find the query in line\n%s" % line)

        # line below modified by Yair Benita, Sep 2004.
        # added the end attribute for the query
        colon, start, seq, end = m.groups()
        self._hsp.query = self._hsp.query + seq
        if self._hsp.query_start is None:
            self._hsp.query_start = _safe_int(start)

        # line below added by Yair Benita, Sep 2004.
        # added the end attribute for the query
        self._hsp.query_end = _safe_int(end)

        # Get index for sequence start (regular expression element 3);
        # align() and sbjct() use these to slice their rows at the same
        # columns as this Query row.
        self._query_start_index = m.start(3)
        self._query_len = len(seq)

    def align(self, line):
        """Parse the match (mid) row, padding it to the query's length."""
        seq = line[self._query_start_index:].rstrip()
        if len(seq) < self._query_len:
            # Make sure the alignment is the same length as the query
            seq = seq + ' ' * (self._query_len-len(seq))
        elif len(seq) > self._query_len:
            # Bug fix: this comparison was previously "<" (a duplicate of
            # the branch above), which made the error unreachable.  A match
            # row longer than the query row indicates corrupt input.
            raise ValueError("Match is longer than the query in line\n%s" \
                    % line)
        self._hsp.match = self._hsp.match + seq

    # To match how we do the query, cache the regular expression.
    # Note that the colon is not always present.
    _sbjct_re = re.compile(r"Sbjct(:?) \s*(\d+)\s*(.+) (\d+)")

    def sbjct(self, line):
        """Parse a Sbjct row; must be column-aligned with the Query row."""
        m = self._sbjct_re.search(line)
        if m is None:
            raise ValueError("I could not find the sbjct in line\n%s" % line)
        colon, start, seq, end = m.groups()
        #mikep 26/9/00
        #On occasion, there is a blast hit with no subject match
        #so far, it only occurs with 1-line short "matches"
        #I have decided to let these pass as they appear
        if not seq.strip():
            seq = ' ' * self._query_len
        self._hsp.sbjct = self._hsp.sbjct + seq
        if self._hsp.sbjct_start is None:
            self._hsp.sbjct_start = _safe_int(start)

        self._hsp.sbjct_end = _safe_int(end)
        if len(seq) != self._query_len:
            raise ValueError( \
                  "QUERY and SBJCT sequence lengths don't match in line\n%s" \
                  % line)

        del self._query_start_index   # clean up unused variables
        del self._query_len

    def end_hsp(self):
        pass
class _DatabaseReportConsumer:
    """Consumer for the database report section of a plain-text BLAST report."""

    def start_database_report(self):
        self._dr = Record.DatabaseReport()

    def database(self, line):
        """Record a database name, gluing continuation lines onto the last one."""
        match = re.search(r"Database: (.+)$", line)
        if match:
            self._dr.database_name.append(match.group(1))
        elif self._dr.database_name:
            # No "Database:" prefix, so this continues the previous name.
            previous = self._dr.database_name[-1]
            self._dr.database_name[-1] = "%s%s" % (previous, line.strip())

    def posted_date(self, line):
        date = _re_search(
            r"Posted date:\s*(.+)$", line,
            "I could not find the posted date in line\n%s" % line)
        self._dr.posted_date.append(date)

    def num_letters_in_database(self, line):
        letters, = _get_cols(
            line, (-1,), ncols=6, expected={2:"letters", 4:"database:"})
        self._dr.num_letters_in_database.append(_safe_int(letters))

    def num_sequences_in_database(self, line):
        sequences, = _get_cols(
            line, (-1,), ncols=6, expected={2:"sequences", 4:"database:"})
        self._dr.num_sequences_in_database.append(_safe_int(sequences))

    def ka_params(self, line):
        # Karlin-Altschul parameters, whitespace separated.
        self._dr.ka_params = map(_safe_float, line.split())

    def gapped(self, line):
        self._dr.gapped = 1

    def ka_params_gap(self, line):
        # Gapped Karlin-Altschul parameters.
        self._dr.ka_params_gap = map(_safe_float, line.split())

    def end_database_report(self):
        pass
class _ParametersConsumer:
    """Consumer for the statistics/parameters footer of a plain-text report.

    Each method handles one line type; the column positions passed to
    _get_cols encode the exact expected layout of that line.
    """

    def start_parameters(self):
        self._params = Record.Parameters()

    def matrix(self, line):
        # Line looks like "Matrix: BLOSUM62"; skip the fixed 8-char prefix.
        self._params.matrix = line[8:].rstrip()

    def gap_penalties(self, line):
        cols = _get_cols(
            line, (3, 5), ncols=6, expected={2:"Existence:", 4:"Extension:"})
        self._params.gap_penalties = map(_safe_float, cols)

    def num_hits(self, line):
        # Two-pass (old) reports use a wider line layout than one-pass ones.
        if line.find('1st pass') != -1:
            value, = _get_cols(line, (-4,), ncols=11, expected={2:"Hits"})
        else:
            value, = _get_cols(line, (-1,), ncols=6, expected={2:"Hits"})
        self._params.num_hits = _safe_int(value)

    def num_sequences(self, line):
        if line.find('1st pass') != -1:
            value, = _get_cols(line, (-4,), ncols=9, expected={2:"Sequences:"})
        else:
            value, = _get_cols(line, (-1,), ncols=4, expected={2:"Sequences:"})
        self._params.num_sequences = _safe_int(value)

    def num_extends(self, line):
        if line.find('1st pass') != -1:
            value, = _get_cols(line, (-4,), ncols=9, expected={2:"extensions:"})
        else:
            value, = _get_cols(line, (-1,), ncols=4, expected={2:"extensions:"})
        self._params.num_extends = _safe_int(value)

    def num_good_extends(self, line):
        if line.find('1st pass') != -1:
            value, = _get_cols(line, (-4,), ncols=10, expected={3:"extensions:"})
        else:
            value, = _get_cols(line, (-1,), ncols=5, expected={3:"extensions:"})
        self._params.num_good_extends = _safe_int(value)

    def num_seqs_better_e(self, line):
        value, = _get_cols(
            line, (-1,), ncols=7, expected={2:"sequences"})
        self._params.num_seqs_better_e = _safe_int(value)

    def hsps_no_gap(self, line):
        value, = _get_cols(
            line, (-1,), ncols=9, expected={3:"better", 7:"gapping:"})
        self._params.hsps_no_gap = _safe_int(value)

    def hsps_prelim_gapped(self, line):
        value, = _get_cols(
            line, (-1,), ncols=9, expected={4:"gapped", 6:"prelim"})
        self._params.hsps_prelim_gapped = _safe_int(value)

    def hsps_prelim_gapped_attempted(self, line):
        value, = _get_cols(
            line, (-1,), ncols=10, expected={4:"attempted", 7:"prelim"})
        self._params.hsps_prelim_gapped_attempted = _safe_int(value)

    def hsps_gapped(self, line):
        value, = _get_cols(
            line, (-1,), ncols=6, expected={3:"gapped"})
        self._params.hsps_gapped = _safe_int(value)

    def query_length(self, line):
        # Case varies between BLAST versions, so match lowercased.
        value, = _get_cols(
            line.lower(), (-1,), ncols=4, expected={0:"length", 2:"query:"})
        self._params.query_length = _safe_int(value)

    def database_length(self, line):
        value, = _get_cols(
            line.lower(), (-1,), ncols=4, expected={0:"length", 2:"database:"})
        self._params.database_length = _safe_int(value)

    def effective_hsp_length(self, line):
        value, = _get_cols(
            line, (-1,), ncols=4, expected={1:"HSP", 2:"length:"})
        self._params.effective_hsp_length = _safe_int(value)

    def effective_query_length(self, line):
        value, = _get_cols(
            line, (-1,), ncols=5, expected={1:"length", 3:"query:"})
        self._params.effective_query_length = _safe_int(value)

    def effective_database_length(self, line):
        value, = _get_cols(
            line.lower(), (-1,), ncols=5, expected={1:"length", 3:"database:"})
        self._params.effective_database_length = _safe_int(value)

    def effective_search_space(self, line):
        value, = _get_cols(
            line, (-1,), ncols=4, expected={1:"search"})
        self._params.effective_search_space = _safe_int(value)

    def effective_search_space_used(self, line):
        value, = _get_cols(
            line, (-1,), ncols=5, expected={1:"search", 3:"used:"})
        self._params.effective_search_space_used = _safe_int(value)

    def frameshift(self, line):
        self._params.frameshift = _get_cols(
            line, (4, 5), ncols=6, expected={0:"frameshift", 2:"decay"})

    def threshold(self, line):
        if line[:2] == "T:":
            # Assume it's an old style line like "T: 123"
            value, = _get_cols(line, (1,), ncols=2, expected={0:"T:"})
        elif line[:28] == "Neighboring words threshold:":
            value, = _get_cols(
                line, (3,), ncols=4,
                expected={0:"Neighboring", 1:"words", 2:"threshold:"})
        else:
            raise ValueError("Unrecognised threshold line:\n%s" % line)
        self._params.threshold = _safe_int(value)

    def window_size(self, line):
        if line[:2] == "A:":
            # Old style line like "A: 40"
            value, = _get_cols(line, (1,), ncols=2, expected={0:"A:"})
        elif line[:25] == "Window for multiple hits:":
            value, = _get_cols(
                line, (4,), ncols=5,
                expected={0:"Window", 2:"multiple", 3:"hits:"})
        else:
            raise ValueError("Unrecognised window size line:\n%s" % line)
        self._params.window_size = _safe_int(value)

    def dropoff_1st_pass(self, line):
        raw_score, raw_bits = _re_search(
            r"X1: (\d+) \(\s*([0-9,.]+) bits\)", line,
            "I could not find the dropoff in line\n%s" % line)
        self._params.dropoff_1st_pass = _safe_int(raw_score), _safe_float(raw_bits)

    def gap_x_dropoff(self, line):
        raw_score, raw_bits = _re_search(
            r"X2: (\d+) \(\s*([0-9,.]+) bits\)", line,
            "I could not find the gap dropoff in line\n%s" % line)
        self._params.gap_x_dropoff = _safe_int(raw_score), _safe_float(raw_bits)

    def gap_x_dropoff_final(self, line):
        raw_score, raw_bits = _re_search(
            r"X3: (\d+) \(\s*([0-9,.]+) bits\)", line,
            "I could not find the gap dropoff final in line\n%s" % line)
        self._params.gap_x_dropoff_final = _safe_int(raw_score), _safe_float(raw_bits)

    def gap_trigger(self, line):
        raw_score, raw_bits = _re_search(
            r"S1: (\d+) \(\s*([0-9,.]+) bits\)", line,
            "I could not find the gap trigger in line\n%s" % line)
        self._params.gap_trigger = _safe_int(raw_score), _safe_float(raw_bits)

    def blast_cutoff(self, line):
        raw_score, raw_bits = _re_search(
            r"S2: (\d+) \(\s*([0-9,.]+) bits\)", line,
            "I could not find the blast cutoff in line\n%s" % line)
        self._params.blast_cutoff = _safe_int(raw_score), _safe_float(raw_bits)

    def end_parameters(self):
        pass
class _BlastConsumer(AbstractConsumer,
                     _HeaderConsumer,
                     _DescriptionConsumer,
                     _AlignmentConsumer,
                     _HSPConsumer,
                     _DatabaseReportConsumer,
                     _ParametersConsumer
                     ):
    # This Consumer is inherits from many other consumer classes that handle
    # the actual dirty work.  An alternate way to do it is to create objects
    # of those classes and then delegate the parsing tasks to them in a
    # decorator-type pattern.  The disadvantage of that is that the method
    # names will need to be resolved in this classes.  However, using
    # a decorator will retain more control in this class (which may or
    # may not be a bad thing).  In addition, having each sub-consumer as
    # its own object prevents this object's dictionary from being cluttered
    # with members and reduces the chance of member collisions.
    def __init__(self):
        # self.data holds the Record.Blast being assembled; it is created
        # in start_header and populated as each section is parsed.
        self.data = None
    def round(self, line):
        """Reject PSI-BLAST input, which uses _PSIBlastConsumer instead."""
        # Make sure nobody's trying to pass me PSI-BLAST data!
        raise ValueError("This consumer doesn't handle PSI-BLAST data")
    def start_header(self):
        """Begin a new record: create the Blast object, then delegate."""
        self.data = Record.Blast()
        _HeaderConsumer.start_header(self)
    def end_header(self):
        """Copy the parsed header attributes onto the Blast record."""
        _HeaderConsumer.end_header(self)
        self.data.__dict__.update(self._header.__dict__)
    def end_descriptions(self):
        """Store the accumulated one-line descriptions."""
        self.data.descriptions = self._descriptions
    def end_alignment(self):
        """Keep the finished alignment only if it contains at least one HSP."""
        _AlignmentConsumer.end_alignment(self)
        if self._alignment.hsps:
            self.data.alignments.append(self._alignment)
        if self._multiple_alignment.alignment:
            self.data.multiple_alignment = self._multiple_alignment
    def end_hsp(self):
        """Attach the finished HSP to the current alignment."""
        _HSPConsumer.end_hsp(self)
        try:
            self._alignment.hsps.append(self._hsp)
        except AttributeError:
            # No current alignment means the input was out of order.
            raise ValueError("Found an HSP before an alignment")
    def end_database_report(self):
        """Copy the parsed database report attributes onto the record."""
        _DatabaseReportConsumer.end_database_report(self)
        self.data.__dict__.update(self._dr.__dict__)
    def end_parameters(self):
        """Copy the parsed parameter attributes onto the record."""
        _ParametersConsumer.end_parameters(self)
        self.data.__dict__.update(self._params.__dict__)
class _PSIBlastConsumer(AbstractConsumer,
                        _HeaderConsumer,
                        _DescriptionConsumer,
                        _AlignmentConsumer,
                        _HSPConsumer,
                        _DatabaseReportConsumer,
                        _ParametersConsumer
                        ):
    # Like _BlastConsumer, but groups descriptions and alignments into
    # per-iteration Record.Round objects for PSI-BLAST output.
    def __init__(self):
        # self.data holds the Record.PSIBlast being assembled.
        self.data = None
    def start_header(self):
        """Begin a new record: create the PSIBlast object, then delegate."""
        self.data = Record.PSIBlast()
        _HeaderConsumer.start_header(self)
    def end_header(self):
        """Copy the parsed header attributes onto the PSIBlast record."""
        _HeaderConsumer.end_header(self)
        self.data.__dict__.update(self._header.__dict__)
    def start_descriptions(self):
        """Each description block marks the start of a new PSI-BLAST round."""
        self._round = Record.Round()
        self.data.rounds.append(self._round)
        _DescriptionConsumer.start_descriptions(self)
    def end_descriptions(self):
        """Sort parsed descriptions into new vs. reused (model) sequences."""
        _DescriptionConsumer.end_descriptions(self)
        self._round.number = self._roundnum
        if self._descriptions:
            self._round.new_seqs.extend(self._descriptions)
        self._round.reused_seqs.extend(self._model_sequences)
        self._round.new_seqs.extend(self._nonmodel_sequences)
        if self._converged:
            self.data.converged = 1
    def end_alignment(self):
        """Attach the finished alignment(s) to the current round."""
        _AlignmentConsumer.end_alignment(self)
        if self._alignment.hsps:
            self._round.alignments.append(self._alignment)
        if self._multiple_alignment:
            self._round.multiple_alignment = self._multiple_alignment
    def end_hsp(self):
        """Attach the finished HSP to the current alignment."""
        _HSPConsumer.end_hsp(self)
        try:
            self._alignment.hsps.append(self._hsp)
        except AttributeError:
            # No current alignment means the input was out of order.
            raise ValueError("Found an HSP before an alignment")
    def end_database_report(self):
        """Copy the parsed database report attributes onto the record."""
        _DatabaseReportConsumer.end_database_report(self)
        self.data.__dict__.update(self._dr.__dict__)
    def end_parameters(self):
        """Copy the parsed parameter attributes onto the record."""
        _ParametersConsumer.end_parameters(self)
        self.data.__dict__.update(self._params.__dict__)
class Iterator:
    """Iterates over a file of multiple BLAST results.

    Methods:
    next   Return the next record from the stream, or None.

    NOTE(review): this is an old-style Python 2 iterator (``next``, not
    ``__next__``); ``__iter__`` relies on the two-argument
    ``iter(callable, sentinel)`` form.
    """
    def __init__(self, handle, parser=None):
        """__init__(self, handle, parser=None)

        Create a new iterator.  handle is a file-like object.  parser
        is an optional Parser object to change the results into another form.
        If set to None, then the raw contents of the file will be returned.
        """
        try:
            handle.readline
        except AttributeError:
            raise ValueError(
                "I expected a file handle or file-like object, got %s"
                % type(handle))
        # UndoHandle allows lines that belong to the *next* record to be
        # pushed back onto the stream.
        self._uhandle = File.UndoHandle(handle)
        self._parser = parser
        # Cached header lines, re-used for records that omit the BLAST banner.
        self._header = []
    def next(self):
        """next(self) -> object

        Return the next Blast record from the file.  If no more records,
        return None.
        """
        lines = []
        query = False
        while 1:
            line = self._uhandle.readline()
            if not line:
                break
            # If I've reached the next one, then put the line back and stop.
            # A record may start with "BLAST..." (possibly after one leading
            # character) or with an XML declaration.
            if lines and (line.startswith('BLAST')
                          or line.startswith('BLAST', 1)
                          or line.startswith('<?xml ')):
                self._uhandle.saveline(line)
                break
            # New style files omit the BLAST line to mark a new query:
            if line.startswith("Query="):
                if not query:
                    if not self._header:
                        # Remember the shared header so it can be re-inserted
                        # in front of later, banner-less records.
                        self._header = lines[:]
                    query = True
                else:
                    #Start of another record
                    self._uhandle.saveline(line)
                    break
            lines.append(line)

        if query and "BLAST" not in lines[0]:
            #Cheat and re-insert the header
            #print "-"*50
            #print "".join(self._header)
            #print "-"*50
            #print "".join(lines)
            #print "-"*50
            lines = self._header + lines

        if not lines:
            # End of file - no more records.
            return None

        data = ''.join(lines)
        if self._parser is not None:
            return self._parser.parse(File.StringHandle(data))
        return data

    def __iter__(self):
        # iter(callable, sentinel): keep calling self.next until it
        # returns None (end of file).
        return iter(self.next, None)
def blastall(blastcmd, program, database, infile, align_view='7', **keywds):
    """Execute and retrieve data from standalone BLASTALL as handles (OBSOLETE).

    NOTE - This function is obsolete, you are encouraged to use the command
    line wrapper Bio.Blast.Applications.BlastallCommandline instead.

    Execute and retrieve data from blastall.  blastcmd is the command
    used to launch the 'blastall' executable.  program is the blast program
    to use, e.g. 'blastp', 'blastn', etc.  database is the path to the database
    to search against.  infile is the path to the file containing
    the sequence to search with.

    The return values are two handles, for standard output and standard error.

    You may pass more parameters to **keywds to change the behavior of
    the search.  Otherwise, optional values will be chosen by blastall.
    The Blast output is by default in XML format. Use the align_view keyword
    for output in a different format.

    Scoring
    matrix              Matrix to use.
    gap_open            Gap open penalty.
    gap_extend          Gap extension penalty.
    nuc_match           Nucleotide match reward.  (BLASTN)
    nuc_mismatch        Nucleotide mismatch penalty.  (BLASTN)
    query_genetic_code  Genetic code for Query.
    db_genetic_code     Genetic code for database.  (TBLAST[NX])

    Algorithm
    gapped              Whether to do a gapped alignment. T/F (not for TBLASTX)
    expectation         Expectation value cutoff.
    wordsize            Word size.
    strands             Query strands to search against database.([T]BLAST[NX])
    keep_hits           Number of best hits from a region to keep.
    xdrop               Dropoff value (bits) for gapped alignments.
    hit_extend          Threshold for extending hits.
    region_length       Length of region used to judge hits.
    db_length           Effective database length.
    search_length       Effective length of search space.

    Processing
    filter              Filter query sequence for low complexity (with SEG)?  T/F
    believe_query       Believe the query defline.  T/F
    restrict_gi         Restrict search to these GI's.
    nprocessors         Number of processors to use.
    oldengine           Force use of old engine T/F

    Formatting
    html                Produce HTML output?  T/F
    descriptions        Number of one-line descriptions.
    alignments          Number of alignments.
    align_view          Alignment view.  Integer 0-11,
                        passed as a string or integer.
    show_gi             Show GI's in deflines?  T/F
    seqalign_file       seqalign file to output.
    outfile             Output file for report.  Filename to write to, if
                        omitted standard output is used (which you can access
                        from the returned handles).
    """
    # Reject parameter values containing shell metacharacters (";", "|", ...).
    _security_check_parameters(keywds)

    # Map friendly keyword names onto legacy blastall command line flags.
    att2param = {
        'matrix' : '-M',
        'gap_open' : '-G',
        'gap_extend' : '-E',
        'nuc_match' : '-r',
        'nuc_mismatch' : '-q',
        'query_genetic_code' : '-Q',
        'db_genetic_code' : '-D',

        'gapped' : '-g',
        'expectation' : '-e',
        'wordsize' : '-W',
        'strands' : '-S',
        'keep_hits' : '-K',
        'xdrop' : '-X',
        'hit_extend' : '-f',
        'region_length' : '-L',
        'db_length' : '-z',
        'search_length' : '-Y',

        'program' : '-p',
        'database' : '-d',
        'infile' : '-i',
        'filter' : '-F',
        'believe_query' : '-J',
        'restrict_gi' : '-l',
        'nprocessors' : '-a',
        'oldengine' : '-V',

        'html' : '-T',
        'descriptions' : '-v',
        'alignments' : '-b',
        'align_view' : '-m',
        'show_gi' : '-I',
        'seqalign_file' : '-O',
        'outfile' : '-o',
        }
    import warnings
    warnings.warn("This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastallCommandline instead.", PendingDeprecationWarning)
    from Applications import BlastallCommandline
    cline = BlastallCommandline(blastcmd)
    cline.set_parameter(att2param['program'], program)
    cline.set_parameter(att2param['database'], database)
    cline.set_parameter(att2param['infile'], infile)
    cline.set_parameter(att2param['align_view'], str(align_view))
    for key, value in keywds.iteritems():
        cline.set_parameter(att2param[key], str(value))
    return _invoke_blast(cline)
def blastpgp(blastcmd, database, infile, align_view='7', **keywds):
    """Execute and retrieve data from standalone BLASTPGP as handles (OBSOLETE).

    NOTE - This function is obsolete, you are encouraged to use the command
    line wrapper Bio.Blast.Applications.BlastpgpCommandline instead.

    Execute and retrieve data from blastpgp.  blastcmd is the command
    used to launch the 'blastpgp' executable.  database is the path to the
    database to search against.  infile is the path to the file containing
    the sequence to search with.

    The return values are two handles, for standard output and standard error.

    You may pass more parameters to **keywds to change the behavior of
    the search.  Otherwise, optional values will be chosen by blastpgp.
    The Blast output is by default in XML format. Use the align_view keyword
    for output in a different format.

    Scoring
    matrix              Matrix to use.
    gap_open            Gap open penalty.
    gap_extend          Gap extension penalty.
    window_size         Multiple hits window size.
    npasses             Number of passes.
    passes              Hits/passes.  Integer 0-2.

    Algorithm
    gapped              Whether to do a gapped alignment.  T/F
    expectation         Expectation value cutoff.
    wordsize            Word size.
    keep_hits           Number of best hits from a region to keep.
    xdrop               Dropoff value (bits) for gapped alignments.
    hit_extend          Threshold for extending hits.
    region_length       Length of region used to judge hits.
    db_length           Effective database length.
    search_length       Effective length of search space.
    nbits_gapping       Number of bits to trigger gapping.
    pseudocounts        Pseudocounts constants for multiple passes.
    xdrop_final         X dropoff for final gapped alignment.
    xdrop_extension     Dropoff for blast extensions.
    model_threshold     E-value threshold to include in multipass model.
    required_start      Start of required region in query.
    required_end        End of required region in query.

    Processing
    XXX should document default values
    program             The blast program to use. (PHI-BLAST)
    filter              Filter query sequence for low complexity (with SEG)?  T/F
    believe_query       Believe the query defline?  T/F
    nprocessors         Number of processors to use.

    Formatting
    html                Produce HTML output?  T/F
    descriptions        Number of one-line descriptions.
    alignments          Number of alignments.
    align_view          Alignment view.  Integer 0-11,
                        passed as a string or integer.
    show_gi             Show GI's in deflines?  T/F
    seqalign_file       seqalign file to output.
    align_outfile       Output file for alignment.
    checkpoint_outfile  Output file for PSI-BLAST checkpointing.
    restart_infile      Input file for PSI-BLAST restart.
    hit_infile          Hit file for PHI-BLAST.
    matrix_outfile      Output file for PSI-BLAST matrix in ASCII.
    align_outfile       Output file for alignment.  Filename to write to, if
                        omitted standard output is used (which you can access
                        from the returned handles).
    align_infile        Input alignment file for PSI-BLAST restart.
    """
    import warnings
    warnings.warn("This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastpgpCommandline instead.", PendingDeprecationWarning)
    # Reject parameter values containing shell metacharacters (";", "|", ...).
    _security_check_parameters(keywds)

    # Map friendly keyword names onto legacy blastpgp command line flags.
    att2param = {
        'matrix' : '-M',
        'gap_open' : '-G',
        'gap_extend' : '-E',
        'window_size' : '-A',
        'npasses' : '-j',
        'passes' : '-P',

        'gapped' : '-g',
        'expectation' : '-e',
        'wordsize' : '-W',
        'keep_hits' : '-K',
        'xdrop' : '-X',
        'hit_extend' : '-f',
        'region_length' : '-L',
        # Bug fix: this was '-Z', which is blastpgp's *final X dropoff* flag
        # (see 'xdrop_final' below).  The effective database length flag is
        # lowercase '-z', matching blastall above.
        'db_length' : '-z',
        'search_length' : '-Y',
        'nbits_gapping' : '-N',
        'pseudocounts' : '-c',
        'xdrop_final' : '-Z',
        'xdrop_extension' : '-y',
        'model_threshold' : '-h',
        'required_start' : '-S',
        'required_end' : '-H',

        'program' : '-p',
        'database' : '-d',
        'infile' : '-i',
        'filter' : '-F',
        'believe_query' : '-J',
        'nprocessors' : '-a',

        'html' : '-T',
        'descriptions' : '-v',
        'alignments' : '-b',
        'align_view' : '-m',
        'show_gi' : '-I',
        'seqalign_file' : '-O',
        'align_outfile' : '-o',
        'checkpoint_outfile' : '-C',
        'restart_infile' : '-R',
        'hit_infile' : '-k',
        'matrix_outfile' : '-Q',
        'align_infile' : '-B',
        }
    from Applications import BlastpgpCommandline
    cline = BlastpgpCommandline(blastcmd)
    cline.set_parameter(att2param['database'], database)
    cline.set_parameter(att2param['infile'], infile)
    cline.set_parameter(att2param['align_view'], str(align_view))
    for key, value in keywds.iteritems():
        cline.set_parameter(att2param[key], str(value))
    return _invoke_blast(cline)
def rpsblast(blastcmd, database, infile, align_view="7", **keywds):
    """Execute and retrieve data from standalone RPS-BLAST as handles (OBSOLETE).

    NOTE - This function is obsolete, you are encouraged to use the command
    line wrapper Bio.Blast.Applications.RpsBlastCommandline instead.

    Execute and retrieve data from standalone RPS-BLAST.  blastcmd is the
    command used to launch the 'rpsblast' executable.  database is the path
    to the database to search against.  infile is the path to the file
    containing the sequence to search with.

    The return values are two handles, for standard output and standard error.

    You may pass more parameters to **keywds to change the behavior of
    the search.  Otherwise, optional values will be chosen by rpsblast.

    Please note that this function will give XML output by default, by
    setting align_view to seven (i.e. command line option -m 7).
    You should use the NCBIXML.parse() function to read the resulting output.
    This is because NCBIStandalone.BlastParser() does not understand the
    plain text output format from rpsblast.

    WARNING - The following text and associated parameter handling has not
    received extensive testing.  Please report any errors we might have made...

    Algorithm/Scoring
    gapped              Whether to do a gapped alignment.  T/F
    multihit            0 for multiple hit (default), 1 for single hit
    expectation         Expectation value cutoff.
    range_restriction   Range restriction on query sequence (Format: start,stop) blastp only
                        0 in 'start' refers to the beginning of the sequence
                        0 in 'stop' refers to the end of the sequence
                        Default = 0,0
    xdrop               Dropoff value (bits) for gapped alignments.
    xdrop_final         X dropoff for final gapped alignment (in bits).
    xdrop_extension     Dropoff for blast extensions (in bits).
    search_length       Effective length of search space.
    nbits_gapping       Number of bits to trigger gapping.
    protein             Query sequence is protein.  T/F
    db_length           Effective database length.

    Processing
    filter              Filter query sequence for low complexity?  T/F
    case_filter         Use lower case filtering of FASTA sequence T/F, default F
    believe_query       Believe the query defline.  T/F
    nprocessors         Number of processors to use.
    logfile             Name of log file to use, default rpsblast.log

    Formatting
    html                Produce HTML output?  T/F
    descriptions        Number of one-line descriptions.
    alignments          Number of alignments.
    align_view          Alignment view.  Integer 0-11,
                        passed as a string or integer.
    show_gi             Show GI's in deflines?  T/F
    seqalign_file       seqalign file to output.
    align_outfile       Output file for alignment.  Filename to write to, if
                        omitted standard output is used (which you can access
                        from the returned handles).
    """
    import warnings
    # Bug fix: the warning previously pointed at a nonexistent class
    # "BlastrpsCommandline"; the wrapper actually imported and used below
    # is RpsBlastCommandline.
    warnings.warn("This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.RpsBlastCommandline instead.", PendingDeprecationWarning)
    # Reject parameter values containing shell metacharacters (";", "|", ...).
    _security_check_parameters(keywds)

    # Map friendly keyword names onto legacy rpsblast command line flags.
    att2param = {
        'multihit' : '-P',
        'gapped' : '-g',
        'expectation' : '-e',
        'range_restriction' : '-L',
        'xdrop' : '-X',
        'xdrop_final' : '-Z',
        'xdrop_extension' : '-y',
        'search_length' : '-Y',
        'nbits_gapping' : '-N',
        'protein' : '-p',
        'db_length' : '-z',

        'database' : '-d',
        'infile' : '-i',
        'filter' : '-F',
        'case_filter' : '-U',
        'believe_query' : '-J',
        'nprocessors' : '-a',
        'logfile' : '-l',

        'html' : '-T',
        'descriptions' : '-v',
        'alignments' : '-b',
        'align_view' : '-m',
        'show_gi' : '-I',
        'seqalign_file' : '-O',
        'align_outfile' : '-o',
        }

    from Applications import RpsBlastCommandline
    cline = RpsBlastCommandline(blastcmd)
    cline.set_parameter(att2param['database'], database)
    cline.set_parameter(att2param['infile'], infile)
    cline.set_parameter(att2param['align_view'], str(align_view))
    for key, value in keywds.iteritems():
        cline.set_parameter(att2param[key], str(value))
    return _invoke_blast(cline)
def _re_search(regex, line, error_msg):
    """Search line with regex; return the match groups or raise ValueError."""
    match = re.search(regex, line)
    if match is None:
        raise ValueError(error_msg)
    return match.groups()
def _get_cols(line, cols_to_get, ncols=None, expected={}):
    """Split line on whitespace and return the requested columns as a tuple.

    ncols, if given, is the exact number of columns the line must have;
    expected maps column indexes to the literal word required there.
    Raises ValueError if either check fails.
    """
    cols = line.split()

    # Check to make sure number of columns is correct
    if ncols is not None and len(cols) != ncols:
        raise ValueError("I expected %d columns (got %d) in line\n%s" \
                         % (ncols, len(cols), line))

    # Check to make sure columns contain the correct data
    for index in expected:
        if cols[index] != expected[index]:
            raise ValueError("I expected '%s' in column %d in line\n%s" \
                             % (expected[index], index, line))

    # Construct the answer tuple
    return tuple(cols[c] for c in cols_to_get)
def _safe_int(str):
try:
return int(str)
except ValueError:
# Something went wrong. Try to clean up the string.
# Remove all commas from the string
str = str.replace(',', '')
# try again after removing commas.
# Note int() will return a long rather than overflow
try:
return int(str)
except ValueError:
pass
# Call float to handle things like "54.3", note could lose precision, e.g.
# >>> int("5399354557888517312")
# 5399354557888517312
# >>> int(float("5399354557888517312"))
# 5399354557888517120
return int(float(str))
def _safe_float(str):
# Thomas Rosleff Soerensen (rosleff@mpiz-koeln.mpg.de) noted that
# float('e-172') does not produce an error on his platform. Thus,
# we need to check the string for this condition.
# Sometimes BLAST leaves of the '1' in front of an exponent.
if str and str[0] in ['E', 'e']:
str = '1' + str
try:
return float(str)
except ValueError:
# Remove all commas from the string
str = str.replace(',', '')
# try again.
return float(str)
def _invoke_blast(cline):
"""Start BLAST and returns handles for stdout and stderr (PRIVATE).
Expects a command line wrapper object from Bio.Blast.Applications
"""
import subprocess, sys
blast_cmd = cline.program_name
if not os.path.exists(blast_cmd):
raise ValueError("BLAST executable does not exist at %s" % blast_cmd)
#We don't need to supply any piped input, but we setup the
#standard input pipe anyway as a work around for a python
#bug if this is called from a Windows GUI program. For
#details, see http://bugs.python.org/issue1124861
blast_process = subprocess.Popen(str(cline),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=(sys.platform!="win32"))
blast_process.stdin.close()
return blast_process.stdout, blast_process.stderr
def _security_check_parameters(param_dict):
"""Look for any attempt to insert a command into a parameter.
e.g. blastall(..., matrix='IDENTITY -F 0; rm -rf /etc/passwd')
Looks for ";" or "&&" in the strings (Unix and Windows syntax
for appending a command line), or ">", "<" or "|" (redirection)
and if any are found raises an exception.
"""
for key, value in param_dict.iteritems():
str_value = str(value) # Could easily be an int or a float
for bad_str in [";", "&&", ">", "<", "|"]:
if bad_str in str_value:
raise ValueError("Rejecting suspicious argument for %s" % key)
class _BlastErrorConsumer(_BlastConsumer):
    """Consumer that spots known BLAST error text while parsing (PRIVATE).

    Identical to _BlastConsumer except that the catch-all noevent()
    handler inspects each line for the "Query must be at least wordsize"
    message and converts it to a ShortQueryBlastError.
    """
    def __init__(self):
        _BlastConsumer.__init__(self)
    def noevent(self, line):
        if line.find("Query must be at least wordsize") != -1:
            raise ShortQueryBlastError("Query must be at least wordsize")
        # Now pass the line back up to the superclass.
        # NOTE(review): falls back to the base class __getattr__ when the
        # base has no plain 'noevent' attribute — preserves whatever
        # dynamic dispatch _BlastConsumer implements.
        method = getattr(_BlastConsumer, 'noevent',
                         _BlastConsumer.__getattr__(self, 'noevent'))
        method(line)
class BlastErrorParser(AbstractParser):
    """Attempt to catch and diagnose BLAST errors while parsing.

    This utilizes the BlastParser module but adds an additional layer
    of complexity on top of it by attempting to diagnose ValueErrors
    that may actually indicate problems during BLAST parsing.

    Current BLAST problems this detects are:
    o LowQualityBlastError - When BLASTing really low quality sequences
    (ie. some GenBank entries which are just short streches of a single
    nucleotide), BLAST will report an error with the sequence and be
    unable to search with this. This will lead to a badly formatted
    BLAST report that the parsers choke on. The parser will convert the
    ValueError to a LowQualityBlastError and attempt to provide useful
    information.
    """
    def __init__(self, bad_report_handle=None):
        """Initialize a parser that tries to catch BlastErrors.

        Arguments:
        o bad_report_handle - An optional argument specifying a handle
        where bad reports should be sent. This would allow you to save
        all of the bad reports to a file, for instance. If no handle
        is specified, the bad reports will not be saved.
        """
        self._bad_report_handle = bad_report_handle
        self._scanner = _Scanner()
        self._consumer = _BlastErrorConsumer()
    def parse(self, handle):
        """Parse a handle, attempting to diagnose errors."""
        # Read everything up front so the data can be replayed: once for
        # normal parsing and, on failure, again for error diagnosis and
        # the optional bad-report dump.
        results = handle.read()
        try:
            self._scanner.feed(File.StringHandle(results), self._consumer)
        # Python 3 compatible form; "except ValueError, msg" was Python 2
        # only syntax, and the bound name was never used anyway.
        except ValueError:
            # if we have a bad_report_file, save the info to it first
            if self._bad_report_handle:
                # send the info to the error handle
                self._bad_report_handle.write(results)
            # now we want to try and diagnose the error
            self._diagnose_error(
                File.StringHandle(results), self._consumer.data)
            # if we got here we can't figure out the problem
            # so we should pass along the syntax error we got
            raise
        return self._consumer.data
    def _diagnose_error(self, handle, data_record):
        """Attempt to diagnose an error in the passed handle.

        Arguments:
        o handle - The handle potentially containing the error
        o data_record - The data record partially created by the consumer.
        """
        line = handle.readline()
        while line:
            # 'Searchingdone' instead of 'Searching......done' seems
            # to indicate a failure to perform the BLAST due to
            # low quality sequence
            if line.startswith('Searchingdone'):
                raise LowQualityBlastError("Blast failure occured on query: ",
                                           data_record.query)
            line = handle.readline()
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/Blast/NCBIStandalone.py
|
Python
|
gpl-2.0
| 90,826
|
[
"BLAST",
"Biopython"
] |
2dcfbedda805ea22753f5e57052c964a26a032fad93a731d3ce61a726a1a9c12
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import shutil
import subprocess
import time
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.kubernetes.flagmanager import FlagManager
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
# Raw string avoids the invalid-escape-sequence DeprecationWarning that a
# plain '\.' triggers on Python 3.6+; the pattern itself is unchanged.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
# Well-known locations of the kubeconfig files used by the worker services.
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
# Make sure the snap-installed binaries (kubectl, kubelet, ...) are on PATH.
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
@hook('upgrade-charm')
def upgrade_charm():
    """Hook: prepare this worker for the new charm revision."""
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()
    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    remove_state('kubernetes-worker.gpu.enabled')
    kubelet_opts = FlagManager('kubelet')
    kubelet_opts.destroy('feature-gates')
    kubelet_opts.destroy('experimental-nvidia-gpus')
    # Force CNI reinstall and config regeneration, then restart services so
    # the refreshed snaps/flags take effect.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
    """Flag a snap upgrade when any attached snap resource file changed."""
    hookenv.status_set('maintenance', 'Checking resources')
    resource_paths = [hookenv.resource_get(name)
                      for name in ('kubectl', 'kubelet', 'kube-proxy')]
    if any_file_changed(resource_paths):
        set_upgrade_needed()
def set_upgrade_needed():
    """Mark the snaps as needing an upgrade, auto-approving when allowed."""
    set_state('kubernetes-worker.snaps.upgrade-needed')
    config = hookenv.config()
    # Auto-approve on first install (no previous channel recorded) or when
    # the operator has not requested manual upgrades.
    first_install = config.previous('channel') is None
    if first_install or not config.get('require-manual-upgrade'):
        set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
    """Remove the pre-snap (binary-resource based) worker installation."""
    # remove old states
    remove_state('kubernetes-worker.components.installed')
    # disable old services
    for old_service in ('kubelet', 'kube-proxy'):
        hookenv.log('Stopping {0} service.'.format(old_service))
        service_stop(old_service)
    # cleanup old files and directories left behind by the old install
    stale_paths = (
        "/lib/systemd/system/kubelet.service",
        "/lib/systemd/system/kube-proxy.service",
        "/etc/default/kube-default",
        "/etc/default/kubelet",
        "/etc/default/kube-proxy",
        "/srv/kubernetes",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kubelet",
        "/usr/local/bin/kube-proxy",
        "/etc/kubernetes",
    )
    for path in stale_paths:
        if os.path.isdir(path):
            hookenv.log("Removing directory: " + path)
            shutil.rmtree(path)
        elif os.path.isfile(path):
            hookenv.log("Removing file: " + path)
            os.remove(path)
    # cleanup old flagmanagers
    FlagManager('kubelet').destroy_all()
    FlagManager('kube-proxy').destroy_all()
@when('config.changed.channel')
def channel_changed():
    """React to a snap channel config change by scheduling a snap upgrade."""
    set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
    """Block the unit until the operator runs the upgrade action."""
    hookenv.status_set('blocked',
                       'Needs manual upgrade, run the upgrade action')
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
    """Install/refresh the worker snaps from the configured channel."""
    check_resources_for_upgrade_needed()
    channel = hookenv.config('channel')
    # kubectl, kubelet and kube-proxy are all classic snaps.
    for snap_name in ('kubectl', 'kubelet', 'kube-proxy'):
        hookenv.status_set('maintenance',
                           'Installing {} snap'.format(snap_name))
        snap.install(snap_name, channel=channel, classic=True)
    set_state('kubernetes-worker.snaps.installed')
    set_state('kubernetes-worker.restart-needed')
    remove_state('kubernetes-worker.snaps.upgrade-needed')
    remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
    """Hook: deregister this node and stop the worker daemons.

    When this unit is destroyed:
      - delete the current node from the cluster
      - stop the worker services
    """
    try:
        if os.path.isfile(kubeconfig_path):
            kubectl('delete', 'node', gethostname())
    except CalledProcessError:
        # Best effort only: the master may already be gone at teardown time.
        hookenv.log('Failed to unregister node.')
    for daemon in ('snap.kubelet.daemon', 'snap.kube-proxy.daemon'):
        service_stop(daemon)
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
    ''' Unpack the cni-plugins resource '''
    charm_dir = os.getenv('CHARM_DIR')
    # Get the resource via resource_get
    try:
        # Resource name is architecture specific, e.g. 'cni-amd64'.
        resource_name = 'cni-{}'.format(arch())
        archive = hookenv.resource_get(resource_name)
    except Exception:
        message = 'Error fetching the cni resource.'
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return
    if not archive:
        hookenv.log('Missing cni resource.')
        hookenv.status_set('blocked', 'Missing cni resource.')
        return
    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.status_set('blocked', 'Incomplete cni resource.')
        return
    hookenv.status_set('maintenance', 'Unpacking cni resource.')
    unpack_path = '{}/files/cni'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(cmd)
    check_call(cmd)
    # Install each unpacked binary into its destination with install(1),
    # which also creates any missing parent directories (-D).
    apps = [
        {'name': 'loopback', 'path': '/opt/cni/bin'}
    ]
    for app in apps:
        unpacked = '{}/{}'.format(unpack_path, app['name'])
        app_path = os.path.join(app['path'], app['name'])
        install = ['install', '-v', '-D', unpacked, app_path]
        hookenv.log(install)
        check_call(install)
    # Used by the "registry" action. The action is run on a single worker, but
    # the registry pod can end up on any worker, so we need this directory on
    # all the workers.
    os.makedirs('/srv/registry', exist_ok=True)
    set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
    """Report the installed kubelet version to juju as the app version."""
    raw = check_output(['kubelet', '--version'])
    # Output looks like b"Kubernetes v1.x.y\n"; keep just the version part.
    hookenv.application_version_set(raw.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
    ''' Notify to the user we are in a transient state and the application
    is still converging. Potentially remotely, or we may be in a detached loop
    wait state '''
    # During deployment the worker has to start kubelet without cluster dns
    # configured. If this is the first unit online in a service pool waiting
    # to self host the dns pod, and configure itself to query the dns service
    # declared in the kube-system namespace
    hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
      'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
    '''Update the status message with the current status of kubelet.'''
    # kube_control is required by the relation decorator but unused here.
    update_kubelet_status()
def update_kubelet_status():
    """Set the unit status from the health of the worker daemons.

    The kubelet can be waiting for dns, waiting for cluster turnup, or
    ready to serve applications.
    """
    failing = [svc for svc in ('kubelet', 'kube-proxy')
               if not _systemctl_is_active('snap.{}.daemon'.format(svc))]
    if failing:
        message = 'Waiting for {} to start.'.format(','.join(failing))
        hookenv.status_set('waiting', message)
    else:
        hookenv.status_set('active', 'Kubernetes worker running.')
@when('certificates.available')
def send_data(tls):
    """Request a server certificate for this unit from the tls layer."""
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()
    # Subject Alternative Names the tls layer should add to the server cert.
    sans = [
        common_name,
        hookenv.unit_private_ip(),
        gethostname(),
    ]
    # Path-safe certificate name derived from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')
    tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
      'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
    ''' Watch for configuration changes and signal if we need to restart the
    worker services '''
    servers = get_kube_api_servers(kube_api)
    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']
    # NOTE(review): data_changed() appears to record the value it compares,
    # and `or` short-circuits, so later keys may only be recorded once the
    # earlier ones are stable — confirm against charms.reactive.helpers.
    if (data_changed('kube-api-servers', servers) or
            data_changed('kube-dns', dns) or
            data_changed('cluster-cidr', cluster_cidr)):
        set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
      'tls_client.ca.saved', 'tls_client.client.certificate.saved',
      'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
      'tls_client.server.key.saved',
      'kube-control.dns.available', 'kube-control.auth.available',
      'cni.available', 'kubernetes-worker.restart-needed',
      'worker.auth.bootstrapped')
def start_worker(kube_api, kube_control, auth_control, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.
    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']
    if cluster_cidr is None:
        hookenv.log('Waiting for cluster cidr.')
        return
    nodeuser = 'system:node:{}'.format(gethostname())
    creds = kube_control.get_auth_credentials(nodeuser)
    # Record the credentials so later changes can be detected elsewhere.
    data_changed('kube-control.creds', creds)
    # set --allow-privileged flag for kubelet
    set_privileged()
    # Pick one API server at random for the client configs; kube-proxy gets
    # its own choice inside configure_worker_services.
    create_config(random.choice(servers), creds)
    configure_worker_services(servers, dns, cluster_cidr)
    set_state('kubernetes-worker.config.created')
    restart_unit_services()
    update_kubelet_status()
    apply_node_labels()
    remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
    ''' Set worker configuration on the CNI relation. This lets the CNI
    subordinate know that we're the worker so it can respond accordingly. '''
    cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
    ''' Ingress is a toggled state. Remove ingress.available if set when
    toggled '''
    # Dropping the state makes render_and_launch_ingress() run again.
    remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
    '''The Software Defined Network changed on the container so restart the
    kubernetes services.'''
    restart_unit_services()
    update_kubelet_status()
    # Clear the trigger state so this only runs once per SDN change.
    remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
    """Launch or tear down the ingress controller based on config.

    If configuration has ingress RC enabled, launch the ingress load
    balancer and default http backend.  Otherwise attempt deletion.
    """
    if hookenv.config().get('ingress'):
        launch_default_ingress_controller()
        return
    hookenv.log('Deleting the http backend and ingress.')
    kubectl_manifest('delete',
                     '/root/cdk/addons/default-http-backend.yaml')
    kubectl_manifest('delete',
                     '/root/cdk/addons/ingress-replication-controller.yaml')  # noqa
    hookenv.close_port(80)
    hookenv.close_port(443)
@when('kubernetes-worker.ingress.available')
def scale_ingress_controller():
    """Match the ingress controller replica count to the cluster node count."""
    try:
        node_lines = kubectl('get', 'nodes', '-o', 'name').splitlines()
        kubectl('scale', '--replicas=%d' % len(node_lines),
                'rc/nginx-ingress-controller')
    except CalledProcessError:
        hookenv.log('Failed to scale ingress controllers. '
                    'Will attempt again next update.')
@when('config.changed.labels', 'kubernetes-worker.config.created')
def apply_node_labels():
    """Parse the labels config option and apply the labels to the node."""
    config = hookenv.config()
    user_labels = _parse_labels(config.get('labels'))
    # Diff against the previous label set (empty on the first run).
    raw_previous = config.previous('labels')
    if raw_previous:
        previous_labels = _parse_labels(raw_previous)
        hookenv.log('previous labels: {}'.format(previous_labels))
    else:
        # this handles first time run if there is no previous labels config
        previous_labels = _parse_labels("")
    # Delete labels dropped from config; labels still requested are simply
    # refreshed by the overwrite pass below.
    for stale in (lbl for lbl in previous_labels if lbl not in user_labels):
        hookenv.log('Deleting node label {}'.format(stale))
        _apply_node_label(stale, delete=True)
    # Atomically (re)set every requested label.
    for label in user_labels:
        _apply_node_label(label, overwrite=True)
def arch():
    """Return the package architecture as a string.

    Raise an exception if the architecture is not supported by kubernetes.
    """
    # dpkg prints e.g. b'amd64\n'; trim the newline and decode to str.
    raw = check_output(['dpkg', '--print-architecture'])
    return raw.rstrip().decode('utf-8')
def create_config(server, creds):
    '''Create a kubernetes configuration for the worker unit.'''
    # Get the options from the tls-client layer.
    layer_options = layer.options('tls-client')
    # Get all the paths to the tls information required for kubeconfig.
    ca = layer_options.get('ca_certificate_path')
    # Create kubernetes configuration in the default location for ubuntu.
    create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
                      token=creds['client_token'], user='ubuntu')
    # Make the config dir readable by the ubuntu users so juju scp works.
    cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
    check_call(cmd)
    # Create kubernetes configuration in the default location for root.
    create_kubeconfig(kubeclientconfig_path, server, ca,
                      token=creds['client_token'], user='root')
    # Create kubernetes configuration for kubelet, and kube-proxy services.
    # Each service authenticates with its own token from the credentials dict.
    create_kubeconfig(kubeconfig_path, server, ca,
                      token=creds['kubelet_token'], user='kubelet')
    create_kubeconfig(kubeproxyconfig_path, server, ca,
                      token=creds['proxy_token'], user='kube-proxy')
def configure_worker_services(api_servers, dns, cluster_cidr):
    ''' Add remaining flags for the worker services and configure snaps to use
    them '''
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')
    # kubelet flags: kubeconfig, CNI networking, DNS, and TLS material.
    kubelet_opts = FlagManager('kubelet')
    kubelet_opts.add('require-kubeconfig', 'true')
    kubelet_opts.add('kubeconfig', kubeconfig_path)
    kubelet_opts.add('network-plugin', 'cni')
    kubelet_opts.add('v', '0')
    kubelet_opts.add('address', '0.0.0.0')
    kubelet_opts.add('port', '10250')
    kubelet_opts.add('cluster-dns', dns['sdn-ip'])
    kubelet_opts.add('cluster-domain', dns['domain'])
    kubelet_opts.add('anonymous-auth', 'false')
    kubelet_opts.add('client-ca-file', ca_cert_path)
    kubelet_opts.add('tls-cert-file', server_cert_path)
    kubelet_opts.add('tls-private-key-file', server_key_path)
    kubelet_opts.add('logtostderr', 'true')
    kubelet_opts.add('fail-swap-on', 'false')
    # kube-proxy flags: cluster CIDR plus one API server chosen at random.
    kube_proxy_opts = FlagManager('kube-proxy')
    kube_proxy_opts.add('cluster-cidr', cluster_cidr)
    kube_proxy_opts.add('kubeconfig', kubeproxyconfig_path)
    kube_proxy_opts.add('logtostderr', 'true')
    kube_proxy_opts.add('v', '0')
    kube_proxy_opts.add('master', random.choice(api_servers), strict=True)
    # Inside an lxc container conntrack cannot be tuned; disable it.
    if b'lxc' in check_output('virt-what', shell=True):
        kube_proxy_opts.add('conntrack-max-per-core', '0')
    # Hand the accumulated flags to the snaps via `snap set`.
    cmd = ['snap', 'set', 'kubelet'] + kubelet_opts.to_s().split(' ')
    check_call(cmd)
    cmd = ['snap', 'set', 'kube-proxy'] + kube_proxy_opts.to_s().split(' ')
    check_call(cmd)
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
                      user='ubuntu', context='juju-context',
                      cluster='juju-cluster', password=None, token=None):
    '''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.'''
    # At least one credential mechanism must be supplied.
    if not key and not certificate and not password and not token:
        raise ValueError('Missing authentication mechanism.')
    # token and password are mutually exclusive. Error early if both are
    # present. The developer has requested an impossible situation.
    # see: kubectl config set-credentials --help
    if token and password:
        raise ValueError('Token and Password are mutually exclusive.')
    # Create the config file with the address of the master server.
    cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
          '--server={2} --certificate-authority={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
    # Delete old users
    cmd = 'kubectl config --kubeconfig={0} unset users'
    check_call(split(cmd.format(kubeconfig)))
    # Create the credentials using the client flags.
    cmd = 'kubectl config --kubeconfig={0} ' \
          'set-credentials {1} '.format(kubeconfig, user)
    if key and certificate:
        cmd = '{0} --client-key={1} --client-certificate={2} '\
              '--embed-certs=true'.format(cmd, key, certificate)
    if password:
        cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
    # This is mutually exclusive from password. They will not work together.
    if token:
        cmd = "{0} --token={1}".format(cmd, token)
    check_call(split(cmd))
    # Create a default context with the cluster.
    cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
          '--cluster={2} --user={3}'
    check_call(split(cmd.format(kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = 'kubectl config --kubeconfig={0} use-context {1}'
    check_call(split(cmd.format(kubeconfig, context)))
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'
    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        # On failure, close the ports and bail; the handler will retry on
        # the next update since the ingress.available state is never set.
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return
    # Render the ingress replication controller manifest
    context['ingress_image'] = \
        "gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13"
    if arch() == 's390x':
        # s390x needs a different controller image.
        context['ingress_image'] = \
            "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
    manifest = addon_path.format('ingress-replication-controller.yaml')
    render('ingress-replication-controller.yaml', manifest, context)
    hookenv.log('Creating the ingress replication controller.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return
    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
def restart_unit_services():
    """Restart the kube-proxy and kubelet snap daemons."""
    hookenv.log('Restarting kubelet and kube-proxy.')
    for daemon in ('snap.kube-proxy.daemon', 'snap.kubelet.daemon'):
        service_restart(daemon)
def get_kube_api_servers(kube_api):
    '''Return the kubernetes api server address and port for this
    relationship.'''
    # Flatten every host of every service on the relation into https URLs.
    return ['https://{0}:{1}'.format(unit['hostname'], unit['port'])
            for service in kube_api.services()
            for unit in service['hosts']]
def kubectl(*args):
    """Run a kubectl command with the root client config.

    Returns stdout; raises CalledProcessError when the command fails.
    """
    command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path]
    command.extend(args)
    hookenv.log('Executing {}'.format(command))
    return check_output(command)
def kubectl_success(*args):
    """Run kubectl with the given args; True on success, False on failure."""
    try:
        kubectl(*args)
    except CalledProcessError:
        return False
    return True
def kubectl_manifest(operation, manifest):
    """Wrap kubectl for filepath-based manifest operations.

    :param operation: one of get, create, delete, replace
    :param manifest: filepath to the manifest
    """
    if operation == 'delete':
        # Deletions are a special case: remove requested resources with --now
        return kubectl_success(operation, '-f', manifest, '--now')
    if operation == 'create' and kubectl_success('get', '-f', manifest):
        # The definition already exists, so it is probably safe to assume
        # creation succeeded earlier instead of failing on a re-create.
        hookenv.log('Skipping definition for {}'.format(manifest))
        return True
    # Execute the requested command that did not match any of the special
    # cases above
    return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
    """One-time setup of the nagios (nrpe) checks for this unit."""
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
    """(Re)write nagios service checks for the worker snap daemons."""
    # List of systemd services that will be checked
    services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
    # The current nrpe-external-master interface doesn't handle a lot of
    # logic, use the charm-helpers code for now.
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
    nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
    """Remove the nagios checks when the nrpe relation goes away."""
    remove_state('nrpe-external-master.initial-config')
    # List of systemd services for which the checks will be removed
    services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
    # The current nrpe-external-master interface doesn't handle a lot of logic,
    # use the charm-helpers code for now.
    hostname = nrpe.get_nagios_hostname()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    for service in services:
        nrpe_setup.remove_check(shortname=service)
def set_privileged():
    """Update the kubelet allow-privileged flag from charm config.

    The config value 'auto' resolves to true when GPU support is enabled
    and false otherwise.
    """
    privileged = hookenv.config('allow-privileged')
    if privileged == 'auto':
        privileged = ('true'
                      if is_state('kubernetes-worker.gpu.enabled')
                      else 'false')
    hookenv.log('Setting {}={}'.format('allow-privileged', privileged))
    FlagManager('kubelet').add('allow-privileged', privileged)
    # Track the resulting mode as a reactive state for other handlers.
    if privileged == 'true':
        set_state('kubernetes-worker.privileged')
    else:
        remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
    """React to changed 'allow-privileged' config value.
    """
    # The flag itself is applied in start_worker via set_privileged();
    # here we only schedule the restart and clear the change trigger.
    set_state('kubernetes-worker.restart-needed')
    remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
    """Enable GPU usage on this node.
    """
    config = hookenv.config()
    # GPU support requires privileged containers; surface that to the
    # operator instead of silently failing.
    if config['allow-privileged'] == "false":
        hookenv.status_set(
            'active',
            'GPUs available. Set allow-privileged="auto" to enable.'
        )
        return
    hookenv.log('Enabling gpu mode')
    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`
        check_call(['nvidia-smi'])
    except CalledProcessError as cpe:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(cpe)
        return
    # The kubelet flag for GPU support changed in k8s 1.6.
    kubelet_opts = FlagManager('kubelet')
    if get_version('kubelet') < (1, 6):
        hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
        kubelet_opts.add('experimental-nvidia-gpus', '1')
    else:
        hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
        kubelet_opts.add('feature-gates', 'Accelerators=true')
    # Apply node labels
    _apply_node_label('gpu=true', overwrite=True)
    _apply_node_label('cuda=true', overwrite=True)
    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
    """Disable GPU usage on this node.

    This handler fires when we're running in gpu mode, and then the operator
    sets allow-privileged="false". Since we can no longer run privileged
    containers, we need to disable gpu mode.
    """
    hookenv.log('Disabling gpu mode')
    # Undo whichever kubelet flag enable_gpu() added for this k8s version.
    kubelet_opts = FlagManager('kubelet')
    if get_version('kubelet') < (1, 6):
        kubelet_opts.destroy('experimental-nvidia-gpus')
    else:
        kubelet_opts.remove('feature-gates', 'Accelerators=true')
    # Remove node labels
    _apply_node_label('gpu', delete=True)
    _apply_node_label('cuda', delete=True)
    remove_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
    """Notify kubernetes-master that we're gpu-enabled.
    """
    kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
    """Notify kubernetes-master that we're not gpu-enabled.
    """
    kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
    """ Request kubelet node authorization with a well formed kubelet user.
    This also implies that we are requesting kube-proxy auth. """
    # The kube-control interface is created to support RBAC.
    # At this point we might as well do the right thing and return the hostname
    # even if it will only be used when we enable RBAC
    nodeuser = 'system:node:{}'.format(gethostname())
    kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
    """Request a service restart in case credential updates were detected."""
    expected_user = 'system:node:{}'.format(gethostname())
    creds = kube_control.get_auth_credentials(expected_user)
    # Guard clauses mirror the original short-circuit order:
    # data_changed() must only run (and record state) when creds exist.
    if not creds:
        return
    if not data_changed('kube-control.creds', creds):
        return
    if creds['user'] != expected_user:
        return
    set_state('worker.auth.bootstrapped')
    set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if the operator is
    upgrading a charm in a deployment that pre-dates the kube-control
    relation, it'll be missing.
    """
    hookenv.status_set(
        'blocked',
        'Relate {}:kube-control kubernetes-master:kube-control'.format(
            hookenv.service_name()))
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
class ApplyNodeLabelFailed(Exception):
    """Raised when a kubectl node-label change keeps failing past the retry deadline."""
    pass
def _apply_node_label(label, delete=False, overwrite=False):
    ''' Invoke kubectl to apply node label changes '''
    hostname = gethostname()
    # TODO: Make this part of the kubectl calls instead of a special string
    cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'

    if delete is True:
        # kubectl removes a label when the key is suffixed with '-'.
        label_key = label.split('=')[0]
        cmd = cmd_base.format(kubeconfig_path, hostname, label_key) + '-'
    else:
        cmd = cmd_base.format(kubeconfig_path, hostname, label)
        if overwrite:
            cmd += ' --overwrite'

    argv = cmd.split()
    # Retry for up to a minute; the apiserver may not be reachable yet.
    deadline = time.time() + 60
    while time.time() < deadline:
        code = subprocess.call(argv)
        if code == 0:
            break
        hookenv.log('Failed to apply label %s, exit code %d. Will retry.' % (
            label, code))
        time.sleep(1)
    else:
        raise ApplyNodeLabelFailed('Failed to apply label %s' % label)
def _parse_labels(labels):
''' Parse labels from a key=value string separated by space.'''
label_array = labels.split(' ')
sanitized_labels = []
for item in label_array:
if '=' in item:
sanitized_labels.append(item)
else:
hookenv.log('Skipping malformed option: {}'.format(item))
return sanitized_labels
|
sakshamsharma/kubernetes
|
cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
|
Python
|
apache-2.0
| 32,928
|
[
"CDK"
] |
a54a01d99ceeae8f70dbc163c92b11f048f1f7fc258ba9b4410711e19e161879
|
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Written (C) 2012 Heiko Strathmann
#
from numpy import *
from pylab import *
from scipy import *
from modshogun import RealFeatures
from modshogun import DataGenerator
from modshogun import GaussianKernel
from modshogun import HSIC
from modshogun import PERMUTATION, HSIC_GAMMA
from modshogun import EuclideanDistance
from modshogun import Statistics, Math
# for nice plotting that fits into our shogun tutorial
import latex_plot_inits
def hsic_graphical():
    """Run a graphical HSIC independence-test demo (Python 2, shogun).

    Generates a rotated mixture-of-Gaussians sample, builds Gaussian
    kernels with median-distance widths, then plots the data, the sampled
    alternative distribution, the permutation (sampled) null distribution
    and the gamma-approximated null distribution in a 2x2 figure.
    """
    # parameters, change to get different results
    m=250
    difference=3
    # setting the angle lower makes a harder test
    angle=pi/30
    # number of samples taken from null and alternative distribution
    num_null_samples=500
    # use data generator class to produce example data
    data=DataGenerator.generate_sym_mix_gauss(m,difference,angle)
    # create shogun feature representation
    features_x=RealFeatures(array([data[0]]))
    features_y=RealFeatures(array([data[1]]))
    # compute median data distance in order to use for Gaussian kernel width
    # 0.5*median_distance normally (factor two in Gaussian kernel)
    # However, shogun's kernel width is different to the usual parametrization
    # Therefore 0.5*2*median_distance^2
    # Use a subset of data for that, only 200 elements. Median is stable
    subset=int32(array([x for x in range(features_x.get_num_vectors())])) # numpy
    subset=random.permutation(subset) # numpy permutation
    subset=subset[0:200]
    features_x.add_subset(subset)
    dist=EuclideanDistance(features_x, features_x)
    distances=dist.get_distance_matrix()
    features_x.remove_subset()
    # NOTE(review): 'np' is pulled in via 'from pylab import *' — confirm.
    median_distance=np.median(distances)
    sigma_x=median_distance**2
    features_y.add_subset(subset)
    dist=EuclideanDistance(features_y, features_y)
    distances=dist.get_distance_matrix()
    features_y.remove_subset()
    median_distance=np.median(distances)
    sigma_y=median_distance**2
    print "median distance for Gaussian kernel on x:", sigma_x
    print "median distance for Gaussian kernel on y:", sigma_y
    kernel_x=GaussianKernel(10,sigma_x)
    kernel_y=GaussianKernel(10,sigma_y)
    # create hsic instance. Note that this is a convenience constructor which copies
    # feature data. features_x and features_y are not those used in hsic.
    # This is only for user-friendliness. Usually, it's ok to do this.
    # Below, the alternative distribution is sampled, which means
    # that new feature objects have to be created in each iteration (slow)
    # However, normally, the alternative distribution is not sampled
    hsic=HSIC(kernel_x,kernel_y,features_x,features_y)
    # sample alternative distribution
    alt_samples=zeros(num_null_samples)
    for i in range(len(alt_samples)):
        data=DataGenerator.generate_sym_mix_gauss(m,difference,angle)
        features_x.set_feature_matrix(array([data[0]]))
        features_y.set_feature_matrix(array([data[1]]))
        # re-create hsic instance every time since feature objects are copied due to
        # usage of convenience constructor
        hsic=HSIC(kernel_x,kernel_y,features_x,features_y)
        alt_samples[i]=hsic.compute_statistic()
    # sample from null distribution
    # permutation, biased statistic
    hsic.set_null_approximation_method(PERMUTATION)
    hsic.set_num_null_samples(num_null_samples)
    null_samples_boot=hsic.sample_null()
    # fit gamma distribution, biased statistic
    hsic.set_null_approximation_method(HSIC_GAMMA)
    gamma_params=hsic.fit_null_gamma()
    # sample gamma with parameters
    null_samples_gamma=array([gamma(gamma_params[0], gamma_params[1]) for _ in range(num_null_samples)])
    # plot
    figure()
    # plot data x and y
    subplot(2,2,1)
    gca().xaxis.set_major_locator( MaxNLocator(nbins = 4) ) # reduce number of x-ticks
    gca().yaxis.set_major_locator( MaxNLocator(nbins = 4) ) # reduce number of x-ticks
    grid(True)
    plot(data[0], data[1], 'o')
    title('Data, rotation=$\pi$/'+str(1/angle*pi)+'\nm='+str(m))
    xlabel('$x$')
    ylabel('$y$')
    # compute threshold for test level
    alpha=0.05
    null_samples_boot.sort()
    null_samples_gamma.sort()
    # floor() returns a float index here — works on old numpy; TODO confirm
    # on current versions.
    thresh_boot=null_samples_boot[floor(len(null_samples_boot)*(1-alpha))];
    thresh_gamma=null_samples_gamma[floor(len(null_samples_gamma)*(1-alpha))];
    type_one_error_boot=sum(null_samples_boot<thresh_boot)/float(num_null_samples)
    # NOTE(review): gamma null samples are compared against thresh_boot,
    # not thresh_gamma — presumably deliberate (measures how well the gamma
    # approximation reproduces the sampled threshold); confirm before changing.
    type_one_error_gamma=sum(null_samples_gamma<thresh_boot)/float(num_null_samples)
    # plot alternative distribution with threshold
    subplot(2,2,2)
    gca().xaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
    gca().yaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
    grid(True)
    hist(alt_samples, 20, normed=True);
    axvline(thresh_boot, 0, 1, linewidth=2, color='red')
    type_two_error=sum(alt_samples<thresh_boot)/float(num_null_samples)
    title('Alternative Dist.\n' + 'Type II error is ' + str(type_two_error))
    # compute range for all null distribution histograms
    hist_range=[min([min(null_samples_boot), min(null_samples_gamma)]), max([max(null_samples_boot), max(null_samples_gamma)])]
    # plot null distribution with threshold
    subplot(2,2,3)
    gca().xaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
    gca().yaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
    grid(True)
    hist(null_samples_boot, 20, range=hist_range, normed=True);
    axvline(thresh_boot, 0, 1, linewidth=2, color='red')
    title('Sampled Null Dist.\n' + 'Type I error is ' + str(type_one_error_boot))
    # plot null distribution gamma
    subplot(2,2,4)
    gca().xaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
    gca().yaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
    grid(True)
    hist(null_samples_gamma, 20, range=hist_range, normed=True);
    axvline(thresh_gamma, 0, 1, linewidth=2, color='red')
    title('Null Dist. Gamma\nType I error is ' + str(type_one_error_gamma))
    grid(True)
    # pull plots a bit apart
    subplots_adjust(hspace=0.5)
    subplots_adjust(wspace=0.5)
if __name__=='__main__':
    # Run the demo and block until the matplotlib windows are closed.
    hsic_graphical()
    show()
|
yorkerlin/shogun
|
examples/undocumented/python_modular/graphical/statistics_hsic.py
|
Python
|
gpl-3.0
| 6,117
|
[
"Gaussian"
] |
dce09f55c434f7f03fbc66924140699b64dc122453727e3e35e4b0759ddcadcf
|
#!/usr/bin/env python
# -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2010 Red Hat, Inc., John (J5) Palmieri <johnp@redhat.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
# Demo metadata read by the pygobject demo runner.
title = "Combo boxes"
description = """
The ComboBox widget allows to select one option out of a list.
The ComboBoxEntry additionally allows the user to enter a value
that is not in the list of options.
How the options are displayed is controlled by cell renderers.
"""

# See FIXME's
is_fully_bound = False

from gi.repository import Gtk, Gdk, GdkPixbuf, GLib, GObject

# Column indices of the stock-icon list store: (pixbuf, label text).
(PIXBUF_COL,
 TEXT_COL) = range(2)
class MaskEntry(Gtk.Entry):
    """Gtk.Entry that colours its text red when it fails a regex mask.

    :param mask: regular expression the entry text must match; when None,
        no validation is performed and the text keeps its normal colour.
    """
    __gtype_name__ = 'MaskEntry'

    def __init__(self, mask=None):
        self.mask = mask

        super(MaskEntry, self).__init__()
        self.connect('changed', self.changed_cb)

        # Pale red used to signal a validation failure.
        self.error_color = Gdk.RGBA()
        self.error_color.red = 1.0
        self.error_color.green = 0.9
        # Bugfix: was 'self.error_color_blue = 0.9', which created a stray
        # instance attribute and left the RGBA blue channel at its default.
        self.error_color.blue = 0.9
        self.error_color.alpha = 1.0

        # workaround since override_color doesn't accept None yet
        style_ctx = self.get_style_context()
        self.normal_color = style_ctx.get_color(0)

    def set_background(self):
        """Apply the error colour on mask mismatch, normal colour otherwise."""
        if self.mask:
            if not GLib.regex_match_simple(self.mask,
                                           self.get_text(), 0, 0):
                self.override_color(0, self.error_color)
                return

        self.override_color(0, self.normal_color)

    def changed_cb(self, entry):
        # Re-validate on every edit.
        self.set_background()
class ComboboxApp:
    """GTK demo window showing four kinds of combo box.

    Sections: a stock-icon combo with separators and insensitive rows, a
    tree-backed combo of US state capitals, an editable ComboBoxText with
    regex validation (MaskEntry), and a string-ID combo kept in sync with
    a plain entry via notify handlers.
    """

    def __init__(self, demoapp):
        self.demoapp = demoapp

        self.window = Gtk.Window()
        self.window.set_title('Combo boxes')
        self.window.set_border_width(10)
        self.window.connect('destroy', lambda w: Gtk.main_quit())

        vbox = Gtk.VBox(homogeneous=False, spacing=2)
        self.window.add(vbox)

        # --- Section 1: stock icons, with a separator row and one
        # deliberately insensitive row.
        frame = Gtk.Frame(label='Some stock icons')
        vbox.pack_start(frame, False, False, 0)

        box = Gtk.VBox(homogeneous=False, spacing=0)
        box.set_border_width(5)
        frame.add(box)

        model = self.create_stock_icon_store()
        combo = Gtk.ComboBox(model=model)
        box.add(combo)

        renderer = Gtk.CellRendererPixbuf()
        combo.pack_start(renderer, False)

        # FIXME: override set_attributes
        combo.add_attribute(renderer, 'pixbuf', PIXBUF_COL)
        combo.set_cell_data_func(renderer, self.set_sensitive, None)

        renderer = Gtk.CellRendererText()
        combo.pack_start(renderer, True)
        combo.add_attribute(renderer, 'text', TEXT_COL)
        combo.set_cell_data_func(renderer, self.set_sensitive, None)
        combo.set_row_separator_func(self.is_separator, None)
        combo.set_active(0)

        # a combobox demonstrating trees
        frame = Gtk.Frame(label='Where are we ?')
        vbox.pack_start(frame, False, False, 0)

        box = Gtk.VBox(homogeneous=False, spacing=0)
        box.set_border_width(5)
        frame.add(box)

        model = self.create_capital_store()
        combo = Gtk.ComboBox(model=model)
        box.add(combo)

        renderer = Gtk.CellRendererText()
        combo.pack_start(renderer, True)
        combo.add_attribute(renderer, 'text', 0)
        combo.set_cell_data_func(renderer, self.is_capital_sensistive, None)
        # Pre-select the 9th child of the first group ("Boston").
        path = Gtk.TreePath('0:8')
        treeiter = model.get_iter(path)
        combo.set_active_iter(treeiter)

        # A GtkComboBoxEntry with validation.
        frame = Gtk.Frame(label='Editable')
        vbox.pack_start(frame, False, False, 0)

        box = Gtk.VBox(homogeneous=False, spacing=0)
        box.set_border_width(5)
        frame.add(box)

        combo = Gtk.ComboBoxText.new_with_entry()
        self.fill_combo_entry(combo)
        box.add(combo)

        # Swap the default child entry for a validating MaskEntry.
        entry = MaskEntry(mask='^([0-9]*|One|Two|2\302\275|Three)$')
        Gtk.Container.remove(combo, combo.get_child())
        combo.add(entry)

        # A combobox with string IDs
        frame = Gtk.Frame(label='String IDs')
        vbox.pack_start(frame, False, False, 0)

        box = Gtk.VBox(homogeneous=False, spacing=0)
        box.set_border_width(5)
        frame.add(box)

        # FIXME: model is not setup when constructing Gtk.ComboBoxText()
        # so we call new() - Gtk should fix this to setup the model
        # in __init__, not in the constructor
        combo = Gtk.ComboBoxText.new()
        combo.append('never', 'Not visible')
        combo.append('when-active', 'Visible when active')
        combo.append('always', 'Always visible')
        box.add(combo)

        entry = Gtk.Entry()
        # FIXME: a bug in PyGObject does not allow us to access dynamic
        # methods on GObject.Object, so bind properties the hard way
        # GObject.Object.bind_property(combo, 'active-id',
        #                              entry, 'text',
        #                              GObject.BindingFlags.BIDIRECTIONAL)
        self.combo_notify_id = \
            combo.connect('notify::active-id',
                          self.combo_active_id_changed, entry)
        self.entry_notify_id = \
            entry.connect('notify::text',
                          self.entry_text_changed, combo)
        box.add(entry)

        self.window.show_all()

    def combo_active_id_changed(self, combo, pspec, entry):
        # Disconnect the reverse handler while pushing the value into the
        # entry, to avoid notify feedback loops.
        entry.disconnect(self.entry_notify_id)
        entry.set_text(combo.get_property('active-id'))
        self.entry_notify_id = \
            entry.connect('notify::text',
                          self.entry_text_changed, combo)

    def entry_text_changed(self, entry, pspec, combo):
        # Mirror of combo_active_id_changed for the opposite direction.
        combo.disconnect(self.combo_notify_id)
        combo.set_property('active-id', entry.get_text())
        self.combo_notify_id = \
            combo.connect('notify::active-id',
                          self.combo_active_id_changed, entry)

    def strip_underscore(self, s):
        """Remove mnemonic underscores from a stock label."""
        return s.replace('_', '')

    def create_stock_icon_store(self):
        """Build a (pixbuf, label) ListStore; a None id becomes a separator row."""
        stock_id = (Gtk.STOCK_DIALOG_WARNING,
                    Gtk.STOCK_STOP,
                    Gtk.STOCK_NEW,
                    Gtk.STOCK_CLEAR,
                    None,
                    Gtk.STOCK_OPEN)

        cellview = Gtk.CellView()
        store = Gtk.ListStore(GdkPixbuf.Pixbuf, str)

        for id in stock_id:
            if id is not None:
                pixbuf = cellview.render_icon(id, Gtk.IconSize.BUTTON, None)
                item = Gtk.stock_lookup(id)
                label = self.strip_underscore(item.label)
                store.append((pixbuf, label))
            else:
                store.append((None, 'separator'))

        return store

    def set_sensitive(self, cell_layout, cell, tree_model, treeiter, data):
        """
        A GtkCellLayoutDataFunc that demonstrates how one can control
        sensitivity of rows. This particular function does nothing
        useful and just makes the second row insensitive.
        """
        path = tree_model.get_path(treeiter)
        indices = path.get_indices()

        sensitive = not(indices[0] == 1)
        cell.set_property('sensitive', sensitive)

    def is_separator(self, model, treeiter, data):
        """
        A GtkTreeViewRowSeparatorFunc that demonstrates how rows can be
        rendered as separators. This particular function does nothing
        useful and just turns the fourth row into a separator.
        """
        path = model.get_path(treeiter)
        indices = path.get_indices()

        result = (indices[0] == 4)
        return result

    def create_capital_store(self):
        """Build a TreeStore of US state capitals grouped alphabetically.

        NOTE(review): 'Cheyeene' and 'Lincon' look like typos for
        Cheyenne/Lincoln — demo data, left as-is.
        """
        capitals = (
            {'group': 'A - B', 'capital': None},
            {'group': None, 'capital': 'Albany'},
            {'group': None, 'capital': 'Annapolis'},
            {'group': None, 'capital': 'Atlanta'},
            {'group': None, 'capital': 'Augusta'},
            {'group': None, 'capital': 'Austin'},
            {'group': None, 'capital': 'Baton Rouge'},
            {'group': None, 'capital': 'Bismarck'},
            {'group': None, 'capital': 'Boise'},
            {'group': None, 'capital': 'Boston'},
            {'group': 'C - D', 'capital': None},
            {'group': None, 'capital': 'Carson City'},
            {'group': None, 'capital': 'Charleston'},
            {'group': None, 'capital': 'Cheyeene'},
            {'group': None, 'capital': 'Columbia'},
            {'group': None, 'capital': 'Columbus'},
            {'group': None, 'capital': 'Concord'},
            {'group': None, 'capital': 'Denver'},
            {'group': None, 'capital': 'Des Moines'},
            {'group': None, 'capital': 'Dover'},
            {'group': 'E - J', 'capital': None},
            {'group': None, 'capital': 'Frankfort'},
            {'group': None, 'capital': 'Harrisburg'},
            {'group': None, 'capital': 'Hartford'},
            {'group': None, 'capital': 'Helena'},
            {'group': None, 'capital': 'Honolulu'},
            {'group': None, 'capital': 'Indianapolis'},
            {'group': None, 'capital': 'Jackson'},
            {'group': None, 'capital': 'Jefferson City'},
            {'group': None, 'capital': 'Juneau'},
            {'group': 'K - O', 'capital': None},
            {'group': None, 'capital': 'Lansing'},
            {'group': None, 'capital': 'Lincon'},
            {'group': None, 'capital': 'Little Rock'},
            {'group': None, 'capital': 'Madison'},
            {'group': None, 'capital': 'Montgomery'},
            {'group': None, 'capital': 'Montpelier'},
            {'group': None, 'capital': 'Nashville'},
            {'group': None, 'capital': 'Oklahoma City'},
            {'group': None, 'capital': 'Olympia'},
            {'group': 'P - S', 'capital': None},
            {'group': None, 'capital': 'Phoenix'},
            {'group': None, 'capital': 'Pierre'},
            {'group': None, 'capital': 'Providence'},
            {'group': None, 'capital': 'Raleigh'},
            {'group': None, 'capital': 'Richmond'},
            {'group': None, 'capital': 'Sacramento'},
            {'group': None, 'capital': 'Salem'},
            {'group': None, 'capital': 'Salt Lake City'},
            {'group': None, 'capital': 'Santa Fe'},
            {'group': None, 'capital': 'Springfield'},
            {'group': None, 'capital': 'St. Paul'},
            {'group': 'T - Z', 'capital': None},
            {'group': None, 'capital': 'Tallahassee'},
            {'group': None, 'capital': 'Topeka'},
            {'group': None, 'capital': 'Trenton'}
        )

        parent = None
        store = Gtk.TreeStore(str)
        for item in capitals:
            if item['group']:
                parent = store.append(None, (item['group'],))
            elif item['capital']:
                store.append(parent, (item['capital'],))

        return store

    def is_capital_sensistive(self, cell_layout, cell, tree_model, treeiter, data):
        # Group headers (rows with children) are not selectable.
        sensitive = not tree_model.iter_has_child(treeiter)
        cell.set_property('sensitive', sensitive)

    def fill_combo_entry(self, entry):
        """Populate the editable combo with its preset choices."""
        entry.append_text('One')
        entry.append_text('Two')
        entry.append_text('2\302\275')
        entry.append_text('Three')
def main(demoapp=None):
    # Entry point used by the demo runner; demoapp is the hosting shell, if any.
    app = ComboboxApp(demoapp)
    Gtk.main()

if __name__ == '__main__':
    main()
|
Distrotech/pygobject
|
demos/gtk-demo/demos/combobox.py
|
Python
|
lgpl-2.1
| 12,076
|
[
"COLUMBUS"
] |
aad498639762daeb5c13114f33ce8eab384ea2c10d5969eb62483ca69b032506
|
from os import path
from colorsys import hsv_to_rgb, rgb_to_hsv
from collections import OrderedDict
from random import shuffle
import numpy
from pysces.PyscesModelMap import ModelMap
from pysces import Scanner
import pysces
from matplotlib.pyplot import get_cmap
from .. import modeltools
from ..latextools import LatexExpr
from ..utils.plotting import ScanFig, LineData, Data2D
from ..utils.misc import silence_print
from ..utils.misc import DotDict
from ..utils.misc import formatter_factory
# exportLabelledArrayWithHeader prints as it writes; wrap it so saving
# results does not spam stdout.
exportLAWH = silence_print(pysces.write.exportLabelledArrayWithHeader)

__all__ = ['RateChar']
def strip_nan_from_scan(array_like):
    """Trim leading/trailing rows whose second column is NaN.

    Column zero is assumed to hold valid data (the scan input); rows are
    kept from the first to the last row with a non-NaN value in column 1,
    interior NaN rows included.
    """
    nan_flags = list(numpy.isnan(array_like[:, 1]))
    first_valid = nan_flags.index(False)
    last_valid = len(nan_flags) - nan_flags[::-1].index(False)
    return array_like[first_valid:last_valid, :]
class RateChar(object):
    """Rate-characterisation analysis driver for a PySCeS model.

    For each (or a chosen subset of) species, fixes the species at its
    steady state, scans it over a logarithmic range and stores the
    resulting supply/demand flux data as a RateCharData attribute named
    after the species. Written for Python 2 (uses dict.iteritems below).
    """

    def __init__(self, mod, min_concrange_factor=100,
                 max_concrange_factor=100,
                 scan_points=256,
                 auto_load=False):
        super(RateChar, self).__init__()

        self.mod = mod
        self.mod.SetQuiet()
        self._model_map = ModelMap(mod)
        self.mod.doState()
        self._analysis_method = 'ratechar'
        self._working_dir = modeltools.make_path(self.mod,
                                                 self._analysis_method)
        # Default scan range: fixed_ss/min_factor .. fixed_ss*max_factor.
        self._min_concrange_factor = min_concrange_factor
        self._max_concrange_factor = max_concrange_factor
        self._scan_points = scan_points
        self._ltxe = LatexExpr(self.mod)
        # Placeholder per-species attributes, filled in by do_ratechar().
        for species in self.mod.species:
            setattr(self, species, None)

        if auto_load:
            self.load_session()

    def do_ratechar(self, fixed='all',
                    scan_min=None,
                    scan_max=None,
                    min_concrange_factor=None,
                    max_concrange_factor=None,
                    scan_points=None,
                    solver=0,
                    auto_save=False):
        """Scan one, several or all species and attach RateCharData results."""
        # this function wraps _do_scan functionality in a user friendly bubble
        if fixed == 'all':
            to_scan = self.mod.species
        elif type(fixed) is list or type(fixed) is tuple:
            for each in fixed:
                assert each in self.mod.species, 'Invalid species'
            to_scan = fixed
        else:
            assert fixed in self.mod.species, 'Invalid species'
            to_scan = [fixed]
        for each in to_scan:
            fixed_mod, fixed_ss = self._fix_at_ss(each)
            scan_start = self._min_max_chooser(fixed_ss,
                                               scan_min,
                                               min_concrange_factor,
                                               'min')
            scan_end = self._min_max_chooser(fixed_ss,
                                             scan_max,
                                             max_concrange_factor,
                                             'max')
            # here there could be a situation where a scan_min > scan_max
            # I wonder what will happen....

            if not scan_points:
                scan_points = self._scan_points
            column_names, results = self._do_scan(fixed_mod,
                                                  each,
                                                  scan_start,
                                                  scan_end,
                                                  scan_points)
            # Drop leading/trailing rows where the solver failed (NaN).
            cleaned_results = strip_nan_from_scan(results)
            rcd = RateCharData(fixed_ss,
                               fixed_mod,
                               self.mod,
                               column_names,
                               cleaned_results,
                               self._model_map,
                               self._ltxe)
            setattr(self, each, rcd)
        if auto_save:
            self.save_session()

    def _min_max_chooser(self, ss, point, concrange, min_max):
        # chooses a minimum or maximum point based
        # on the information given by a user
        # ie if a specific min/max point is given - use that
        # if only concentration range is given - use that
        # if nothing is given - use the default conc_range_factor
        # pretty simple stuff
        if point:
            the_point = point
        if not point and concrange:
            if min_max == 'min':
                the_point = ss / concrange
            elif min_max == 'max':
                the_point = ss * concrange
        if not point and not concrange:
            if min_max == 'min':
                the_point = ss / self._min_concrange_factor
            elif min_max == 'max':
                the_point = ss * self._max_concrange_factor

        return the_point

    @silence_print
    def _do_scan(self,
                 fixed_mod,
                 fixed,
                 scan_min,
                 scan_max,
                 scan_points,
                 solver=0):
        # do scan is a simplified interface to pysces.Scanner
        # more intuitive than Scan1 (functional vs OO??)
        # returns the names of the scanned blocks together with
        # the results of the scan
        assert solver in (0, 1, 2), 'Solver mode can only be one of 0, 1 or 2'

        fixed_mod.mode_solver = solver

        # Output columns: the fixed species plus all demand (substrate-of)
        # and supply (product-of) flux blocks.
        demand_blocks = [
            'J_' + r for r in getattr(self._model_map, fixed).isSubstrateOf()]
        supply_blocks = [
            'J_' + r for r in getattr(self._model_map, fixed).isProductOf()]
        user_output = [fixed] + demand_blocks + supply_blocks

        scanner = Scanner(fixed_mod)
        scanner.quietRun = True
        scanner.addScanParameter(
            fixed, scan_min, scan_max, scan_points, log=True)
        scanner.addUserOutput(*user_output)
        scanner.Run()
        return user_output, scanner.UserOutputResults

    @silence_print
    def _fix_at_ss(self, fixed):
        # fixes the metabolite at the steady_state
        # (calls psctb.modeltools.fix_metabolite)
        # and returns both the ss value and the fixed model
        self.mod.doState()
        fixed_ss = getattr(self.mod, fixed + '_ss')
        fixed_mod = modeltools.fix_metabolite(self.mod, fixed)
        fixed_mod.SetQuiet()
        # i don't like this approach at all, too many possible unintended side
        # effects
        # setattr(fixed_mod, fixed, fixed_ss)
        # setattr(fixed_mod, 'fixed', fixed)
        # setattr(fixed_mod, 'fixed_ss', fixed_ss)
        fixed_mod.doState()
        return fixed_mod, fixed_ss

    def save_session(self, file_name=None):
        """Save each species' column names and scan results to one .npz file."""
        file_name = modeltools.get_file_path(working_dir=self._working_dir,
                                             internal_filename='save_data',
                                             fmt='npz',
                                             file_name=file_name,
                                             write_suffix=False)
        to_save = {}
        for species in self.mod.species:
            species_object = getattr(self, species)
            try:
                column_array = numpy.array(species_object._column_names)
                scan_results = species_object._scan_results
                to_save['col_{0}'.format(species)] = column_array
                to_save['res_{0}'.format(species)] = scan_results
            except:
                # NOTE(review): species not yet scanned (attribute is None);
                # a bare except also hides any other error here.
                pass
        numpy.savez(file_name, **to_save)

    def save_results(self, folder=None, separator=',',format='%f'):
        """Save every scanned species' results, one subfolder per species.

        NOTE(review): the 'format' argument is accepted but not forwarded
        to save_all_results — confirm whether that is intended.
        """
        base_folder = folder
        for species in self.mod.species:
            if folder:
                folder = path.join(base_folder, species)
            getattr(self, species).save_all_results(folder=folder,
                                                    separator=separator)

    def load_session(self, file_name=None):
        """Rebuild RateCharData attributes from a previously saved .npz file."""
        file_name = modeltools.get_file_path(working_dir=self._working_dir,
                                             internal_filename='save_data',
                                             fmt='npz',
                                             file_name=file_name,
                                             write_suffix=False)
        loaded_data = {}
        try:
            # Python 2: NpzFile.iteritems().
            with numpy.load(file_name) as data_file:
                for k, v in data_file.iteritems():
                    loaded_data[k] = v
        except IOError as e:
            raise e
        for species in self.mod.species:
            try:
                column_names = [str(each) for each in
                                list(loaded_data['col_{0}'.format(species)])]
                scan_results = loaded_data['res_{0}'.format(species)]
                fixed_species = species
                fixed_mod, fixed_ss = self._fix_at_ss(fixed_species)
                rcd = RateCharData(fixed_ss=fixed_ss,
                                   fixed_mod=fixed_mod,
                                   basemod=self.mod, column_names=column_names,
                                   scan_results=scan_results,
                                   model_map=self._model_map, ltxe=self._ltxe)
                setattr(self, fixed_species, rcd)
            except:
                # Species absent from the saved session — skip silently.
                pass
class RateCharData(object):
def __init__(self,
             fixed_ss,
             fixed_mod,
             basemod,
             column_names,
             scan_results,
             model_map,
             ltxe):
    """Hold and post-process one species' rate-characterisation scan.

    :param fixed_ss: steady-state value of the fixed species.
    :param fixed_mod: PySCeS model with the species fixed.
    :param basemod: the original (unfixed) model.
    :param column_names: scan output names; element 0 is the fixed
        species, the rest are flux block names.
    :param scan_results: 2D array; column 0 is the scan range, the rest
        are the corresponding flux columns.
    """
    super(RateCharData, self).__init__()
    self.mod = fixed_mod
    self.scan_results = DotDict()
    self.mca_results = DotDict()
    self._slope_range_factor = 3.0

    # Split the raw scan array into named pieces.
    self.scan_results['fixed'] = column_names[0]
    self.scan_results['fixed_ss'] = fixed_ss
    self.scan_results['scan_range'] = scan_results[:, 0]
    self.scan_results['flux_names'] = column_names[1:]
    self.scan_results['flux_data'] = scan_results[:, 1:]
    self.scan_results['scan_points'] = len(self.scan_results.scan_range)

    # Placeholders populated later by _data_setup().
    self.scan_results['flux_max'] = None
    self.scan_results['flux_min'] = None
    self.scan_results['scan_max'] = None
    self.scan_results['scan_min'] = None
    self.scan_results['ec_names'] = None
    self.scan_results['ec_data'] = None
    self.scan_results['rc_names'] = None
    self.scan_results['rc_data'] = None
    self.scan_results['prc_names'] = None
    self.scan_results['prc_data'] = None

    self._column_names = column_names
    self._scan_results = scan_results
    self._model_map = model_map
    self._analysis_method = 'ratechar'
    self._basemod = basemod
    self._working_dir = modeltools.make_path(self._basemod,
                                             self._analysis_method,
                                             [self.scan_results.fixed])
    self._ltxe = ltxe
    self._color_dict_ = None

    self._data_setup()
    self.mca_results._ltxe = ltxe
    self.mca_results._make_repr(
        '"$" + self._ltxe.expression_to_latex(k) + "$"', 'v',
        formatter_factory())

    # del self.scan_results
    # del self.mca_results
def _data_setup(self):
    """Run the full post-processing pipeline; call order matters."""
    # reset value to do mcarc
    setattr(self.mod, self.scan_results.fixed, self.scan_results.fixed_ss)
    self.mod.doMcaRC()
    self._make_attach_total_fluxes()
    self._min_max_setup()
    self._attach_fluxes_to_self()
    self._make_all_coefficient_lines()
    self._attach_all_coefficients_to_self()
    self._make_all_summary()
    self._make_all_line_data()
def _change_colour_order(self, order=None):
    """Reassign colours to categories and rebuild the line data.

    When no order is supplied the current category keys are shuffled,
    giving a random colour assignment.
    """
    if not order:
        shuffled_keys = self._color_dict_.keys()
        shuffle(shuffled_keys)
        order = shuffled_keys
    colours = self._color_dict_.values()
    self._color_dict_ = dict(zip(order, colours))
    self._make_all_line_data()
def _make_all_line_data(self):
    """Build LineData for every result category and merge into one dict."""
    self._make_flux_ld()
    self._make_ec_ld()
    self._make_rc_ld()
    self._make_prc_ld()
    self._make_total_flux_ld()

    # Merge in the same order as before (prc, flux, total flux, ec, rc)
    # so later categories overwrite duplicate keys identically.
    self._line_data_dict = OrderedDict()
    for partial_dict in (self._prc_ld_dict,
                         self._flux_ld_dict,
                         self._total_flux_ld_dict,
                         self._ec_ld_dict,
                         self._rc_ld_dict):
        self._line_data_dict.update(partial_dict)

    # The per-category dicts are intermediates only.
    del self._flux_ld_dict
    del self._ec_ld_dict
    del self._rc_ld_dict
    del self._prc_ld_dict
    del self._total_flux_ld_dict
def _make_all_summary(self):
    """Compute ec/cc/rc/prc summaries and fold them into mca_results."""
    self._make_ec_summary()
    self._make_cc_summary()
    self._make_rc_summary()
    self._make_prc_summary()

    # Merge each temporary summary and drop it again.
    for attr_name in ('_ec_summary', '_cc_summary',
                      '_rc_summary', '_prc_summary'):
        self.mca_results.update(getattr(self, attr_name))
        delattr(self, attr_name)
def _make_ec_summary(self):
    """Collect elasticities of the fixed species for every relevant reaction."""
    fixed = self.scan_results.fixed
    # Reactions the species participates in (flux names minus 'J_' prefix)
    # plus reactions it merely modifies.
    reactions = [name[2:] for name in self.scan_results.flux_names]
    reactions = reactions + getattr(self._model_map, fixed).isModifierOf()

    summary = {}
    for reaction in reactions:
        key = 'ec%s_%s' % (reaction, fixed)
        summary[key] = getattr(self.mod, key)
    self._ec_summary = summary
def _make_rc_summary(self):
    """Collect flux-response coefficients w.r.t. the fixed species."""
    fixed = self.scan_results.fixed
    summary = {}
    for flux in self.scan_results.flux_names:
        # 'J_R1' -> response coefficient attribute 'R1_<fixed>',
        # stored under the conventional name 'rcJR1_<fixed>'.
        suffix = '%s_%s' % (flux[2:], fixed)
        summary['rcJ' + suffix] = getattr(self.mod.rc, suffix)
    self._rc_summary = summary
def _make_cc_summary(self):
    """Collect flux-control coefficients for each flux/reaction pair."""
    fixed = self.scan_results.fixed
    reagent_of = [name[2:] for name in self.scan_results.flux_names]
    all_reactions = reagent_of + getattr(self._model_map,
                                         fixed).isModifierOf()

    summary = {}
    for flux_reaction in reagent_of:
        for reaction in all_reactions:
            key = 'ccJ%s_%s' % (flux_reaction, reaction)
            summary[key] = getattr(self.mod, key)
    self._cc_summary = summary
def _make_prc_summary(self):
    """Collect partial response coefficients: prc = elasticity * control coeff."""
    fixed = self.scan_results.fixed
    reagent_of = [name[2:] for name in self.scan_results.flux_names]
    all_reactions = reagent_of + getattr(self._model_map,
                                         fixed).isModifierOf()

    summary = {}
    for flux_reaction in reagent_of:
        for route_reaction in all_reactions:
            elasticity = getattr(self.mod,
                                 'ec%s_%s' % (route_reaction, fixed))
            control = getattr(self.mod,
                              'ccJ%s_%s' % (flux_reaction, route_reaction))
            key = 'prcJ%s_%s_%s' % (flux_reaction, fixed, route_reaction)
            summary[key] = elasticity * control
    self._prc_summary = summary
def save_summary(self, file_name=None, separator=',',fmt='%f'):
    """Write the mca_results summary (sorted by name) to a CSV file.

    Python 2: keys() returns a list, sorted in place; I/O errors are
    reported to stdout rather than raised.
    """
    file_name = modeltools.get_file_path(working_dir=self._working_dir,
                                         internal_filename='mca_summary',
                                         fmt='csv',
                                         file_name=file_name, )

    keys = self.mca_results.keys()
    keys.sort()
    # One value per row, labelled by coefficient name.
    values = numpy.array([self.mca_results[k]
                          for k in keys]).reshape(len(keys), 1)

    try:
        exportLAWH(values,
                   names=keys,
                   header=['Value'],
                   fname=file_name,
                   sep=separator,
                   format=fmt)
    except IOError as e:
        print e.strerror
def save_flux_results(self, file_name=None, separator=',',fmt='%f'):
    """Write the raw scan plus total supply/demand columns to a CSV file."""
    file_name = modeltools.get_file_path(working_dir=self._working_dir,
                                         internal_filename='flux_results',
                                         fmt='csv',
                                         file_name=file_name, )
    scan_points = self.scan_results.scan_points
    # Append the derived totals as two extra columns.
    all_cols = numpy.hstack([
        self._scan_results,
        self.scan_results.total_supply.reshape(scan_points, 1),
        self.scan_results.total_demand.reshape(scan_points, 1)])
    column_names = self._column_names + ['Total Supply', 'Total Demand']

    try:
        exportLAWH(all_cols,
                   names=None,
                   header=column_names,
                   fname=file_name,
                   sep=separator,
                   format=fmt)
    except IOError as e:
        print e.strerror
def save_coefficient_results(self,
                             coefficient,
                             file_name=None,
                             separator=',',
                             folder=None,
                             fmt='%f'):
    """Write one coefficient type's line data to a CSV file.

    :param coefficient: one of 'ec', 'rc' or 'prc'.

    NOTE(review): the data columns come in (x, y) pairs, which is why an
    'x_vals' header is emitted before each coefficient name — confirm the
    pairing against _make_*_lines. The 'folder' argument is unused here.
    """
    assert_message = 'coefficient must be one of "ec", "rc" or "prc"'
    assert coefficient in ['rc', 'ec', 'prc'], assert_message
    base_name = coefficient + '_results'
    file_name = modeltools.get_file_path(working_dir=self._working_dir,
                                         internal_filename=base_name,
                                         fmt='csv',
                                         file_name=file_name, )
    results = getattr(self.scan_results, coefficient + '_data')
    names = getattr(self.scan_results, coefficient + '_names')

    new_names = []
    for each in names:
        new_names.append('x_vals')
        new_names.append(each)

    try:
        exportLAWH(results,
                   names=None,
                   header=new_names,
                   fname=file_name,
                   sep=separator,
                   format=fmt)
    except IOError as e:
        print e.strerror
# TODO fix this method so that folder is a parameter only here
def save_all_results(self, folder=None, separator=',',fmt='%f'):
    """Save flux results, the MCA summary and all coefficient CSVs.

    :param folder: destination directory; defaults to this instance's
        working directory.
    """
    if not folder:
        folder = self._working_dir
    file_name = modeltools.get_file_path(working_dir=folder,
                                         internal_filename='flux_results',
                                         fmt='csv')
    self.save_flux_results(separator=separator, file_name=file_name,fmt=fmt)

    file_name = modeltools.get_file_path(working_dir=folder,
                                         internal_filename='mca_summary',
                                         fmt='csv')
    self.save_summary(separator=separator, file_name=file_name, fmt=fmt)

    # One CSV per coefficient type.
    for each in ['ec', 'rc', 'prc']:
        base_name = each + '_results'
        file_name = modeltools.get_file_path(working_dir=folder,
                                             internal_filename=base_name,
                                             fmt='csv')
        self.save_coefficient_results(coefficient=each,
                                      separator=separator,
                                      file_name=file_name,
                                      fmt=fmt)
def _min_max_setup(self):
# Negative minimum linear values mean nothing
# because they don't translate to a log space
# therefore we want the minimum non-negative/non-zero values.
# lets make sure there are no zeros
n_z_f = self.scan_results.flux_data[
numpy.nonzero(self.scan_results.flux_data)]
n_z_s = self.scan_results.scan_range[
numpy.nonzero(self.scan_results.scan_range)]
totals = numpy.vstack([self.scan_results.total_demand,
self.scan_results.total_supply])
n_z_t = totals[numpy.nonzero(totals)]
# and that the array is not now somehow empty
# although if this happens-you have bigger problems
if len(n_z_f) == 0:
n_z_f = numpy.array([0.01, 1])
if len(n_z_s) == 0:
n_z_s = numpy.array([0.01, 1])
# lets also (clumsily) find the non-negative mins and maxes
# by converting to logspace (to get NaNs) and back
# and then getting the min/max non-NaN
# PS flux max is the max of the totals
with numpy.errstate(all='ignore'):
self.scan_results.flux_max = numpy.nanmax(10 ** numpy.log10(n_z_t))
self.scan_results.flux_min = numpy.nanmin(10 ** numpy.log10(n_z_f))
self.scan_results.scan_max = numpy.nanmax(10 ** numpy.log10(n_z_s))
self.scan_results.scan_min = numpy.nanmin(10 ** numpy.log10(n_z_s))
def _attach_fluxes_to_self(self):
for i, each in enumerate(self.scan_results.flux_names):
# setattr(self, each, self.scan_results.flux_data[:, i])
self.scan_results[each] = self.scan_results.flux_data[:, i]
def _attach_all_coefficients_to_self(self):
setup_for = ['ec', 'rc', 'prc']
for each in setup_for:
eval('self._attach_coefficients_to_self(self.scan_results.' + each + '_names,\
self.scan_results.' + each + '_data)')
def _make_all_coefficient_lines(self):
setup_for = ['ec', 'rc', 'prc']
for each in setup_for:
eval('self._make_' + each + '_lines()')
    def _make_attach_total_fluxes(self):
        """Compute total supply and demand fluxes and attach them to
        ``scan_results``.

        Demand reactions are those the fixed species is a substrate of,
        supply reactions those it is a product of; the corresponding
        flux columns are summed point-wise over the scan.
        """
        demand_blocks = getattr(
            self._model_map, self.scan_results.fixed).isSubstrateOf()
        supply_blocks = getattr(
            self._model_map, self.scan_results.fixed).isProductOf()
        # Column positions of the 'J_<reaction>' fluxes in flux_data.
        dem_pos = [self.scan_results.flux_names.index('J_' + flux)
                   for flux in demand_blocks]
        sup_pos = [self.scan_results.flux_names.index('J_' + flux)
                   for flux in supply_blocks]
        self.scan_results['total_demand'] = numpy.sum(
            [self.scan_results.flux_data[:, i]
             for i in dem_pos],
            axis=0)
        self.scan_results['total_supply'] = numpy.sum(
            [self.scan_results.flux_data[:, i]
             for i in sup_pos],
            axis=0)
    def _make_rc_lines(self):
        """Build tangent-line data for the flux response coefficients.

        For every flux 'J_<reaction>' a two-point tangent line through
        the steady state is constructed with slope
        ``rc<reaction>_<fixed>``; the results are stored on
        ``scan_results`` as ``rc_names``/``rc_data``.
        """
        names = []
        resps = []
        for each in self.scan_results.flux_names:
            # Strip the 'J_' prefix to recover the reaction name.
            reaction = each[2:]
            name = reaction + '_' + self.scan_results.fixed
            J_ss = getattr(self.mod, each)
            slope = getattr(self.mod.rc, name)
            resp = self._tangent_line(J_ss, slope)
            name = 'rcJ' + name
            names.append(name)
            resps.append(resp)
        resps = numpy.hstack(resps)
        self.scan_results.rc_names = names
        self.scan_results.rc_data = resps
    def _make_prc_lines(self):
        """Build tangent-line data for the partial response coefficients.

        For every flux and every route reaction (reactions the fixed
        species is a reagent or a modifier of) the partial response is
        the product of the route's elasticity towards the fixed species
        and the flux-control coefficient of the route; each yields a
        tangent line through the steady state.  Results are stored on
        ``scan_results`` as ``prc_names``/``prc_data``.
        """
        names = []
        prcs = []
        reagent_of = [each[2:] for each in self.scan_results.flux_names]
        all_reactions = reagent_of + \
            getattr(self._model_map,
                    self.scan_results.fixed).isModifierOf()
        for flux_reaction in self.scan_results.flux_names:
            J_ss = getattr(self.mod, flux_reaction)
            reaction = flux_reaction[2:]
            for route_reaction in all_reactions:
                # prc = elasticity (route w.r.t. fixed) * control coeff.
                ec = getattr(
                    self.mod,
                    'ec' + route_reaction + '_' + self.scan_results.fixed)
                cc = getattr(self.mod, 'ccJ' + reaction + '_' + route_reaction)
                slope = ec * cc
                prc = self._tangent_line(J_ss, slope)
                name = 'prcJ%s_%s_%s' % (reaction,
                                         self.scan_results.fixed,
                                         route_reaction)
                names.append(name)
                prcs.append(prc)
        prcs = numpy.hstack(prcs)
        self.scan_results.prc_names = names
        self.scan_results.prc_data = prcs
    def _make_ec_lines(self):
        """Build tangent-line data for the elasticity coefficients.

        One tangent line per flux, with slope ``ec<reaction>_<fixed>``
        through the steady state; stored on ``scan_results`` as
        ``ec_names``/``ec_data``.
        """
        names = []
        elasts = []
        for each in self.scan_results.flux_names:
            # Strip the 'J_' prefix to recover the reaction name.
            reaction = each[2:]
            name = 'ec' + reaction + '_' + self.scan_results.fixed
            J_ss = getattr(self.mod, each)
            slope = getattr(self.mod, name)
            elast = self._tangent_line(J_ss, slope)
            names.append(name)
            elasts.append(elast)
        elasts = numpy.hstack(elasts)
        self.scan_results.ec_names = names
        self.scan_results.ec_data = elasts
def _attach_coefficients_to_self(self, names, tangent_lines):
sp = 0
ep = 2
for name in names:
# setattr(self, name, tangent_lines[:, sp:ep])
self.scan_results[name] = tangent_lines[:, sp:ep]
sp = ep
ep += 2
    def _tangent_line(self, J_ss, slope):
        """Return a two-point tangent line through the steady state.

        The line passes through ``(fixed_ss, J_ss)`` with gradient
        ``slope`` in log-log space; its x-extent is scaled by the plot's
        aspect ratio so that all tangent lines appear visually equal in
        length on the final figure.

        Returns a (2, 2) array of [x, y] rows.
        """
        fix_ss = self.scan_results.fixed_ss
        # Power law through the steady-state point: y = constant * x**slope.
        constant = J_ss / (fix_ss ** slope)
        ydist = numpy.log10(self.scan_results.flux_max / self.scan_results.flux_min)
        xdist = numpy.log10(self.scan_results.scan_max / self.scan_results.scan_min)
        golden_ratio = (1 + numpy.sqrt(5)) / 2
        # Aspect-ratio correction so line length is independent of slope
        # (figure assumed ~golden-ratio shaped; see the 1.5 widening factor).
        xyscale = xdist / (ydist * golden_ratio * 1.5)
        scale_factor = numpy.cos(numpy.arctan(slope * xyscale))
        distance = numpy.log10(self._slope_range_factor) * scale_factor
        range_min = fix_ss / (10 ** distance)
        range_max = fix_ss * (10 ** distance)
        scan_range = numpy.linspace(range_min, range_max, num=2)
        rate = constant * scan_range ** (slope)
        return numpy.vstack((scan_range, rate)).transpose()
    @property
    def _color_dict(self):
        """Lazily built mapping of line names to HSV colours.

        Colours are sampled from matplotlib's 'Set2' colormap, one per
        reaction involving the fixed species plus the two totals, and
        cached on ``self._color_dict_`` after the first access.
        """
        if not self._color_dict_:
            fix_map = getattr(self._model_map, self.scan_results.fixed)
            relavent_reactions = fix_map.isProductOf() + \
                fix_map.isSubstrateOf() + \
                fix_map.isModifierOf()
            # +3 accounts for 'Total Supply', 'Total Demand' and headroom.
            num_of_cols = len(relavent_reactions) + 3
            cmap = get_cmap('Set2')(
                numpy.linspace(0, 1.0, num_of_cols))[:, :3]
            color_list = [rgb_to_hsv(*cmap[i, :]) for i in range(num_of_cols)]
            relavent_reactions.sort()
            color_dict = dict(
                zip(['Total Supply'] +
                    ['J_' + reaction for reaction in relavent_reactions] +
                    ['Total Demand'],
                    color_list))
            # just to darken the colors a bit (saturation forced to 1)
            for k, v in color_dict.iteritems():
                color_dict[k] = [v[0], 1, v[2]]
            self._color_dict_ = color_dict
        return self._color_dict_
    def _make_flux_ld(self):
        """Build ``LineData`` objects for every flux curve of the plot.

        Each flux is categorised as 'Demand' or 'Supply' depending on
        whether the fixed species is a substrate or a product of the
        underlying reaction; results are stored in
        ``self._flux_ld_dict``.
        """
        color_dict = self._color_dict
        flux_ld_dict = {}
        demand_blocks = ['J_' + dem_reac for dem_reac in getattr(
            self._model_map, self.scan_results.fixed).isSubstrateOf()]
        supply_blocks = ['J_' + sup_reac for sup_reac in getattr(
            self._model_map, self.scan_results.fixed).isProductOf()]
        for flux in self.scan_results.flux_names:
            flux_col = self.scan_results.flux_names.index(flux)
            x_data = self.scan_results.scan_range
            y_data = self.scan_results.flux_data[:, flux_col]
            latex_expr = self._ltxe.expression_to_latex(flux)
            flux_color = self._color_dict[flux]
            # Slightly darker shade (value * 0.9) for the flux curve itself.
            color = hsv_to_rgb(flux_color[0],
                               flux_color[1],
                               flux_color[2] * 0.9)
            for dem in demand_blocks:
                if dem == flux:
                    flux_ld_dict[flux] = \
                        LineData(name=flux,
                                 x_data=x_data,
                                 y_data=y_data,
                                 categories=['Fluxes',
                                             'Demand',
                                             flux],
                                 properties={'label': '$%s$' % latex_expr,
                                             'color': color})
                    break
            for sup in supply_blocks:
                if sup == flux:
                    flux_ld_dict[flux] = \
                        LineData(name=flux,
                                 x_data=x_data,
                                 y_data=y_data,
                                 categories=['Fluxes',
                                             'Supply',
                                             flux],
                                 properties={'label': '$%s$' % latex_expr,
                                             'color': color})
                    break
        self._flux_ld_dict = flux_ld_dict
    def _make_ec_ld(self):
        """Build ``LineData`` objects for the elasticity tangent lines.

        Each line inherits its categories from the matching flux line and
        uses a less saturated shade of the flux colour.
        """
        ec_ld_dict = {}
        for ec_name in self.scan_results.ec_names:
            for flux, flux_ld in self._flux_ld_dict.iteritems():
                ec_reaction = flux[2:]
                if 'ec' + ec_reaction + '_' + self.scan_results.fixed in ec_name:
                    flux_color = self._color_dict[flux]
                    # Lighter shade: saturation halved relative to the flux.
                    color = hsv_to_rgb(flux_color[0],
                                       flux_color[1] * 0.5,
                                       flux_color[2])
                    ec_data = self.scan_results[ec_name]
                    categories = ['Elasticity Coefficients'] + \
                        flux_ld.categories[1:]
                    latex_expr = self._ltxe.expression_to_latex(ec_name)
                    ec_ld_dict[ec_name] = \
                        LineData(name=ec_name,
                                 x_data=ec_data[:, 0],
                                 y_data=ec_data[:, 1],
                                 categories=categories,
                                 properties={'label': '$%s$' % latex_expr,
                                             'color': color})
        self._ec_ld_dict = ec_ld_dict
    def _make_rc_ld(self):
        """Build ``LineData`` objects for the response coefficient
        tangent lines (dashed, darker shade of the flux colour)."""
        rc_ld_dict = {}
        for rc_name in self.scan_results.rc_names:
            for flux, flux_ld in self._flux_ld_dict.iteritems():
                rc_flux = 'J' + flux[2:]
                if 'rc' + rc_flux + '_' in rc_name:
                    flux_color = self._color_dict[flux]
                    # Darker shade: value scaled to 70% of the flux colour.
                    color = hsv_to_rgb(flux_color[0],
                                       flux_color[1],
                                       flux_color[2] * 0.7)
                    rc_data = self.scan_results[rc_name]
                    categories = ['Response Coefficients'] + \
                        flux_ld.categories[1:]
                    latex_expr = self._ltxe.expression_to_latex(rc_name)
                    rc_ld_dict[rc_name] = \
                        LineData(name=rc_name,
                                 x_data=rc_data[:, 0],
                                 y_data=rc_data[:, 1],
                                 categories=categories,
                                 properties={'label': '$%s$' % latex_expr,
                                             'color': color,
                                             'ls': '--'})
        self._rc_ld_dict = rc_ld_dict
    def _make_prc_ld(self):
        """Build ``LineData`` objects for the partial response
        coefficient tangent lines, coloured by their route reaction."""
        def get_prc_route(prc, flux, fixed):
            # 'prcJ<flux>_<fixed>_<route>' -> '<route>'
            without_prefix = prc.split('prc')[1]
            without_flux = without_prefix.split(flux)[1][1:]
            route = without_flux.split(fixed)[1][1:]
            return route
        prc_ld_dict = {}
        for prc_name in self.scan_results.prc_names:
            for flux, flux_ld in self._flux_ld_dict.iteritems():
                prc_flux = 'J' + flux[2:]
                if 'prc' + prc_flux + '_' + self.scan_results.fixed in prc_name:
                    route_reaction = get_prc_route(prc_name,
                                                   prc_flux,
                                                   self.scan_results.fixed)
                    # Colour follows the *route* reaction, not the flux.
                    flux_color = self._color_dict['J_' + route_reaction]
                    color = hsv_to_rgb(flux_color[0],
                                       flux_color[1] * 0.5,
                                       flux_color[2])
                    prc_data = self.scan_results[prc_name]
                    categories = ['Partial Response Coefficients'] + \
                        flux_ld.categories[1:]
                    latex_expr = self._ltxe.expression_to_latex(prc_name)
                    prc_ld_dict[prc_name] = \
                        LineData(name=prc_name,
                                 x_data=prc_data[:, 0],
                                 y_data=prc_data[:, 1],
                                 categories=categories,
                                 properties={'label': '$%s$' % latex_expr,
                                             'color': color})
        self._prc_ld_dict = prc_ld_dict
    def _make_total_flux_ld(self):
        """Build the dashed 'Total Supply' and 'Total Demand' lines and
        store them in ``self._total_flux_ld_dict``."""
        total_flux_ld_dict = {}
        col = self._color_dict['Total Supply']
        total_flux_ld_dict['Total Supply'] = \
            LineData(name='Total Supply',
                     x_data=self.scan_results.scan_range,
                     y_data=self.scan_results.total_supply,
                     categories=['Fluxes',
                                 'Supply',
                                 'Total Supply'],
                     properties={'label': '$%s$' % 'Total\,Supply',
                                 'color': hsv_to_rgb(col[0], col[1],
                                                     col[2] * 0.9),
                                 'ls': '--'})
        col = self._color_dict['Total Demand']
        total_flux_ld_dict['Total Demand'] = \
            LineData(name='Total Demand',
                     x_data=self.scan_results.scan_range,
                     y_data=self.scan_results.total_demand,
                     categories=['Fluxes',
                                 'Demand',
                                 'Total Demand'],
                     properties={'label': '$%s$' % 'Total\,Demand',
                                 'color': hsv_to_rgb(col[0], col[1],
                                                     col[2] * 0.9),
                                 'ls': '--'})
        self._total_flux_ld_dict = total_flux_ld_dict
    def plot(self):
        """Plot all flux and coefficient tangent lines on a log-log
        ``ScanFig`` and return it.

        The category classes drive the interactive toggles of the figure
        (supply/demand groups, individual reaction blocks, and the four
        line types); the steady-state value of the fixed species is
        marked with a vertical dotted line.
        """
        category_classes = OrderedDict([
            ('Supply/Demand', [
                'Supply',
                'Demand']),
            ('Reaction Blocks',
             self.scan_results.flux_names +
             ['Total Supply', 'Total Demand']),
            ('Lines', [
                'Fluxes',
                'Elasticity Coefficients',
                'Response Coefficients',
                'Partial Response Coefficients'])])
        line_data_list = [v for v in self._line_data_dict.itervalues()]
        scan_fig = ScanFig(line_data_list,
                           ax_properties={'xlabel': '[%s]' %
                                          self.scan_results.fixed.replace(
                                              '_', ' '),
                                          'ylabel': 'Rate',
                                          'xscale': 'log',
                                          'yscale': 'log',
                                          'xlim': [self.scan_results.scan_min,
                                                   self.scan_results.scan_max],
                                          'ylim': [self.scan_results.flux_min,
                                                   self.scan_results.flux_max * 2
                                                   ]},
                           category_classes=category_classes,
                           base_name=self._analysis_method,
                           working_dir=self._working_dir)
        # Show the flux curves by default.
        scan_fig.toggle_category('Supply', True)
        scan_fig.toggle_category('Demand', True)
        scan_fig.toggle_category('Fluxes', True)
        # Mark the steady-state concentration of the fixed species.
        scan_fig.ax.axvline(self.scan_results.fixed_ss, ls=':', color='gray')
        return scan_fig
def plot_decompose(self):
from warnings import warn, simplefilter
simplefilter('always', DeprecationWarning)
warn('plot_decompose has been renamed to `do_mca_scan, use that '
'method in the future`', DeprecationWarning, stacklevel=1)
simplefilter('default', DeprecationWarning)
return self.do_mca_scan()
    @silence_print
    def do_mca_scan(self):
        """Scan elasticity and control coefficients over the fixed
        species range and derive (partial) response coefficients.

        Returns
        -------
        tuple
            ``(rc_data_obj, cc_ec_data_obj)`` - two ``Data2D`` objects:
            the first holds the partial and total response coefficient
            scans, the second the raw elasticity and control coefficient
            scans.
        """
        ecs = []
        ccs = []
        prc_names = []
        rc_names = []
        rc_pos = []
        # Reactions the fixed species is a reagent of ('J_' prefix stripped).
        reagent_of = [each[2:] for each in self.scan_results.flux_names]
        all_reactions = reagent_of + \
            getattr(self._model_map,
                    self.scan_results.fixed).isModifierOf()
        arl = len(all_reactions)
        strt = 0
        stp = arl
        for flux_reaction in self.scan_results.flux_names:
            reaction = flux_reaction[2:]
            rc_names.append('rcJ%s_%s' % (reaction, self.scan_results.fixed))
            # Column positions of the prcs that sum to this flux's rc.
            rc_pos.append(range(strt, stp))
            strt += arl
            stp += arl
            for route_reaction in all_reactions:
                ec = 'ec' + route_reaction + '_' + self.scan_results.fixed
                cc = 'ccJ' + reaction + '_' + route_reaction
                name = 'prcJ%s_%s_%s' % (reaction,
                                         self.scan_results.fixed,
                                         route_reaction)
                # ecs.append(ec)
                if ec not in ecs:
                    ecs.append(ec)
                ccs.append(cc)
                prc_names.append(name)
        ec_len = len(ecs)
        user_output = [self.scan_results.fixed] + ecs + ccs
        scanner = pysces.Scanner(self.mod)
        scanner.quietRun = True
        scanner.addScanParameter(self.scan_results.fixed,
                                 self.scan_results.scan_min,
                                 self.scan_results.scan_max,
                                 self.scan_results.scan_points,
                                 log=True)
        scanner.addUserOutput(*user_output)
        scanner.Run()
        ax_properties = {'ylabel': 'Coefficient Value',
                         'xlabel': '[%s]' %
                         self.scan_results.fixed.replace('_', ' '),
                         'xscale': 'log',
                         'yscale': 'linear',
                         'xlim': [self.scan_results.scan_min,
                                  self.scan_results.scan_max]}
        cc_ec_data_obj = Data2D(mod=self.mod,
                                column_names=user_output,
                                data_array=scanner.UserOutputResults,
                                ltxe=self._ltxe,
                                analysis_method=self._analysis_method,
                                ax_properties=ax_properties,
                                file_name='cc_ec_scan',
                                num_of_groups=ec_len,
                                working_dir=path.split(self._working_dir)[0])
        rc_data = []
        # First output column is the scan parameter itself.
        all_outs = scanner.UserOutputResults[:, 1:]
        ec_outs = all_outs[:, :ec_len]
        cc_outs = all_outs[:, ec_len:]
        # The ec column block repeats once per flux (Python 2 list
        # multiplication; relies on len(prc_names) being a multiple of ec_len).
        ec_positions = range(ec_len) * (len(prc_names)/ec_len)
        for i, prc_name in enumerate(prc_names):
            ec_col_data = ec_outs[:, ec_positions[i]]
            cc_col_data = cc_outs[:, i]
            # ec_col_data = outs[:, i]
            # cc_col_data = outs[:, i + cc_s_pos]
            col = ec_col_data * cc_col_data
            rc_data.append(col)
        # Total response coefficients: per-flux sums of the partials.
        temp = numpy.vstack(rc_data).transpose()
        rc_data += [numpy.sum(temp[:, rc_pos[i]], axis=1) for i in
                    range(len(rc_names))]
        rc_out_arr = [scanner.UserOutputResults[:, 0]] + rc_data
        rc_out_arr = numpy.vstack(rc_out_arr).transpose()
        rc_data_obj = Data2D(mod=self.mod,
                             column_names=[self.scan_results.fixed] + prc_names + rc_names,
                             data_array=rc_out_arr,
                             ltxe=self._ltxe,
                             analysis_method=self._analysis_method,
                             ax_properties=ax_properties,
                             file_name='prc_scan',
                             num_of_groups=ec_len,
                             working_dir=path.split(self._working_dir)[0])
        #rc_data_obj._working_dir = path.split(self._working_dir)[0]
        #cc_ec_data_obj._working_dir = path.split(self._working_dir)[0]
        return rc_data_obj, cc_ec_data_obj
|
exe0cdc/PyscesToolbox
|
psctb/analyse/_ratechar.py
|
Python
|
bsd-3-clause
| 41,049
|
[
"PySCeS"
] |
fa3c55ca935d9514055a8b62cecff11946d8f79dd31c542aa15dcfccce5fc9b6
|
""" test StoragElement
"""
import os
import tempfile
import mock
import unittest
import itertools
from diraccfg import CFG
from DIRAC import S_OK
from DIRAC.Resources.Storage.StorageElement import StorageElementItem
from DIRAC.Resources.Storage.StorageBase import StorageBase
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.ConfigurationSystem.private.ConfigurationClient import ConfigurationClient
class fake_SRM2Plugin(StorageBase):
    """Fake SRM2 plugin.

    Implements only the two methods needed for a transfer; their marker
    return values let the tests identify which plugin handled the call.
    """

    def putFile(self, lfns, sourceSize=0):
        successful = {lfn: "srm:putFile" for lfn in lfns}
        return S_OK({"Successful": successful, "Failed": {}})

    def getTransportURL(self, path, protocols=False):
        successful = {url: "srm:getTransportURL" for url in path}
        return S_OK({"Successful": successful, "Failed": {}})
class fake_XROOTPlugin(StorageBase):
    """Fake XROOT plugin.

    Implements only the two methods needed for a transfer; their marker
    return values let the tests identify which plugin handled the call.
    """

    def putFile(self, lfns, sourceSize=0):
        successful = {lfn: "root:putFile" for lfn in lfns}
        return S_OK({"Successful": successful, "Failed": {}})

    def getTransportURL(self, path, protocols=False):
        successful = {url: "root:getTransportURL" for url in path}
        return S_OK({"Successful": successful, "Failed": {}})
class fake_GSIFTPPlugin(StorageBase):
    """Fake GSIFTP plugin.

    Implements only the two methods needed for a transfer; their marker
    return values let the tests identify which plugin handled the call.
    """

    def putFile(self, lfns, sourceSize=0):
        successful = {lfn: "gsiftp:putFile" for lfn in lfns}
        return S_OK({"Successful": successful, "Failed": {}})

    def getTransportURL(self, path, protocols=False):
        successful = {url: "gsiftp:getTransportURL" for url in path}
        return S_OK({"Successful": successful, "Failed": {}})
def mock_StorageFactory_generateStorageObject(storageName, pluginName, parameters, hideExceptions=False):
    """Generate fake storage object"""
    # Map each mocked plugin to its fake class and its input/output
    # protocol lists; anything else (notably "File") falls back to a
    # plain StorageBase, which already behaves as needed.
    pluginMap = {
        "SRM2": (
            fake_SRM2Plugin,
            ["file", "root", "srm"],
            ["file", "root", "dcap", "gsidcap", "rfio", "srm"],
        ),
        "XROOT": (fake_XROOTPlugin, ["file", "root"], ["root"]),
        "GSIFTP": (fake_GSIFTPPlugin, ["file", "gsiftp"], ["gsiftp"]),
    }
    if pluginName in pluginMap:
        fakeClass, inputProtocols, outputProtocols = pluginMap[pluginName]
        storageObj = fakeClass(storageName, parameters)
        storageObj.protocolParameters["InputProtocols"] = inputProtocols
        storageObj.protocolParameters["OutputProtocols"] = outputProtocols
    else:
        storageObj = StorageBase(storageName, parameters)
    storageObj.pluginName = pluginName
    return S_OK(storageObj)
class TestBase(unittest.TestCase):
"""Base test class. Defines all the method to test"""
    @mock.patch(
        "DIRAC.Resources.Storage.StorageFactory.StorageFactory._StorageFactory__generateStorageObject",
        side_effect=mock_StorageFactory_generateStorageObject,
    )
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
        return_value=S_OK(True),
    )  # Pretend it's local
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
    )  # Don't send accounting
    def setUp(self, _mk_generateStorage, _mk_isLocalSE, _mk_addAccountingOperation):
        """Write a temporary DIRAC cfg describing eight storage elements
        and instantiate the StorageElementItem objects used by the tests.

        Plugin layout: A=File, B=SRM2, C=XROOT, D=SRM2+XROOT,
        E=D with local WriteProtocols, X=GSIFTP+XROOT (write gsiftp,
        read root), Y=GSIFTP+SRM2 (read-only protocol list),
        Z=ROOT+SRM2 with both read and write lists set locally.
        """
        # Creating test configuration file
        self.testCfgFileName = os.path.join(tempfile.gettempdir(), "test_StorageElement.cfg")
        cfgContent = """
        DIRAC
        {
          Setup=TestSetup
        }
        Resources{
          StorageElements{
            StorageA
            {
              BackendType = local
              ReadAccess = Active
              WriteAccess = Active
              AccessProtocol.0
              {
                Host =
                PluginName = File
                Protocol = file
                Path =
              }
            }
            StorageB
            {
              BackendType = local
              ReadAccess = Active
              WriteAccess = Active
              AccessProtocol.0
              {
                Host =
                PluginName = SRM2
                Protocol = srm
                Path =
              }
            }
            StorageC
            {
              BackendType = local
              ReadAccess = Active
              WriteAccess = Active
              AccessProtocol.0
              {
                Host =
                PluginName = XROOT
                Protocol = root
                Path =
              }
            }
            StorageD
            {
              BackendType = local
              ReadAccess = Active
              WriteAccess = Active
              AccessProtocol.0
              {
                Host =
                PluginName = SRM2
                Protocol = srm
                Path =
              }
              AccessProtocol.1
              {
                Host =
                PluginName = XROOT
                Protocol = root
                Path =
              }
            }
            StorageE
            {
              BackendType = local
              ReadAccess = Active
              WriteAccess = Active
              WriteProtocols = root
              WriteProtocols += srm
              AccessProtocol.0
              {
                Host =
                PluginName = SRM2
                Protocol = srm
                Path =
              }
              AccessProtocol.1
              {
                Host =
                PluginName = XROOT
                Protocol = root
                Path =
              }
            }
            StorageX
            {
              BackendType = local
              ReadAccess = Active
              WriteAccess = Active
              WriteProtocols = gsiftp
              AccessProtocols = root
              AccessProtocol.0
              {
                Host =
                PluginName = GSIFTP
                Protocol = gsiftp
                Path =
              }
              AccessProtocol.1
              {
                Host =
                PluginName = XROOT
                Protocol = root
                Path =
              }
            }
            StorageY
            {
              BackendType = local
              ReadAccess = Active
              WriteAccess = Active
              AccessProtocols = gsiftp
              AccessProtocols += srm
              AccessProtocol.0
              {
                Host =
                PluginName = GSIFTP
                Protocol = gsiftp
                Path =
              }
              AccessProtocol.1
              {
                Host =
                PluginName = SRM2
                Protocol = srm
                Path =
              }
            }
            StorageZ
            {
              BackendType = local
              ReadAccess = Active
              WriteAccess = Active
              AccessProtocols = root
              AccessProtocols += srm
              WriteProtocols = root
              WriteProtocols += srm
              AccessProtocol.0
              {
                Host =
                PluginName = ROOT
                Protocol = root
                Path =
              }
              AccessProtocol.1
              {
                Host =
                PluginName = SRM2
                Protocol = srm
                Path =
              }
            }
          }
        }
        Operations{
          Defaults
          {
            DataManagement{
              AccessProtocols = fakeProto
              AccessProtocols += root
              WriteProtocols = srm
            }
          }
        }
        """
        with open(self.testCfgFileName, "w") as f:
            f.write(cfgContent)
        # SUPER UGLY: one must recreate the CFG objects of gConfigurationData
        # not to conflict with other tests that might be using a local dirac.cfg
        gConfigurationData.localCFG = CFG()
        gConfigurationData.remoteCFG = CFG()
        gConfigurationData.mergedCFG = CFG()
        gConfigurationData.generateNewVersion()
        gConfig = ConfigurationClient(
            fileToLoadList=[self.testCfgFileName]
        )  # we replace the configuration by our own one.
        self.seA = StorageElementItem("StorageA")
        self.seA.vo = "lhcb"
        self.seB = StorageElementItem("StorageB")
        self.seB.vo = "lhcb"
        self.seC = StorageElementItem("StorageC")
        self.seC.vo = "lhcb"
        self.seD = StorageElementItem("StorageD")
        self.seD.vo = "lhcb"
        self.seE = StorageElementItem("StorageE")
        self.seE.vo = "lhcb"
        self.seX = StorageElementItem("StorageX")
        self.seX.vo = "lhcb"
        self.seY = StorageElementItem("StorageY")
        self.seY.vo = "lhcb"
        self.seZ = StorageElementItem("StorageZ")
        self.seZ.vo = "lhcb"
def tearDown(self):
try:
os.remove(self.testCfgFileName)
except OSError:
pass
# SUPER UGLY: one must recreate the CFG objects of gConfigurationData
# not to conflict with other tests that might be using a local dirac.cfg
gConfigurationData.localCFG = CFG()
gConfigurationData.remoteCFG = CFG()
gConfigurationData.mergedCFG = CFG()
gConfigurationData.generateNewVersion()
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
        return_value=S_OK(True),
    )  # Pretend it's local
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
    )  # Don't send accounting
    def test_01_negociateProtocolWithOtherSE(self, mk_isLocalSE, mk_addAccounting):
        """Testing negotiation algorithm

        The calling SE is the destination, the argument the source
        (A=File, B=SRM2, C=XROOT plugins from the mocked factory).
        """
        # Find common protocol between SRM2 and File
        res = self.seA.negociateProtocolWithOtherSE(self.seB)
        self.assertTrue(res["OK"], res)
        self.assertEqual(res["Value"], ["file"])
        # Find common protocol between File and SRM2
        res = self.seB.negociateProtocolWithOtherSE(self.seA)
        self.assertTrue(res["OK"], res)
        self.assertEqual(res["Value"], ["file"])
        # Find common protocol between XROOT and File
        # Nothing goes from xroot to file
        res = self.seA.negociateProtocolWithOtherSE(self.seC)
        self.assertTrue(res["OK"], res)
        self.assertEqual(res["Value"], [])
        # Find common protocol between File and XROOT
        res = self.seC.negociateProtocolWithOtherSE(self.seA)
        self.assertTrue(res["OK"], res)
        self.assertEqual(res["Value"], ["file"])
        # Find common protocol between File and File
        res = self.seA.negociateProtocolWithOtherSE(self.seA)
        self.assertTrue(res["OK"], res)
        self.assertEqual(res["Value"], ["file"])
        # Find common protocol between SRM and SRM
        res = self.seB.negociateProtocolWithOtherSE(self.seB)
        self.assertTrue(res["OK"], res)
        self.assertEqual(sorted(res["Value"]), sorted(["file", "root", "srm"]))
        # Find common protocol between SRM and XROOT
        res = self.seC.negociateProtocolWithOtherSE(self.seB)
        self.assertTrue(res["OK"], res)
        self.assertEqual(sorted(res["Value"]), sorted(["root", "file"]))
        # Find common protocol between XROOT and SRM
        res = self.seC.negociateProtocolWithOtherSE(self.seB)
        self.assertTrue(res["OK"], res)
        self.assertEqual(sorted(res["Value"]), sorted(["root", "file"]))
        # Testing restrictions: limiting the candidate protocol list
        res = self.seC.negociateProtocolWithOtherSE(self.seB, protocols=["file"])
        self.assertTrue(res["OK"], res)
        self.assertEqual(sorted(res["Value"]), ["file"])
        res = self.seC.negociateProtocolWithOtherSE(self.seB, protocols=["nonexisting"])
        self.assertTrue(res["OK"], res)
        self.assertEqual(res["Value"], [])
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
        return_value=S_OK(True),
    )  # Pretend it's local
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
    )  # Don't send accounting
    def test_02_followOrder(self, _mk_isLocalSE, _mk_addAccounting):
        """Testing If the order of preferred protocols is respected"""
        # For every ordering of the candidates, the negotiated result must
        # be that same ordering with the unsupported protocol dropped.
        for permutation in itertools.permutations(["srm", "file", "root", "nonexisting"]):
            permuList = list(permutation)
            # Don't get tricked ! remove cannot be put
            # after the conversion, because it is inplace modification
            permuList.remove("nonexisting")
            res = self.seD.negociateProtocolWithOtherSE(self.seD, protocols=permutation)
            self.assertTrue(res["OK"], res)
            self.assertEqual(res["Value"], permuList)
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
        return_value=S_OK(True),
    )  # Pretend it's local
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
    )  # Don't send accounting
    def test_03_multiProtocolThirdParty(self, _mk_isLocalSE, _mk_addAccounting):
        """
        Test case for storages with several protocols
        Here comes the fun :-)
        Suppose we have endpoints that we can read in root, but cannot write
        If we have root in the accessProtocols and thirdPartyProtocols lists
        but not in the writeProtocols, we should get a root url to read,
        and write with SRM
        We reproduce here the behavior of DataManager.replicate
        """
        thirdPartyProtocols = ["root", "srm"]
        lfn = "/lhcb/fake/lfn"
        res = self.seD.negociateProtocolWithOtherSE(self.seD, protocols=thirdPartyProtocols)
        self.assertTrue(res["OK"], res)
        self.assertEqual(res["Value"], thirdPartyProtocols)
        # Only the XROOT plugin here implements the getTransportURL
        # that returns what we want, so we know that
        # if the return is successful, it is because of the XROOT
        res = self.seD.getURL(lfn, protocol=res["Value"])
        self.assertTrue(res["OK"], res)
        self.assertTrue(lfn in res["Value"]["Successful"], res)
        srcUrl = res["Value"]["Successful"][lfn]
        self.assertEqual(srcUrl, "root:getTransportURL")
        # Only the SRM2 plugin here implements the putFile method
        # so if we get a success here, it means that we used the SRM plugin
        res = self.seD.replicateFile({lfn: srcUrl}, sourceSize=123, inputProtocol="root")
        self.assertTrue(res["OK"], res)
        self.assertTrue(lfn in res["Value"]["Successful"], res)
        self.assertEqual(res["Value"]["Successful"][lfn], "srm:putFile")
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
        return_value=S_OK(True),
    )  # Pretend it's local
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
    )  # Don't send accounting
    def test_04_thirdPartyLocalWrite(self, _mk_isLocalSE, _mk_addAccounting):
        """
        Test case for storages with several protocols
        Here, we locally define the write protocol to be root and srm
        So we should be able to do everything with XROOT plugin
        """
        thirdPartyProtocols = ["root", "srm"]
        lfn = "/lhcb/fake/lfn"
        res = self.seE.negociateProtocolWithOtherSE(self.seE, protocols=thirdPartyProtocols)
        self.assertTrue(res["OK"], res)
        self.assertEqual(res["Value"], thirdPartyProtocols)
        # The root marker proves the XROOT plugin served the read...
        res = self.seE.getURL(lfn, protocol=res["Value"])
        self.assertTrue(res["OK"], res)
        self.assertTrue(lfn in res["Value"]["Successful"], res)
        srcUrl = res["Value"]["Successful"][lfn]
        self.assertEqual(srcUrl, "root:getTransportURL")
        # ...and, because StorageE allows writing with root, the write too.
        res = self.seE.replicateFile({lfn: srcUrl}, sourceSize=123, inputProtocol="root")
        self.assertTrue(res["OK"], res)
        self.assertTrue(lfn in res["Value"]["Successful"], res)
        self.assertEqual(res["Value"]["Successful"][lfn], "root:putFile")
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
        return_value=S_OK(True),
    )  # Pretend it's local
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
    )  # Don't send accounting
    def test_05_thirdPartyMix(self, _mk_isLocalSE, _mk_addAccounting):
        """
        Test case for storages with several protocols
        Here, we locally define the write protocol for the destination, so it should
        all go directly through the XROOT plugin
        """
        thirdPartyProtocols = ["root", "srm"]
        lfn = "/lhcb/fake/lfn"
        # Destination E (writes root) negotiating against source D.
        res = self.seE.negociateProtocolWithOtherSE(self.seD, protocols=thirdPartyProtocols)
        self.assertTrue(res["OK"], res)
        self.assertEqual(res["Value"], thirdPartyProtocols)
        res = self.seD.getURL(lfn, protocol=res["Value"])
        self.assertTrue(res["OK"], res)
        self.assertTrue(lfn in res["Value"]["Successful"], res)
        srcUrl = res["Value"]["Successful"][lfn]
        self.assertEqual(srcUrl, "root:getTransportURL")
        # The root markers prove both ends used the XROOT plugin.
        res = self.seE.replicateFile({lfn: srcUrl}, sourceSize=123, inputProtocol="root")
        self.assertTrue(res["OK"], res)
        self.assertTrue(lfn in res["Value"]["Successful"], res)
        self.assertEqual(res["Value"]["Successful"][lfn], "root:putFile")
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
        return_value=S_OK(True),
    )  # Pretend it's local
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
    )  # Don't send accounting
    def test_06_thirdPartyMixOpposite(self, _mk_isLocalSE, _mk_addAccounting):
        """
        Test case for storages with several protocols
        Here, we locally define the write protocol for the source, so it should
        get the source directly using XROOT, and perform the put using SRM
        """
        thirdPartyProtocols = ["root", "srm"]
        lfn = "/lhcb/fake/lfn"
        # Destination D (writes srm only) negotiating against source E.
        res = self.seD.negociateProtocolWithOtherSE(self.seE, protocols=thirdPartyProtocols)
        self.assertTrue(res["OK"], res)
        self.assertEqual(res["Value"], thirdPartyProtocols)
        res = self.seE.getURL(lfn, protocol=res["Value"])
        self.assertTrue(res["OK"], res)
        self.assertTrue(lfn in res["Value"]["Successful"], res)
        srcUrl = res["Value"]["Successful"][lfn]
        self.assertEqual(srcUrl, "root:getTransportURL")
        # Read via XROOT, but the write falls back to the SRM plugin.
        res = self.seD.replicateFile({lfn: srcUrl}, sourceSize=123, inputProtocol="root")
        self.assertTrue(res["OK"], res)
        self.assertTrue(lfn in res["Value"]["Successful"], res)
        self.assertEqual(res["Value"]["Successful"][lfn], "srm:putFile")
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
        return_value=S_OK(True),
    )  # Pretend it's local
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
    )  # Don't send accounting
    def test_07_multiProtocolSrmOnly(self, _mk_isLocalSE, _mk_addAccounting):
        """
        Test case for storages with several protocols
        Here comes the fun :-)
        Suppose we have endpoints that we can read in root, but cannot write
        If we have root in the accessProtocols and thirdPartyProtocols lists
        but not in the writeProtocols, we should get a root url to read,
        and write with SRM
        We reproduce here the behavior of DataManager.replicate
        """
        # Restricting the candidates to srm only: both read and write
        # markers must come from the SRM2 plugin.
        thirdPartyProtocols = ["srm"]
        lfn = "/lhcb/fake/lfn"
        res = self.seD.negociateProtocolWithOtherSE(self.seD, protocols=thirdPartyProtocols)
        self.assertTrue(res["OK"], res)
        self.assertEqual(res["Value"], thirdPartyProtocols)
        res = self.seD.getURL(lfn, protocol=res["Value"])
        self.assertTrue(res["OK"], res)
        self.assertTrue(lfn in res["Value"]["Successful"], res)
        srcUrl = res["Value"]["Successful"][lfn]
        self.assertEqual(srcUrl, "srm:getTransportURL")
        res = self.seD.replicateFile({lfn: srcUrl}, sourceSize=123, inputProtocol="srm")
        self.assertTrue(res["OK"], res)
        self.assertTrue(lfn in res["Value"]["Successful"], res)
        self.assertEqual(res["Value"]["Successful"][lfn], "srm:putFile")
@mock.patch(
    "DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
    return_value=S_OK(True),
)  # Pretend it's local
@mock.patch(
    "DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
)  # Don't send accounting
def test_08_multiProtocolFTS(self, _mk_isLocalSE, _mk_addAccounting):
    """FTS-style third-party replication between multi-protocol storages.

    Storages X, Y and Z reproduce a situation we could have in LHCb:

      * X is RAL Echo: you read with root, write with gsiftp
      * Y is Gridka: you have gsiftp available for read only
      * Z is CERN EOS: you can do everything with EOS

    For every ordered pair of SEs we check that
    generateTransferURLsBetweenSEs picks the expected (read, write)
    protocol pair and builds the matching source/destination URLs.
    This makes it necessary to add gsiftp as third party option to
    write to ECHO.
    """
    # Scenario protocols, kept from the original description (the call
    # below uses the full ranked list).
    thirdPartyProtocols = ["root", "gsiftp", "srm"]
    rankedProtocols = ["root", "gsiftp", "gsidcap", "dcap", "file", "srm", "rfio"]
    lfn = "/lhcb/fake/lfn"

    # (destination SE, source SE, expected read proto, expected write proto)
    expectations = [
        (self.seY, self.seX, "root", "srm"),       # RAL -> GRIDKA
        (self.seZ, self.seX, "root", "root"),      # RAL -> CERN
        (self.seX, self.seY, "gsiftp", "gsiftp"),  # GRIDKA -> RAL
        (self.seZ, self.seY, "srm", "root"),       # GRIDKA -> CERN
        (self.seX, self.seZ, "srm", "gsiftp"),     # CERN -> RAL
        (self.seY, self.seZ, "root", "srm"),       # CERN -> GRIDKA
    ]

    for dstSE, srcSE, readProto, writeProto in expectations:
        res = dstSE.generateTransferURLsBetweenSEs(lfn, srcSE, protocols=rankedProtocols)
        self.assertTrue(res["OK"], res)
        urlPair = res["Value"]["Successful"].get(lfn)
        self.assertTupleEqual(urlPair, ("%s:%s" % (readProto, lfn), "%s:%s" % (writeProto, lfn)))
        protoPair = res["Value"]["Protocols"]
        self.assertTupleEqual(protoPair, (readProto, writeProto))
class TestSameSE(unittest.TestCase):
    """Tests to compare two SEs together.

    setUp writes a throwaway CS configuration describing five storage
    elements: some pairs share the same physical endpoint (and should
    compare equal via isSameSE) while others differ in host or base
    path (and should not).
    """

    @mock.patch(
        "DIRAC.Resources.Storage.StorageFactory.StorageFactory._StorageFactory__generateStorageObject",
        side_effect=mock_StorageFactory_generateStorageObject,
    )
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
        return_value=S_OK(True),
    )  # Pretend it's local
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
    )  # Don't send accounting
    def setUp(self, _mk_generateStorage, _mk_isLocalSE, _mk_addAccountingOperation):
        # Creating test configuration file
        self.testCfgFileName = os.path.join(tempfile.gettempdir(), "test_StorageElement.cfg")
        cfgContent = """
        DIRAC
        {
          Setup=TestSetup
        }
        Resources{
          StorageElements{
            DiskStorageA
            {
              BackendType = local
              ReadAccess = Active
              WriteAccess = Active
              GFAL2_SRM2
              {
                Host = srm-diskandtape.cern.ch
                SpaceToken = Disk
                Protocol = srm
                Path = /base/pathDisk
              }
            }
            # Same end point as DiskStorageA, but with a different space token
            # So they should be considered the same
            TapeStorageA
            {
              BackendType = local
              ReadAccess = Active
              WriteAccess = Active
              GFAL2_SRM2
              {
                Host = srm-diskandtape.cern.ch
                Protocol = srm
                SpaceToken = Tape
                Path = /base/pathDisk
              }
            }
            # Normally does not happen in practice, but this is the same as DiskStorageA with more plugins
            DiskStorageAWithMoreProtocol
            {
              BackendType = local
              ReadAccess = Active
              WriteAccess = Active
              GFAL2_SRM2
              {
                Host = srm-diskandtape.cern.ch
                SpaceToken = Disk
                Protocol = srm
                Path = /base/pathDisk
              }
              GFAL2_GSIFTP
              {
                Host = gsiftp-diskandtape.cern.ch
                SpaceToken = Disk
                Protocol = gsiftp
                Path = /base/pathDisk
              }
            }
            # A different storage
            StorageB
            {
              BackendType = local
              ReadAccess = Active
              WriteAccess = Active
              GFAL2_GSIFTP
              {
                Host = otherstorage.cern.ch
                SpaceToken = Disk
                Protocol = gsiftp
                Path = /base/pathDisk
              }
            }
            # The same endpoint as StorageB but with differetn base path, so not the same
            StorageBWithOtherBasePath
            {
              BackendType = local
              ReadAccess = Active
              WriteAccess = Active
              GFAL2_GSIFTP
              {
                Host = otherstorage.cern.ch
                SpaceToken = Disk
                Protocol = gsiftp
                Path = /base/otherPath
              }
            }
          }
        }
        Operations{
          Defaults
          {
            DataManagement{
              AccessProtocols = fakeProto
              AccessProtocols += root
              WriteProtocols = srm
            }
          }
        }
        """
        with open(self.testCfgFileName, "w") as f:
            f.write(cfgContent)

        # SUPER UGLY: one must recreate the CFG objects of gConfigurationData
        # not to conflict with other tests that might be using a local dirac.cfg
        gConfigurationData.localCFG = CFG()
        gConfigurationData.remoteCFG = CFG()
        gConfigurationData.mergedCFG = CFG()
        gConfigurationData.generateNewVersion()

        # NOTE(review): the local name is never used afterwards; instantiating
        # ConfigurationClient presumably loads the file as a side effect —
        # confirm before cleaning this up.
        gConfig = ConfigurationClient(
            fileToLoadList=[self.testCfgFileName]
        )  # we replace the configuration by our own one.

        # Instantiate one StorageElementItem per SE defined in the CFG above.
        self.diskStorageA = StorageElementItem("DiskStorageA")
        self.diskStorageA.vo = "lhcb"
        self.tapeStorageA = StorageElementItem("TapeStorageA")
        self.tapeStorageA.vo = "lhcb"
        self.diskStorageAWithMoreProtocol = StorageElementItem("DiskStorageAWithMoreProtocol")
        self.diskStorageAWithMoreProtocol.vo = "lhcb"
        self.storageB = StorageElementItem("StorageB")
        self.storageB.vo = "lhcb"
        self.storageBWithOtherBasePath = StorageElementItem("StorageBWithOtherBasePath")
        self.storageBWithOtherBasePath.vo = "lhcb"

    def tearDown(self):
        # Remove the temporary CFG file; it may already be gone.
        try:
            os.remove(self.testCfgFileName)
        except OSError:
            pass
        # SUPER UGLY: one must recreate the CFG objects of gConfigurationData
        # not to conflict with other tests that might be using a local dirac.cfg
        gConfigurationData.localCFG = CFG()
        gConfigurationData.remoteCFG = CFG()
        gConfigurationData.mergedCFG = CFG()
        gConfigurationData.generateNewVersion()

    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
        return_value=S_OK(True),
    )  # Pretend it's local
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
    )  # Don't send accounting
    def test_01_compareSEWithItself(self, _mk_isLocalSE, _mk_addAccounting):
        """Every SE must be considered the same as itself."""
        for se in (
            self.diskStorageA,
            self.tapeStorageA,
            self.diskStorageAWithMoreProtocol,
            self.storageB,
            self.storageBWithOtherBasePath,
        ):
            self.assertTrue(se.isSameSE(se))

    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
        return_value=S_OK(True),
    )  # Pretend it's local
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
    )  # Don't send accounting
    def test_02_compareSEThatShouldBeTheSame(self, _mk_isLocalSE, _mk_addAccounting):
        """SEs sharing the same physical endpoint should be considered the same."""
        matchingCouples = (
            (self.diskStorageA, self.diskStorageAWithMoreProtocol),
            (self.diskStorageA, self.tapeStorageA),
        )
        for se1, se2 in matchingCouples:
            self.assertTrue(se1.isSameSE(se2))

    # NOTE(review): this method reuses the "test_02_" prefix of the previous
    # test; renaming would change the test identifier, so it is left as is.
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
        return_value=S_OK(True),
    )  # Pretend it's local
    @mock.patch(
        "DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
    )  # Don't send accounting
    def test_02_compareSEThatShouldBeDifferent(self, _mk_isLocalSE, _mk_addAccounting):
        """SEs on different hosts or base paths should NOT be considered the same."""
        notMatchingCouples = (
            (self.diskStorageA, self.storageB),
            (self.tapeStorageA, self.storageB),
            (self.storageB, self.storageBWithOtherBasePath),
        )
        for se1, se2 in notMatchingCouples:
            self.assertFalse(se1.isSameSE(se2))
if __name__ == "__main__":
    from DIRAC import gLogger

    gLogger.setLevel("DEBUG")

    # Run every test case defined in this module. Previously only TestBase
    # was loaded into the suite, so TestSameSE silently never executed.
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestBase)
    suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestSameSE))
    unittest.TextTestRunner(verbosity=2).run(suite)
|
DIRACGrid/DIRAC
|
src/DIRAC/Resources/Storage/test/Test_StorageElement.py
|
Python
|
gpl-3.0
| 32,184
|
[
"DIRAC"
] |
a954429f68b61684610aab253310565cef1d15d0bcf1d4607949892f2d462a98
|
# -*- coding: utf-8 -*-
#
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006-2015, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Parser for DALTON output files"""
from __future__ import print_function
import numpy
from . import logfileparser
from . import utils
class DALTON(logfileparser.Logfile):
"""A DALTON log file."""
def __init__(self, *args, **kwargs):
    """Initialize the DALTON logfile parser.

    Delegates to the generic Logfile machinery, tagging this parser
    with its program name.
    """
    super(DALTON, self).__init__(*args, logname="DALTON", **kwargs)
def __str__(self):
    """Return a human-readable description of this logfile."""
    return "DALTON log file {0}".format(self.filename)
def __repr__(self):
    """Return an unambiguous, constructor-like representation."""
    return 'DALTON("{0}")'.format(self.filename)
def normalisesym(self, label):
    """Normalise the symmetry labels used by DALTON.

    DALTON already prints symmetry labels in the convention cclib
    expects, so this is the identity transformation.
    """
    return label
def before_parsing(self):
    """Set up parser state before the main parsing loop starts."""

    # Used to decide whether to wipe the atomcoords clean.
    self.firststdorient = True

    # Use to track which section/program output we are parsing,
    # since some programs print out the same headers, which we
    # would like to use as triggers.
    self.section = None

    # If there is no symmetry, assume this.
    # (Replaced when a "Symmetry Orbitals" section is parsed later.)
    self.symlabels = ['Ag']
def parse_geometry(self, lines):
    """Parse DALTON geometry lines into a list of XYZ coordinates.

    Each line holds an atom type, an optional symmetry label (such as
    "_1") and the Cartesian coordinates, which DALTON prints in atomic
    units; the coordinates are returned converted to Angstroms.
    """
    geometry = []
    for raw in lines:
        fields = raw.split()
        # With symmetry an extra label such as "_1" follows the atom
        # type, shifting the coordinate columns right by one.
        first = 2 if fields[1][0] == "_" else 1
        xyz = [utils.convertor(float(value), 'bohr', 'Angstrom') for value in fields[first:]]
        geometry.append(xyz)
    return geometry
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
# This section at the start of geometry optimization jobs gives us information
# about optimization targets (geotargets) and possibly other things as well.
# Notice how the number of criteria required to converge is set to 2 here,
# but this parameter can (probably) be tweaked in the input.
#
# Chosen parameters for *OPTIMI :
# -------------------------------
#
# Default 1st order method will be used: BFGS update.
# Optimization will be performed in redundant internal coordinates (by default).
# Model Hessian will be used as initial Hessian.
# The model Hessian parameters of Roland Lindh will be used.
#
#
# Trust region method will be used to control step (default).
#
# Convergence threshold for gradient set to : 1.00D-04
# Convergence threshold for energy set to : 1.00D-06
# Convergence threshold for step set to : 1.00D-04
# Number of convergence criteria set to : 2
#
if line.strip()[:25] == "Convergence threshold for":
if not hasattr(self, 'geotargets'):
self.geotargets = []
self.geotargets_names = []
target = self.float(line.split()[-1])
name = line.strip()[25:].split()[0]
self.geotargets.append(target)
self.geotargets_names.append(name)
# This is probably the first place where atomic symmetry labels are printed,
# somewhere afer the SYMGRP point group information section. We need to know
# which atom is in which symmetry, since this influences how some things are
# print later on. We can also get some generic attributes along the way.
#
# Isotopic Masses
# ---------------
#
# C _1 12.000000
# C _2 12.000000
# C _1 12.000000
# C _2 12.000000
# ...
#
# Note that when there is no symmetry there are only two columns here.
#
# It is also a good idea to keep in mind that DALTON, with symmetry on, operates
# in a specific point group, so symmetry atoms have no internal representation.
# Therefore only atoms marked as "_1" or "#1" in other places are actually
# represented in the model. The symmetry atoms (higher symmetry indices) are
# generated on the fly when writing the output. We will save the symmetry indices
# here for later use.
#
# Additional note: the symmetry labels are printed only for atoms that have
# symmetry images... so assume "_1" if a label is missing. For example, there will
# be no label for atoms on an axes, such as the oxygen in water in C2v:
#
# O 15.994915
# H _1 1.007825
# H _2 1.007825
#
if line.strip() == "Isotopic Masses":
self.skip_lines(inputfile, ['d', 'b'])
# Since some symmetry labels may be missing, read in all lines first.
lines = []
line = next(inputfile)
while line.strip():
lines.append(line)
line = next(inputfile)
# Split lines into columsn and dd any missing symmetry labels, if needed.
lines = [l.split() for l in lines]
if any([len(l) == 3 for l in lines]):
for il, l in enumerate(lines):
if len(l) == 2:
lines[il] = [l[0], "_1", l[1]]
atomnos = []
symmetry_atoms = []
atommasses = []
for cols in lines:
atomnos.append(self.table.number[cols[0]])
if len(cols) == 3:
symmetry_atoms.append(int(cols[1][1]))
atommasses.append(float(cols[2]))
else:
atommasses.append(float(cols[1]))
self.set_attribute('atomnos', atomnos)
self.set_attribute('atommasses', atommasses)
self.set_attribute('natom', len(atomnos))
self.set_attribute('natom', len(atommasses))
# Save this for later if there were any labels.
self.symmetry_atoms = symmetry_atoms or None
# This section is close to the beginning of the file, and can be used
# to parse natom, nbasis and atomnos. We also construct atombasis here,
# although that is symmetry-dependent (see inline comments). Note that
# DALTON operates on the idea of atom type, which are not necessarily
# unique element-wise.
#
# Atoms and basis sets
# --------------------
#
# Number of atom types : 6
# Total number of atoms: 20
#
# Basis set used is "STO-3G" from the basis set library.
#
# label atoms charge prim cont basis
# ----------------------------------------------------------------------
# C 6 6.0000 15 5 [6s3p|2s1p]
# H 4 1.0000 3 1 [3s|1s]
# C 2 6.0000 15 5 [6s3p|2s1p]
# H 2 1.0000 3 1 [3s|1s]
# C 2 6.0000 15 5 [6s3p|2s1p]
# H 4 1.0000 3 1 [3s|1s]
# ----------------------------------------------------------------------
# total: 20 70.0000 180 60
# ----------------------------------------------------------------------
#
# Threshold for neglecting AO integrals: 1.00D-12
#
if line.strip() == "Atoms and basis sets":
self.skip_lines(inputfile, ['d', 'b'])
line = next(inputfile)
assert "Number of atom types" in line
self.ntypes = int(line.split()[-1])
line = next(inputfile)
assert "Total number of atoms:" in line
self.set_attribute("natom", int(line.split()[-1]))
self.skip_lines(inputfile, ['b', 'basisname', 'b'])
line = next(inputfile)
cols = line.split()
# Detecting which columns things are in will be somewhat more robust
# to formatting changes in the future.
iatoms = cols.index('atoms')
icharge = cols.index('charge')
icont = cols.index('cont')
self.skip_line(inputfile, 'dashes')
atomnos = []
atombasis = []
nbasis = 0
for itype in range(self.ntypes):
line = next(inputfile)
cols = line.split()
atoms = int(cols[iatoms])
charge = float(cols[icharge])
assert int(charge) == charge
charge = int(charge)
cont = int(cols[icont])
for at in range(atoms):
atomnos.append(charge)
# If symmetry atoms are present, these will have basis functions
# printed immediately after the one unique atom, so for all
# practical purposes cclib can assume the ordering in atombasis
# follows this out-of order scheme to match the output.
if self.symmetry_atoms:
# So we extend atombasis only for the unique atoms (with a
# symmetry index of 1), interleaving the basis functions
# for this atoms with basis functions for all symmetry atoms.
if self.symmetry_atoms[at] == 1:
nsyms = 1
while (at + nsyms < self.natom) and self.symmetry_atoms[at + nsyms] == nsyms + 1:
nsyms += 1
for isym in range(nsyms):
istart = nbasis + isym
iend = nbasis + cont*nsyms + isym
atombasis.append(list(range(istart, iend, nsyms)))
nbasis += cont*nsyms
else:
atombasis.append(list(range(nbasis, nbasis + cont)))
nbasis += cont
self.set_attribute('atomnos', atomnos)
self.set_attribute('atombasis', atombasis)
self.set_attribute('nbasis', nbasis)
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
self.set_attribute('natom', int(line.split()[iatoms]))
self.set_attribute('nbasis', int(line.split()[icont]))
self.skip_line(inputfile, 'dashes')
# The Gaussian exponents and contraction coefficients are printed for each primitive
# and then the contraction information is printed separately (see below) Both segmented
# and general contractions are used, but we can parse them the same way since zeros are
# inserted for primitives that are not used. However, no atom index is printed here
# so we don't really know when a new atom is started without using information
# from other section (we should already have atombasis parsed at this point).
#
# Orbital exponents and contraction coefficients
# ----------------------------------------------
#
#
# C #1 1s 1 71.616837 0.1543 0.0000
# seg. cont. 2 13.045096 0.5353 0.0000
# 3 3.530512 0.4446 0.0000
# 4 2.941249 0.0000 -0.1000
# ...
#
# Here is a corresponding fragment for general contractions:
#
# C 1s 1 33980.000000 0.0001 -0.0000 0.0000 0.0000 0.0000
# 0.0000 0.0000 0.0000 0.0000
# gen. cont. 2 5089.000000 0.0007 -0.0002 0.0000 0.0000 0.0000
# 0.0000 0.0000 0.0000 0.0000
# 3 1157.000000 0.0037 -0.0008 0.0000 0.0000 0.0000
# 0.0000 0.0000 0.0000 0.0000
# 4 326.600000 0.0154 -0.0033 0.0000 0.0000 0.0000
# ...
#
if line.strip() == "Orbital exponents and contraction coefficients":
self.skip_lines(inputfile, ['d', 'b', 'b'])
# Here we simply want to save the numbers defining each primitive for later use,
# where the first number is the exponent, and the rest are coefficients which
# should be zero if the primitive is not used in a contraction. This list is
# symmetry agnostic, although primitives/contractions are not generally.
self.primitives = []
prims = []
line = next(inputfile)
while line.strip():
# Each contraction/section is separated by a blank line, and at the very
# end there is an extra blank line.
while line.strip():
# For generalized contraction it is typical to see the coefficients wrapped
# to new lines, so we must collect them until we are sure a primitive starts.
if line[:30].strip():
if prims:
self.primitives.append(prims)
prims = []
prims += [float(x) for x in line[20:].split()]
line = next(inputfile)
line = next(inputfile)
# At the end we have the final primitive to save.
self.primitives.append(prims)
# This is the corresponding section to the primitive definitions parsed above, so we
# assume those numbers are available in the variable 'primitives'. Here we read in the
# indicies of primitives, which we use to construct gbasis.
#
# Contracted Orbitals
# -------------------
#
# 1 C 1s 1 2 3 4 5 6 7 8 9 10 11 12
# 2 C 1s 1 2 3 4 5 6 7 8 9 10 11 12
# 3 C 1s 10
# 4 C 1s 11
# ...
#
# Here is an fragment with symmetry labels:
#
# ...
# 1 C #1 1s 1 2 3
# 2 C #2 1s 7 8 9
# 3 C #1 1s 4 5 6
# ...
#
if line.strip() == "Contracted Orbitals":
self.skip_lines(inputfile, ['d', 'b'])
# This is the reverse of atombasis, so that we can easily map from a basis functions
# to the corresponding atom for use in the loop below.
basisatoms = [None for i in range(self.nbasis)]
for iatom in range(self.natom):
for ibasis in self.atombasis[iatom]:
basisatoms[ibasis] = iatom
# Since contractions are not generally given in order (when there is symmetry),
# start with an empty list for gbasis.
gbasis = [[] for i in range(self.natom)]
# This will hold the number of contractions already printed for each orbital,
# counting symmetry orbitals separately.
orbitalcount = {}
for ibasis in range(self.nbasis):
line = next(inputfile)
cols = line.split()
# The first columns is always the basis function index, which we can assert.
assert int(cols[0]) == ibasis + 1
# The number of columns is differnet when symmetry is used. If there are further
# complications, it may be necessary to use exact slicing, since the formatting
# of this section seems to be fixed (although columns can be missing). Notice how
# We subtract one from the primitive indices here already to match cclib's
# way of counting from zero in atombasis.
if '#' in line:
sym = cols[2]
orbital = cols[3]
prims = [int(i) - 1 for i in cols[4:]]
else:
sym = None
orbital = cols[2]
prims = [int(i) - 1 for i in cols[3:]]
shell = orbital[0]
subshell = orbital[1].upper()
iatom = basisatoms[ibasis]
# We want to count the number of contractiong already parsed for each orbital,
# but need to make sure to differentiate between atoms and symmetry atoms.
orblabel = str(iatom) + '.' + orbital + (sym or "")
orbitalcount[orblabel] = orbitalcount.get(orblabel, 0) + 1
# Here construct the actual primitives for gbasis, which should be a list
# of 2-tuples containing an exponent an coefficient. Note how we are indexing
# self.primitives from zero although the printed numbering starts from one.
primitives = []
for ip in prims:
p = self.primitives[ip]
exponent = p[0]
coefficient = p[orbitalcount[orblabel]]
primitives.append((exponent, coefficient))
contraction = (subshell, primitives)
if contraction not in gbasis[iatom]:
gbasis[iatom].append(contraction)
self.skip_line(inputfile, 'blank')
self.set_attribute('gbasis', gbasis)
# Since DALTON sometimes uses symmetry labels (Ag, Au, etc.) and sometimes
# just the symmetry group index, we need to parse and keep a mapping between
# these two for later use.
#
# Symmetry Orbitals
# -----------------
#
# Number of orbitals in each symmetry: 25 5 25 5
#
#
# Symmetry Ag ( 1)
#
# 1 C 1s 1 + 2
# 2 C 1s 3 + 4
# ...
#
if line.strip() == "Symmetry Orbitals":
self.skip_lines(inputfile, ['d', 'b'])
line = inputfile.next()
self.symcounts = [int(c) for c in line.split(':')[1].split()]
self.symlabels = []
for sc in self.symcounts:
self.skip_lines(inputfile, ['b', 'b'])
# If the number of orbitals for a symmetry is zero, the printout
# is different (see MP2 unittest logfile for an example).
line = inputfile.next()
if sc == 0:
assert "No orbitals in symmetry" in line
else:
assert line.split()[0] == "Symmetry"
self.symlabels.append(line.split()[1])
self.skip_line(inputfile, 'blank')
for i in range(sc):
orbital = inputfile.next()
# Wave function specification
# ============================
# @ Wave function type >>> KS-DFT <<<
# @ Number of closed shell electrons 70
# @ Number of electrons in active shells 0
# @ Total charge of the molecule 0
#
# @ Spin multiplicity and 2 M_S 1 0
# @ Total number of symmetries 4 (point group: C2h)
# @ Reference state symmetry 1 (irrep name : Ag )
#
# This is a DFT calculation of type: B3LYP
# ...
#
if "@ Number of electrons in active shells" in line:
self.unpaired_electrons = int(line.split()[-1])
if "@ Total charge of the molecule" in line:
self.set_attribute("charge", int(line.split()[-1]))
if "@ Spin multiplicity and 2 M_S" in line:
self.set_attribute("mult", int(line.split()[-2]))
# Orbital specifications
# ======================
# Abelian symmetry species All | 1 2 3 4
# | Ag Au Bu Bg
# --- | --- --- --- ---
# Total number of orbitals 60 | 25 5 25 5
# Number of basis functions 60 | 25 5 25 5
#
# ** Automatic occupation of RKS orbitals **
#
# -- Initial occupation of symmetries is determined from extended Huckel guess.
# -- Initial occupation of symmetries is :
# @ Occupied SCF orbitals 35 | 15 2 15 3
#
# Maximum number of Fock iterations 0
# Maximum number of DIIS iterations 60
# Maximum number of QC-SCF iterations 60
# Threshold for SCF convergence 1.00D-05
# This is a DFT calculation of type: B3LYP
# ...
#
if "Total number of orbitals" in line:
# DALTON 2015 adds a @ in front of number of orbitals
chomp = line.split()
index = 4
if "@" in chomp:
index = 5
self.set_attribute("nbasis", int(chomp[index]))
self.nmo_per_symmetry = list(map(int, chomp[index+2:]))
assert self.nbasis == sum(self.nmo_per_symmetry)
if "@ Occupied SCF orbitals" in line and not hasattr(self, 'homos'):
temp = line.split()
homos = int(temp[4])
self.set_attribute('homos', [homos - 1 + self.unpaired_electrons])
if "Threshold for SCF convergence" in line:
if not hasattr(self, "scftargets"):
self.scftargets = []
scftarget = self.float(line.split()[-1])
self.scftargets.append([scftarget])
# .--------------------------------------------.
# | Starting in Wave Function Section (SIRIUS) |
# `--------------------------------------------'
#
if "Starting in Wave Function Section (SIRIUS)" in line:
self.section = "SIRIUS"
# *********************************************
# ***** DIIS optimization of Hartree-Fock *****
# *********************************************
#
# C1-DIIS algorithm; max error vectors = 8
#
# Automatic occupation of symmetries with 70 electrons.
#
# Iter Total energy Error norm Delta(E) SCF occupation
# -----------------------------------------------------------------------------
# K-S energy, electrons, error : -46.547567739269 69.9999799123 -2.01D-05
# @ 1 -381.645762476 4.00D+00 -3.82D+02 15 2 15 3
# Virial theorem: -V/T = 2.008993
# @ MULPOP C _1 0.15; C _2 0.15; C _1 0.12; C _2 0.12; C _1 0.11; C _2 0.11; H _1 -0.15; H _2 -0.15; H _1 -0.14; H _2 -0.14;
# @ C _1 0.23; C _2 0.23; H _1 -0.15; H _2 -0.15; C _1 0.08; C _2 0.08; H _1 -0.12; H _2 -0.12; H _1 -0.13; H _2 -0.13;
# -----------------------------------------------------------------------------
# K-S energy, electrons, error : -46.647668038900 69.9999810430 -1.90D-05
# @ 2 -381.949410128 1.05D+00 -3.04D-01 15 2 15 3
# Virial theorem: -V/T = 2.013393
# ...
#
# With and without symmetry, the "Total energy" line is shifted a little.
if self.section == "SIRIUS" and "Iter" in line and "Total energy" in line:
iteration = 0
converged = False
values = []
if not hasattr(self, "scfvalues"):
self.scfvalues = []
while not converged:
line = next(inputfile)
# each iteration is bracketed by "-------------"
if "-------------------" in line:
iteration += 1
continue
# the first hit of @ n where n is the current iteration
strcompare = "@{0:>3d}".format(iteration)
if strcompare in line:
temp = line.split()
val = self.float(temp[3])
values.append([val])
if line[0] == "@" and "converged in" in line:
converged = True
# It seems DALTON does change the SCF convergence criteria during a
# geometry optimization, but also does not print them. So, assume they
# are unchanged and copy the initial values after the first step. However,
# it would be good to check up on this - perhaps it is possible to print.
self.scfvalues.append(values)
if len(self.scfvalues) > 1:
self.scftargets.append(self.scftargets[-1])
# DALTON organizes the energies by symmetry, so we need to parse first,
# and then sort the energies (and labels) before we store them.
#
# The formatting varies depending on RHF/DFT and/or version. Here is
# an example from a DFT job:
#
# *** SCF orbital energy analysis ***
#
# Only the five lowest virtual orbital energies printed in each symmetry.
#
# Number of electrons : 70
# Orbital occupations : 15 2 15 3
#
# Sym Kohn-Sham orbital energies
#
# 1 Ag -10.01616533 -10.00394288 -10.00288640 -10.00209612 -9.98818062
# -0.80583154 -0.71422407 -0.58487249 -0.55551093 -0.50630125
# ...
#
# Here is an example from an RHF job that only has symmetry group indices:
#
# *** SCF orbital energy analysis ***
#
# Only the five lowest virtual orbital energies printed in each symmetry.
#
# Number of electrons : 70
# Orbital occupations : 15 2 15 3
#
# Sym Hartree-Fock orbital energies
#
# 1 -11.04052518 -11.03158921 -11.02882211 -11.02858563 -11.01747921
# -1.09029777 -0.97492511 -0.79988247 -0.76282547 -0.69677619
# ...
#
if self.section == "SIRIUS" and "*** SCF orbital energy analysis ***" in line:
# to get ALL orbital energies, the .PRINTLEVELS keyword needs
# to be at least 0,10 (up from 0,5). I know, obvious, right?
# this, however, will conflict with the scfvalues output that
# changes into some weird form of DIIS debug output.
mosyms = []
moenergies = []
self.skip_line(inputfile, 'blank')
line = next(inputfile)
# There is some extra text between the section header and
# the number of electrons for open-shell calculations.
while "Number of electrons" not in line:
line = next(inputfile)
nelectrons = int(line.split()[-1])
line = next(inputfile)
occupations = [int(o) for o in line.split()[3:]]
nsym = len(occupations)
self.skip_lines(inputfile, ['b', 'header', 'b'])
# now parse nsym symmetries
for isym in range(nsym):
# For unoccupied symmetries, nothing is printed here.
if occupations[isym] == 0:
continue
# When there are exactly five energies printed (on just one line), it seems
# an extra blank line is printed after a block.
line = next(inputfile)
if not line.strip():
line = next(inputfile)
cols = line.split()
# The first line has the orbital symmetry information, but sometimes
# it's the label and sometimes it's the index. There are always five
# energies per line, though, so we can deduce if we have the labels or
# not just the index. In the latter case, we depend on the labels
# being read earlier into the list `symlabels`. Finally, if no symlabels
# were read that implies there is only one symmetry, namely Ag.
if 'A' in cols[1] or 'B' in cols[1]:
sym = self.normalisesym(cols[1])
energies = [float(t) for t in cols[2:]]
else:
if hasattr(self, 'symlabels'):
sym = self.normalisesym(self.symlabels[int(cols[0]) - 1])
else:
assert cols[0] == '1'
sym = "Ag"
energies = [float(t) for t in cols[1:]]
while len(energies) > 0:
moenergies.extend(energies)
mosyms.extend(len(energies)*[sym])
line = next(inputfile)
energies = [float(col) for col in line.split()]
# now sort the data about energies and symmetries. see the following post for the magic
# http://stackoverflow.com/questions/19339/a-transpose-unzip-function-in-python-inverse-of-zip
sdata = sorted(zip(moenergies, mosyms), key=lambda x: x[0])
moenergies, mosyms = zip(*sdata)
self.moenergies = [[]]
self.moenergies[0] = [utils.convertor(moenergy, 'hartree', 'eV') for moenergy in moenergies]
self.mosyms = [[]]
self.mosyms[0] = mosyms
if not hasattr(self, "nmo"):
self.nmo = self.nbasis
if len(self.moenergies[0]) != self.nmo:
self.set_attribute('nmo', len(self.moenergies[0]))
# .-----------------------------------.
# | >>> Final results from SIRIUS <<< |
# `-----------------------------------'
#
#
# @ Spin multiplicity: 1
# @ Spatial symmetry: 1 ( irrep Ag in C2h )
# @ Total charge of molecule: 0
#
# @ Final DFT energy: -382.050716652387
# @ Nuclear repulsion: 445.936979976608
# @ Electronic energy: -827.987696628995
#
# @ Final gradient norm: 0.000003746706
# ...
#
if "Final DFT energy" in line or "Final HF energy" in line:
if not hasattr(self, "scfenergies"):
self.scfenergies = []
temp = line.split()
self.scfenergies.append(utils.convertor(float(temp[-1]), "hartree", "eV"))
if "@ = MP2 second order energy" in line:
energ = utils.convertor(float(line.split()[-1]), 'hartree', 'eV')
if not hasattr(self, "mpenergies"):
self.mpenergies = []
self.mpenergies.append([])
self.mpenergies[-1].append(energ)
if "Total energy CCSD(T)" in line:
energ = utils.convertor(float(line.split()[-1]), 'hartree', 'eV')
if not hasattr(self, "ccenergies"):
self.ccenergies = []
self.ccenergies.append(energ)
# The molecular geometry requires the use of .RUN PROPERTIES in the input.
# Note that the second column is not the nuclear charge, but the atom type
# index used internally by DALTON.
#
# Molecular geometry (au)
# -----------------------
#
# C _1 1.3498778652 2.3494125195 0.0000000000
# C _2 -1.3498778652 -2.3494125195 0.0000000000
# C _1 2.6543517307 0.0000000000 0.0000000000
# ...
#
if "Molecular geometry (au)" in line:
if not hasattr(self, "atomcoords"):
self.atomcoords = []
if self.firststdorient:
self.firststdorient = False
self.skip_lines(inputfile, ['d', 'b'])
lines = [next(inputfile) for i in range(self.natom)]
atomcoords = self.parse_geometry(lines)
self.atomcoords.append(atomcoords)
if "Optimization Control Center" in line:
self.section = "OPT"
assert set(next(inputfile).strip()) == set(":")
# During geometry optimizations the geometry is printed in the section
# that is titles "Optimization Control Center". Note that after an optimizations
# finishes, DALTON normally runs another "static property section (ABACUS)",
# so the final geometry will be repeated in atomcoords.
#
# Next geometry (au)
# ------------------
#
# C _1 1.3203201560 2.3174808341 0.0000000000
# C _2 -1.3203201560 -2.3174808341 0.0000000000
# ...
if self.section == "OPT" and line.strip() == "Next geometry (au)":
self.skip_lines(inputfile, ['d', 'b'])
lines = [next(inputfile) for i in range(self.natom)]
coords = self.parse_geometry(lines)
self.atomcoords.append(coords)
# This section contains data for optdone and geovalues, although we could use
# it to double check some atttributes that were parsed before.
#
# Optimization information
# ------------------------
#
# Iteration number : 4
# End of optimization : T
# Energy at this geometry is : -379.777956
# Energy change from last geom. : -0.000000
# Predicted change : -0.000000
# Ratio, actual/predicted change : 0.952994
# Norm of gradient : 0.000058
# Norm of step : 0.000643
# Updated trust radius : 0.714097
# Total Hessian index : 0
#
if self.section == "OPT" and line.strip() == "Optimization information":
self.skip_lines(inputfile, ['d', 'b'])
line = next(inputfile)
assert 'Iteration number' in line
iteration = int(line.split()[-1])
line = next(inputfile)
assert 'End of optimization' in line
if not hasattr(self, 'optdone'):
self.optdone = []
self.optdone.append(line.split()[-1] == 'T')
# We need a way to map between lines here and the targets stated at the
# beginning of the file in 'Chosen parameters for *OPTIMI (see above),
# and this dictionary facilitates that. The keys are target names parsed
# in that initial section after input processing, and the values are
# substrings that should appear in the lines in this section. Make an
# exception for the energy at iteration zero where there is no gradient,
# and take the total energy for geovalues.
targets_labels = {
'gradient': 'Norm of gradient',
'energy': 'Energy change from last',
'step': 'Norm of step',
}
values = [numpy.nan] * len(self.geotargets)
while line.strip():
if iteration == 0 and "Energy at this geometry" in line:
index = self.geotargets_names.index('energy')
values[index] = self.float(line.split()[-1])
for tgt, lbl in targets_labels.items():
if lbl in line and tgt in self.geotargets_names:
index = self.geotargets_names.index(tgt)
values[index] = self.float(line.split()[-1])
line = next(inputfile)
# If we're missing something above, throw away the partial geovalues since
# we don't want artificial NaNs getting into cclib. Instead, fix the dictionary
# to make things work.
if not numpy.nan in values:
if not hasattr(self, 'geovalues'):
self.geovalues = []
self.geovalues.append(values)
# -------------------------------------------------
# extract the center of mass line
if "Center-of-mass coordinates (a.u.):" in line:
temp = line.split()
reference = [utils.convertor(float(temp[i]), "bohr", "Angstrom") for i in [3, 4, 5]]
if not hasattr(self, 'moments'):
self.moments = [reference]
# -------------------------------------------------
# Extract the dipole moment
if "Dipole moment components" in line:
dipole = numpy.zeros(3)
line = next(inputfile)
line = next(inputfile)
line = next(inputfile)
if not "zero by symmetry" in line:
line = next(inputfile)
line = next(inputfile)
temp = line.split()
for i in range(3):
dipole[i] = float(temp[2]) # store the Debye value
if hasattr(self, 'moments'):
self.moments.append(dipole)
## 'vibfreqs', 'vibirs', and 'vibsyms' appear in ABACUS.
# Vibrational Frequencies and IR Intensities
# ------------------------------------------
#
# mode irrep frequency IR intensity
# ============================================================
# cm-1 hartrees km/mol (D/A)**2/amu
# ------------------------------------------------------------
# 1 A 3546.72 0.016160 0.000 0.0000
# 2 A 3546.67 0.016160 0.024 0.0006
# ...
if "Vibrational Frequencies and IR Intensities" in line:
self.skip_lines(inputfile, ['dashes', 'blank'])
line = next(inputfile)
assert line.strip() == "mode irrep frequency IR intensity"
self.skip_line(inputfile, 'equals')
line = next(inputfile)
assert line.strip() == "cm-1 hartrees km/mol (D/A)**2/amu"
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
# The normal modes are in order of decreasing IR
# frequency, so they can't be added directly to
# attributes; they must be grouped together first, sorted
# in order of increasing frequency, then added to their
# respective attributes.
vibdata = []
while line.strip():
sline = line.split()
vibsym = sline[1]
vibfreq = float(sline[2])
vibir = float(sline[4])
vibdata.append((vibfreq, vibir, vibsym))
line = next(inputfile)
vibdata.sort(key=lambda normalmode: normalmode[0])
self.vibfreqs = [normalmode[0] for normalmode in vibdata]
self.vibirs = [normalmode[1] for normalmode in vibdata]
self.vibsyms = [normalmode[2] for normalmode in vibdata]
# Now extract the normal mode displacements.
self.skip_lines(inputfile, ['b', 'b'])
line = next(inputfile)
assert line.strip() == "Normal Coordinates (bohrs*amu**(1/2)):"
# Normal Coordinates (bohrs*amu**(1/2)):
# --------------------------------------
#
#
# 1 3547 2 3547 3 3474 4 3471 5 3451
# ----------------------------------------------------------------------
#
# C x -0.000319 -0.000314 0.002038 0.000003 -0.001599
# C y -0.000158 -0.000150 -0.001446 0.003719 -0.002576
# C z 0.000000 -0.000000 -0.000000 0.000000 -0.000000
#
# C x 0.000319 -0.000315 -0.002038 0.000003 0.001600
# C y 0.000157 -0.000150 0.001448 0.003717 0.002577
# ...
self.skip_line(inputfile, 'd')
line = next(inputfile)
vibdisps = numpy.empty(shape=(len(self.vibirs), self.natom, 3))
ndisps = 0
while ndisps < len(self.vibirs):
# Skip two blank lines.
line = next(inputfile)
line = next(inputfile)
# Use the header with the normal mode indices and
# frequencies to update where we are.
ndisps_block = (len(line.split()) // 2)
mode_min, mode_max = ndisps, ndisps + ndisps_block
# Skip a line of dashes and a blank line.
line = next(inputfile)
line = next(inputfile)
for w in range(self.natom):
for coord in range(3):
line = next(inputfile)
vibdisps[mode_min:mode_max, w, coord] = [float(i) for i in line.split()[2:]]
# Skip a blank line.
line = next(inputfile)
ndisps += ndisps_block
# The vibrational displacements are in the wrong order;
# reverse them.
self.vibdisps = vibdisps[::-1, :, :]
## 'vibramans'
# Raman related properties for freq. 0.000000 au = Infinity nm
# ---------------------------------------------------------------
#
# Mode Freq. Alpha**2 Beta(a)**2 Pol.Int. Depol.Int. Dep. Ratio
#
# 1 3546.72 0.379364 16.900089 84.671721 50.700268 0.598786
# 2 3546.67 0.000000 0.000000 0.000000 0.000000 0.599550
if "Raman related properties for freq." in line:
self.skip_lines(inputfile, ['d', 'b'])
line = next(inputfile)
assert line[1:76] == "Mode Freq. Alpha**2 Beta(a)**2 Pol.Int. Depol.Int. Dep. Ratio"
self.skip_line(inputfile, 'b')
line = next(inputfile)
vibramans = []
# The Raman intensities appear under the "Pol.Int."
# (polarization intensity) column.
for m in range(len(self.vibfreqs)):
vibramans.append(float(line.split()[4]))
line = next(inputfile)
# All vibrational properties in DALTON appear in reverse
# order.
self.vibramans = vibramans[::-1]
# Electronic excitations: single residues of the linear
# response equations.
if "Linear Response single residue calculation" in line:
etsyms = []
etenergies = []
# etoscs = []
etsecs = []
symmap = {"T": "Triplet", "F": "Singlet"}
while "End of Dynamic Property Section (RESPONS)" not in line:
line = next(inputfile)
if "Operator symmetry" in line:
do_triplet = line[-2]
if "@ Excited state no:" in line:
etsym = line.split()[9] # -2
etsyms.append(symmap[do_triplet] + "-" + etsym)
self.skip_lines(inputfile, ['d', 'b', 'Excitation energy in a.u.'])
line = next(inputfile)
etenergy = float(line.split()[1])
etenergies.append(etenergy)
while "The dominant contributions" not in line:
line = next(inputfile)
self.skip_line(inputfile, 'b')
line = next(inputfile)
# [0] is the starting (occupied) MO
# [1] is the ending (unoccupied) MO
# [2] and [3] are the excitation/deexcitation coefficients
# [4] is the orbital overlap
# [5] is the ...
# [6] is the ...
# [7] is the ...
assert "I A K_IA K_AI <|I|*|A|> <I^2*A^2> Weight Contrib" in line
self.skip_line(inputfile, 'b')
line = next(inputfile)
sec = []
while line.strip():
chomp = line.split()
startidx = int(chomp[0]) - 1
endidx = int(chomp[1]) - 1
contrib = float(chomp[2])
# Since DALTON is restricted open-shell only,
# there is not distinction between alpha and
# beta spin.
sec.append([(startidx, 0), (endidx, 0), contrib])
line = next(inputfile)
etsecs.append(sec)
self.set_attribute('etsyms', etsyms)
self.set_attribute('etenergies', etenergies)
# self.set_attribute('etoscs', etoscs)
self.set_attribute('etsecs', etsecs)
# TODO:
# aonames
# aooverlaps
# atomcharges
# atomspins
# coreelectrons
# enthalpy
# entropy
# etoscs
# etrotats
# freeenergy
# grads
# hessian
# mocoeffs
# nocoeffs
# nooccnos
# scancoords
# scanenergies
# scannames
# scanparm
# temperature
# vibanharms
# N/A:
# fonames
# fooverlaps
# fragnames
# frags
if __name__ == "__main__":
    # Allow running this parser as a script:
    #   no arguments          -> run the module's doctests
    #   one argument          -> parse the given DALTON output file
    #   further arguments     -> names of parsed attributes to print
    import sys

    import daltonparser

    if len(sys.argv) == 1:
        import doctest
        doctest.testmod(daltonparser, verbose=False)
    if len(sys.argv) >= 2:
        parser = daltonparser.DALTON(sys.argv[1])
        data = parser.parse()
        # Iterate the attribute names directly instead of indexing into
        # sys.argv via range(len(...)); an empty slice simply prints nothing,
        # so no extra length guard is needed.
        for attrname in sys.argv[2:]:
            if hasattr(data, attrname):
                print(getattr(data, attrname))
|
ghutchis/cclib
|
src/cclib/parser/daltonparser.py
|
Python
|
lgpl-2.1
| 47,748
|
[
"Dalton",
"Gaussian",
"cclib"
] |
548d856f582363c2ba8397bd5f6308d6efc9be92c5c08dca6e303db16b9e5eb7
|
# TODO(colin): fix these lint errors (http://pep8.readthedocs.io/en/release-1.7.x/intro.html#error-codes)
# pep8-disable:E128
"""Linter that warns against files that aren't following frontend best
practices.
At the moment this checks for handlebars, less, and css files and complains if
any attempts are made to add new files of this type. Existing files (set in
a whitelist) are excluded from this check.
"""
from __future__ import absolute_import
from shared.testutil import lintutil
# File extensions that are banned for newly-added files.  Any file ending
# in one of these suffixes (and not grandfathered into the whitelist
# defined below in this module) triggers a lint complaint.
_BAD_EXTENSIONS = (
    '.handlebars',
    '.less',
    '.css',
)
# Files that are being grandfathered in. You really shouldn't be adding
# files to this list, really only deleting entries as the associated files
# are also removed.
#
# This list was generated by running:
# git ls-files '*.handlebars' '*.less' '*.css' |
# sed -e 's/^/ "/' -e 's/$/",/'
# -e 's/\(.\{79,\}\)/\1 # @Nolint(long line)/'
_EXISTING_FILE_WHITELIST = [
"extbackup/static/css/compiled.css",
"gandalf/static/css/base.css",
"gandalf/static/css/style.css",
"javascript/badges-package/share-links.handlebars",
"javascript/bigbingo-dashboard-package/tooltip-template.handlebars",
"javascript/dashboard-package/focus-mode-chrome.handlebars",
"javascript/dashboard-secondary-package/dashboard-overview-doc.handlebars", # @Nolint(long line)
"javascript/discussion-package/answer.handlebars",
"javascript/discussion-package/comment.handlebars",
"javascript/discussion-package/discussion-list-profile-widget.handlebars",
"javascript/discussion-package/discussion-list.handlebars",
"javascript/discussion-package/discussion.handlebars",
"javascript/discussion-package/duplicate-notice.handlebars",
"javascript/discussion-package/flag-modal.handlebars",
"javascript/discussion-package/flags-hovercard.handlebars",
"javascript/discussion-package/formatting-tips.handlebars",
"javascript/discussion-package/low-quality-notice-footer.handlebars",
"javascript/discussion-package/low-quality-notice-header.handlebars",
"javascript/discussion-package/note.handlebars",
"javascript/discussion-package/project.handlebars",
"javascript/discussion-package/question.handlebars",
"javascript/discussion-package/replies-container.handlebars",
"javascript/discussion-package/reply-form.handlebars",
"javascript/discussion-package/reply.handlebars",
"javascript/discussion-package/scratchpad-flag-modal.handlebars",
"javascript/discussion-package/scratchpad-thumbnail.handlebars",
"javascript/discussion-package/sorts.handlebars",
"javascript/discussion-package/thread.handlebars",
"javascript/editor-package/commit-info.handlebars",
"javascript/editor-package/content-log-diff.handlebars",
"javascript/exercises-components-package/calculator.handlebars",
"javascript/exercises-legacy-package/calculating-card.handlebars",
"javascript/exercises-legacy-package/current-card.handlebars",
"javascript/exercises-legacy-package/exercise.handlebars",
"javascript/exercises-legacy-package/problem-template.handlebars",
"javascript/hover-card-package/hover-card.handlebars",
"javascript/login-package/signup-success-inner.handlebars",
"javascript/login-package/signup-success.handlebars",
"javascript/maps-package/knowledgemap-admin-exercise.handlebars",
"javascript/maps-package/knowledgemap-exercise.handlebars",
"javascript/maps-package/knowledgemap-topic.handlebars",
"javascript/message-box-package/generic-dialog.handlebars",
"javascript/notifications-package/accept-terms-of-service-notification.handlebars", # @Nolint(long line)
"javascript/notifications-package/api-version-mismatch-notification.handlebars", # @Nolint(long line)
"javascript/notifications-package/banner-notification.handlebars",
"javascript/notifications-package/common-core-notification.handlebars",
"javascript/notifications-package/email-bounce-notification.handlebars",
"javascript/notifications-package/i18n-live-notification.handlebars",
"javascript/notifications-package/i18n-suggest-notification.handlebars",
"javascript/notifications-package/i18n-test-notification.handlebars",
"javascript/notifications-package/link-email-notification.handlebars",
"javascript/notifications-package/phantom-notification.handlebars",
"javascript/notifications-package/restricted-domain-age-check-notification.handlebars", # @Nolint(long line)
"javascript/notifications-package/urgent-notification.handlebars",
"javascript/notifications-package/verify-email-notification.handlebars",
"javascript/notifications-package/zero-rated-notification.handlebars",
"javascript/notifications-package/zero-rating-available-notification.handlebars", # @Nolint(long line)
"javascript/perseus-admin-package/edit-filters.handlebars",
"javascript/perseus-admin-package/exercise-list.handlebars",
"javascript/perseus-admin-package/filter-description.handlebars",
"javascript/perseus-admin-package/item-list.handlebars",
"javascript/perseus-admin-package/modal-add-exercise.handlebars",
"javascript/perseus-admin-package/perseus-tag-list.handlebars",
"javascript/perseus-admin-package/tag-tallies.handlebars",
"javascript/phantom-package/change-email-dialog.handlebars",
"javascript/phantom-package/link-email-dialog.handlebars",
"javascript/profile-nav-package/profile-content-chrome.handlebars",
"javascript/profile-nav-package/username-picker.handlebars",
"javascript/profile-package/achievements.handlebars",
"javascript/profile-package/badge-compact.handlebars",
"javascript/profile-package/badge-display-case.handlebars",
"javascript/profile-package/coach.handlebars",
"javascript/profile-package/coaches.handlebars",
"javascript/profile-package/discussion-awards-block.handlebars",
"javascript/profile-package/discussion-hellban.handlebars",
"javascript/profile-package/discussion-moderation.handlebars",
"javascript/profile-package/discussion-send-message.handlebars",
"javascript/profile-package/discussion-statistics.handlebars",
"javascript/profile-package/empty-badge-picker.handlebars",
"javascript/profile-package/error-tab-not-accessible.handlebars",
"javascript/profile-package/no-coaches.handlebars",
"javascript/profile-package/profile.handlebars",
"javascript/profile-package/recent-activity-badge.handlebars",
"javascript/profile-package/recent-activity-challenge.handlebars",
"javascript/profile-package/recent-activity-list.handlebars",
"javascript/profile-package/recent-activity-progress-change.handlebars",
"javascript/profile-package/recent-activity-video.handlebars",
"javascript/recommendations-package/exercise-preview.handlebars",
"javascript/recommendations-package/visit-by-student-recs.handlebars",
"javascript/reports-package/empty-list.handlebars",
"javascript/scratchpad-base-package/docs-document.handlebars",
"javascript/scratchpad-base-package/docs-pjs.handlebars",
"javascript/scratchpad-base-package/docs-sql.handlebars",
"javascript/scratchpad-base-package/docs-webpage.handlebars",
"javascript/scratchpad-challenge-package/challenge-checkmark.handlebars",
"javascript/scratchpad-challenge-package/challenge-feedback-modal.handlebars", # @Nolint(long line)
"javascript/scratchpad-challenge-package/challenge-goal-flag.handlebars",
"javascript/scratchpad-challenge-package/challenge-pane-header.handlebars", # @Nolint(long line)
"javascript/scratchpad-challenge-package/challenge-step.handlebars",
"javascript/scratchpads-package/browser-notice.handlebars",
"javascript/stories-package/story-editor-dialog.handlebars",
"javascript/stories-package/story-editor-header.handlebars",
"javascript/stories-package/story-editor-row.handlebars",
"javascript/tasks-secondary-package/end-of-mastery-exercise-summary-item.handlebars", # @Nolint(long line)
"javascript/tasks-secondary-package/end-of-mastery-mission-progress.handlebars", # @Nolint(long line)
"javascript/tasks-secondary-package/end-of-mastery-task-card.handlebars",
"javascript/tasks-secondary-package/end-of-task-card.handlebars",
"javascript/tasks-secondary-package/focused-task-header.handlebars",
"javascript/tasks-secondary-package/mario-points.handlebars",
"javascript/testfiles-package/template-with-invoke-partial.handlebars",
"javascript/testfiles-package/template-with-partial.handlebars",
"javascript/transcripts-package/video-transcript.handlebars",
"kake/compile_all_pot-testfiles/javascript/foo/j2.handlebars",
"kake/compile_all_pot-testfiles/javascript/j1.handlebars",
"kake/compile_js_bundles-testfiles/javascript/bike-package/bell.handlebars", # @Nolint(long line)
"kake/compile_js_bundles-testfiles/javascript/bike-package/bikewithno.handlebars", # @Nolint(long line)
"kake/compile_js_css_packages-testfiles/expected/test_compress/video.less.min.css", # @Nolint(long line)
"kake/compile_js_css_packages-testfiles/expected/test_css/video-package.css", # @Nolint(long line)
"kake/compile_js_css_packages-testfiles/expected/test_max_inline_size/video-package.css", # @Nolint(long line)
"kake/compile_js_css_packages-testfiles/expected/test_sourcemap_css/video-package.css", # @Nolint(long line)
"kake/compile_js_css_packages-testfiles/javascript/video-package/t.handlebars", # @Nolint(long line)
"kake/compile_js_css_packages-testfiles/stylesheets/tiny-package/a.css",
"kake/compile_js_css_packages-testfiles/stylesheets/tiny-package/b.css",
"kake/compile_js_css_packages-testfiles/stylesheets/tiny-package/c.css",
"kake/compile_js_css_packages-testfiles/stylesheets/video-package/amara.less", # @Nolint(long line)
"kake/compile_js_css_packages-testfiles/stylesheets/video-package/modal.css", # @Nolint(long line)
"kake/compile_js_css_packages-testfiles/stylesheets/video-package/video.less", # @Nolint(long line)
"kake/compile_js_dep_graph-testfiles/javascript/stuff-package/one-partial.handlebars", # @Nolint(long line)
"kake/compile_js_dep_graph-testfiles/javascript/stuff-package/template.handlebars", # @Nolint(long line)
"kake/compile_js_dep_graph-testfiles/javascript/stuff-package/two-partial.handlebars", # @Nolint(long line)
"static/tii_iframe_header.css",
"stylesheets/about-package/about.less",
"stylesheets/about-package/blog.less",
"stylesheets/about-package/team.less",
"stylesheets/assignments-package/assignments.less",
"stylesheets/avatar-customizer-package/avatar-customizer.less",
"stylesheets/badge-package/spotlight.less",
"stylesheets/bigbingo-dashboard-package/bigbingo-dashboard.less",
"stylesheets/bigbingo-dashboard-package/funnel.less",
"stylesheets/checklist-package/checklist.less",
"stylesheets/clarifications-package/accepted.less",
"stylesheets/clarifications-package/discussion-meta.less",
"stylesheets/clarifications-package/shared.less",
"stylesheets/clarifications-package/suggested.less",
"stylesheets/clever-package/clever.less",
"stylesheets/coach-report-activity-package/report.less",
"stylesheets/coach-report-downloads-package/report.less",
"stylesheets/coach-report-exercises-package/report.less",
"stylesheets/coach-report-programs-package/report.less",
"stylesheets/coach-report-progress-by-student-package/histogram.less",
"stylesheets/coach-report-progress-by-student-package/masterdetail.less",
"stylesheets/coach-report-progress-by-student-package/recommendation-tab.less", # @Nolint(long line)
"stylesheets/coach-report-progress-by-student-package/report.less",
"stylesheets/coach-report-topic-package/report.less",
"stylesheets/commoncore-package/commoncore.less",
"stylesheets/commoncore-package/subject-page.less",
"stylesheets/content-tags-editor-package/editor.less",
"stylesheets/contentanalytics-package/contentanalytics.less",
"stylesheets/contextual-help-package/help-dots.less",
"stylesheets/contribute-package/contribute.less",
"stylesheets/cs-shared-package/program-square.less",
"stylesheets/dashboard-package/focus-mode-close.less",
"stylesheets/dashboard-package/learning-dashboard.less",
"stylesheets/dashboard-package/mastered-mission.less",
"stylesheets/dashboard-package/progress-summary.less",
"stylesheets/devadmin-package/devadmin.less",
"stylesheets/discussion-package/discussion.less",
"stylesheets/discussion-qa-package/discussion-qa.less",
"stylesheets/donate-package/donate-modal.less",
"stylesheets/donate-package/donate.less",
"stylesheets/editor-package/editor-status-view.less",
"stylesheets/editor-package/editor.less",
"stylesheets/editor-package/publish-status.less",
"stylesheets/editor-package/search-result-preview.less",
"stylesheets/eduorg-picker-package/eduorg-picker.less",
"stylesheets/error-monitor-package/error-monitor.less",
"stylesheets/exercise-content-package/khan-exercise.css",
"stylesheets/exercise-content-package/perseus-0.css",
"stylesheets/exercise-content-package/perseus-1.css",
"stylesheets/exercise-content-package/perseus-2.css",
"stylesheets/exercise-content-package/perseus-3.css",
"stylesheets/exercise-content-package/perseus-4.css",
"stylesheets/exercise-content-package/perseus-5.css",
"stylesheets/exercise-content-package/perseus-6.css",
"stylesheets/exercise-content-package/perseus-7.css",
"stylesheets/exercise-content-package/perseus-8.css",
"stylesheets/exercise-content-package/perseus-9.css",
"stylesheets/exercise-search-package/exercise-search.less",
"stylesheets/exerciseissues-package/exerciseissues.less",
"stylesheets/exercises-package/cards-n-stacks.less",
"stylesheets/exercises-package/stacks.less",
"stylesheets/follow-package/follow.less",
"stylesheets/fontawesome-package/bootstrap.less",
"stylesheets/fontawesome-package/core.less",
"stylesheets/fontawesome-package/extras.less",
"stylesheets/fontawesome-package/font-awesome.less",
"stylesheets/fontawesome-package/icons.less",
"stylesheets/fontawesome-package/mixins.less",
"stylesheets/fontawesome-package/path.less",
"stylesheets/fontawesome-package/variables.less",
"stylesheets/homepage-lite-package/homepage-lite.less",
"stylesheets/homepage-package/homepage-minimal.less",
"stylesheets/homepage-package/homepage.less",
"stylesheets/homeschool-package/homeschool.less",
"stylesheets/interns-package/interns.less",
"stylesheets/interns-package/page.less",
"stylesheets/intro-package/guiders.less",
"stylesheets/intro-package/intro.less",
"stylesheets/ipad-package/ipad.less",
"stylesheets/issues-package/issues.less",
"stylesheets/jqueryui-package/jquery-ui-1.10.1.custom.css",
"stylesheets/knowledgemap-package/kmap_editor.css",
"stylesheets/labs-package/labs.css",
"stylesheets/login-package/add-child/add-child.less",
"stylesheets/login-package/add-child/birthday-block.less",
"stylesheets/login-package/add-child/buttons-block.less",
"stylesheets/login-package/add-child/has-account.less",
"stylesheets/login-package/add-child/has-email-block.less",
"stylesheets/login-package/add-child/invite-child.less",
"stylesheets/login-package/add-child/user-creation.less",
"stylesheets/login-package/auth-form.less",
"stylesheets/login-package/complete-createchild.less",
"stylesheets/login-package/final.less",
"stylesheets/login-package/forgotpw.less",
"stylesheets/login-package/index-legacy.less",
"stylesheets/login-package/index.less",
"stylesheets/login-package/login-legacy.less",
"stylesheets/login-package/login.less",
"stylesheets/login-package/mixins.less",
"stylesheets/login-package/oauth-approval.less",
"stylesheets/login-package/shared-legacy.less",
"stylesheets/login-package/shared.less",
"stylesheets/login-package/signin-widgets.less",
"stylesheets/login-package/signup.less",
"stylesheets/maps-package/knowledge-map.css",
"stylesheets/maps-package/knowledgemap.less",
"stylesheets/mobile-package/mobile.less",
"stylesheets/moderation-package/moderation.less",
"stylesheets/nav-package/nav.less",
"stylesheets/notifications-package/challenge-notification.less",
"stylesheets/notifications-package/notifications.less",
"stylesheets/odometer-package/odometer-minimal.less",
"stylesheets/odometer-package/odometer-slow.less",
"stylesheets/odometer-package/odometer.less",
"stylesheets/parent-package/add-child-widget.less",
"stylesheets/parent-package/child-invite.less",
"stylesheets/parent-package/child-preview.less",
"stylesheets/parent-package/parent-homepage.less",
"stylesheets/parent-package/remove-child.less",
"stylesheets/periodic-table-package/periodic-table.less",
"stylesheets/perseus-admin-package/brave-new-world.less",
"stylesheets/perseus-admin-package/image-upload-dialog.less",
"stylesheets/perseus-admin-package/move-type-modal.less",
"stylesheets/perseus-admin-package/perseus-admin.less",
"stylesheets/perseus-admin-package/tagit.less",
"stylesheets/perseus-admin-package/variables.less",
"stylesheets/profile-nav-package/navbar.less",
"stylesheets/profile-nav-package/profile-customization.less",
"stylesheets/profile-nav-package/profile-header.less",
"stylesheets/profile-nav-package/profile-nav.less",
"stylesheets/profile-package/badges.less",
"stylesheets/profile-package/dashboards.less",
"stylesheets/profile-package/profile-widgets.less",
"stylesheets/profile-package/profile.less",
"stylesheets/profile-package/streak-widget.less",
"stylesheets/profile-reports-package/datepicker.less",
"stylesheets/profile-reports-package/profile-reports.less",
"stylesheets/profile-settings-package/profile-settings.less",
"stylesheets/publish-package/publish.less",
"stylesheets/pure-package/grids-responsive.css",
"stylesheets/pure-package/grids.css",
"stylesheets/react-sandbox-package/sandbox.less",
"stylesheets/recommendations-package/exercise-preview.less",
"stylesheets/recommendations-package/recommendations.less",
"stylesheets/referrals-package/akkurat-mono.css",
"stylesheets/referrals-package/referrals.less",
"stylesheets/reports-package/coach-report-variables.less",
"stylesheets/reports-package/coach-reports.less",
"stylesheets/reports-package/empty-class.less",
"stylesheets/reports-package/nav.less",
"stylesheets/reports-package/reports.less",
"stylesheets/restricted-domains-package/restricted-domains.less",
"stylesheets/rsat-landing-package/rsat-landing.less",
"stylesheets/sat-coach-package/sat-coach.less",
"stylesheets/sat-mission-package/dashboard-overview.less",
"stylesheets/sat-mission-package/dashboard-practice.less",
"stylesheets/sat-mission-package/dashboard-tips.less",
"stylesheets/sat-mission-package/icons.less",
"stylesheets/sat-mission-package/mixins.less",
"stylesheets/sat-mission-package/overview-banner.less",
"stylesheets/sat-mission-package/overview-cta.less",
"stylesheets/sat-mission-package/partnership.less",
"stylesheets/sat-mission-package/rotate-banner.less",
"stylesheets/sat-mission-package/sat-mission.less",
"stylesheets/sat-mission-package/sat-perseus-renderer.less",
"stylesheets/sat-mission-package/sat-video-modal.less",
"stylesheets/sat-mission-package/schedule-creator.less",
"stylesheets/sat-mission-package/task-view.less",
"stylesheets/sat-mission-package/tii_revision_assistant.css",
"stylesheets/sat-mission-package/transitions.less",
"stylesheets/sat-mission-package/variables.less",
"stylesheets/schema-viewer-package/schema-viewer.less",
"stylesheets/scratchpad-challenge-package/challenge.less",
"stylesheets/scratchpad-challenge-package/rainbow.css",
"stylesheets/scratchpad-project-editor-package/scratchpad-project-editor.less", # @Nolint(long line)
"stylesheets/scratchpads-package/browse.less",
"stylesheets/scratchpads-package/docs.less",
"stylesheets/scratchpads-package/editor.less",
"stylesheets/scratchpads-package/path-override.css",
"stylesheets/scratchpads-package/style.less",
"stylesheets/scratchpads-package/variables.less",
"stylesheets/search-package/search.less",
"stylesheets/select2-package/select2.css",
"stylesheets/settings-package/email-settings.less",
"stylesheets/settings-package/settings.less",
"stylesheets/settings-package/toggle-switch.less",
"stylesheets/shared-minimal-package/shared-minimal.less",
"stylesheets/shared-mobile-package/shared-mobile.less",
"stylesheets/shared-package/accessibility.less",
"stylesheets/shared-package/base-badges.css",
"stylesheets/shared-package/base-element-styles.less",
"stylesheets/shared-package/base-hovercard.less",
"stylesheets/shared-package/base-page-styles.less",
"stylesheets/shared-package/buttons.less",
"stylesheets/shared-package/default.css",
"stylesheets/shared-package/domain-colors.less",
"stylesheets/shared-package/domain-menu.less",
"stylesheets/shared-package/dropdown.less",
"stylesheets/shared-package/energy-points.less",
"stylesheets/shared-package/exercise-dashboard.css",
"stylesheets/shared-package/exercise-state-colors.less",
"stylesheets/shared-package/extras.css",
"stylesheets/shared-package/fancy-scrollbar.css",
"stylesheets/shared-package/forms.less",
"stylesheets/shared-package/info-box.css",
"stylesheets/shared-package/jquery.qtip.css",
"stylesheets/shared-package/ka-autocomplete.css",
"stylesheets/shared-package/ka-input.less",
"stylesheets/shared-package/kui/button.less",
"stylesheets/shared-package/kui/survey.less",
"stylesheets/shared-package/large-search-bar.less",
"stylesheets/shared-package/logotype.less",
"stylesheets/shared-package/mixins.less",
"stylesheets/shared-package/modals.less",
"stylesheets/shared-package/nav-header-footer.less",
"stylesheets/shared-package/navigation.css",
"stylesheets/shared-package/nnw-thumbnails.css",
"stylesheets/shared-package/notification-bar.less",
"stylesheets/shared-package/popovers.less",
"stylesheets/shared-package/progress-icons-domains.less",
"stylesheets/shared-package/progress-icons.less",
"stylesheets/shared-package/proxima-nova.css",
"stylesheets/shared-package/reset.css",
"stylesheets/shared-package/responsive-utilities.less",
"stylesheets/shared-package/rtl.less",
"stylesheets/shared-package/scratchpads.less",
"stylesheets/shared-package/shared.less",
"stylesheets/shared-package/spinner.less",
"stylesheets/shared-package/subscriptions.less",
"stylesheets/shared-package/task-preview.less",
"stylesheets/shared-package/tooltips.less",
"stylesheets/shared-package/topic-jump-bar.css",
"stylesheets/shared-package/typeahead-search.less",
"stylesheets/shared-package/variables.less",
"stylesheets/stats-tables-package/stats-tables.less",
"stylesheets/stories-package/stories.less",
"stylesheets/style-old-package/etc.less",
"stylesheets/style-old-package/layout.less",
"stylesheets/style-old-package/main.less",
"stylesheets/style-old-package/solarized.less",
"stylesheets/style-old-package/style.less",
"stylesheets/style-package/style.less",
"stylesheets/tasks-package/tasks.less",
"stylesheets/test-prep-package/test-prep-styles.less",
"stylesheets/topicsadmin-package/topics-admin.less",
"stylesheets/topicsadmin-package/ui_dynatree.css",
"stylesheets/tos-privacy-package/tos-privacy.less",
"stylesheets/translation-editor-package/translation-editor.less",
"stylesheets/translations-dashboard-package/translations-dashboard.less",
"stylesheets/tutorial-editor-package/tutorial-editor.less",
"stylesheets/tutorial-landing-page-package/mixins.less",
"stylesheets/tutorial-landing-page-package/tutorial-landing-page.less",
"stylesheets/tutorial-package/tutorial.less",
"stylesheets/typeahead-package/search-bar.less",
"stylesheets/typeahead-vanilla-package/typeahead.css",
"stylesheets/video-lite-package/video-lite.less",
"stylesheets/video-package/discussions.less",
"stylesheets/video-package/dropdowns.less",
"stylesheets/video-package/interactive-transcripts.less",
"stylesheets/video-package/ka-video-player.less",
"stylesheets/video-package/video-common.less",
"stylesheets/video-package/video-layout.less",
"stylesheets/video-package/video.less",
"stylesheets/zero-rating-package/zero-rating.less",
"templates/about/internet_essentials/internet_essentials.min.css",
"templates/about/internet_essentials/overrides.css",
"templates/hour_of_code/hour_of_code.css",
"templates/shared_landing.css",
"templates/summer_of_script.css",
"templates/women_computing.css",
"templates/ycla/overrides.css",
"templates/ycla/ycla.min.css",
"third_party/javascript-khansrc/katex/katex.min.css",
"third_party/javascript-khansrc/react-datepicker/react-datepicker.css",
"tools/less-to-js_test.less",
"tools/progress-icons/progress.less",
]
def lint_banned_file_types(files_to_lint):
    """Complain about every linted file that uses a banned extension.

    Yields one (filename, line-number, message) tuple per offending file,
    skipping anything on the existing-file whitelist.
    """
    offenders = lintutil.filter(
        files_to_lint,
        suffix=_BAD_EXTENSIONS,
        exclude_substrings=_EXISTING_FILE_WHITELIST)
    for offender in offenders:
        yield (offender, 1, "Use React and Aphrodite instead.")
|
Khan/khan-linter
|
contrib/frontend_best_practices_lint.py
|
Python
|
apache-2.0
| 25,800
|
[
"VisIt"
] |
7cf3e92482212dfe590724d30a3c1d21528e8b79cfca6d574185d174fc667054
|
# sql/compiler.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Classes provided include:
:class:`~sqlalchemy.sql.compiler.SQLCompiler` - renders SQL
strings
:class:`~sqlalchemy.sql.compiler.DDLCompiler` - renders DDL
(data definition language) strings
:class:`~sqlalchemy.sql.compiler.GenericTypeCompiler` - renders
type specification strings.
To generate user-defined SQL strings, see
:module:`~sqlalchemy.ext.compiler`.
"""
import re
import sys
from sqlalchemy import schema, engine, util, exc
from sqlalchemy.sql import operators, functions, util as sql_util, \
visitors
from sqlalchemy.sql import expression as sql
import decimal
import itertools
# Words that must be quoted when used as identifiers; shared default list
# for the generic compiler (dialects may override).
RESERVED_WORDS = set([
    'all', 'analyse', 'analyze', 'and', 'any', 'array',
    'as', 'asc', 'asymmetric', 'authorization', 'between',
    'binary', 'both', 'case', 'cast', 'check', 'collate',
    'column', 'constraint', 'create', 'cross', 'current_date',
    'current_role', 'current_time', 'current_timestamp',
    'current_user', 'default', 'deferrable', 'desc',
    'distinct', 'do', 'else', 'end', 'except', 'false',
    'for', 'foreign', 'freeze', 'from', 'full', 'grant',
    'group', 'having', 'ilike', 'in', 'initially', 'inner',
    'intersect', 'into', 'is', 'isnull', 'join', 'leading',
    'left', 'like', 'limit', 'localtime', 'localtimestamp',
    'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset',
    'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps',
    'placing', 'primary', 'references', 'right', 'select',
    'session_user', 'set', 'similar', 'some', 'symmetric', 'table',
    'then', 'to', 'trailing', 'true', 'union', 'unique', 'user',
    'using', 'verbose', 'when', 'where'])
# Identifiers consisting only of these characters need no quoting.
LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I)
# Characters an identifier may not start with (digits and '$').
ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in xrange(0, 10)]).union(['$'])
# Matches ':name' bind-parameter tokens in text() strings; the negative
# lookbehind skips '::' casts and backslash-escaped ':' tokens.
BIND_PARAMS = re.compile(r'(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE)
# Matches backslash-escaped ':name' tokens so they can be un-escaped.
BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]+)(?![:\w\$])', re.UNICODE)
# Placeholder templates per DBAPI paramstyle; '[_POSITION]' is rewritten
# to a 1-based ordinal by SQLCompiler._apply_numbered_params.
BIND_TEMPLATES = {
    'pyformat':"%%(%(name)s)s",
    'qmark':"?",
    'format':"%%s",
    'numeric':":[_POSITION]",
    'named':":%(name)s"
}
# Operator-function -> SQL text fragments used during compilation.
OPERATORS =  {
    # binary
    operators.and_ : ' AND ',
    operators.or_ : ' OR ',
    operators.add : ' + ',
    operators.mul : ' * ',
    operators.sub : ' - ',
    # Py2K
    operators.div : ' / ',
    # end Py2K
    operators.mod : ' % ',
    operators.truediv : ' / ',
    operators.neg : '-',
    operators.lt : ' < ',
    operators.le : ' <= ',
    operators.ne : ' != ',
    operators.gt : ' > ',
    operators.ge : ' >= ',
    operators.eq : ' = ',
    operators.concat_op : ' || ',
    operators.between_op : ' BETWEEN ',
    operators.match_op : ' MATCH ',
    operators.in_op : ' IN ',
    operators.notin_op : ' NOT IN ',
    operators.comma_op : ', ',
    operators.from_ : ' FROM ',
    operators.as_ : ' AS ',
    operators.is_ : ' IS ',
    operators.isnot : ' IS NOT ',
    operators.collate : ' COLLATE ',
    # unary
    operators.exists : 'EXISTS ',
    operators.distinct_op : 'DISTINCT ',
    operators.inv : 'NOT ',
    # modifiers
    operators.desc_op : ' DESC',
    operators.asc_op : ' ASC',
    operators.nullsfirst_op : ' NULLS FIRST',
    operators.nullslast_op : ' NULLS LAST',
    }
# Generic-function classes -> SQL text; '%(expr)s' receives the rendered
# argument list (see SQLCompiler.visit_function).
FUNCTIONS = {
    functions.coalesce : 'coalesce%(expr)s',
    functions.current_date: 'CURRENT_DATE',
    functions.current_time: 'CURRENT_TIME',
    functions.current_timestamp: 'CURRENT_TIMESTAMP',
    functions.current_user: 'CURRENT_USER',
    functions.localtime: 'LOCALTIME',
    functions.localtimestamp: 'LOCALTIMESTAMP',
    functions.random: 'random%(expr)s',
    functions.sysdate: 'sysdate',
    functions.session_user :'SESSION_USER',
    functions.user: 'USER'
}
# Field names accepted by EXTRACT(); dialects override this map to
# translate field spellings.
EXTRACT_MAP = {
    'month': 'month',
    'day': 'day',
    'year': 'year',
    'second': 'second',
    'hour': 'hour',
    'doy': 'doy',
    'minute': 'minute',
    'quarter': 'quarter',
    'dow': 'dow',
    'week': 'week',
    'epoch': 'epoch',
    'milliseconds': 'milliseconds',
    'microseconds': 'microseconds',
    'timezone_hour': 'timezone_hour',
    'timezone_minute': 'timezone_minute'
}
# CompoundSelect keyword constants -> SQL set-operation keywords.
COMPOUND_KEYWORDS = {
    sql.CompoundSelect.UNION : 'UNION',
    sql.CompoundSelect.UNION_ALL : 'UNION ALL',
    sql.CompoundSelect.EXCEPT : 'EXCEPT',
    sql.CompoundSelect.EXCEPT_ALL : 'EXCEPT ALL',
    sql.CompoundSelect.INTERSECT : 'INTERSECT',
    sql.CompoundSelect.INTERSECT_ALL : 'INTERSECT ALL'
}
class _CompileLabel(visitors.Visitable):
    """lightweight label object which acts as an expression._Label."""
    __visit_name__ = 'label'
    # Fix: '_alt_names' is assigned in __init__ but was missing from
    # __slots__.  The omission only "worked" because the base class
    # supplies an instance __dict__, which defeats the point of declaring
    # slots at all; list every attribute this class actually stores.
    __slots__ = 'element', 'name', '_alt_names'
    def __init__(self, col, name, alt_names=()):
        self.element = col
        self.name = name
        self._alt_names = alt_names
    @property
    def proxy_set(self):
        # delegate to the wrapped column element
        return self.element.proxy_set
    @property
    def type(self):
        # delegate to the wrapped column element
        return self.element.type
    @property
    def quote(self):
        # delegate quoting preference to the wrapped column element
        return self.element.quote
class SQLCompiler(engine.Compiled):
    """Default implementation of Compiled.

    Compiles ClauseElements into SQL strings.   Uses a similar visit
    paradigm as visitors.ClauseVisitor but implements its own traversal.
    """
    # map of EXTRACT() field names; overridable per dialect
    extract_map = EXTRACT_MAP
    # map of compound-select keywords; overridable per dialect
    compound_keywords = COMPOUND_KEYWORDS
    isdelete = isinsert = isupdate = False
    """class-level defaults which can be set at the instance
    level to define if this Compiled instance represents
    INSERT/UPDATE/DELETE
    """
    returning = None
    """holds the "returning" collection of columns if
    the statement is CRUD and defines returning columns
    either implicitly or explicitly
    """
    returning_precedes_values = False
    """set to True classwide to generate RETURNING
    clauses before the VALUES or WHERE clause (i.e. MSSQL)
    """
    render_table_with_column_in_update_from = False
    """set to True classwide to indicate the SET clause
    in a multi-table UPDATE statement should qualify
    columns with the table name (i.e. MySQL only)
    """
    ansi_bind_rules = False
    """SQL 92 doesn't allow bind parameters to be used
    in the columns clause of a SELECT, nor does it allow
    ambiguous expressions like "? = ?".  A compiler
    subclass can set this flag to True if the target
    driver/DB enforces this
    """
    def __init__(self, dialect, statement, column_keys=None,
                    inline=False, **kwargs):
        """Construct a new ``DefaultCompiler`` object.

        dialect
          ``Dialect`` to be used

        statement
          ``ClauseElement`` to be compiled

        column_keys
          a list of column names to be compiled into an INSERT or UPDATE
          statement.

        inline
          render INSERT/UPDATE defaults and sequences inline rather than
          pre-executing them.
        """
        self.column_keys = column_keys
        # compile INSERT/UPDATE defaults/sequences inlined (no pre-
        # execute)
        self.inline = inline or getattr(statement, 'inline', False)
        # a dictionary of bind parameter keys to _BindParamClause
        # instances.
        self.binds = {}
        # a dictionary of _BindParamClause instances to "compiled" names
        # that are actually present in the generated SQL
        self.bind_names = util.column_dict()
        # stack which keeps track of nested SELECT statements
        self.stack = []
        # relates label names in the final SQL to a tuple of local
        # column/label name, ColumnElement object (if any) and
        # TypeEngine. ResultProxy uses this for type processing and
        # column targeting
        self.result_map = {}
        # true if the paramstyle is positional
        self.positional = dialect.positional
        if self.positional:
            self.positiontup = []
        self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]
        # CTE collections are created lazily in _init_cte_state()
        self.ctes = None
        # an IdentifierPreparer that formats the quoting of identifiers
        self.preparer = dialect.identifier_preparer
        self.label_length = dialect.label_length \
            or dialect.max_identifier_length
        # a map which tracks "anonymous" identifiers that are created on
        # the fly here
        self.anon_map = util.PopulateDict(self._process_anon)
        # a map which tracks "truncated" names based on
        # dialect.label_length or dialect.max_identifier_length
        self.truncated_names = {}
        # base-class __init__ performs the actual statement compilation
        engine.Compiled.__init__(self, dialect, statement, **kwargs)
        if self.positional and dialect.paramstyle == 'numeric':
            self._apply_numbered_params()
    @util.memoized_instancemethod
    def _init_cte_state(self):
        """Initialize collections related to CTEs only if
        a CTE is located, to save on the overhead of
        these collections otherwise.

        Memoized: runs at most once per compiler instance.
        """
        # collect CTEs to tack on top of a SELECT
        self.ctes = util.OrderedDict()
        self.ctes_by_name = {}
        self.ctes_recursive = False
        if self.positional:
            # positional binds inside CTE bodies are collected separately
            self.cte_positional = []
def _apply_numbered_params(self):
poscount = itertools.count(1)
self.string = re.sub(
r'\[_POSITION\]',
lambda m:str(util.next(poscount)),
self.string)
    @util.memoized_property
    def _bind_processors(self):
        # Map each compiled bind name to its type's bind processor,
        # omitting entries whose type has no processor.  Cached on first
        # access via memoized_property.
        return dict(
            (key, value) for key, value in
            ( (self.bind_names[bindparam],
               bindparam.type._cached_bind_processor(self.dialect))
              for bindparam in self.bind_names )
            if value is not None
        )
    def is_subquery(self):
        # more than one entry on the stack means compilation is currently
        # inside a nested SELECT
        return len(self.stack) > 1
    @property
    def sql_compiler(self):
        # this object is already a SQL-string compiler
        return self
def construct_params(self, params=None, _group_number=None, _check=True):
"""return a dictionary of bind parameter keys and values"""
if params:
pd = {}
for bindparam, name in self.bind_names.iteritems():
if bindparam.key in params:
pd[name] = params[bindparam.key]
elif name in params:
pd[name] = params[name]
elif _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d" %
(bindparam.key, _group_number))
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key)
else:
pd[name] = bindparam.effective_value
return pd
else:
pd = {}
for bindparam in self.bind_names:
if _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d" %
(bindparam.key, _group_number))
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key)
pd[self.bind_names[bindparam]] = bindparam.effective_value
return pd
    @property
    def params(self):
        """Return the bind param dictionary embedded into this
        compiled object, for those values that are present."""
        # _check=False: absent "required" binds are not an error here
        return self.construct_params(_check=False)
    def default_from(self):
        """Called when a SELECT statement has no froms, and no FROM clause is
        to be appended.

        Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
        """
        return ""
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
    def visit_label(self, label, result_map=None,
                            within_label_clause=False,
                            within_columns_clause=False, **kw):
        """Render a label, emitting "<expr> AS <name>" only where labels
        are meaningful (the columns clause of a SELECT)."""
        # only render labels within the columns clause
        # or ORDER BY clause of a select.  dialect-specific compilers
        # can modify this behavior.
        if within_columns_clause and not within_label_clause:
            if isinstance(label.name, sql._truncated_label):
                labelname = self._truncated_identifier("colident", label.name)
            else:
                labelname = label.name
            if result_map is not None:
                # register under the lowercased label for result-row
                # targeting by ResultProxy
                result_map[labelname.lower()] = (
                        label.name,
                        (label, label.element, labelname, ) +
                            label._alt_names,
                        label.type)
            return label.element._compiler_dispatch(self,
                                    within_columns_clause=True,
                                    within_label_clause=True,
                                    **kw) + \
                        OPERATORS[operators.as_] + \
                        self.preparer.format_label(label, labelname)
        else:
            # elsewhere, render only the labeled element itself
            return label.element._compiler_dispatch(self,
                                    within_columns_clause=False,
                                    **kw)
    def visit_column(self, column, result_map=None, **kwargs):
        """Render a column reference, optionally qualified with its table
        (and schema), quoting and truncating the name as needed."""
        name = orig_name = column.name
        if name is None:
            raise exc.CompileError("Cannot compile Column object until "
                                   "it's 'name' is assigned.")
        is_literal = column.is_literal
        if not is_literal and isinstance(name, sql._truncated_label):
            name = self._truncated_identifier("colident", name)
        if result_map is not None:
            # register for result-row targeting by ResultProxy
            result_map[name.lower()] = (orig_name,
                                        (column, name, column.key),
                                        column.type)
        if is_literal:
            # literal_column() text: escape rather than quote
            name = self.escape_literal_column(name)
        else:
            name = self.preparer.quote(name, column.quote)
        table = column.table
        if table is None or not table.named_with_column:
            # unqualified column (no table, or table can't prefix columns)
            return name
        else:
            if table.schema:
                schema_prefix = self.preparer.quote_schema(
                                    table.schema,
                                    table.quote_schema) + '.'
            else:
                schema_prefix = ''
            tablename = table.name
            if isinstance(tablename, sql._truncated_label):
                tablename = self._truncated_identifier("alias", tablename)
            return schema_prefix + \
                    self.preparer.quote(tablename, table.quote) + \
                    "." + name
    def escape_literal_column(self, text):
        """provide escaping for the literal_column() construct."""
        # double percent signs so later %-interpolation leaves them intact
        # TODO: some dialects might need different behavior here
        return text.replace('%', '%%')
    def visit_fromclause(self, fromclause, **kwargs):
        # a plain FromClause renders as its name
        return fromclause.name
    def visit_index(self, index, **kwargs):
        # an Index renders as its name
        return index.name
    def visit_typeclause(self, typeclause, **kwargs):
        # delegate type rendering to the dialect's type compiler
        return self.dialect.type_compiler.process(typeclause.type)
    def post_process_text(self, text):
        # hook for dialects to rewrite text() content; no-op by default
        return text
    def visit_textclause(self, textclause, **kwargs):
        """Render a text() construct, substituting :name tokens with bind
        parameter placeholders."""
        if textclause.typemap is not None:
            # register declared result-column types for ResultProxy
            for colname, type_ in textclause.typemap.iteritems():
                self.result_map[colname.lower()] = (colname, None, type_)
        def do_bindparam(m):
            # replace each ':name' token with either its declared
            # bindparam or a plain placeholder of that name
            name = m.group(1)
            if name in textclause.bindparams:
                return self.process(textclause.bindparams[name])
            else:
                return self.bindparam_string(name, **kwargs)
        # un-escape any \:params
        return BIND_PARAMS_ESC.sub(lambda m: m.group(1),
            BIND_PARAMS.sub(do_bindparam,
             self.post_process_text(textclause.text))
        )
    def visit_null(self, expr, **kw):
        """Render the SQL NULL literal."""
        return 'NULL'
    def visit_true(self, expr, **kw):
        """Render the SQL true literal."""
        return 'true'
    def visit_false(self, expr, **kw):
        """Render the SQL false literal."""
        return 'false'
def visit_clauselist(self, clauselist, **kwargs):
sep = clauselist.operator
if sep is None:
sep = " "
else:
sep = OPERATORS[clauselist.operator]
return sep.join(
s for s in
(c._compiler_dispatch(self, **kwargs)
for c in clauselist.clauses)
if s)
def visit_case(self, clause, **kwargs):
x = "CASE "
if clause.value is not None:
x += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
x += "WHEN " + cond._compiler_dispatch(
self, **kwargs
) + " THEN " + result._compiler_dispatch(
self, **kwargs) + " "
if clause.else_ is not None:
x += "ELSE " + clause.else_._compiler_dispatch(
self, **kwargs
) + " "
x += "END"
return x
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % \
(cast.clause._compiler_dispatch(self, **kwargs),
cast.typeclause._compiler_dispatch(self, **kwargs))
    def visit_over(self, over, **kwargs):
        """Render '<func> OVER (PARTITION BY ... ORDER BY ...)', omitting
        whichever of the two sub-clauses is absent or empty."""
        return "%s OVER (%s)" % (
            over.func._compiler_dispatch(self, **kwargs),
            ' '.join(
                 '%s BY %s' % (word, clause._compiler_dispatch(self, **kwargs))
                 for word, clause in (
                     ('PARTITION', over.partition_by),
                     ('ORDER', over.order_by)
                 )
                 if clause is not None and len(clause)
            )
        )
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
return "EXTRACT(%s FROM %s)" % (field,
extract.expr._compiler_dispatch(self, **kwargs))
    def visit_function(self, func, result_map=None, **kwargs):
        """Render a SQL function call, preferring a dialect-specific
        visit_<name>_func() hook, then the generic FUNCTIONS templates."""
        if result_map is not None:
            result_map[func.name.lower()] = (func.name, None, func.type)
        disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
        if disp:
            return disp(func, **kwargs)
        else:
            # default: '<name>(<args>)', dot-joined with package names
            name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s")
            return ".".join(list(func.packagenames) + [name]) % \
                            {'expr': self.function_argspec(func, **kwargs)}
    def visit_next_value_func(self, next_value, **kw):
        # sequence "next value" renders via the dialect's sequence support
        return self.visit_sequence(next_value.sequence)
    def visit_sequence(self, sequence):
        # base compiler has no sequence support; dialects override
        raise NotImplementedError(
            "Dialect '%s' does not support sequence increments." % self.dialect.name
        )
    def function_argspec(self, func, **kwargs):
        # render the function's parenthesized argument list
        return func.clause_expr._compiler_dispatch(self, **kwargs)
    def visit_compound_select(self, cs, asfrom=False,
                            parens=True, compound_index=0, **kwargs):
        """Render a UNION/EXCEPT/INTERSECT of SELECT statements."""
        entry = self.stack and self.stack[-1] or {}
        self.stack.append({'from': entry.get('from', None),
                    'iswrapper': not entry})
        keyword = self.compound_keywords.get(cs.keyword)
        # join the member SELECTs with the set-operation keyword
        text = (" " + keyword + " ").join(
            (c._compiler_dispatch(self,
                                asfrom=asfrom, parens=False,
                                compound_index=i, **kwargs)
            for i, c in enumerate(cs.selects))
        )
        group_by = cs._group_by_clause._compiler_dispatch(
                                self, asfrom=asfrom, **kwargs)
        if group_by:
            text += " GROUP BY " + group_by
        text += self.order_by_clause(cs, **kwargs)
        text += (cs._limit is not None or cs._offset is not None) and \
                        self.limit_clause(cs) or ""
        # a WITH clause is prepended only at the outermost statement
        if self.ctes and \
            compound_index == 0 and not entry:
            text = self._render_cte_clause() + text
        self.stack.pop(-1)
        if asfrom and parens:
            return "(" + text + ")"
        else:
            return text
def visit_unary(self, unary, **kw):
s = unary.element._compiler_dispatch(self, **kw)
if unary.operator:
s = OPERATORS[unary.operator] + s
if unary.modifier:
s = s + OPERATORS[unary.modifier]
return s
    def visit_binary(self, binary, **kw):
        """Render a binary expression via _operator_dispatch."""
        # don't allow "? = ?" to render
        if self.ansi_bind_rules and \
            isinstance(binary.left, sql._BindParamClause) and \
            isinstance(binary.right, sql._BindParamClause):
            # both sides are binds: force literal rendering
            kw['literal_binds'] = True
        return self._operator_dispatch(binary.operator,
                    binary,
                    lambda opstr: binary.left._compiler_dispatch(self, **kw) +
                                        opstr +
                                    binary.right._compiler_dispatch(
                                            self, **kw),
                    **kw
        )
def visit_like_op(self, binary, **kw):
escape = binary.modifiers.get("escape", None)
return '%s LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (escape and
(' ESCAPE ' + self.render_literal_value(escape, None))
or '')
def visit_notlike_op(self, binary, **kw):
escape = binary.modifiers.get("escape", None)
return '%s NOT LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (escape and
(' ESCAPE ' + self.render_literal_value(escape, None))
or '')
def visit_ilike_op(self, binary, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (escape and
(' ESCAPE ' + self.render_literal_value(escape, None))
or '')
def visit_notilike_op(self, binary, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) NOT LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (escape and
(' ESCAPE ' + self.render_literal_value(escape, None))
or '')
def _operator_dispatch(self, operator, element, fn, **kw):
if util.callable(operator):
disp = getattr(self, "visit_%s" % operator.__name__, None)
if disp:
return disp(element, **kw)
else:
return fn(OPERATORS[operator])
else:
return fn(" " + operator + " ")
    def visit_bindparam(self, bindparam, within_columns_clause=False,
                                            literal_binds=False, **kwargs):
        """Render a bind parameter placeholder, or an inline literal when
        required, registering the bind under its (truncated) name."""
        if literal_binds or \
            (within_columns_clause and \
                self.ansi_bind_rules):
            if bindparam.value is None:
                raise exc.CompileError("Bind parameter without a "
                                        "renderable value not allowed here.")
            return self.render_literal_bindparam(bindparam,
                            within_columns_clause=True, **kwargs)
        name = self._truncate_bindparam(bindparam)
        if name in self.binds:
            existing = self.binds[name]
            if existing is not bindparam:
                # two distinct binds sharing a name: reject ambiguous cases
                if (existing.unique or bindparam.unique) and \
                        not existing.proxy_set.intersection(bindparam.proxy_set):
                    raise exc.CompileError(
                            "Bind parameter '%s' conflicts with "
                            "unique bind parameter of the same name" %
                            bindparam.key
                        )
                elif getattr(existing, '_is_crud', False) or \
                        getattr(bindparam, '_is_crud', False):
                    raise exc.CompileError(
                            "bindparam() name '%s' is reserved "
                            "for automatic usage in the VALUES or SET "
                            "clause of this "
                            "insert/update statement. Please use a "
                            "name other than column name when using bindparam() "
                            "with insert() or update() (for example, 'b_%s')."
                            % (bindparam.key, bindparam.key)
                        )
        # register under both original key and compiled name
        self.binds[bindparam.key] = self.binds[name] = bindparam
        return self.bindparam_string(name, **kwargs)
def render_literal_bindparam(self, bindparam, **kw):
value = bindparam.value
processor = bindparam.type._cached_bind_processor(self.dialect)
if processor:
value = processor(value)
return self.render_literal_value(value, bindparam.type)
    def render_literal_value(self, value, type_):
        """Render the value of a bind parameter as a quoted literal.

        This is used for statement sections that do not accept bind
        parameters on the target driver/database.

        This should be implemented by subclasses using the quoting services
        of the DBAPI.
        """
        # NOTE: Python 2 types (basestring, long) are intentional here.
        if isinstance(value, basestring):
            # naive single-quote doubling; dialects should override with
            # proper DBAPI quoting
            value = value.replace("'", "''")
            return "'%s'" % value
        elif value is None:
            return "NULL"
        elif isinstance(value, (float, int, long)):
            return repr(value)
        elif isinstance(value, decimal.Decimal):
            return str(value)
        else:
            raise NotImplementedError(
                        "Don't know how to literal-quote value %r" % value)
def _truncate_bindparam(self, bindparam):
if bindparam in self.bind_names:
return self.bind_names[bindparam]
bind_name = bindparam.key
if isinstance(bind_name, sql._truncated_label):
bind_name = self._truncated_identifier("bindparam", bind_name)
# add to bind_names for translation
self.bind_names[bindparam] = bind_name
return bind_name
    def _truncated_identifier(self, ident_class, name):
        """Return a name shortened to fit self.label_length, made unique
        per ident_class with a hex counter suffix; results are cached."""
        if (ident_class, name) in self.truncated_names:
            return self.truncated_names[(ident_class, name)]
        anonname = name.apply_map(self.anon_map)
        if len(anonname) > self.label_length:
            # reserve up to 6 chars for the "_<hex>" uniqueness suffix
            counter = self.truncated_names.get(ident_class, 1)
            truncname = anonname[0:max(self.label_length - 6, 0)] + \
                                "_" + hex(counter)[2:]
            self.truncated_names[ident_class] = counter + 1
        else:
            truncname = anonname
        self.truncated_names[(ident_class, name)] = truncname
        return truncname
    def _anonymize(self, name):
        # substitute anonymous-label tokens via the anon_map
        return name % self.anon_map
    def _process_anon(self, key):
        # key is "<ident> <derived>"; produce "<derived>_<counter>",
        # incrementing the per-derived counter stored in anon_map
        (ident, derived) = key.split(' ', 1)
        anonymous_counter = self.anon_map.get(derived, 1)
        self.anon_map[derived] = anonymous_counter + 1
        return derived + "_" + str(anonymous_counter)
def bindparam_string(self, name, positional_names=None, **kw):
if self.positional:
if positional_names is not None:
positional_names.append(name)
else:
self.positiontup.append(name)
return self.bindtemplate % {'name':name}
    def visit_cte(self, cte, asfrom=False, ashint=False,
                                fromhints=None,
                                **kwargs):
        """Accumulate a CTE's body into self.ctes and render its name
        reference; the WITH clause itself is emitted later by
        _render_cte_clause()."""
        self._init_cte_state()
        if self.positional:
            # binds inside the CTE body are collected separately so they
            # can be ordered ahead of the main statement's binds
            kwargs['positional_names'] = self.cte_positional
        if isinstance(cte.name, sql._truncated_label):
            cte_name = self._truncated_identifier("alias", cte.name)
        else:
            cte_name = cte.name
        if cte_name in self.ctes_by_name:
            existing_cte = self.ctes_by_name[cte_name]
            # we've generated a same-named CTE that we are enclosed in,
            # or this is the same CTE.  just return the name.
            if cte in existing_cte._restates or cte is existing_cte:
                return cte_name
            elif existing_cte in cte._restates:
                # we've generated a same-named CTE that is
                # enclosed in us - we take precedence, so
                # discard the text for the "inner".
                del self.ctes[existing_cte]
            else:
                raise exc.CompileError(
                        "Multiple, unrelated CTEs found with "
                        "the same name: %r" %
                        cte_name)
        self.ctes_by_name[cte_name] = cte
        if cte.cte_alias:
            if isinstance(cte.cte_alias, sql._truncated_label):
                cte_alias = self._truncated_identifier("alias", cte.cte_alias)
            else:
                cte_alias = cte.cte_alias
        if not cte.cte_alias and cte not in self.ctes:
            if cte.recursive:
                self.ctes_recursive = True
            text = self.preparer.format_alias(cte, cte_name)
            if cte.recursive:
                # recursive CTEs require an explicit column list
                if isinstance(cte.original, sql.Select):
                    col_source = cte.original
                elif isinstance(cte.original, sql.CompoundSelect):
                    col_source = cte.original.selects[0]
                else:
                    assert False
                recur_cols = [c for c in
                            util.unique_list(col_source.inner_columns)
                            if c is not None]
                text += "(%s)" % (", ".join(
                                    self.preparer.format_column(ident)
                                    for ident in recur_cols))
            text += " AS \n" + \
                        cte.original._compiler_dispatch(
                            self, asfrom=True, **kwargs
                        )
            self.ctes[cte] = text
        if asfrom:
            if cte.cte_alias:
                text = self.preparer.format_alias(cte, cte_alias)
                text += " AS " + cte_name
            else:
                return self.preparer.format_alias(cte, cte_name)
            return text
        # NOTE(review): when asfrom is false this method falls through and
        # returns None — appears intentional (body already accumulated),
        # but confirm against callers before relying on it.
    def visit_alias(self, alias, asfrom=False, ashint=False,
                                fromhints=None, **kwargs):
        """Render an alias: '<original> AS <name>' in a FROM clause, just
        the name as a hint, otherwise the original element alone."""
        if asfrom or ashint:
            if isinstance(alias.name, sql._truncated_label):
                alias_name = self._truncated_identifier("alias", alias.name)
            else:
                alias_name = alias.name
            if ashint:
                return self.preparer.format_alias(alias, alias_name)
            elif asfrom:
                ret = alias.original._compiler_dispatch(self,
                                asfrom=True, **kwargs) + \
                        " AS " + \
                        self.preparer.format_alias(alias, alias_name)
                if fromhints and alias in fromhints:
                    hinttext = self.get_from_hint_text(alias, fromhints[alias])
                    if hinttext:
                        ret += " " + hinttext
                return ret
        else:
            return alias.original._compiler_dispatch(self, **kwargs)
    def label_select_column(self, select, column, asfrom):
        """label columns present in a select().

        Returns either the column itself or a _CompileLabel wrapper,
        depending on whether the columns clause needs an AS label.
        """
        if isinstance(column, sql._Label):
            # already labeled
            return column
        elif select is not None and \
                select.use_labels and \
                column._label:
            # use_labels mode: label with the table-qualified name
            return _CompileLabel(
                    column,
                    column._label,
                    alt_names=(column._key_label, )
                )
        elif \
            asfrom and \
            isinstance(column, sql.ColumnClause) and \
            not column.is_literal and \
            column.table is not None and \
            not isinstance(column.table, sql.Select):
            # plain table column rendered in a subquery: label by name
            return _CompileLabel(column, sql._as_truncated(column.name),
                                    alt_names=(column.key,))
        elif not isinstance(column,
                    (sql._UnaryExpression, sql._TextClause)) \
                and (not hasattr(column, 'name') or \
                        isinstance(column, sql.Function)):
            # anonymous expression: generate an anonymous label
            return _CompileLabel(column, column.anon_label)
        else:
            return column
    def get_select_hint_text(self, byfroms):
        # hook for dialects; no statement-level SELECT hints by default
        return None
    def get_from_hint_text(self, table, text):
        # hook for dialects; no FROM-clause hints by default
        return None
    def get_crud_hint_text(self, table, text):
        # hook for dialects; no INSERT/UPDATE/DELETE hints by default
        return None
    def visit_select(self, select, asfrom=False, parens=True,
                            iswrapper=False, fromhints=None,
                            compound_index=0,
                            positional_names=None, **kwargs):
        """Render a full SELECT statement: hints, prefixes, columns,
        FROM, WHERE, GROUP BY, HAVING, ORDER BY, LIMIT/OFFSET, FOR
        UPDATE, and a leading WITH clause at the outermost level."""
        entry = self.stack and self.stack[-1] or {}
        existingfroms = entry.get('from', None)
        froms = select._get_display_froms(existingfroms)
        correlate_froms = set(sql._from_objects(*froms))
        # TODO: might want to propagate existing froms for
        # select(select(select)) where innermost select should correlate
        # to outermost if existingfroms: correlate_froms =
        # correlate_froms.union(existingfroms)
        # only the outermost (or wrapping) SELECT populates result_map
        populate_result_map = compound_index == 0 and (
                                not entry or \
                                entry.get('iswrapper', False)
                            )
        self.stack.append({'from': correlate_froms, 'iswrapper': iswrapper})
        if populate_result_map:
            column_clause_args = {'result_map': self.result_map,
                                    'positional_names': positional_names}
        else:
            column_clause_args = {'positional_names': positional_names}
        # the actual list of columns to print in the SELECT column list.
        inner_columns = [
            c for c in [
                self.label_select_column(select, co, asfrom=asfrom).\
                    _compiler_dispatch(self,
                        within_columns_clause=True,
                        **column_clause_args)
                for co in util.unique_list(select.inner_columns)
            ]
            if c is not None
        ]
        text = "SELECT "  # we're off to a good start !
        if select._hints:
            # resolve per-dialect hints for the FROM elements
            byfrom = dict([
                        (from_, hinttext % {
                            'name':from_._compiler_dispatch(
                                self, ashint=True)
                        })
                        for (from_, dialect), hinttext in
                        select._hints.iteritems()
                        if dialect in ('*', self.dialect.name)
                    ])
            hint_text = self.get_select_hint_text(byfrom)
            if hint_text:
                text += hint_text + " "
        if select._prefixes:
            text += " ".join(
                        x._compiler_dispatch(self, **kwargs)
                        for x in select._prefixes) + " "
        text += self.get_select_precolumns(select)
        text += ', '.join(inner_columns)
        if froms:
            text += " \nFROM "
            if select._hints:
                text += ', '.join([f._compiler_dispatch(self,
                                    asfrom=True, fromhints=byfrom,
                                    **kwargs)
                                for f in froms])
            else:
                text += ', '.join([f._compiler_dispatch(self,
                                    asfrom=True, **kwargs)
                                for f in froms])
        else:
            # no FROM elements; dialect hook may supply e.g. "FROM DUAL"
            text += self.default_from()
        if select._whereclause is not None:
            t = select._whereclause._compiler_dispatch(self, **kwargs)
            if t:
                text += " \nWHERE " + t
        if select._group_by_clause.clauses:
            group_by = select._group_by_clause._compiler_dispatch(
                                        self, **kwargs)
            if group_by:
                text += " GROUP BY " + group_by
        if select._having is not None:
            t = select._having._compiler_dispatch(self, **kwargs)
            if t:
                text += " \nHAVING " + t
        if select._order_by_clause.clauses:
            text += self.order_by_clause(select, **kwargs)
        if select._limit is not None or select._offset is not None:
            text += self.limit_clause(select)
        if select.for_update:
            text += self.for_update_clause(select)
        # a WITH clause is prepended only at the outermost statement
        if self.ctes and \
            compound_index == 0 and not entry:
            text = self._render_cte_clause() + text
        self.stack.pop(-1)
        if asfrom and parens:
            return "(" + text + ")"
        else:
            return text
def _render_cte_clause(self):
if self.positional:
self.positiontup = self.cte_positional + self.positiontup
cte_text = self.get_cte_preamble(self.ctes_recursive) + " "
cte_text += ", \n".join(
[txt for txt in self.ctes.values()]
)
cte_text += "\n "
return cte_text
def get_cte_preamble(self, recursive):
if recursive:
return "WITH RECURSIVE"
else:
return "WITH"
def get_select_precolumns(self, select):
"""Called when building a ``SELECT`` statement, position is just
before column list.
"""
return select._distinct and "DISTINCT " or ""
def order_by_clause(self, select, **kw):
order_by = select._order_by_clause._compiler_dispatch(self, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def for_update_clause(self, select):
if select.for_update:
return " FOR UPDATE"
else:
return ""
    def limit_clause(self, select):
        """Render LIMIT/OFFSET for the generic dialect."""
        text = ""
        if select._limit is not None:
            text +=  "\n LIMIT " + self.process(sql.literal(select._limit))
        if select._offset is not None:
            if select._limit is None:
                # OFFSET with no LIMIT: emit "LIMIT -1" (no-limit marker
                # accepted by e.g. SQLite — presumably required before
                # OFFSET there; confirm per target dialect)
                text += "\n LIMIT -1"
            text += " OFFSET " + self.process(sql.literal(select._offset))
        return text
    def visit_table(self, table, asfrom=False, ashint=False,
                        fromhints=None, **kwargs):
        """Render a table reference for a FROM clause or hint; tables
        render as '' outside those contexts."""
        if asfrom or ashint:
            if getattr(table, "schema", None):
                ret = self.preparer.quote_schema(table.schema,
                                table.quote_schema) + \
                                "." + self.preparer.quote(table.name,
                                                table.quote)
            else:
                ret = self.preparer.quote(table.name, table.quote)
            if fromhints and table in fromhints:
                hinttext = self.get_from_hint_text(table, fromhints[table])
                if hinttext:
                    ret += " " + hinttext
            return ret
        else:
            return ""
def visit_join(self, join, asfrom=False, **kwargs):
return (
join.left._compiler_dispatch(self, asfrom=True, **kwargs) +
(join.isouter and " LEFT OUTER JOIN " or " JOIN ") +
join.right._compiler_dispatch(self, asfrom=True, **kwargs) +
" ON " +
join.onclause._compiler_dispatch(self, **kwargs)
)
    def visit_insert(self, insert_stmt):
        """Render an INSERT statement: prefixes, hints, column list,
        VALUES (or DEFAULT VALUES), and optional RETURNING."""
        self.isinsert = True
        colparams = self._get_colparams(insert_stmt)
        if not colparams and \
                not self.dialect.supports_default_values and \
                not self.dialect.supports_empty_insert:
            raise exc.CompileError("The version of %s you are using does "
                                    "not support empty inserts." %
                                    self.dialect.name)
        preparer = self.preparer
        supports_default_values = self.dialect.supports_default_values
        text = "INSERT"
        prefixes = [self.process(x) for x in insert_stmt._prefixes]
        if prefixes:
            text += " " + " ".join(prefixes)
        text += " INTO " + preparer.format_table(insert_stmt.table)
        if insert_stmt._hints:
            # resolve per-dialect CRUD hints for this table
            dialect_hints = dict([
                (table, hint_text)
                for (table, dialect), hint_text in
                insert_stmt._hints.items()
                if dialect in ('*', self.dialect.name)
            ])
            if insert_stmt.table in dialect_hints:
                text += " " + self.get_crud_hint_text(
                                insert_stmt.table,
                                dialect_hints[insert_stmt.table]
                            )
        if colparams or not supports_default_values:
            text += " (%s)" % ', '.join([preparer.format_column(c[0])
                       for c in colparams])
        if self.returning or insert_stmt._returning:
            self.returning = self.returning or insert_stmt._returning
            returning_clause = self.returning_clause(
                                insert_stmt, self.returning)
            if self.returning_precedes_values:
                text += " " + returning_clause
        if not colparams and supports_default_values:
            text += " DEFAULT VALUES"
        else:
            text += " VALUES (%s)" % \
                        ', '.join([c[1] for c in colparams])
        # returning_clause is only unbound here when self.returning is
        # falsy, in which case this branch is not taken
        if self.returning and not self.returning_precedes_values:
            text += " " + returning_clause
        return text
    def update_limit_clause(self, update_stmt):
        """Provide a hook for MySQL to add LIMIT to the UPDATE"""
        return None
    def update_tables_clause(self, update_stmt, from_table,
                                            extra_froms, **kw):
        """Provide a hook to override the initial table clause
        in an UPDATE statement.

        MySQL overrides this.
        """
        return self.preparer.format_table(from_table)
    def update_from_clause(self, update_stmt,
                                from_table, extra_froms,
                                from_hints,
                                **kw):
        """Provide a hook to override the generation of an
        UPDATE..FROM clause.

        MySQL and MSSQL override this.
        """
        return "FROM " + ', '.join(
                    t._compiler_dispatch(self, asfrom=True,
                                    fromhints=from_hints, **kw)
                    for t in extra_froms)
    def visit_update(self, update_stmt, **kw):
        """Compile an Update construct into an UPDATE string.

        Renders, in order: the table clause, optional dialect hints,
        SET assignments, optional RETURNING, extra FROM tables (for
        multi-table UPDATE), the WHERE clause, and an optional LIMIT
        hook.
        """
        # push the target table so embedded elements can correlate to it
        self.stack.append({'from': set([update_stmt.table])})
        self.isupdate = True
        extra_froms = update_stmt._extra_froms
        # (column, rendered value) pairs for the SET clause
        colparams = self._get_colparams(update_stmt, extra_froms)
        text = "UPDATE " + self.update_tables_clause(
                                        update_stmt,
                                        update_stmt.table,
                                        extra_froms, **kw)
        if update_stmt._hints:
            # keep only hints addressed to this dialect (or the '*' wildcard)
            dialect_hints = dict([
                (table, hint_text)
                for (table, dialect), hint_text in
                update_stmt._hints.items()
                if dialect in ('*', self.dialect.name)
            ])
            if update_stmt.table in dialect_hints:
                text += " " + self.get_crud_hint_text(
                                    update_stmt.table,
                                    dialect_hints[update_stmt.table]
                                )
        else:
            dialect_hints = None
        text += ' SET '
        if extra_froms and self.render_table_with_column_in_update_from:
            # dialect requires table-qualified column names in SET when
            # extra FROM tables are present
            text += ', '.join(
                            self.visit_column(c[0]) +
                            '=' + c[1] for c in colparams
                            )
        else:
            text += ', '.join(
                        self.preparer.quote(c[0].name, c[0].quote) +
                        '=' + c[1] for c in colparams
                        )
        if update_stmt._returning:
            self.returning = update_stmt._returning
            if self.returning_precedes_values:
                text += " " + self.returning_clause(
                                    update_stmt, update_stmt._returning)
        if extra_froms:
            extra_from_text = self.update_from_clause(
                                        update_stmt,
                                        update_stmt.table,
                                        extra_froms,
                                        dialect_hints, **kw)
            if extra_from_text:
                text += " " + extra_from_text
        if update_stmt._whereclause is not None:
            text += " WHERE " + self.process(update_stmt._whereclause)
        # dialect hook; only MySQL currently emits anything here
        limit_clause = self.update_limit_clause(update_stmt)
        if limit_clause:
            text += " " + limit_clause
        if self.returning and not self.returning_precedes_values:
            # most dialects render RETURNING at the end of the statement
            text += " " + self.returning_clause(
                                update_stmt, update_stmt._returning)
        self.stack.pop(-1)
        return text
    def _create_crud_bind_param(self, col, value, required=False):
        """Create and render a bind parameter for *col* within an
        INSERT/UPDATE values clause.

        :param col: the Column the bind corresponds to
        :param value: initial value for the bind, may be ``None``
        :param required: when True, a value must be supplied at
            execution time
        :return: the rendered bind parameter string
        """
        bindparam = sql.bindparam(col.key, value,
                            type_=col.type, required=required)
        # flag the bind so the execution layer knows it was generated
        # by CRUD compilation rather than given explicitly
        bindparam._is_crud = True
        return bindparam._compiler_dispatch(self)
    def _get_colparams(self, stmt, extra_tables=None):
        """create a set of tuples representing column/string pairs for use
        in an INSERT or UPDATE statement.

        Also generates the Compiled object's postfetch, prefetch, and
        returning column collections, used for default handling and ultimately
        populating the ResultProxy's prefetch_cols() and postfetch_cols()
        collections.

        :param stmt: the Insert or Update construct being compiled
        :param extra_tables: for multi-table UPDATE only, tables beyond
            the primary target whose columns may also appear in SET
        :return: list of (Column, rendered SQL string) tuples
        """
        self.postfetch = []
        self.prefetch = []
        self.returning = []
        # no parameters in the statement, no parameters in the
        # compiled params - return binds for all columns
        if self.column_keys is None and stmt.parameters is None:
            return [
                        (c, self._create_crud_bind_param(c,
                                    None, required=True))
                        for c in stmt.table.columns
                    ]
        # sentinel marking binds which must receive a value at execution
        required = object()
        # if we have statement parameters - set defaults in the
        # compiled params
        if self.column_keys is None:
            parameters = {}
        else:
            parameters = dict((sql._column_as_key(key), required)
                              for key in self.column_keys
                              if not stmt.parameters or
                              key not in stmt.parameters)
        if stmt.parameters is not None:
            # NOTE: .iteritems() is Python 2 only
            for k, v in stmt.parameters.iteritems():
                parameters.setdefault(sql._column_as_key(k), v)
        # create a list of column assignment clauses as tuples
        values = []
        # primary keys must be fetched back for an INSERT unless the
        # statement is inline or has an explicit RETURNING
        need_pks = self.isinsert and \
                        not self.inline and \
                        not stmt._returning
        implicit_returning = need_pks and \
                                self.dialect.implicit_returning and \
                                stmt.table.implicit_returning
        postfetch_lastrowid = need_pks and self.dialect.postfetch_lastrowid
        # columns of extra_tables consumed via stmt.parameters;
        # excluded from the main per-column loop below
        check_columns = {}
        # special logic that only occurs for multi-table UPDATE
        # statements
        if extra_tables and stmt.parameters:
            assert self.isupdate
            affected_tables = set()
            for t in extra_tables:
                for c in t.c:
                    if c in stmt.parameters:
                        affected_tables.add(t)
                        check_columns[c.key] = c
                        value = stmt.parameters[c]
                        if sql._is_literal(value):
                            value = self._create_crud_bind_param(
                                c, value, required=value is required)
                        else:
                            # SQL expression value; its result must be
                            # post-fetched after execution
                            self.postfetch.append(c)
                            value = self.process(value.self_group())
                        values.append((c, value))
            # determine tables which are actually
            # to be updated - process onupdate and
            # server_onupdate for these
            for t in affected_tables:
                for c in t.c:
                    if c in stmt.parameters:
                        continue
                    elif c.onupdate is not None and not c.onupdate.is_sequence:
                        if c.onupdate.is_clause_element:
                            values.append(
                                (c, self.process(c.onupdate.arg.self_group()))
                            )
                            self.postfetch.append(c)
                        else:
                            values.append(
                                (c, self._create_crud_bind_param(c, None))
                            )
                            self.prefetch.append(c)
                    elif c.server_onupdate is not None:
                        self.postfetch.append(c)
        # iterating through columns at the top to maintain ordering.
        # otherwise we might iterate through individual sets of
        # "defaults", "primary key cols", etc.
        for c in stmt.table.columns:
            if c.key in parameters and c.key not in check_columns:
                # an explicit value was supplied for this column
                value = parameters.pop(c.key)
                if sql._is_literal(value):
                    value = self._create_crud_bind_param(
                                    c, value, required=value is required)
                elif c.primary_key and implicit_returning:
                    self.returning.append(c)
                    value = self.process(value.self_group())
                else:
                    self.postfetch.append(c)
                    value = self.process(value.self_group())
                values.append((c, value))
            elif self.isinsert:
                # primary key handling: render a sequence/default or rely
                # on RETURNING / lastrowid to get the generated value back
                if c.primary_key and \
                    need_pks and \
                    (
                        implicit_returning or
                        not postfetch_lastrowid or
                        c is not stmt.table._autoincrement_column
                    ):
                    if implicit_returning:
                        if c.default is not None:
                            if c.default.is_sequence:
                                if self.dialect.supports_sequences and \
                                    (not c.default.optional or \
                                    not self.dialect.sequences_optional):
                                    proc = self.process(c.default)
                                    values.append((c, proc))
                                self.returning.append(c)
                            elif c.default.is_clause_element:
                                values.append(
                                    (c,
                                    self.process(c.default.arg.self_group()))
                                )
                                self.returning.append(c)
                            else:
                                values.append(
                                    (c, self._create_crud_bind_param(c, None))
                                )
                                self.prefetch.append(c)
                        else:
                            self.returning.append(c)
                    else:
                        if c.default is not None or \
                            c is stmt.table._autoincrement_column and (
                                self.dialect.supports_sequences or
                                self.dialect.preexecute_autoincrement_sequences
                            ):
                            values.append(
                                (c, self._create_crud_bind_param(c, None))
                            )
                            self.prefetch.append(c)
                elif c.default is not None:
                    # non-PK column with a Python-side default
                    if c.default.is_sequence:
                        if self.dialect.supports_sequences and \
                            (not c.default.optional or \
                            not self.dialect.sequences_optional):
                            proc = self.process(c.default)
                            values.append((c, proc))
                            if not c.primary_key:
                                self.postfetch.append(c)
                    elif c.default.is_clause_element:
                        values.append(
                            (c, self.process(c.default.arg.self_group()))
                        )
                        if not c.primary_key:
                            # dont add primary key column to postfetch
                            self.postfetch.append(c)
                    else:
                        values.append(
                            (c, self._create_crud_bind_param(c, None))
                        )
                        self.prefetch.append(c)
                elif c.server_default is not None:
                    if not c.primary_key:
                        self.postfetch.append(c)
            elif self.isupdate:
                # column not named in SET; apply onupdate defaults
                if c.onupdate is not None and not c.onupdate.is_sequence:
                    if c.onupdate.is_clause_element:
                        values.append(
                            (c, self.process(c.onupdate.arg.self_group()))
                        )
                        self.postfetch.append(c)
                    else:
                        values.append(
                            (c, self._create_crud_bind_param(c, None))
                        )
                        self.prefetch.append(c)
                elif c.server_onupdate is not None:
                    self.postfetch.append(c)
        if parameters and stmt.parameters:
            # warn about explicitly-given parameter keys which matched
            # no column of the target (or extra) tables
            check = set(parameters).intersection(
                sql._column_as_key(k) for k in stmt.parameters
            ).difference(check_columns)
            if check:
                util.warn(
                    "Unconsumed column names: %s" %
                    (", ".join(check))
                )
        return values
    def visit_delete(self, delete_stmt):
        """Compile a Delete construct into a DELETE string.

        Renders the target table, optional dialect hints, optional
        RETURNING, and the WHERE clause.
        """
        # push the target table so embedded elements can correlate to it
        self.stack.append({'from': set([delete_stmt.table])})
        self.isdelete = True
        text = "DELETE FROM " + self.preparer.format_table(delete_stmt.table)
        if delete_stmt._hints:
            # keep only hints addressed to this dialect (or '*')
            dialect_hints = dict([
                (table, hint_text)
                for (table, dialect), hint_text in
                delete_stmt._hints.items()
                if dialect in ('*', self.dialect.name)
            ])
            if delete_stmt.table in dialect_hints:
                text += " " + self.get_crud_hint_text(
                                delete_stmt.table,
                                dialect_hints[delete_stmt.table]
                            )
        else:
            dialect_hints = None
        if delete_stmt._returning:
            self.returning = delete_stmt._returning
            if self.returning_precedes_values:
                text += " " + self.returning_clause(
                                delete_stmt, delete_stmt._returning)
        if delete_stmt._whereclause is not None:
            text += " WHERE " + self.process(delete_stmt._whereclause)
        if self.returning and not self.returning_precedes_values:
            # most dialects render RETURNING at the end of the statement
            text += " " + self.returning_clause(
                            delete_stmt, delete_stmt._returning)
        self.stack.pop(-1)
        return text
def visit_savepoint(self, savepoint_stmt):
return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TO SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
def visit_release_savepoint(self, savepoint_stmt):
return "RELEASE SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
class DDLCompiler(engine.Compiled):
    """Compiles DDL constructs (CREATE/DROP for tables, indexes,
    sequences, schemas, and constraints) into strings for a dialect.
    """
    @util.memoized_property
    def sql_compiler(self):
        # SQL expression compiler used for embedded expressions such as
        # CHECK constraints and server default clauses
        return self.dialect.statement_compiler(self.dialect, None)
    @property
    def preparer(self):
        return self.dialect.identifier_preparer
    def construct_params(self, params=None):
        # DDL statements carry no bind parameters
        return None
    def visit_ddl(self, ddl, **kwargs):
        # table events can substitute table and schema name
        context = ddl.context
        if isinstance(ddl.target, schema.Table):
            context = context.copy()
            preparer = self.dialect.identifier_preparer
            path = preparer.format_table_seq(ddl.target)
            if len(path) == 1:
                table, sch = path[0], ''
            else:
                table, sch = path[-1], path[0]
            context.setdefault('table', table)
            context.setdefault('schema', sch)
            context.setdefault('fullname', preparer.format_table(ddl.target))
        # interpolate the substitution context into the raw DDL string
        return self.sql_compiler.post_process_text(ddl.statement % context)
    def visit_create_schema(self, create):
        return "CREATE SCHEMA " + self.preparer.format_schema(create.element, create.quote)
    def visit_drop_schema(self, drop):
        text = "DROP SCHEMA " + self.preparer.format_schema(drop.element, drop.quote)
        if drop.cascade:
            text += " CASCADE"
        return text
    def visit_create_table(self, create):
        """Render a complete CREATE TABLE statement including column
        specifications and table-level constraints."""
        table = create.element
        preparer = self.dialect.identifier_preparer
        text = "\n" + " ".join(['CREATE'] + \
                                    table._prefixes + \
                                    ['TABLE',
                                     preparer.format_table(table),
                                     "("])
        separator = "\n"
        # if only one primary key, specify it along with the column
        first_pk = False
        for column in table.columns:
            try:
                text += separator
                separator = ", \n"
                text += "\t" + self.get_column_specification(
                                                column,
                                                first_pk=column.primary_key and \
                                                not first_pk
                                            )
                if column.primary_key:
                    first_pk = True
                const = " ".join(self.process(constraint) \
                                for constraint in column.constraints)
                if const:
                    text += " " + const
            # re-raise compile errors annotated with the table/column
            # context, preserving the original traceback (Py2 syntax)
            except exc.CompileError, ce:
                # Py3K
                #raise exc.CompileError("(in table '%s', column '%s'): %s"
                #                                % (
                #                                    table.description,
                #                                    column.name,
                #                                    ce.args[0]
                #                                )) from ce
                # Py2K
                raise exc.CompileError("(in table '%s', column '%s'): %s"
                                                % (
                                                    table.description,
                                                    column.name,
                                                    ce.args[0]
                                                )), None, sys.exc_info()[2]
                # end Py2K
        const = self.create_table_constraints(table)
        if const:
            text += ", \n\t" + const
        text += "\n)%s\n\n" % self.post_create_table(table)
        return text
    def create_table_constraints(self, table):
        # On some DB order is significant: visit PK first, then the
        # other constraints (engine.ReflectionTest.testbasic failed on FB2)
        constraints = []
        if table.primary_key:
            constraints.append(table.primary_key)
        constraints.extend([c for c in table._sorted_constraints
                                if c is not table.primary_key])
        # skip constraints suppressed by a _create_rule, and constraints
        # deferred to ALTER TABLE via use_alter on alter-capable dialects
        return ", \n\t".join(p for p in
                        (self.process(constraint)
                        for constraint in constraints
                        if (
                            constraint._create_rule is None or
                            constraint._create_rule(self))
                        and (
                            not self.dialect.supports_alter or
                            not getattr(constraint, 'use_alter', False)
                        )) if p is not None
                )
    def visit_drop_table(self, drop):
        return "\nDROP TABLE " + self.preparer.format_table(drop.element)
    def _index_identifier(self, ident):
        # truncate auto-generated (truncated-label) index names to the
        # dialect's limit, appending an md5 fragment for uniqueness;
        # explicit names are validated instead
        if isinstance(ident, sql._truncated_label):
            max = self.dialect.max_index_name_length or \
                        self.dialect.max_identifier_length
            if len(ident) > max:
                ident = ident[0:max - 8] + \
                                "_" + util.md5_hex(ident)[-4:]
        else:
            self.dialect.validate_identifier(ident)
        return ident
    def visit_create_index(self, create):
        index = create.element
        preparer = self.preparer
        text = "CREATE "
        if index.unique:
            text += "UNIQUE "
        text += "INDEX %s ON %s (%s)" \
                    % (preparer.quote(self._index_identifier(index.name),
                        index.quote),
                       preparer.format_table(index.table),
                       ', '.join(preparer.quote(c.name, c.quote)
                                 for c in index.columns))
        return text
    def visit_drop_index(self, drop):
        index = drop.element
        if index.table is not None and index.table.schema:
            schema = index.table.schema
            schema_name = self.preparer.quote_schema(schema,
                                index.table.quote_schema)
        else:
            schema_name = None
        index_name = self.preparer.quote(
                        self._index_identifier(index.name),
                        index.quote)
        if schema_name:
            index_name = schema_name + "." + index_name
        return "\nDROP INDEX " + index_name
    def visit_add_constraint(self, create):
        preparer = self.preparer
        return "ALTER TABLE %s ADD %s" % (
            self.preparer.format_table(create.element.table),
            self.process(create.element)
        )
    def visit_create_sequence(self, create):
        text = "CREATE SEQUENCE %s" % \
                self.preparer.format_sequence(create.element)
        if create.element.increment is not None:
            text += " INCREMENT BY %d" % create.element.increment
        if create.element.start is not None:
            text += " START WITH %d" % create.element.start
        return text
    def visit_drop_sequence(self, drop):
        return "DROP SEQUENCE %s" % \
                self.preparer.format_sequence(drop.element)
    def visit_drop_constraint(self, drop):
        preparer = self.preparer
        return "ALTER TABLE %s DROP CONSTRAINT %s%s" % (
            self.preparer.format_table(drop.element.table),
            self.preparer.format_constraint(drop.element),
            drop.cascade and " CASCADE" or ""
        )
    def get_column_specification(self, column, **kwargs):
        """Render a single column clause: name, type, DEFAULT, NOT NULL."""
        colspec = self.preparer.format_column(column) + " " + \
                        self.dialect.type_compiler.process(column.type)
        default = self.get_column_default_string(column)
        if default is not None:
            colspec += " DEFAULT " + default
        if not column.nullable:
            colspec += " NOT NULL"
        return colspec
    def post_create_table(self, table):
        # hook for dialect-specific trailing keywords after the closing
        # parenthesis of CREATE TABLE (e.g. storage engine options)
        return ''
    def get_column_default_string(self, column):
        """Return the rendered server-side DEFAULT for *column*, or None."""
        if isinstance(column.server_default, schema.DefaultClause):
            # NOTE: basestring is Python 2 only
            if isinstance(column.server_default.arg, basestring):
                return "'%s'" % column.server_default.arg
            else:
                return self.sql_compiler.process(column.server_default.arg)
        else:
            return None
    def visit_check_constraint(self, constraint):
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                        self.preparer.format_constraint(constraint)
        # strip bind params etc. so the expression is DDL-safe
        sqltext = sql_util.expression_as_ddl(constraint.sqltext)
        text += "CHECK (%s)" % self.sql_compiler.process(sqltext)
        text += self.define_constraint_deferrability(constraint)
        return text
    def visit_column_check_constraint(self, constraint):
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                        self.preparer.format_constraint(constraint)
        text += "CHECK (%s)" % constraint.sqltext
        text += self.define_constraint_deferrability(constraint)
        return text
    def visit_primary_key_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                    self.preparer.format_constraint(constraint)
        text += "PRIMARY KEY "
        text += "(%s)" % ', '.join(self.preparer.quote(c.name, c.quote)
                                       for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        return text
    def visit_foreign_key_constraint(self, constraint):
        preparer = self.dialect.identifier_preparer
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                        preparer.format_constraint(constraint)
        remote_table = list(constraint._elements.values())[0].column.table
        text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % (
            ', '.join(preparer.quote(f.parent.name, f.parent.quote)
                      for f in constraint._elements.values()),
            self.define_constraint_remote_table(
                            constraint, remote_table, preparer),
            ', '.join(preparer.quote(f.column.name, f.column.quote)
                      for f in constraint._elements.values())
        )
        text += self.define_constraint_cascades(constraint)
        text += self.define_constraint_deferrability(constraint)
        return text
    def define_constraint_remote_table(self, constraint, table, preparer):
        """Format the remote table clause of a CREATE CONSTRAINT clause."""
        return preparer.format_table(table)
    def visit_unique_constraint(self, constraint):
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                    self.preparer.format_constraint(constraint)
        text += "UNIQUE (%s)" % (
                    ', '.join(self.preparer.quote(c.name, c.quote)
                            for c in constraint))
        text += self.define_constraint_deferrability(constraint)
        return text
    def define_constraint_cascades(self, constraint):
        """Render ON DELETE / ON UPDATE cascade options, if any."""
        text = ""
        if constraint.ondelete is not None:
            text += " ON DELETE %s" % constraint.ondelete
        if constraint.onupdate is not None:
            text += " ON UPDATE %s" % constraint.onupdate
        return text
    def define_constraint_deferrability(self, constraint):
        """Render DEFERRABLE / INITIALLY options, if any."""
        text = ""
        if constraint.deferrable is not None:
            if constraint.deferrable:
                text += " DEFERRABLE"
            else:
                text += " NOT DEFERRABLE"
        if constraint.initially is not None:
            text += " INITIALLY %s" % constraint.initially
        return text
class GenericTypeCompiler(engine.TypeCompiler):
    """Render generic, standards-oriented DDL strings for SQL types.

    Upper-case ``visit_XYZ`` methods render the named SQL standard
    type; lower-case methods route SQLAlchemy's generic types onto
    the closest standard rendering.
    """
    def _sized(self, name, type_):
        # Append "(length)" when a length is configured, else bare name.
        if type_.length:
            return "%s(%d)" % (name, type_.length)
        return name
    def visit_CHAR(self, type_):
        return self._sized("CHAR", type_)
    def visit_NCHAR(self, type_):
        return self._sized("NCHAR", type_)
    def visit_FLOAT(self, type_):
        return "FLOAT"
    def visit_REAL(self, type_):
        return "REAL"
    def visit_NUMERIC(self, type_):
        precision, scale = type_.precision, type_.scale
        if precision is None:
            return "NUMERIC"
        if scale is None:
            return "NUMERIC(%s)" % (precision,)
        return "NUMERIC(%s, %s)" % (precision, scale)
    def visit_DECIMAL(self, type_):
        return "DECIMAL"
    def visit_INTEGER(self, type_):
        return "INTEGER"
    def visit_SMALLINT(self, type_):
        return "SMALLINT"
    def visit_BIGINT(self, type_):
        return "BIGINT"
    def visit_TIMESTAMP(self, type_):
        return 'TIMESTAMP'
    def visit_DATETIME(self, type_):
        return "DATETIME"
    def visit_DATE(self, type_):
        return "DATE"
    def visit_TIME(self, type_):
        return "TIME"
    def visit_CLOB(self, type_):
        return "CLOB"
    def visit_NCLOB(self, type_):
        return "NCLOB"
    def visit_VARCHAR(self, type_):
        return self._sized("VARCHAR", type_)
    def visit_NVARCHAR(self, type_):
        return self._sized("NVARCHAR", type_)
    def visit_BLOB(self, type_):
        return "BLOB"
    def visit_BINARY(self, type_):
        return self._sized("BINARY", type_)
    def visit_VARBINARY(self, type_):
        return self._sized("VARBINARY", type_)
    def visit_BOOLEAN(self, type_):
        return "BOOLEAN"
    def visit_TEXT(self, type_):
        return "TEXT"
    # -- generic type delegations -------------------------------------
    def visit_large_binary(self, type_):
        return self.visit_BLOB(type_)
    def visit_boolean(self, type_):
        return self.visit_BOOLEAN(type_)
    def visit_time(self, type_):
        return self.visit_TIME(type_)
    def visit_datetime(self, type_):
        return self.visit_DATETIME(type_)
    def visit_date(self, type_):
        return self.visit_DATE(type_)
    def visit_big_integer(self, type_):
        return self.visit_BIGINT(type_)
    def visit_small_integer(self, type_):
        return self.visit_SMALLINT(type_)
    def visit_integer(self, type_):
        return self.visit_INTEGER(type_)
    def visit_real(self, type_):
        return self.visit_REAL(type_)
    def visit_float(self, type_):
        return self.visit_FLOAT(type_)
    def visit_numeric(self, type_):
        return self.visit_NUMERIC(type_)
    def visit_string(self, type_):
        return self.visit_VARCHAR(type_)
    def visit_unicode(self, type_):
        return self.visit_VARCHAR(type_)
    def visit_text(self, type_):
        return self.visit_TEXT(type_)
    def visit_unicode_text(self, type_):
        return self.visit_TEXT(type_)
    def visit_enum(self, type_):
        return self.visit_VARCHAR(type_)
    def visit_null(self, type_):
        raise NotImplementedError("Can't generate DDL for the null type")
    def visit_type_decorator(self, type_):
        return self.process(type_.type_engine(self.dialect))
    def visit_user_defined(self, type_):
        return type_.get_col_spec()
class IdentifierPreparer(object):
    """Handle quoting and case-folding of identifiers based on options."""
    # class-level defaults; dialects override with their own sets
    reserved_words = RESERVED_WORDS
    legal_characters = LEGAL_CHARACTERS
    illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS
    def __init__(self, dialect, initial_quote='"',
                    final_quote=None, escape_quote='"', omit_schema=False):
        """Construct a new ``IdentifierPreparer`` object.

        initial_quote
          Character that begins a delimited identifier.

        final_quote
          Character that ends a delimited identifier. Defaults to
          `initial_quote`.

        escape_quote
          Character used to escape an embedded quote char; rendered
          doubled inside delimited identifiers.

        omit_schema
          Prevent prepending schema name. Useful for databases that do
          not support schemae.
        """
        self.dialect = dialect
        self.initial_quote = initial_quote
        self.final_quote = final_quote or self.initial_quote
        self.escape_quote = escape_quote
        # an embedded quote char is escaped by doubling it
        self.escape_to_quote = self.escape_quote * 2
        self.omit_schema = omit_schema
        # cache of identifier -> rendered (quoted or plain) string
        self._strings = {}
    def _escape_identifier(self, value):
        """Escape an identifier.

        Subclasses should override this to provide database-dependent
        escaping behavior.
        """
        return value.replace(self.escape_quote, self.escape_to_quote)
    def _unescape_identifier(self, value):
        """Canonicalize an escaped identifier.

        Subclasses should override this to provide database-dependent
        unescaping behavior that reverses _escape_identifier.
        """
        return value.replace(self.escape_to_quote, self.escape_quote)
    def quote_identifier(self, value):
        """Quote an identifier.

        Subclasses should override this to provide database-dependent
        quoting behavior.
        """
        return self.initial_quote + \
                self._escape_identifier(value) + \
                self.final_quote
    def _requires_quotes(self, value):
        """Return True if the given identifier requires quoting."""
        lc_value = value.lower()
        # quote reserved words, illegal leading chars, illegal chars
        # anywhere, or mixed/upper case names
        # NOTE: unicode() is Python 2 only
        return (lc_value in self.reserved_words
                or value[0] in self.illegal_initial_characters
                or not self.legal_characters.match(unicode(value))
                or (lc_value != value))
    def quote_schema(self, schema, force):
        """Quote a schema.

        Subclasses should override this to provide database-dependent
        quoting behavior.
        """
        return self.quote(schema, force)
    def quote(self, ident, force):
        """Conditionally quote *ident*.

        ``force=None`` consults (and caches) ``_requires_quotes``;
        ``force=True`` always quotes; ``force=False`` never quotes.
        """
        if force is None:
            if ident in self._strings:
                return self._strings[ident]
            else:
                if self._requires_quotes(ident):
                    self._strings[ident] = self.quote_identifier(ident)
                else:
                    self._strings[ident] = ident
                return self._strings[ident]
        elif force:
            return self.quote_identifier(ident)
        else:
            return ident
    def format_sequence(self, sequence, use_schema=True):
        """Prepare a quoted, optionally schema-qualified sequence name."""
        name = self.quote(sequence.name, sequence.quote)
        if not self.omit_schema and use_schema and \
            sequence.schema is not None:
            name = self.quote_schema(sequence.schema, sequence.quote) + \
                        "." + name
        return name
    def format_label(self, label, name=None):
        return self.quote(name or label.name, label.quote)
    def format_alias(self, alias, name=None):
        return self.quote(name or alias.name, alias.quote)
    def format_savepoint(self, savepoint, name=None):
        return self.quote(name or savepoint.ident, savepoint.quote)
    def format_constraint(self, constraint):
        return self.quote(constraint.name, constraint.quote)
    def format_table(self, table, use_schema=True, name=None):
        """Prepare a quoted table and schema name."""
        if name is None:
            name = table.name
        result = self.quote(name, table.quote)
        if not self.omit_schema and use_schema \
            and getattr(table, "schema", None):
            result = self.quote_schema(table.schema, table.quote_schema) + \
                            "." + result
        return result
    def format_schema(self, name, quote):
        """Prepare a quoted schema name."""
        return self.quote(name, quote)
    def format_column(self, column, use_table=False,
                            name=None, table_name=None):
        """Prepare a quoted column name."""
        if name is None:
            name = column.name
        if not getattr(column, 'is_literal', False):
            if use_table:
                return self.format_table(
                            column.table, use_schema=False,
                            name=table_name) + "." + \
                            self.quote(name, column.quote)
            else:
                return self.quote(name, column.quote)
        else:
            # literal textual elements get stuck into ColumnClause a lot,
            # which shouldn't get quoted
            if use_table:
                return self.format_table(column.table,
                        use_schema=False, name=table_name) + '.' + name
            else:
                return name
    def format_table_seq(self, table, use_schema=True):
        """Format table name and schema as a tuple."""
        # Dialects with more levels in their fully qualified references
        # ('database', 'owner', etc.) could override this and return
        # a longer sequence.
        if not self.omit_schema and use_schema and \
                getattr(table, 'schema', None):
            return (self.quote_schema(table.schema, table.quote_schema),
                    self.format_table(table, use_schema=False))
        else:
            return (self.format_table(table, use_schema=False), )
    @util.memoized_property
    def _r_identifiers(self):
        # regex matching dotted identifier chains; each segment captures
        # either a quoted identifier (group 1) or a plain one (group 2)
        initial, final, escaped_final = \
                        [re.escape(s) for s in
                        (self.initial_quote, self.final_quote,
                        self._escape_identifier(self.final_quote))]
        r = re.compile(
            r'(?:'
            r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s'
            r'|([^\.]+))(?=\.|$))+' %
            { 'initial': initial,
              'final': final,
              'escaped': escaped_final })
        return r
    def unformat_identifiers(self, identifiers):
        """Unpack 'schema.table.column'-like strings into components."""
        r = self._r_identifiers
        return [self._unescape_identifier(i)
                for i in [a or b for a, b in r.findall(identifiers)]]
|
SohKai/ChronoLogger
|
web/flask/lib/python2.7/site-packages/sqlalchemy/sql/compiler.py
|
Python
|
mit
| 79,310
|
[
"VisIt"
] |
9a53a7bb12b759843f6730d1a356855625f8225bdb9b2e22cc500af168f4c1c7
|
"""
Testing the CTI Correction Algorithm
====================================
This script can be used to test the CTI correction algorithm performance.
:requires: NumPy
:requires: PyFITS
:requires: matplotlib
:version: 0.3
:author: Sami-Matias Niemi
:contact: s.niemi@ucl.ac.uk
"""
import matplotlib
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['font.size'] = 17
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('axes', linewidth=1.1)
matplotlib.rcParams['legend.fontsize'] = 11
matplotlib.rcParams['legend.handlelength'] = 3
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
import matplotlib.pyplot as plt
import glob as g
import pyfits as pf
import numpy as np
import cPickle, os, datetime, shutil
from multiprocessing import Pool
from analysis import shape
from support import logger as lg
from support import files as fileIO
from CTI import CTI
def ThibautsCDM03params():
    """Return Thibaut's CDM03 CTI model parameter set as a dictionary.

    Keys cover parallel/serial beta values (beta_p, beta_s), full well
    capacities (fwc, sfwc), thermal velocity (vth), transfer timings
    (t, st) and geometric confinement volumes (vg, svg).
    """
    return {
        'beta_p': 0.29,
        'beta_s': 0.12,
        'fwc': 200000.,
        'vth': 1.62E+07,
        't': 2.10E-02,
        'vg': 7.20E-11,
        'st': 5.00E-06,
        'sfwc': 1450000.,
        'svg': 3.00E-10,
    }
def MSSLCDM03params():
    """Return the MSSL-calibrated CDM03 CTI model parameter set.

    Same keys as :func:`ThibautsCDM03params`, with MSSL values for
    thermal velocity, transfer timings, volumes and serial full well.
    """
    params = {}
    params['beta_p'] = 0.29
    params['beta_s'] = 0.12
    params['fwc'] = 200000.
    params['vth'] = 1.168e7
    params['t'] = 20.48e-3
    params['vg'] = 6.e-11
    params['st'] = 5.0e-6
    params['sfwc'] = 730000.
    params['svg'] = 1.20E-10
    return params
def testCTIcorrection(log, files, sigma=0.75, iterations=4, xcen=1900, ycen=1900, side=20):
    """
    Calculates PSF properties such as ellipticity and size from data without CTI and from
    CTI data.

    Results are pickled to ``results.pk`` as a side effect.

    :param log: python logger instance
    :type log: instance
    :param files: a list of files to be processed
    :type files: list
    :param sigma: size of the Gaussian weighting function
    :type sigma: float
    :param iterations: the number of iterations for the moment based shape estimator
    :type iterations: int
    :param xcen: x-coordinate of the object centre
    :type xcen: int
    :param ycen: y-coordinate of the object centre
    :type ycen: int
    :param side: size of the cutout around the centre (+/- side)
    :type side: int

    :return: ellipticity and size
    :rtype: dict
    """
    settings = dict(sigma=sigma, iterations=iterations)
    eclean = []
    e1clean = []
    e2clean = []
    R2clean = []
    eCTI = []
    e1CTI = []
    e2CTI = []
    R2CTI = []
    # NOTE(review): loop locals renamed -- the originals shadowed the
    # `file` builtin and the `CTI` class imported at module level.
    for fname in files:
        #load no cti data
        nocti = pf.getdata(fname.replace('CTI', 'nocti'))[ycen-side:ycen+side, xcen-side:xcen+side]
        #subtract background -- presumably an empirically determined sky
        #level for this simulation set; TODO confirm and parameterize
        nocti -= 27.765714285714285
        nocti[nocti < 0.] = 0.  #remove negative numbers
        #load CTI data
        ctidata = pf.getdata(fname)[ycen-side:ycen+side, xcen-side:xcen+side]
        ctidata[ctidata < 0.] = 0.  #remove negative numbers
        #shape measurement on the CTI-free cutout
        sh = shape.shapeMeasurement(nocti, log, **settings)
        results = sh.measureRefinedEllipticity()
        eclean.append(results['ellipticity'])
        e1clean.append(results['e1'])
        e2clean.append(results['e2'])
        R2clean.append(results['R2'])
        #shape measurement on the CTI-affected cutout
        sh = shape.shapeMeasurement(ctidata, log, **settings)
        results = sh.measureRefinedEllipticity()
        eCTI.append(results['ellipticity'])
        e1CTI.append(results['e1'])
        e2CTI.append(results['e2'])
        R2CTI.append(results['R2'])
    results = {'eclean' : np.asarray(eclean),
               'e1clean' : np.asarray(e1clean),
               'e2clean' : np.asarray(e2clean),
               'R2clean' : np.asarray(R2clean),
               'eCTI' : np.asarray(eCTI),
               'e1CTI' : np.asarray(e1CTI),
               'e2CTI' : np.asarray(e2CTI),
               'R2CTI' : np.asarray(R2CTI)}
    #save to a file
    fileIO.cPickleDumpDictionary(results, 'results.pk')
    return results
def testCTIcorrectionNonoise(log, files, output, sigma=0.75, iterations=4):
    """
    Calculates PSF properties such as ellipticity and size from data w/ and w/o CTI.

    Shapes are measured three times per file: on the noiseless CTI-free
    image, on the CTI image with a freely fitted centroid, and on the
    CTI image with the centroid fixed to the CTI-free solution.
    Per-file deltas are written to a CSV and the collected arrays are
    pickled to *output*.

    :param log: python logger instance
    :type log: instance
    :param files: a list of files to be processed
    :type files: list
    :param output: name of the output pickle file; the CSV name is
        derived by replacing 'pk' with 'csv'
    :type output: str
    :param sigma: size of the Gaussian weighting function
    :type sigma: float
    :param iterations: the number of iterations for the moment based shape estimator
    :type iterations: int

    :return: ellipticity and size
    :rtype: dict
    """
    eclean = []
    e1clean = []
    e2clean = []
    R2clean = []
    xclean = []
    yclean = []
    eCTI = []
    e1CTI = []
    e2CTI = []
    R2CTI = []
    xCTI = []
    yCTI = []
    eCTIfixed = []
    e1CTIfixed = []
    e2CTIfixed = []
    R2CTIfixed = []
    xCTIfixed = []
    yCTIfixed = []
    fh = open(output.replace('pk', 'csv'), 'w')
    fh.write('#file, delta_e, delta_e1, delta_e2, delta_R2, delta_x, delta_y\n')
    for f in files:
        print 'Processing: ', f
        #reset settings (the fixed-centroid pass below mutates them)
        settings = dict(sigma=sigma, iterations=iterations)
        #load no cti data
        nocti = pf.getdata(f.replace('CUT', 'CUTnoctinonoise'))
        #load CTI data
        CTI = pf.getdata(f)
        #reference shape measurement on the CTI-free image
        sh = shape.shapeMeasurement(nocti, log, **settings)
        results = sh.measureRefinedEllipticity()
        eclean.append(results['ellipticity'])
        e1clean.append(results['e1'])
        e2clean.append(results['e2'])
        R2clean.append(results['R2'])
        xclean.append(results['centreX'])
        yclean.append(results['centreY'])
        #CTI, fitted centroid
        sh = shape.shapeMeasurement(CTI.copy(), log, **settings)
        results2 = sh.measureRefinedEllipticity()
        eCTI.append(results2['ellipticity'])
        e1CTI.append(results2['e1'])
        e2CTI.append(results2['e2'])
        R2CTI.append(results2['R2'])
        xCTI.append(results2['centreX'])
        yCTI.append(results2['centreY'])
        #fixed centroid: reuse the CTI-free centre, single iteration
        settings['fixedPosition'] = True
        settings['fixedX'] = results['centreX']
        settings['fixedY'] = results['centreY']
        settings['iterations'] = 1
        sh = shape.shapeMeasurement(CTI.copy(), log, **settings)
        results3 = sh.measureRefinedEllipticity()
        eCTIfixed.append(results3['ellipticity'])
        e1CTIfixed.append(results3['e1'])
        e2CTIfixed.append(results3['e2'])
        R2CTIfixed.append(results3['R2'])
        xCTIfixed.append(results3['centreX'])
        yCTIfixed.append(results3['centreY'])
        #per-file deltas (no-CTI minus fitted-centroid CTI) to CSV
        text = '%s,%e,%e,%e,%e,%e,%e\n' % (f, results['ellipticity'] - results2['ellipticity'],
            results['e1'] - results2['e1'], results['e2'] - results2['e2'], results['R2'] - results2['R2'],
            results['centreX'] - results2['centreX'], results['centreY'] - results2['centreY'])
        fh.write(text)
        print text
    fh.close()
    results = {'eclean' : np.asarray(eclean),
               'e1clean' : np.asarray(e1clean),
               'e2clean' : np.asarray(e2clean),
               'R2clean' : np.asarray(R2clean),
               'xclean' : np.asarray(xclean),
               'yclean' : np.asarray(yclean),
               'eCTI' : np.asarray(eCTI),
               'e1CTI' : np.asarray(e1CTI),
               'e2CTI' : np.asarray(e2CTI),
               'R2CTI' : np.asarray(R2CTI),
               'xCTI' : np.asarray(xCTI),
               'yCTI' : np.asarray(yCTI),
               'eCTIfixed': np.asarray(eCTIfixed),
               'e1CTIfixed': np.asarray(e1CTIfixed),
               'e2CTIfixed': np.asarray(e2CTIfixed),
               'R2CTIfixed': np.asarray(R2CTIfixed),
               'xCTIfixed': np.asarray(xCTIfixed),
               'yCTIfixed': np.asarray(yCTIfixed)}
    #save to a file
    fileIO.cPickleDumpDictionary(results, output)
    return results
def plotResults(results):
"""
Plot the CTI correction algorithm results.
:param results: CTI test results
:return: None
"""
e = results['eclean'] - results['eCTI']
e1 = results['e1clean'] - results['e1CTI']
e2 = results['e2clean'] - results['e2CTI']
print 'Delta e, e_1, e_2:', np.mean(e), np.mean(e1), np.mean(e2)
print 'std e, e_1, e_2:', np.std(e), np.std(e1), np.std(e2)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(e, bins=15, label='$e$', alpha=0.5)
ax.hist(e1, bins=15, label='$e_{2}$', alpha=0.5)
ax.hist(e2, bins=15, label='$e_{1}$', alpha=0.5)
ax.set_xlabel(r'$\delta e$ [no CTI - CDM03 corrected]')
plt.legend(shadow=True, fancybox=True)
plt.savefig('ellipticityDelta.pdf')
plt.close()
r2 = (results['R2clean'] - results['R2CTI'])/results['R2clean']
print 'delta R2 / R2: mean, std ', np.mean(r2), np.std(r2)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(r2, bins=15, label='$R^{2}$')
ax.set_xlabel(r'$\frac{\delta R^{2}}{R^{2}_{ref}}$ [no CTI - CDM03 corrected]')
plt.legend(shadow=True, fancybox=True)
plt.savefig('sizeDelta.pdf')
plt.close()
def plotResultsNoNoise(inputfile, title, bins=10):
"""
Plot the CTI correction algorithm results.
:return: None
"""
path = datetime.datetime.now().isoformat()
os.mkdir(path)
path += '/'
results = cPickle.load(open(inputfile))
#copy input to the path
try:
shutil.copy2(inputfile, path+inputfile)
except:
pass
print '\n\n\n\nFitted centre:'
e = results['eclean'] - results['eCTI']
e1 = results['e1clean'] - results['e1CTI']
e2 = results['e2clean'] - results['e2CTI']
x = results['xclean'] - results['xCTI']
y = results['yclean'] - results['yCTI']
r2 = (results['R2clean'] - results['R2CTI']) / results['R2clean']
meane = np.mean(e)
meane1 = np.mean(e1)
meane2 = np.mean(e2)
meanx = np.mean(x)
meany = np.mean(y)
meanr2 = np.mean(r2)
print 'Delta e, e_1, e_2:', meane, meane1, meane2
#print 'std e, e_1, e_2:', np.std(e), np.std(e1), np.std(e2)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.hist(e, bins=bins, color='b', label='$e$', alpha=0.5)
ax.hist(e1, bins=bins, color='r', label='$e_{1}$', alpha=0.5)
ax.hist(e2, bins=bins, color='g', label='$e_{2}$', alpha=0.5)
ax.axvline(x=meane, color='b', label='%.2e' % meane)
ax.axvline(x=meane1, color='r', label='%.2e' % meane1)
ax.axvline(x=meane2, color='g', label='%.2e' % meane2)
ax.set_xlabel(r'$\delta e$ [w/o - w/ CTI]')
plt.legend(shadow=True, fancybox=True)
plt.savefig(path+'ellipticityDeltaFittedCentre.pdf')
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.scatter(e1, e2, s=8, color='r', marker='o', alpha=0.5, label='w/o - w/ CTI')
ax.set_xlabel(r'$\delta e_{1}$')
ax.set_ylabel(r'$\delta e_{2}$')
plt.legend(shadow=True, fancybox=True)
plt.savefig(path+'ellipticityFittedCentre.pdf')
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.scatter(results['e1clean'], results['e2clean'], s=8, color='k', marker='s', alpha=0.1, label='no CTI')
ax.scatter(results['e1CTI'], results['e2CTI'], s=8, color='r', marker='o', alpha=0.4, label='CTI')
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_xlabel(r'$e_{1}$')
ax.set_ylabel(r'$e_{2}$')
plt.legend(shadow=True, fancybox=True)
plt.savefig(path+'e1vse2FittedCentre.pdf')
plt.close()
print 'delta R2 / R2: mean, std ', meanr2, np.std(r2)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.hist(r2, bins=bins, color='b', label='$R^{2}$')
ax.axvline(x=meanr2,color='b', label='%.2e' % meanr2)
ax.set_xlabel(r'$\frac{\delta R^{2}}{R^{2}_{ref}}$ [w/o - w CTI]')
plt.legend(shadow=True, fancybox=True)
plt.savefig(path+'sizeDeltaFittedCentre.pdf')
plt.close()
print 'delta x: mean, std ', meanx, np.std(x)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.hist(x, bins=bins, color='b', label='X Centre')
ax.axvline(x=meanx,color='b', label='%.2e' % meanx)
ax.set_xlabel(r'$\delta X - X_{CTI}$ [w/o - w CTI]')
plt.legend(shadow=True, fancybox=True)
plt.savefig(path+'xDeltaFittedCentre.pdf')
plt.close()
print 'delta y: mean, std ', meany, np.std(y)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.hist(y, bins=bins, color='b', label='Y Centre')
ax.axvline(x=meany,color='b', label='%.2e' % meany)
ax.set_xlabel(r'$\delta Y - Y_{CTI}$ [w/o - w CTI]')
plt.legend(shadow=True, fancybox=True)
plt.savefig(path+'yDeltaFittedCentre.pdf')
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.scatter(x, y, s=15, color='k', marker='s', alpha=0.5, label='w/o - w/ CTI')
ax.set_xlabel(r'$\delta X$')
ax.set_ylabel(r'$\delta Y$')
plt.legend(shadow=True, fancybox=True, scatterpoints=1)
plt.savefig(path+'coordinatesFittedCentre.pdf')
plt.close()
print '\n\n\n\nFixed centre:'
e = results['eclean'] - results['eCTIfixed']
e1 = results['e1clean'] - results['e1CTIfixed']
e2 = results['e2clean'] - results['e2CTIfixed']
x = results['xclean'] - results['xCTIfixed']
y = results['yclean'] - results['yCTIfixed']
r2 = (results['R2clean'] - results['R2CTIfixed']) / results['R2clean']
meane = np.mean(e)
meane1 = np.mean(e1)
meane2 = np.mean(e2)
meanx = np.mean(x)
meany = np.mean(y)
meanr2 = np.mean(r2)
print 'Delta e, e_1, e_2:', meane, meane1, meane2
#print 'std e, e_1, e_2:', np.std(e), np.std(e1), np.std(e2)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.hist(e, bins=bins, color='b', label='$e$', alpha=0.5)
ax.hist(e1, bins=bins, color='r', label='$e_{1}$', alpha=0.5)
ax.hist(e2, bins=bins, color='g', label='$e_{2}$', alpha=0.5)
ax.axvline(x=meane, color='b', label='%.2e' % meane)
ax.axvline(x=meane1, color='r', label='%.2e' % meane1)
ax.axvline(x=meane2, color='g', label='%.2e' % meane2)
ax.set_xlabel(r'$\delta e$ [w/o - w/ CTI]')
plt.legend(shadow=True, fancybox=True)
plt.savefig(path+'ellipticityDeltaFixedCentre.pdf')
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.scatter(e1, e2, s=8, color='r', marker='o', alpha=0.5, label='w/o - w/ CTI')
ax.set_xlabel(r'$\delta e_{1}$')
ax.set_ylabel(r'$\delta e_{2}$')
plt.legend(shadow=True, fancybox=True)
plt.savefig(path+'ellipticityFixedCentre.pdf')
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.scatter(results['e1clean'], results['e2clean'], s=8, color='k', marker='s', alpha=0.1, label='no CTI')
ax.scatter(results['e1CTIfixed'], results['e2CTIfixed'], s=8, color='r', marker='o', alpha=0.4, label='CTI')
ax.set_xlabel(r'$e_{1}$')
ax.set_ylabel(r'$e_{2}$')
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
plt.legend(shadow=True, fancybox=True)
plt.savefig(path+'e1vse2FixedCentre.pdf')
plt.close()
print 'delta R2 / R2: mean, std ', meanr2, np.std(r2)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.hist(r2, bins=bins, color='b', label='$R^{2}$')
ax.axvline(x=meanr2, color='b', label='%.2e' % meanr2)
ax.set_xlabel(r'$\frac{\delta R^{2}}{R^{2}_{ref}}$ [w/o - w CTI]')
plt.legend(shadow=True, fancybox=True)
plt.savefig(path+'sizeDeltaFixedCentre.pdf')
plt.close()
print 'delta x: mean, std ', meanx, np.std(x)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.hist(x, bins=bins, color='b', label='X Centre')
ax.axvline(x=meanx, color='b', label='%.2e' % meanx)
ax.set_xlabel(r'$X - X_{CTI}$')
plt.legend(shadow=True, fancybox=True)
plt.savefig(path+'xDeltaFixedCentre.pdf')
plt.close()
print 'delta y: mean, std ', meany, np.std(y)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.hist(y, bins=bins, color='b', label='Y Centre')
ax.axvline(x=meany, color='b', label='%.2e' % meany)
ax.set_xlabel(r'$Y - Y_{CTI}$')
plt.legend(shadow=True, fancybox=True)
plt.savefig(path+'yDeltaFixedCentre.pdf')
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
ax.scatter(x, y, s=15, color='k', marker='s', alpha=0.5, label='w/o - w/ CTI')
ax.set_xlabel(r'$\delta X$')
ax.set_ylabel(r'$\delta Y$')
plt.legend(shadow=True, fancybox=True, scatterpoints=1)
plt.savefig(path+'coordinatesFixedCentre.pdf')
plt.close()
def cutoutRegions(files, xcen=1900, ycen=1900, side=140):
"""
:param files:
:param xcen:
:param ycen:
:param side:
:return:
"""
print 'Generating postage stamp images'
for f in files:
print 'Processing: ', f
#load no cti data
fh = pf.open('noctinonoise' + f)
data = fh[1].data
#subtract background
#print '%.15f' % np.median(pf.getdata('noctinonoise' + f)[100:1000, 100:1000])
d = data[ycen - side:ycen + side, xcen - side:xcen + side]
d -= 103.258397270204995
d[d < 0.] = 0.
fh[1].data = d
fh.writeto('CUTnoctinonoise' + f, clobber=True)
#load CTI data
fh = pf.open(f)
data = fh[1].data
#subtract background
#print '%.15f' % np.median(pf.getdata('noctinonoise' + f)[100:1000, 100:1000])
d = data[ycen - side:ycen + side, xcen - side:xcen + side]
d -= 103.181143639616520
d[d < 0.] = 0.
fh[1].data = d
fh.writeto('CUT' + f, clobber=True)
def useThibautsData(log, output, bcgr=72.2, sigma=0.75, iterations=4, loc=1900, galaxies=1000,
                    datadir='/Users/smn2/EUCLID/CTItesting/uniform/',
                    thibautCDM03=False, beta=False, serial=1, parallel=1):
    """
    Test the impact of CTI in case of no noise and no correction.

    :param log: logger instance
    :param output: name of the output pickle file; a .csv with the same stem is written too
    :param bcgr: background in electrons for the CTI modelling
    :param sigma: size of the weighting function for the quadrupole moment
    :param iterations: number of iterations in the quadrupole moments estimation
    :param loc: location to which the galaxy will be placed [default=1900]
    :param galaxies: number of galaxies to use (< 10000)
    :param datadir: directory pointing to the galaxy images
    :param thibautCDM03: if True use Thibaut's CDM03 trap parameter files, otherwise MSSL's
    :param beta: if True override both CDM03 beta parameters with 0.6
    :param serial: serial CTI switch passed to CDM03
    :param parallel: parallel CTI switch passed to CDM03

    :return: dictionary of shape-measurement arrays (also pickled to *output*)
    """
    files = g.glob(datadir + '*.fits')
    #pick randomly
    files = np.random.choice(files, galaxies, replace=False)

    #trap parameters: parallel
    if thibautCDM03:
        f1 = '/Users/smn2/EUCLID/vissim-python/data/cdm_thibaut_parallel.dat'
        f2 = '/Users/smn2/EUCLID/vissim-python/data/cdm_thibaut_serial.dat'
        params = ThibautsCDM03params()
        params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))
    else:
        f1 = '/Users/smn2/EUCLID/vissim-python/data/cdm_euclid_parallel.dat'
        f2 = '/Users/smn2/EUCLID/vissim-python/data/cdm_euclid_serial.dat'
        params = MSSLCDM03params()
        params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))

    if beta:
        params.update(dict(beta_p=0.6, beta_s=0.6))

    print f1, f2

    #accumulators for the three measurement sets: clean, CTI with fitted
    #centroid, and CTI with the centroid fixed to the clean solution
    eclean = []
    e1clean = []
    e2clean = []
    R2clean = []
    xclean = []
    yclean = []
    eCTI = []
    e1CTI = []
    e2CTI = []
    R2CTI = []
    xCTI = []
    yCTI = []
    eCTIfixed = []
    e1CTIfixed = []
    e2CTIfixed = []
    R2CTIfixed = []
    xCTIfixed = []
    yCTIfixed = []

    #CSV companion file recording per-galaxy deltas and the parameters used
    fh = open(output.replace('.pk', '.csv'), 'w')
    fh.write('#files: %s and %s\n' % (f1, f2))
    for key in params:
        print key, params[key]
        fh.write('# %s = %s\n' % (key, str(params[key])))
    fh.write('#file, delta_e, delta_e1, delta_e2, delta_R2, delta_x, delta_y\n')

    for f in files:
        print 'Processing: ', f

        #load data
        nocti = pf.getdata(f)
        #scale to SNR about 10 (average galaxy, a single exposure)
        nocti /= np.sum(nocti)
        nocti *= 1500.

        #place it on a full CCD-sized canvas, centred at (loc, loc)
        tmp = np.zeros((2066, 2048))
        ysize, xsize = nocti.shape
        ysize /= 2
        xsize /= 2
        tmp[loc-ysize:loc+ysize, loc-xsize:loc+xsize] = nocti.copy()

        #add background
        tmp += bcgr

        #run CDM03
        #NOTE(review): the array is transposed before and after
        #applyRadiationDamage -- presumably to match CDM03's expected
        #orientation; confirm against the CDM03bidir implementation
        c = CTI.CDM03bidir(params, [])
        tmp = c.applyRadiationDamage(tmp.copy().transpose()).transpose()

        #remove background and make a cutout of the original stamp region
        CTIdata = tmp[loc-ysize:loc+ysize, loc-xsize:loc+xsize]
        CTIdata -= bcgr
        CTIdata[CTIdata < 0.] = 0.

        #write files
        #fileIO.writeFITS(nocti, f.replace('.fits', 'noCTI.fits'), int=False)
        #fileIO.writeFITS(CTI, f.replace('.fits', 'CTI.fits'), int=False)

        #reset settings
        settings = dict(sigma=sigma, iterations=iterations)

        #calculate shapes of the clean image
        sh = shape.shapeMeasurement(nocti.copy(), log, **settings)
        results = sh.measureRefinedEllipticity()

        eclean.append(results['ellipticity'])
        e1clean.append(results['e1'])
        e2clean.append(results['e2'])
        R2clean.append(results['R2'])
        xclean.append(results['centreX'])
        yclean.append(results['centreY'])

        #CTI, fitted centroid
        sh = shape.shapeMeasurement(CTIdata.copy(), log, **settings)
        results2 = sh.measureRefinedEllipticity()

        eCTI.append(results2['ellipticity'])
        e1CTI.append(results2['e1'])
        e2CTI.append(results2['e2'])
        R2CTI.append(results2['R2'])
        xCTI.append(results2['centreX'])
        yCTI.append(results2['centreY'])

        #fixed centroid: reuse the clean centroid and do a single iteration
        settings['fixedPosition'] = True
        settings['fixedX'] = results['centreX']
        settings['fixedY'] = results['centreY']
        settings['iterations'] = 1
        sh = shape.shapeMeasurement(CTIdata.copy(), log, **settings)
        results3 = sh.measureRefinedEllipticity()

        eCTIfixed.append(results3['ellipticity'])
        e1CTIfixed.append(results3['e1'])
        e2CTIfixed.append(results3['e2'])
        R2CTIfixed.append(results3['R2'])
        xCTIfixed.append(results3['centreX'])
        yCTIfixed.append(results3['centreY'])

        #per-galaxy deltas (clean minus fitted-centroid CTI) to the CSV
        text = '%s,%e,%e,%e,%e,%e,%e\n' % (f, results['ellipticity'] - results2['ellipticity'],
                                           results['e1'] - results2['e1'],
                                           results['e2'] - results2['e2'],
                                           results['R2'] - results2['R2'],
                                           results['centreX'] - results2['centreX'],
                                           results['centreY'] - results2['centreY'])
        fh.write(text)
        print text

    fh.close()

    results = {'eclean': np.asarray(eclean),
               'e1clean': np.asarray(e1clean),
               'e2clean': np.asarray(e2clean),
               'R2clean': np.asarray(R2clean),
               'xclean': np.asarray(xclean),
               'yclean': np.asarray(yclean),
               'eCTI': np.asarray(eCTI),
               'e1CTI': np.asarray(e1CTI),
               'e2CTI': np.asarray(e2CTI),
               'R2CTI': np.asarray(R2CTI),
               'xCTI': np.asarray(xCTI),
               'yCTI': np.asarray(yCTI),
               'eCTIfixed': np.asarray(eCTIfixed),
               'e1CTIfixed': np.asarray(e1CTIfixed),
               'e2CTIfixed': np.asarray(e2CTIfixed),
               'R2CTIfixed': np.asarray(R2CTIfixed),
               'xCTIfixed': np.asarray(xCTIfixed),
               'yCTIfixed': np.asarray(yCTIfixed)}

    #save to a file
    fileIO.cPickleDumpDictionary(results, output)

    return results
def addCTI(file, loc=1900, bcgr=72.2, thibautCDM03=False, beta=True, serial=1, parallel=1):
"""
Add CTI trails to a FITS file or input data.
"""
#trap parameters: parallel
if thibautCDM03:
f1 = '/Users/sammy/EUCLID/vissim-python/data/cdm_thibaut_parallel.dat'
f2 = '/Users/sammy/EUCLID/vissim-python/data/cdm_thibaut_serial.dat'
params = ThibautsCDM03params()
params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))
else:
f1 = '/Users/sammy/EUCLID/vissim-python/data/cdm_euclid_parallel.dat'
f2 = '/Users/sammy/EUCLID/vissim-python/data/cdm_euclid_serial.dat'
params = MSSLCDM03params()
params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))
if beta:
params.update(dict(beta_p=0.6, beta_s=0.6))
print f1, f2
#load data
if type(file) is str:
nocti = pf.getdata(file)
else:
nocti = file.copy()
#place it on canvas
tmp = np.zeros((2066, 2048))
ysize, xsize = nocti.shape
ysize /= 2
xsize /= 2
tmp[loc-ysize:loc+ysize, loc-xsize:loc+xsize] = nocti.copy()
#add background
tmp += bcgr
#run CDM03
c = CTI.CDM03bidir(params, [])
tmp = c.applyRadiationDamage(tmp)
#remove background and make a cutout
CTIdata = tmp[loc-ysize:loc+ysize, loc-xsize:loc+xsize]
CTIdata -= bcgr
CTIdata[CTIdata < 0.] = 0.
return CTIdata
def simpleTest(log, sigma=0.75, iterations=50):
#Thibauts data
folder = '/Users/sammy/EUCLID/CTItesting/uniform/'
wcti = pf.getdata(folder +
'galaxy_100mas_dist2_q=0.5078_re=6.5402_theta=0.91895_norm=1000_dx=0.3338_dy=0.0048CTI.fits')
wocti = pf.getdata(folder +
'galaxy_100mas_dist2_q=0.5078_re=6.5402_theta=0.91895_norm=1000_dx=0.3338_dy=0.0048noCTI.fits')
#reset settings
settings = dict(sigma=sigma, iterations=iterations)
#calculate shapes
sh = shape.shapeMeasurement(wcti, log, **settings)
wctiresults = sh.measureRefinedEllipticity()
sh = shape.shapeMeasurement(wocti, log, **settings)
woctiresults = sh.measureRefinedEllipticity()
#include CTI with my recipe
ctiMSSL = addCTI(wocti.copy())
ctiThibault = addCTI(wocti.copy(), thibautCDM03=True)
sh = shape.shapeMeasurement(ctiMSSL, log, **settings)
wMSSLctiresults = sh.measureRefinedEllipticity()
sh = shape.shapeMeasurement(ctiThibault, log, **settings)
wThibautctiresults = sh.measureRefinedEllipticity()
fileIO.writeFITS(ctiMSSL, 'tmp1.fits', int=False)
fileIO.writeFITS(ctiThibault, 'tmp2.fits', int=False)
fileIO.writeFITS(wcti/ctiMSSL, 'tmp3.fits', int=False)
for key in wctiresults:
tmp1 = wctiresults[key] - wMSSLctiresults[key]
tmp2 = wctiresults[key] - wThibautctiresults[key]
if 'Gaussian' in key:
print key, np.max(np.abs(tmp1)), np.max(np.abs(tmp2))
else:
print key, tmp1, tmp2
if __name__ == '__main__':
    log = lg.setUpLogger('CTItesting.log')

    #simple test with Thibaut's files
    simpleTest(log)

    #use Thibaut's input galaxies
    galaxies = 800
    #The commented-out calls below record previous analysis runs with
    #different CDM03 parameter sets; kept for reference.
    #thibaut = useThibautsData(log, 'resultsNoNoiseThibautsDataP.pk', galaxies=galaxies, serial=-1)
    #thibaut = useThibautsData(log, 'resultsNoNoiseThibautsDatab6P.pk', beta=True, galaxies=galaxies, serial=-1)
    #thibaut = useThibautsData(log, 'resultsNoNoiseThibautsDataThibautsCDM03P.pk', thibautCDM03=True, galaxies=galaxies, serial=-1)
    #plotResultsNoNoise('resultsNoNoiseThibautsDataP.pk', 'MSSL CDM03 Parameters (beta=0.29, 0.12) (parallel only)')
    #plotResultsNoNoise('resultsNoNoiseThibautsDatab6P.pk', 'MSSL CDM03 Parameters (beta=0.6, 0.6) (parallel only)')
    #plotResultsNoNoise('resultsNoNoiseThibautsDataThibautsCDM03P.pk', 'Thibaut CDM03 Parameters (parallel only)')

    #thibaut = useThibautsData(log, 'resultsNoNoiseThibautsData.pk', galaxies=galaxies)
    #thibaut = useThibautsData(log, 'resultsNoNoiseThibautsDatab6.pk', beta=True, galaxies=galaxies)
    #thibaut = useThibautsData(log, 'resultsNoNoiseThibautsDataThibautsCDM03.pk', thibautCDM03=True, galaxies=galaxies)
    #plotResultsNoNoise('resultsNoNoiseThibautsData.pk', 'MSSL CDM03 Parameters (beta=0.29, 0.12)')
    #plotResultsNoNoise('resultsNoNoiseThibautsDatab6.pk', 'MSSL CDM03 Parameters (beta=0.6, 0.6)')
    #plotResultsNoNoise('resultsNoNoiseThibautsDataThibautsCDM03.pk', 'Thibaut CDM03 Parameters')

    #cut out regions
    #cutoutRegions(g.glob('Q0_00_00stars*.fits'))
    #cutoutRegions(g.glob('Q0_00_00galaxy*.fits'))

    #use the cutouts -- stars
    #results = testCTIcorrectionNonoise(log, g.glob('CUTQ*stars*.fits'), 'resultsNoNoiseStars.pk', iterations=4)
    #plotResultsNoNoise('resultsNoNoiseStars.pk')

    #galaxies
    #results = testCTIcorrectionNonoise(log, g.glob('CUTQ*galaxy*.fits'), 'resultsNoNoiseGalaxies.pk', iterations=4)
    #plotResultsNoNoise('resultsNoNoiseGalaxies.pk')

    #results = testCTIcorrection(log, g.glob('CTIQ0_00_00stars*'), iterations=8, side=25)
    #plotResults(results)
|
sniemi/EuclidVisibleInstrument
|
analysis/testCTIcorrection.py
|
Python
|
bsd-2-clause
| 28,754
|
[
"Galaxy",
"Gaussian"
] |
73e25729dbd3de9c0b65244593cf394c81770fac74d4522c2806b55dc9ee496b
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS

# Short aliases for Cheetah's NameMapper lookup helpers used in respond().
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Metadata recorded by the Cheetah compiler about this generated module.
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1453357629.186964
__CHEETAH_genTimestamp__ = 'Thu Jan 21 15:27:09 2016'
__CHEETAH_src__ = '/home/babel/Build/Test/OpenPLi5/openpli5.0/build/tmp/work/tmnanoseplus-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+186ea358f6-r0/git/plugin/controllers/views/web/serviceplayable.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Jan 21 15:27:08 2016'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'

# Refuse to run when compiled against an incompatible Cheetah runtime.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
         __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
# NOTE: this class is autogenerated by Cheetah from serviceplayable.tmpl --
# regenerate from the template rather than hand-editing.
class serviceplayable(Template):

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        # Forward only the keyword arguments Cheetah's Template accepts.
        super(serviceplayable, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        # Renders the e2serviceplayable XML document into the transaction's
        # response buffer and returns it as a string for dummy transactions.
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter

        ########################################
        ## START - generated method body

        # Install (and cache) the WebSafe output filter for this template.
        _orig_filter_67069415 = _filter
        filterName = u'WebSafe'
        if self._CHEETAH__filters.has_key("WebSafe"):
            _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
        else:
            _filter = self._CHEETAH__currentFilter = \
                self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
        write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2serviceplayable>
\t<e2servicereference>''')
        _v = VFFSL(SL,"service.servicereference",True) # u'$service.servicereference' on line 4, col 22
        if _v is not None: write(_filter(_v, rawExpr=u'$service.servicereference')) # from line 4, col 22.
        write(u'''</e2servicereference>
\t<e2isplayable>''')
        _v = VFFSL(SL,"str",False)(VFFSL(SL,"service.isplayable",True)) # u'$str($service.isplayable)' on line 5, col 16
        if _v is not None: write(_filter(_v, rawExpr=u'$str($service.isplayable)')) # from line 5, col 16.
        write(u'''</e2isplayable>
</e2serviceplayable>
''')
        # Restore the filter that was active before this method ran.
        _filter = self._CHEETAH__currentFilter = _orig_filter_67069415

        ########################################
        ## END - generated method body

        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_serviceplayable= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing methods to the class exactly once.
if not hasattr(serviceplayable, '_initCheetahAttributes'):
    templateAPIClass = getattr(serviceplayable, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(serviceplayable)

# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=serviceplayable()).run()
|
MOA-2011/e2openplugin-OpenWebif
|
plugin/controllers/views/web/serviceplayable.py
|
Python
|
gpl-2.0
| 5,379
|
[
"VisIt"
] |
58b2127ec3db821aae3c20d46ce04ea644ba3cda8a196eb6c487142fe784abc6
|
# (C) British Crown Copyright 2010 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
This system test module is useful to identify if some of the key components required for Iris are available.
The system tests can be run with ``python setup.py test --system-tests``.
"""
from __future__ import (absolute_import, division, print_function)
# import iris tests first so that some things can be initialised before importing anything else
import numpy as np
import iris
import iris.fileformats.grib as grib
import iris.fileformats.netcdf as netcdf
import iris.fileformats.pp as pp
import iris.tests as tests
import iris.unit
class SystemInitialTest(tests.IrisTest):
    # Method names use a 'system_test_' prefix rather than the standard
    # unittest 'test_' prefix (run via ``python setup.py test --system-tests``,
    # per the module docstring).

    def system_test_supported_filetypes(self):
        """Round-trip a simple cube through NetCDF, PP and GRIB2 and check the CML."""
        nx, ny = 60, 60
        dataarray = np.arange(nx * ny, dtype='>f4').reshape(nx, ny)

        laty = np.linspace(0, 59, ny).astype('f8')
        lonx = np.linspace(30, 89, nx).astype('f8')

        # fresh coordinate system for each coordinate
        horiz_cs = lambda : iris.coord_systems.GeogCS(6371229)

        cm = iris.cube.Cube(data=dataarray, long_name="System test data", units='m s-1')
        cm.add_dim_coord(
            iris.coords.DimCoord(laty, 'latitude', units='degrees',
                                 coord_system=horiz_cs()),
            0)
        cm.add_dim_coord(
            iris.coords.DimCoord(lonx, 'longitude', units='degrees',
                                 coord_system=horiz_cs()),
            1)
        cm.add_aux_coord(iris.coords.AuxCoord(np.array([9], 'i8'),
                                              'forecast_period', units='hours'))
        hours_since_epoch = iris.unit.Unit('hours since epoch',
                                           iris.unit.CALENDAR_GREGORIAN)
        cm.add_aux_coord(iris.coords.AuxCoord(np.array([3], 'i8'),
                                              'time', units=hours_since_epoch))
        cm.add_aux_coord(iris.coords.AuxCoord(np.array([99], 'i8'),
                                              long_name='pressure', units='Pa'))
        cm.assert_valid()

        # save to each supported format and reload, comparing against stored CML
        for filetype in ('.nc', '.pp', '.grib2'):
            saved_tmpfile = iris.util.create_temp_filename(suffix=filetype)
            iris.save(cm, saved_tmpfile)

            new_cube = iris.load_cube(saved_tmpfile)

            self.assertCML(new_cube, ('system', 'supported_filetype_%s.cml' % filetype))

    def system_test_grib_patch(self):
        """Check that a GRIB message's missingValue can be changed via gribapi."""
        import gribapi

        gm = gribapi.grib_new_from_samples("GRIB2")
        result = gribapi.grib_get_double(gm, "missingValue")

        new_missing_value = 123456.0
        gribapi.grib_set_double(gm, "missingValue",
                                new_missing_value)
        new_result = gribapi.grib_get_double(gm, "missingValue")

        self.assertEqual(new_result, new_missing_value)

    def system_test_imports_general(self):
        """Smoke-test availability of optional third-party dependencies."""
        if tests.MPL_AVAILABLE:
            import matplotlib
        import netCDF4
# Allow running the system tests directly as a script.
if __name__ == '__main__':
    tests.main()
|
Jozhogg/iris
|
lib/iris/tests/system_test.py
|
Python
|
lgpl-3.0
| 3,527
|
[
"NetCDF"
] |
de35cdec596c11954e2360eeed958049b60bd69ad13279d25364875748943847
|
"""
This migration script adds the request_event table and
removes the state field in the request table
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.exc import *
from migrate import *
from migrate.changeset import *
import datetime
# Alias used as the default timestamp factory for the table columns below.
now = datetime.datetime.utcnow

import sys, logging
# Echo migration progress to stdout at DEBUG level.
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )

# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import *

# NOTE(review): migrate_engine is not defined in this file -- presumably
# provided by ``from migrate import *`` / the migration runner; confirm.
metadata = MetaData( migrate_engine )
db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) )
def display_migration_details():
    # Banner printed when the migration runs (Python 2 print statements).
    print "========================================"
    print "This migration script adds the request_event table and"
    print "removes the state field in the request table"
    print "========================================"
def localtimestamp():
    """Return the SQL expression for the current local timestamp on the active engine.

    :raises Exception: if the database engine is not postgres, mysql or sqlite
    """
    if migrate_engine.name == 'postgres' or migrate_engine.name == 'mysql':
        return "LOCALTIMESTAMP"
    elif migrate_engine.name == 'sqlite':
        return "current_date || ' ' || current_time"
    else:
        # BUG FIX: the original interpolated the undefined name `db`, which
        # would raise NameError instead of the intended Exception here.
        raise Exception( 'Unable to convert data for unknown database type: %s' % migrate_engine.name )
def nextval( table, col='id' ):
    """Return the SQL fragment yielding the next primary-key value for *table*.

    Postgres uses the table's sequence; mysql/sqlite auto-increment on NULL.

    :raises Exception: if the database engine is not postgres, mysql or sqlite
    """
    engine = migrate_engine.name
    if engine == 'postgres':
        return "nextval('%s_%s_seq')" % ( table, col )
    if engine in ( 'mysql', 'sqlite' ):
        return "null"
    raise Exception( 'Unable to convert data for unknown database type: %s' % engine )
# New table recording the state history of each request; replaces the single
# `state` column previously held on the request table (dropped in upgrade()).
RequestEvent_table = Table('request_event', metadata,
    Column( "id", Integer, primary_key=True),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "request_id", Integer, ForeignKey( "request.id" ), index=True ),
    Column( "state", TrimmedString( 255 ), index=True ),
    Column( "comment", TEXT ) )
def upgrade():
    """Create request_event, seed it from request.state, then drop that column."""
    display_migration_details()
    # Load existing tables
    metadata.reflect()
    # Add new request_event table
    try:
        RequestEvent_table.create()
    except Exception, e:
        log.debug( "Creating request_event table failed: %s" % str( e ) )
    # move the current state of all existing requests to the request_event table
    cmd = \
        "INSERT INTO request_event " + \
        "SELECT %s AS id," + \
        "%s AS create_time," + \
        "%s AS update_time," + \
        "request.id AS request_id," + \
        "request.state AS state," + \
        "'%s' AS comment " + \
        "FROM request;"
    # id/timestamp expressions depend on the active engine (see nextval/localtimestamp)
    cmd = cmd % ( nextval('request_event'), localtimestamp(), localtimestamp(), 'Imported from request table')
    db_session.execute( cmd )
    # Delete the state column
    try:
        Request_table = Table( "request", metadata, autoload=True )
    except NoSuchTableError:
        Request_table = None
        log.debug( "Failed loading table request" )
    if Request_table:
        try:
            Request_table.c.state.drop()
        except Exception, e:
            log.debug( "Deleting column 'state' to request table failed: %s" % ( str( e ) ) )
def downgrade():
    # Not reversible: the dropped request.state column and the rows moved into
    # request_event are not restored.
    pass
|
volpino/Yeps-EURAC
|
lib/galaxy/model/migrate/versions/0027_request_events.py
|
Python
|
mit
| 3,386
|
[
"Galaxy"
] |
fdbcc446519700c62f916b7818304db4ddd8810809b8975fd1cbbcce0b6c2b91
|
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# Copyright (c) 2009-2010 Arista Networks, Inc.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""basic checker for Python code"""
import collections
import itertools
import sys
import re
import six
from six.moves import zip # pylint: disable=redefined-builtin
from logilab.common.ureports import Table
import astroid
import astroid.bases
from astroid import are_exclusive, InferenceError
from pylint.interfaces import IAstroidChecker, INFERENCE, INFERENCE_FAILURE, HIGH
from pylint.utils import EmptyReport
from pylint.reporters import diff_string
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
check_messages,
clobber_in_except,
is_builtin_object,
is_inside_except,
overrides_a_method,
safe_infer,
get_argument_from_call,
has_known_bases,
NoSuchArgumentError,
is_import_error,
)
# regex for class/function/variable/constant name
CLASS_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$')
MOD_NAME_RGX = re.compile('(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$')
CONST_NAME_RGX = re.compile('(([A-Z_][A-Z0-9_]*)|(__.*__))$')
COMP_VAR_RGX = re.compile('[A-Za-z_][A-Za-z0-9_]*$')
DEFAULT_NAME_RGX = re.compile('[a-z_][a-z0-9_]{2,30}$')
CLASS_ATTRIBUTE_RGX = re.compile(r'([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$')
# do not require a doc string on system methods
NO_REQUIRED_DOC_RGX = re.compile('__.*__')
# pairs of dunder methods where the first can emulate the second
REVERSED_METHODS = (('__getitem__', '__len__'),
                    ('__reversed__', ))

# interpreter-version switches used by the checkers below
PY33 = sys.version_info >= (3, 3)
PY3K = sys.version_info >= (3, 0)
BAD_FUNCTIONS = ['map', 'filter']
if sys.version_info < (3, 0):
    BAD_FUNCTIONS.append('input')

# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = set(('exempt', 'ignore'))

# A mapping from builtin-qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
    zip(['.'.join([astroid.bases.BUILTINS, x]) for x in ('set', 'dict', 'list')],
        ['set()', '{}', '[]'])
)

# all patterns above are compiled; the re module name itself is not needed further
del re
def _redefines_import(node):
    """ Detect that the given node (AssName) is inside an
    exception handler and redefines an import from the tryexcept body.

    Returns True if the node redefines an import, False otherwise.
    """
    # climb until the parent is the ExceptHandler itself (or we run out)
    current = node
    while current and not isinstance(current.parent, astroid.ExceptHandler):
        current = current.parent
    # only relevant when the enclosing handler catches ImportError
    if not current or not is_import_error(current.parent):
        return False
    # compare the assigned name against each name (or its alias) imported
    # inside the corresponding try block
    try_block = current.parent.parent
    for import_node in try_block.nodes_of_class((astroid.From, astroid.Import)):
        for name, alias in import_node.names:
            if alias:
                if alias == node.name:
                    return True
            elif name == node.name:
                return True
    return False
def in_loop(node):
    """return True if the node is inside a kind of for loop"""
    # Comprehensions count as loops for this check as well.
    loop_types = (astroid.For, astroid.ListComp, astroid.SetComp,
                  astroid.DictComp, astroid.GenExpr)
    ancestor = node.parent
    while ancestor is not None:
        if isinstance(ancestor, loop_types):
            return True
        ancestor = ancestor.parent
    return False
def in_nested_list(nested_list, obj):
    """return true if the object is an element of <nested_list> or of a nested
    list
    """
    # A list/tuple element is searched recursively; anything else is
    # compared directly against the target object.
    return any(
        in_nested_list(element, obj) if isinstance(element, (list, tuple))
        else element == obj
        for element in nested_list)
def _loop_exits_early(loop):
    """Returns true if a loop has a break statement in its body."""
    loop_nodes = (astroid.For, astroid.While)
    # Only the loop's own body is scanned; a break in the orelse clause
    # would belong to a different construct.
    for stmt in loop.body:
        if isinstance(stmt, loop_nodes):
            # For a nested loop, only its orelse can contain a break that
            # escapes the outer loop; breaks in its body bind to it.
            for orelse in (stmt.orelse or ()):
                for _ in orelse.nodes_of_class(astroid.Break,
                                               skip_klass=loop_nodes):
                    return True
        else:
            # Any break found here (not hidden inside a deeper loop)
            # exits this loop.
            for _ in stmt.nodes_of_class(astroid.Break, skip_klass=loop_nodes):
                return True
    return False
def _is_multi_naming_match(match, node_type, confidence):
return (match is not None and
match.lastgroup is not None and
match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != 'method' or confidence != INFERENCE_FAILURE))
# Qualified names that mark a decorated function as a property; the
# builtins module is named differently on Python 2 and 3.
if sys.version_info < (3, 0):
    PROPERTY_CLASSES = set(('__builtin__.property', 'abc.abstractproperty'))
else:
    PROPERTY_CLASSES = set(('builtins.property', 'abc.abstractproperty'))
# Qualified names of the `abc` decorators that mark a method as abstract.
ABC_METHODS = set(('abc.abstractproperty', 'abc.abstractmethod',
                   'abc.abstractclassmethod', 'abc.abstractstaticmethod'))
def _determine_function_name_type(node):
    """Determine the name type whose regex the a function's name should match.

    :param node: A function node.
    :returns: One of ('function', 'method', 'attr')
    """
    if not node.is_method():
        return 'function'
    decorator_nodes = node.decorators.nodes if node.decorators else []
    for decorator in decorator_nodes:
        # A @property or @abc.abstractproperty decorator makes the method
        # behave like an attribute, so its name is checked as one.
        is_property_candidate = (
            isinstance(decorator, astroid.Name) or
            (isinstance(decorator, astroid.Getattr) and
             decorator.attrname == 'abstractproperty'))
        if is_property_candidate:
            infered = safe_infer(decorator)
            if infered and infered.qname() in PROPERTY_CLASSES:
                return 'attr'
        # The prop_method.setter / prop_method.deleter form is also
        # treated as an attribute.
        elif (isinstance(decorator, astroid.Getattr) and
              decorator.attrname in ('setter', 'deleter')):
            return 'attr'
    return 'method'
def decorated_with_abc(func):
    """Determine if the `func` node is decorated
    with `abc` decorators (abstractmethod et co.)

    :param func: A function node (with a `decorators` attribute).
    :returns: True if an `abc` abstract decorator is found, False otherwise.
    """
    if func.decorators:
        for node in func.decorators.nodes:
            try:
                infered = next(node.infer())
            except InferenceError:
                # Undecidable decorator: skip it rather than guess.
                continue
            if infered and infered.qname() in ABC_METHODS:
                return True
    # Bug fix: previously the function fell off the end and returned an
    # implicit None; return an explicit (still falsy) False instead.
    return False
def has_abstract_methods(node):
    """
    Determine if the given `node` has
    abstract methods, defined with `abc` module.
    """
    # Short-circuit on the first abstract method found.
    for method in node.methods():
        if decorated_with_abc(method):
            return True
    return False
def report_by_type_stats(sect, stats, old_stats):
    """make a report of

    * percentage of different types documented
    * percentage of different types with a bad name
    """
    checked_types = ('module', 'class', 'method', 'function')
    # First pass: compute documented/badname percentages per node type.
    summary = {}
    for checked in checked_types:
        try:
            total = stats[checked]
        except KeyError:
            raise EmptyReport()
        summary[checked] = {}
        if total == 0:
            continue
        try:
            documented = total - stats['undocumented_' + checked]
            summary[checked]['percent_documented'] = (
                '%.2f' % ((documented * 100.) / total))
        except KeyError:
            summary[checked]['percent_documented'] = 'NC'
        try:
            summary[checked]['percent_badname'] = (
                '%.2f' % ((stats['badname_' + checked] * 100.) / total))
        except KeyError:
            summary[checked]['percent_badname'] = 'NC'
    # Second pass: lay out one 6-column row per node type, after a header.
    lines = ('type', 'number', 'old number', 'difference',
             '%documented', '%badname')
    for checked in checked_types:
        new = stats[checked]
        old = old_stats.get(checked, None)
        if old is not None:
            diff_str = diff_string(old, new)
        else:
            old, diff_str = 'NC', 'NC'
        lines += (checked, str(new), str(old), diff_str,
                  summary[checked].get('percent_documented', '0'),
                  summary[checked].get('percent_badname', '0'))
    sect.append(Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
    """return True if the object is a method redefined via decorator.

    For example:
        @property
        def x(self): return self._x
        @x.setter
        def x(self, value): self._x = value
    """
    if not node.decorators:
        return False
    for decorator in node.decorators.nodes:
        # A decorator like `x.setter` on a function named `x` marks an
        # intentional redefinition.
        if (isinstance(decorator, astroid.Getattr) and
                getattr(decorator.expr, 'name', None) == node.name):
            return True
    return False
class _BasicChecker(BaseChecker):
    """Shared base for the 'basic' checkers: declares the astroid-walker
    interface and the common checker name.
    """
    __implements__ = IAstroidChecker
    name = 'basic'
class BasicErrorChecker(_BasicChecker):
    """Checker for constructs that are almost certainly errors: generator
    __init__, redefinitions, break/continue/return/yield out of place,
    C-style ++/-- operators, duplicate argument names, instantiation of
    abstract classes, and useless else clauses on loops.
    """
    msgs = {
        'E0100': ('__init__ method is a generator',
                  'init-is-generator',
                  'Used when the special class method __init__ is turned into a '
                  'generator by a yield in its body.'),
        'E0101': ('Explicit return in __init__',
                  'return-in-init',
                  'Used when the special class method __init__ has an explicit '
                  'return value.'),
        'E0102': ('%s already defined line %s',
                  'function-redefined',
                  'Used when a function / class / method is redefined.'),
        'E0103': ('%r not properly in loop',
                  'not-in-loop',
                  'Used when break or continue keywords are used outside a loop.'),
        'E0104': ('Return outside function',
                  'return-outside-function',
                  'Used when a "return" statement is found outside a function or '
                  'method.'),
        'E0105': ('Yield outside function',
                  'yield-outside-function',
                  'Used when a "yield" statement is found outside a function or '
                  'method.'),
        'E0106': ('Return with argument inside generator',
                  'return-arg-in-generator',
                  'Used when a "return" statement with an argument is found '
                  'outside in a generator function or method (e.g. with some '
                  '"yield" statements).',
                  {'maxversion': (3, 3)}),
        'E0107': ("Use of the non-existent %s operator",
                  'nonexistent-operator',
                  "Used when you attempt to use the C-style pre-increment or"
                  "pre-decrement operator -- and ++, which doesn't exist in Python."),
        'E0108': ('Duplicate argument name %s in function definition',
                  'duplicate-argument-name',
                  'Duplicate argument names in function definitions are syntax'
                  ' errors.'),
        'E0110': ('Abstract class with abstract methods instantiated',
                  'abstract-class-instantiated',
                  'Used when an abstract class with `abc.ABCMeta` as metaclass '
                  'has abstract methods and is instantiated.'),
        'W0120': ('Else clause on loop without a break statement',
                  'useless-else-on-loop',
                  'Loops should only have an else clause if they can exit early '
                  'with a break statement, otherwise the statements under else '
                  'should be on the same scope as the loop itself.'),
    }
    @check_messages('function-redefined')
    def visit_class(self, node):
        """Check that the class does not redefine an existing name."""
        self._check_redefinition('class', node)
    @check_messages('init-is-generator', 'return-in-init',
                    'function-redefined', 'return-arg-in-generator',
                    'duplicate-argument-name')
    def visit_function(self, node):
        """Check a function/method definition: redefinition, yield/return
        misuse in __init__ and generators, and duplicate argument names.
        """
        if not redefined_by_decorator(node):
            self._check_redefinition(node.is_method() and 'method' or 'function', node)
        # checks for max returns, branch, return in __init__
        returns = node.nodes_of_class(astroid.Return,
                                      skip_klass=(astroid.Function, astroid.Class))
        if node.is_method() and node.name == '__init__':
            if node.is_generator():
                self.add_message('init-is-generator', node=node)
            else:
                values = [r.value for r in returns]
                # Are we returning anything but None from constructors
                if [v for v in values
                        if not (v is None or
                                (isinstance(v, astroid.Const) and v.value is None) or
                                (isinstance(v, astroid.Name) and v.name == 'None')
                               )]:
                    self.add_message('return-in-init', node=node)
        elif node.is_generator():
            # make sure we don't mix non-None returns and yields
            if not PY33:
                for retnode in returns:
                    if isinstance(retnode.value, astroid.Const) and \
                           retnode.value.value is not None:
                        self.add_message('return-arg-in-generator', node=node,
                                         line=retnode.fromlineno)
        # Check for duplicate names
        args = set()
        for name in node.argnames():
            if name in args:
                self.add_message('duplicate-argument-name', node=node, args=(name,))
            else:
                args.add(name)
    @check_messages('return-outside-function')
    def visit_return(self, node):
        """Warn about a return statement outside of any function."""
        if not isinstance(node.frame(), astroid.Function):
            self.add_message('return-outside-function', node=node)
    @check_messages('yield-outside-function')
    def visit_yield(self, node):
        """Warn about a yield outside of a function or lambda."""
        if not isinstance(node.frame(), (astroid.Function, astroid.Lambda)):
            self.add_message('yield-outside-function', node=node)
    @check_messages('not-in-loop')
    def visit_continue(self, node):
        """Check that continue appears inside a loop."""
        self._check_in_loop(node, 'continue')
    @check_messages('not-in-loop')
    def visit_break(self, node):
        """Check that break appears inside a loop."""
        self._check_in_loop(node, 'break')
    @check_messages('useless-else-on-loop')
    def visit_for(self, node):
        """Check for a useless else clause on a for loop."""
        self._check_else_on_loop(node)
    @check_messages('useless-else-on-loop')
    def visit_while(self, node):
        """Check for a useless else clause on a while loop."""
        self._check_else_on_loop(node)
    @check_messages('nonexistent-operator')
    def visit_unaryop(self, node):
        """check use of the non-existent ++ and -- operator operator"""
        if ((node.op in '+-') and
                isinstance(node.operand, astroid.UnaryOp) and
                (node.operand.op == node.op)):
            self.add_message('nonexistent-operator', node=node, args=node.op*2)
    @check_messages('abstract-class-instantiated')
    def visit_callfunc(self, node):
        """ Check instantiating abstract class with
        abc.ABCMeta as metaclass.
        """
        try:
            infered = next(node.func.infer())
        except astroid.InferenceError:
            return
        if not isinstance(infered, astroid.Class):
            return
        # __init__ was called
        metaclass = infered.metaclass()
        abstract_methods = has_abstract_methods(infered)
        if metaclass is None:
            # Python 3.4 has `abc.ABC`, which won't be detected
            # by ClassNode.metaclass()
            for ancestor in infered.ancestors():
                if ancestor.qname() == 'abc.ABC' and abstract_methods:
                    self.add_message('abstract-class-instantiated', node=node)
                    break
            return
        if metaclass.qname() == 'abc.ABCMeta' and abstract_methods:
            self.add_message('abstract-class-instantiated', node=node)
    def _check_else_on_loop(self, node):
        """Check that any loop with an else clause has a break statement."""
        if node.orelse and not _loop_exits_early(node):
            self.add_message('useless-else-on-loop', node=node,
                             # This is not optimal, but the line previous
                             # to the first statement in the else clause
                             # will usually be the one that contains the else:.
                             line=node.orelse[0].lineno - 1)
    def _check_in_loop(self, node, node_name):
        """check that a node is inside a for or while loop"""
        _node = node.parent
        while _node:
            if isinstance(_node, (astroid.For, astroid.While)):
                break
            _node = _node.parent
        else:
            self.add_message('not-in-loop', node=node, args=node_name)
    def _check_redefinition(self, redeftype, node):
        """check for redefinition of a function / method / class name"""
        defined_self = node.parent.frame()[node.name]
        if defined_self is not node and not are_exclusive(node, defined_self):
            self.add_message('function-redefined', node=node,
                             args=(redeftype, defined_self.fromlineno))
class BasicChecker(_BasicChecker):
    """checks for :
    * doc strings
    * number of arguments, local variables, branches, returns and statements in
    functions, methods
    * required module attributes
    * dangerous default values as arguments
    * redefinition of function / method / class
    * uses of the global statement
    """
    __implements__ = IAstroidChecker
    name = 'basic'
    msgs = {
        'W0101': ('Unreachable code',
                  'unreachable',
                  'Used when there is some code behind a "return" or "raise" '
                  'statement, which will never be accessed.'),
        'W0102': ('Dangerous default value %s as argument',
                  'dangerous-default-value',
                  'Used when a mutable value as list or dictionary is detected in '
                  'a default value for an argument.'),
        'W0104': ('Statement seems to have no effect',
                  'pointless-statement',
                  'Used when a statement doesn\'t have (or at least seems to) '
                  'any effect.'),
        'W0105': ('String statement has no effect',
                  'pointless-string-statement',
                  'Used when a string is used as a statement (which of course '
                  'has no effect). This is a particular case of W0104 with its '
                  'own message so you can easily disable it if you\'re using '
                  'those strings as documentation, instead of comments.'),
        'W0106': ('Expression "%s" is assigned to nothing',
                  'expression-not-assigned',
                  'Used when an expression that is not a function call is assigned '
                  'to nothing. Probably something else was intended.'),
        'W0108': ('Lambda may not be necessary',
                  'unnecessary-lambda',
                  'Used when the body of a lambda expression is a function call '
                  'on the same argument list as the lambda itself; such lambda '
                  'expressions are in all but a few cases replaceable with the '
                  'function being called in the body of the lambda.'),
        'W0109': ("Duplicate key %r in dictionary",
                  'duplicate-key',
                  'Used when a dictionary expression binds the same key multiple '
                  'times.'),
        'W0122': ('Use of exec',
                  'exec-used',
                  'Used when you use the "exec" statement (function for Python '
                  '3), to discourage its usage. That doesn\'t '
                  'mean you can not use it !'),
        'W0123': ('Use of eval',
                  'eval-used',
                  'Used when you use the "eval" function, to discourage its '
                  'usage. Consider using `ast.literal_eval` for safely evaluating '
                  'strings containing Python expressions '
                  'from untrusted sources. '),
        'W0141': ('Used builtin function %r',
                  'bad-builtin',
                  'Used when a black listed builtin function is used (see the '
                  'bad-function option). Usual black listed functions are the ones '
                  'like map, or filter , where Python offers now some cleaner '
                  'alternative like list comprehension.'),
        'W0142': ('Used * or ** magic',
                  'star-args',
                  'Used when a function or method is called using `*args` or '
                  '`**kwargs` to dispatch arguments. This doesn\'t improve '
                  'readability and should be used with care.'),
        'W0150': ("%s statement in finally block may swallow exception",
                  'lost-exception',
                  'Used when a break or a return statement is found inside the '
                  'finally clause of a try...finally block: the exceptions raised '
                  'in the try clause will be silently swallowed instead of being '
                  're-raised.'),
        'W0199': ('Assert called on a 2-uple. Did you mean \'assert x,y\'?',
                  'assert-on-tuple',
                  'A call of assert on a tuple will always evaluate to true if '
                  'the tuple is not empty, and will always evaluate to false if '
                  'it is.'),
        'C0121': ('Missing required attribute "%s"', # W0103
                  'missing-module-attribute',
                  'Used when an attribute required for modules is missing.'),
        'E0109': ('Missing argument to reversed()',
                  'missing-reversed-argument',
                  'Used when reversed() builtin didn\'t receive an argument.'),
        'E0111': ('The first reversed() argument is not a sequence',
                  'bad-reversed-sequence',
                  'Used when the first argument to reversed() builtin '
                  'isn\'t a sequence (does not implement __reversed__, '
                  'nor __getitem__ and __len__'),
    }
    options = (('required-attributes',
                {'default' : (), 'type' : 'csv',
                 'metavar' : '<attributes>',
                 'help' : 'Required attributes for module, separated by a '
                          'comma'}
               ),
               ('bad-functions',
                {'default' : BAD_FUNCTIONS,
                 'type' :'csv', 'metavar' : '<builtin function names>',
                 'help' : 'List of builtins function names that should not be '
                          'used, separated by a comma'}
               ),
              )
    reports = (('RP0101', 'Statistics by type', report_by_type_stats),)
    def __init__(self, linter):
        """Set up the checker; stats and the try/finally stack are
        (re-)initialized per run in open().
        """
        _BasicChecker.__init__(self, linter)
        self.stats = None
        self._tryfinallys = None
    def open(self):
        """initialize visit variables and statistics
        """
        self._tryfinallys = []
        self.stats = self.linter.add_stats(module=0, function=0,
                                           method=0, class_=0)
    @check_messages('missing-module-attribute')
    def visit_module(self, node):
        """check module name, docstring and required arguments
        """
        self.stats['module'] += 1
        for attr in self.config.required_attributes:
            if attr not in node:
                self.add_message('missing-module-attribute', node=node, args=attr)
    def visit_class(self, node): # pylint: disable=unused-argument
        """check module name, docstring and redefinition
        increment branch counter
        """
        self.stats['class'] += 1
    @check_messages('pointless-statement', 'pointless-string-statement',
                    'expression-not-assigned')
    def visit_discard(self, node):
        """check for various kind of statements without effect"""
        expr = node.value
        if isinstance(expr, astroid.Const) and isinstance(expr.value,
                                                          six.string_types):
            # treat string statement in a separated message
            # Handle PEP-257 attribute docstrings.
            # An attribute docstring is defined as being a string right after
            # an assignment at the module level, class level or __init__ level.
            scope = expr.scope()
            if isinstance(scope, (astroid.Class, astroid.Module, astroid.Function)):
                if isinstance(scope, astroid.Function) and scope.name != '__init__':
                    pass
                else:
                    sibling = expr.previous_sibling()
                    if (sibling is not None and sibling.scope() is scope and
                            isinstance(sibling, astroid.Assign)):
                        return
            self.add_message('pointless-string-statement', node=node)
            return
        # ignore if this is :
        # * a direct function call
        # * the unique child of a try/except body
        # * a yield (which are wrapped by a discard node in _ast XXX)
        # warn W0106 if we have any underlying function call (we can't predict
        # side effects), else pointless-statement
        if (isinstance(expr, (astroid.Yield, astroid.CallFunc)) or
                (isinstance(node.parent, astroid.TryExcept) and
                 node.parent.body == [node])):
            return
        if any(expr.nodes_of_class(astroid.CallFunc)):
            self.add_message('expression-not-assigned', node=node,
                             args=expr.as_string())
        else:
            self.add_message('pointless-statement', node=node)
    @check_messages('unnecessary-lambda')
    def visit_lambda(self, node):
        """check whether or not the lambda is suspicious
        """
        # if the body of the lambda is a call expression with the same
        # argument list as the lambda itself, then the lambda is
        # possibly unnecessary and at least suspicious.
        if node.args.defaults:
            # If the arguments of the lambda include defaults, then a
            # judgment cannot be made because there is no way to check
            # that the defaults defined by the lambda are the same as
            # the defaults defined by the function called in the body
            # of the lambda.
            return
        call = node.body
        if not isinstance(call, astroid.CallFunc):
            # The body of the lambda must be a function call expression
            # for the lambda to be unnecessary.
            return
        # XXX are lambda still different with astroid >= 0.18 ?
        # *args and **kwargs need to be treated specially, since they
        # are structured differently between the lambda and the function
        # call (in the lambda they appear in the args.args list and are
        # indicated as * and ** by two bits in the lambda's flags, but
        # in the function call they are omitted from the args list and
        # are indicated by separate attributes on the function call node).
        ordinary_args = list(node.args.args)
        if node.args.kwarg:
            if (not call.kwargs
                    or not isinstance(call.kwargs, astroid.Name)
                    or node.args.kwarg != call.kwargs.name):
                return
        elif call.kwargs:
            return
        if node.args.vararg:
            if (not call.starargs
                    or not isinstance(call.starargs, astroid.Name)
                    or node.args.vararg != call.starargs.name):
                return
        elif call.starargs:
            return
        # The "ordinary" arguments must be in a correspondence such that:
        # ordinary_args[i].name == call.args[i].name.
        if len(ordinary_args) != len(call.args):
            return
        for i in range(len(ordinary_args)):
            if not isinstance(call.args[i], astroid.Name):
                return
            if node.args.args[i].name != call.args[i].name:
                return
        if (isinstance(node.body.func, astroid.Getattr) and
                isinstance(node.body.func.expr, astroid.CallFunc)):
            # Chained call, the intermediate call might
            # return something else (but we don't check that, yet).
            return
        self.add_message('unnecessary-lambda', line=node.fromlineno, node=node)
    @check_messages('dangerous-default-value')
    def visit_function(self, node):
        """check function name, docstring, arguments, redefinition,
        variable names, max locals
        """
        self.stats[node.is_method() and 'method' or 'function'] += 1
        # check for dangerous default values as arguments
        for default in node.args.defaults:
            try:
                value = next(default.infer())
            except astroid.InferenceError:
                continue
            if (isinstance(value, astroid.Instance) and
                    value.qname() in DEFAULT_ARGUMENT_SYMBOLS):
                if value is default:
                    msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
                elif type(value) is astroid.Instance:
                    if isinstance(default, astroid.CallFunc):
                        # this argument is direct call to list() or dict() etc
                        msg = '%s() (%s)' % (value.name, value.qname())
                    else:
                        # this argument is a variable from somewhere else which turns
                        # out to be a list or dict
                        msg = '%s (%s)' % (default.as_string(), value.qname())
                else:
                    # this argument is a name
                    msg = '%s (%s)' % (default.as_string(),
                                       DEFAULT_ARGUMENT_SYMBOLS[value.qname()])
                self.add_message('dangerous-default-value', node=node, args=(msg,))
    @check_messages('unreachable', 'lost-exception')
    def visit_return(self, node):
        """1 - check is the node has a right sibling (if so, that's some
        unreachable code)
        2 - check is the node is inside the finally clause of a try...finally
        block
        """
        self._check_unreachable(node)
        # Is it inside final body of a try...finally bloc ?
        self._check_not_in_finally(node, 'return', (astroid.Function,))
    @check_messages('unreachable')
    def visit_continue(self, node):
        """check is the node has a right sibling (if so, that's some unreachable
        code)
        """
        self._check_unreachable(node)
    @check_messages('unreachable', 'lost-exception')
    def visit_break(self, node):
        """1 - check is the node has a right sibling (if so, that's some
        unreachable code)
        2 - check is the node is inside the finally clause of a try...finally
        block
        """
        # 1 - Is it right sibling ?
        self._check_unreachable(node)
        # 2 - Is it inside final body of a try...finally bloc ?
        self._check_not_in_finally(node, 'break', (astroid.For, astroid.While,))
    @check_messages('unreachable')
    def visit_raise(self, node):
        """check if the node has a right sibling (if so, that's some unreachable
        code)
        """
        self._check_unreachable(node)
    @check_messages('exec-used')
    def visit_exec(self, node):
        """just print a warning on exec statements"""
        self.add_message('exec-used', node=node)
    @check_messages('bad-builtin', 'star-args', 'eval-used',
                    'exec-used', 'missing-reversed-argument',
                    'bad-reversed-sequence')
    def visit_callfunc(self, node):
        """visit a CallFunc node -> check if this is not a blacklisted builtin
        call and check for * or ** use
        """
        if isinstance(node.func, astroid.Name):
            name = node.func.name
            # ignore the name if it's not a builtin (i.e. not defined in the
            # locals nor globals scope)
            if not (name in node.frame() or
                    name in node.root()):
                if name == 'exec':
                    self.add_message('exec-used', node=node)
                elif name == 'reversed':
                    self._check_reversed(node)
                elif name == 'eval':
                    self.add_message('eval-used', node=node)
                if name in self.config.bad_functions:
                    self.add_message('bad-builtin', node=node, args=name)
        if node.starargs or node.kwargs:
            scope = node.scope()
            if isinstance(scope, astroid.Function):
                toprocess = [(n, vn) for (n, vn) in ((node.starargs, scope.args.vararg),
                                                     (node.kwargs, scope.args.kwarg)) if n]
                if toprocess:
                    for cfnode, fargname in toprocess[:]:
                        if getattr(cfnode, 'name', None) == fargname:
                            toprocess.remove((cfnode, fargname))
                    if not toprocess:
                        return # star-args can be skipped
            self.add_message('star-args', node=node.func)
    @check_messages('assert-on-tuple')
    def visit_assert(self, node):
        """check the use of an assert statement on a tuple."""
        if node.fail is None and isinstance(node.test, astroid.Tuple) and \
                len(node.test.elts) == 2:
            self.add_message('assert-on-tuple', node=node)
    @check_messages('duplicate-key')
    def visit_dict(self, node):
        """check duplicate key in dictionary"""
        keys = set()
        for k, _ in node.items:
            if isinstance(k, astroid.Const):
                key = k.value
                if key in keys:
                    self.add_message('duplicate-key', node=node, args=key)
                keys.add(key)
    def visit_tryfinally(self, node):
        """update try...finally flag"""
        self._tryfinallys.append(node)
    def leave_tryfinally(self, node): # pylint: disable=unused-argument
        """update try...finally flag"""
        self._tryfinallys.pop()
    def _check_unreachable(self, node):
        """check unreachable code"""
        unreach_stmt = node.next_sibling()
        if unreach_stmt is not None:
            self.add_message('unreachable', node=unreach_stmt)
    def _check_not_in_finally(self, node, node_name, breaker_classes=()):
        """check that a node is not inside a finally clause of a
        try...finally statement.
        If we found before a try...finally bloc a parent which its type is
        in breaker_classes, we skip the whole check."""
        # if self._tryfinallys is empty, we're not a in try...finally bloc
        if not self._tryfinallys:
            return
        # the node could be a grand-grand...-children of the try...finally
        _parent = node.parent
        _node = node
        while _parent and not isinstance(_parent, breaker_classes):
            if hasattr(_parent, 'finalbody') and _node in _parent.finalbody:
                self.add_message('lost-exception', node=node, args=node_name)
                return
            _node = _parent
            _parent = _node.parent
    def _check_reversed(self, node):
        """ check that the argument to `reversed` is a sequence """
        try:
            argument = safe_infer(get_argument_from_call(node, position=0))
        except NoSuchArgumentError:
            self.add_message('missing-reversed-argument', node=node)
        else:
            if argument is astroid.YES:
                return
            if argument is None:
                # Nothing was infered.
                # Try to see if we have iter().
                if isinstance(node.args[0], astroid.CallFunc):
                    try:
                        func = next(node.args[0].func.infer())
                    except InferenceError:
                        return
                    if (getattr(func, 'name', None) == 'iter' and
                            is_builtin_object(func)):
                        self.add_message('bad-reversed-sequence', node=node)
                return
            if isinstance(argument, astroid.Instance):
                if (argument._proxied.name == 'dict' and
                        is_builtin_object(argument._proxied)):
                    self.add_message('bad-reversed-sequence', node=node)
                    return
                elif any(ancestor.name == 'dict' and is_builtin_object(ancestor)
                         for ancestor in argument._proxied.ancestors()):
                    # mappings aren't accepted by reversed()
                    self.add_message('bad-reversed-sequence', node=node)
                    return
                # Accept anything providing __reversed__, or both
                # __getitem__ and __len__ (see REVERSED_METHODS).
                for methods in REVERSED_METHODS:
                    for meth in methods:
                        try:
                            argument.getattr(meth)
                        except astroid.NotFoundError:
                            break
                    else:
                        break
                else:
                    # Check if it is a .deque. It doesn't seem that
                    # we can retrieve special methods
                    # from C implemented constructs.
                    if argument._proxied.qname().endswith(".deque"):
                        return
                    self.add_message('bad-reversed-sequence', node=node)
            elif not isinstance(argument, (astroid.List, astroid.Tuple)):
                # everything else is not a proper sequence for reversed()
                self.add_message('bad-reversed-sequence', node=node)
# Maps a name category to (regexp for valid names, human-readable label);
# consumed by _create_naming_options() and NameChecker.
_NAME_TYPES = {
    'module': (MOD_NAME_RGX, 'module'),
    'const': (CONST_NAME_RGX, 'constant'),
    'class': (CLASS_NAME_RGX, 'class'),
    'function': (DEFAULT_NAME_RGX, 'function'),
    'method': (DEFAULT_NAME_RGX, 'method'),
    'attr': (DEFAULT_NAME_RGX, 'attribute'),
    'argument': (DEFAULT_NAME_RGX, 'argument'),
    'variable': (DEFAULT_NAME_RGX, 'variable'),
    'class_attribute': (CLASS_ATTRIBUTE_RGX, 'class attribute'),
    'inlinevar': (COMP_VAR_RGX, 'inline iteration'),
}
def _create_naming_options():
    """Build the '<type>-rgx' and '<type>-name-hint' option tuples for
    every name category declared in _NAME_TYPES."""
    options = []
    for name_type, (rgx, human_readable_name) in six.iteritems(_NAME_TYPES):
        # Option names use dashes, not underscores.
        opt_prefix = name_type.replace('_', '-')
        options.append((
            '%s-rgx' % (opt_prefix,),
            {'default': rgx, 'type': 'regexp', 'metavar': '<regexp>',
             'help': 'Regular expression matching correct %s names' % (human_readable_name,)}))
        options.append((
            '%s-name-hint' % (opt_prefix,),
            {'default': rgx.pattern, 'type': 'string', 'metavar': '<string>',
             'help': 'Naming hint for %s names' % (human_readable_name,)}))
    return tuple(options)
class NameChecker(_BasicChecker):
msgs = {
'C0102': ('Black listed name "%s"',
'blacklisted-name',
'Used when the name is listed in the black list (unauthorized '
'names).'),
'C0103': ('Invalid %s name "%s"%s',
'invalid-name',
'Used when the name doesn\'t match the regular expression '
'associated to its type (constant, variable, class...).'),
}
options = (('good-names',
{'default' : ('i', 'j', 'k', 'ex', 'Run', '_'),
'type' :'csv', 'metavar' : '<names>',
'help' : 'Good variable names which should always be accepted,'
' separated by a comma'}
),
('bad-names',
{'default' : ('foo', 'bar', 'baz', 'toto', 'tutu', 'tata'),
'type' :'csv', 'metavar' : '<names>',
'help' : 'Bad variable names which should always be refused, '
'separated by a comma'}
),
('name-group',
{'default' : (),
'type' :'csv', 'metavar' : '<name1:name2>',
'help' : ('Colon-delimited sets of names that determine each'
' other\'s naming style when the name regexes'
' allow several styles.')}
),
('include-naming-hint',
{'default': False, 'type' : 'yn', 'metavar' : '<y_or_n>',
'help': 'Include a hint for the correct naming format with invalid-name'}
),
) + _create_naming_options()
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
def open(self):
self.stats = self.linter.add_stats(badname_module=0,
badname_class=0, badname_function=0,
badname_method=0, badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0)
for group in self.config.name_group:
for name_type in group.split(':'):
self._name_group[name_type] = 'group_%s' % (group,)
@check_messages('blacklisted-name', 'invalid-name')
def visit_module(self, node):
self._check_name('module', node.name.split('.')[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in six.itervalues(self._bad_names):
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in six.itervalues(all_groups):
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group))
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@check_messages('blacklisted-name', 'invalid-name')
def visit_class(self, node):
self._check_name('class', node.name, node)
for attr, anodes in six.iteritems(node.instance_attrs):
if not list(node.instance_attr_ancestors(attr)):
self._check_name('attr', attr, anodes[0])
@check_messages('blacklisted-name', 'invalid-name')
def visit_function(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
confidence = HIGH
if node.is_method():
if overrides_a_method(node.parent.frame(), node.name):
return
confidence = (INFERENCE if has_known_bases(node.parent.frame())
else INFERENCE_FAILURE)
self._check_name(_determine_function_name_type(node),
node.name, node, confidence)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
@check_messages('blacklisted-name', 'invalid-name')
def visit_global(self, node):
for name in node.names:
self._check_name('const', name, node)
@check_messages('blacklisted-name', 'invalid-name')
def visit_assname(self, node):
"""check module level assigned names"""
frame = node.frame()
ass_type = node.ass_type()
if isinstance(ass_type, astroid.Comprehension):
self._check_name('inlinevar', node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(ass_type, astroid.Assign) and not in_loop(ass_type):
if isinstance(safe_infer(ass_type.value), astroid.Class):
self._check_name('class', node.name, node)
else:
if not _redefines_import(node):
# Don't emit if the name redefines an import
# in an ImportError except handler.
self._check_name('const', node.name, node)
elif isinstance(ass_type, astroid.ExceptHandler):
self._check_name('variable', node.name, node)
elif isinstance(frame, astroid.Function):
# global introduced variable aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name('variable', node.name, node)
elif isinstance(frame, astroid.Class):
if not list(frame.local_attr_ancestors(node.name)):
self._check_name('class_attribute', node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssName):
self._check_name('argument', arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(self, node, node_type, name, confidence):
    """Emit 'invalid-name' for *name* and update the per-type statistics."""
    # Human-readable label for the node type (second item of the pair).
    type_label = _NAME_TYPES[node_type][1]
    hint = ''
    if self.config.include_naming_hint:
        # Append the configured naming hint, e.g. the expected regexp.
        hint = ' (hint: %s)' % (getattr(self.config, node_type + '_name_hint'))
    self.add_message('invalid-name', node=node, args=(type_label, name, hint),
                     confidence=confidence)
    self.stats['badname_' + node_type] += 1
def _check_name(self, node_type, name, node, confidence=HIGH):
    """check for a name using the type's regexp"""
    if is_inside_except(node):
        clobbering, _ = clobber_in_except(node)
        if clobbering:
            # The name clobbers an outer binding; reported elsewhere.
            return
    if name in self.config.good_names:
        return
    if name in self.config.bad_names:
        self.stats['badname_' + node_type] += 1
        self.add_message('blacklisted-name', node=node, args=name)
        return
    regexp = getattr(self.config, node_type + '_rgx')
    match = regexp.match(name)

    if _is_multi_naming_match(match, node_type, confidence):
        # Defer the verdict: matches within a naming group are
        # reconciled later so that only the minority style is reported.
        name_group = self._find_name_group(node_type)
        bad_name_group = self._bad_names.setdefault(name_group, {})
        warnings = bad_name_group.setdefault(match.lastgroup, [])
        warnings.append((node, node_type, name, confidence))

    if match is None:
        self._raise_name_warning(node, node_type, name, confidence)
class DocStringChecker(_BasicChecker):
    """Checker emitting C0111 (missing docstring) and C0112 (empty docstring)."""
    msgs = {
        'C0111': ('Missing %s docstring', # W0131
                  'missing-docstring',
                  'Used when a module, function, class or method has no docstring.'
                  'Some special methods like __init__ doesn\'t necessary require a '
                  'docstring.'),
        'C0112': ('Empty %s docstring', # W0132
                  'empty-docstring',
                  'Used when a module, function, class or method has an empty '
                  'docstring (it would be too easy ;).'),
        }
    options = (('no-docstring-rgx',
                {'default' : NO_REQUIRED_DOC_RGX,
                 'type' : 'regexp', 'metavar' : '<regexp>',
                 'help' : 'Regular expression which should only match '
                          'function or class names that do not require a '
                          'docstring.'}
               ),
               ('docstring-min-length',
                {'default' : -1,
                 'type' : 'int', 'metavar' : '<int>',
                 'help': ('Minimum line length for functions/classes that'
                          ' require docstrings, shorter ones are exempt.')}
               ),
              )

    def open(self):
        # Reset the per-run counters of undocumented objects.
        self.stats = self.linter.add_stats(undocumented_module=0,
                                           undocumented_function=0,
                                           undocumented_method=0,
                                           undocumented_class=0)

    @check_messages('missing-docstring', 'empty-docstring')
    def visit_module(self, node):
        self._check_docstring('module', node)

    @check_messages('missing-docstring', 'empty-docstring')
    def visit_class(self, node):
        # Classes matching no-docstring-rgx are exempt.
        if self.config.no_docstring_rgx.match(node.name) is None:
            self._check_docstring('class', node)

    @check_messages('missing-docstring', 'empty-docstring')
    def visit_function(self, node):
        if self.config.no_docstring_rgx.match(node.name) is None:
            ftype = node.is_method() and 'method' or 'function'
            if isinstance(node.parent.frame(), astroid.Class):
                overridden = False
                # Lower confidence when the class bases can't be inferred.
                confidence = (INFERENCE if has_known_bases(node.parent.frame())
                              else INFERENCE_FAILURE)
                # check if node is from a method overridden by its ancestor
                for ancestor in node.parent.frame().ancestors():
                    if node.name in ancestor and \
                       isinstance(ancestor[node.name], astroid.Function):
                        overridden = True
                        break
                self._check_docstring(ftype, node,
                                      report_missing=not overridden,
                                      confidence=confidence)
            else:
                self._check_docstring(ftype, node)

    def _check_docstring(self, node_type, node, report_missing=True,
                         confidence=HIGH):
        """check the node has a non empty docstring"""
        docstring = node.doc
        if docstring is None:
            if not report_missing:
                return
            if node.body:
                lines = node.body[-1].lineno - node.body[0].lineno + 1
            else:
                lines = 0
            # Short functions/classes are exempt (docstring-min-length);
            # modules always require a docstring.
            max_lines = self.config.docstring_min_length
            if node_type != 'module' and max_lines > -1 and lines < max_lines:
                return
            self.stats['undocumented_'+node_type] += 1
            if (node.body and isinstance(node.body[0], astroid.Discard) and
                    isinstance(node.body[0].value, astroid.CallFunc)):
                # Most likely a string with a format call. Let's see.
                func = safe_infer(node.body[0].value.func)
                if (isinstance(func, astroid.BoundMethod)
                        and isinstance(func.bound, astroid.Instance)):
                    # Strings in Python 3, others in Python 2.
                    if PY3K and func.bound.name == 'str':
                        return
                    elif func.bound.name in ('str', 'unicode', 'bytes'):
                        return
            self.add_message('missing-docstring', node=node, args=(node_type,),
                             confidence=confidence)
        elif not docstring.strip():
            self.stats['undocumented_'+node_type] += 1
            self.add_message('empty-docstring', node=node, args=(node_type,),
                             confidence=confidence)
class PassChecker(_BasicChecker):
    """check if the pass statement is really necessary"""

    msgs = {'W0107': ('Unnecessary pass statement',
                      'unnecessary-pass',
                      'Used when a "pass" statement that can be avoided is '
                      'encountered.'),
           }

    @check_messages('unnecessary-pass')
    def visit_pass(self, node):
        """Flag a ``pass`` that shares its suite with other statements."""
        siblings = node.parent.child_sequence(node)
        if len(siblings) > 1:
            self.add_message('unnecessary-pass', node=node)
class LambdaForComprehensionChecker(_BasicChecker):
    """Warn when a lambda is passed to map/filter instead of a comprehension.

    See <http://www.artima.com/weblogs/viewpost.jsp?thread=98196>
    where GvR says comprehensions would be clearer.
    """

    msgs = {'W0110': ('map/filter on lambda could be replaced by comprehension',
                      'deprecated-lambda',
                      'Used when a lambda is the first argument to "map" or '
                      '"filter". It could be clearer as a list '
                      'comprehension or generator expression.',
                      {'maxversion': (3, 0)}),
           }

    @check_messages('deprecated-lambda')
    def visit_callfunc(self, node):
        """Check a CallFunc node for map/filter applied to a lambda."""
        arguments = node.args
        if not arguments or not isinstance(arguments[0], astroid.Lambda):
            return
        called = safe_infer(node.func)
        if is_builtin_object(called) and called.name in ('map', 'filter'):
            self.add_message('deprecated-lambda', node=node)
def register(linter):
    """required method to auto register this checker"""
    for checker_class in (BasicErrorChecker,
                          BasicChecker,
                          NameChecker,
                          DocStringChecker,
                          PassChecker,
                          LambdaForComprehensionChecker):
        linter.register_checker(checker_class(linter))
|
HackFisher/depot_tools
|
third_party/pylint/checkers/base.py
|
Python
|
bsd-3-clause
| 54,520
|
[
"VisIt"
] |
1d6ae847730414cffc429642b371d19a760af0ea20d5826b06182d9774dbc4df
|
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import urllib2
import warnings

from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
    """Lookup plugin that fetches one or more URLs and returns their
    content as a flat list of response lines."""

    def run(self, terms, inject=None, **kwargs):
        """Fetch every URL in *terms*.

        :param terms: a URL string or a list of URL strings.
        :param inject: unused, kept for interface compatibility.
        :returns: list of lines accumulated from all successful responses;
                  URLs that fail to load are warned about and skipped.
        """
        if isinstance(terms, basestring):
            terms = [terms]

        ret = []
        for term in terms:
            try:
                response = urllib2.urlopen(urllib2.Request(term))
            # HTTPError is a subclass of URLError, so it must be caught
            # first.  The original code caught the unqualified (and
            # unimported) names URLError/HTTPError and called an undefined
            # ``utils`` module, so any failure raised NameError instead of
            # being handled.
            except urllib2.HTTPError as e:
                warnings.warn("Received HTTP error for %s : %s" % (term, str(e)))
                continue
            except urllib2.URLError as e:
                warnings.warn("Failed lookup url for %s : %s" % (term, str(e)))
                continue
            for line in response.read().splitlines():
                ret.append(line)
        return ret
|
bootswithdefer/ansible
|
v2/ansible/plugins/lookup/url.py
|
Python
|
gpl-3.0
| 1,545
|
[
"Brian"
] |
37072a7781c1159e9dab917e1e4480e497c982cf327ed253ee8a40dde5c872ef
|
########################################################################
# $HeadURL$
# File : DIRACPilotDirector.py
# Author : Ricardo Graciani
########################################################################
"""
Dirac PilotDirector class, it uses DIRAC CE backends to submit and monitor pilots.
It includes:
- basic configuration for Dirac PilotDirector
A DIRAC PilotDirector make use directly to CE methods to place the pilots on the
underlying resources.
"""
__RCSID__ = "$Id$"
import os, sys, tempfile, shutil, time, base64, bz2
from DIRAC.WorkloadManagementSystem.private.PilotDirector import PilotDirector
from DIRAC.Resources.Computing.ComputingElementFactory import ComputingElementFactory
from DIRAC.Resources.Computing.ComputingElement import getResourceDict
from DIRAC.Core.Security import CS
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.Utilities.DictCache import DictCache
ERROR_CE = 'No CE available'
ERROR_JDL = 'Could not create Pilot script'
ERROR_SCRIPT = 'Could not copy Pilot script'
#COMPUTING_ELEMENTS = ['InProcess']
COMPUTING_ELEMENTS = []
WAITING_TO_RUNNING_RATIO = 0.5
MAX_WAITING_JOBS = 50
MAX_NUMBER_JOBS = 10000
class DIRACPilotDirector(PilotDirector):
  """
  DIRAC PilotDirector class

  Submits pilot jobs directly through DIRAC ComputingElement backends
  (no grid middleware in between).
  """
  def __init__( self, submitPool ):
    """
    Define some defaults and call parent __init__
    """
    self.gridMiddleware = 'DIRAC'

    PilotDirector.__init__( self, submitPool )

    self.computingElementList = COMPUTING_ELEMENTS
    self.computingElementDict = {}
    self.addComputingElement( self.computingElementList )

    # A Director is bound to one site; refuse to run without one.
    self.siteName = gConfig.getValue('/LocalSite/Site','')
    if not self.siteName:
      self.log.error( 'Can not run a Director if Site Name is not defined' )
      sys.exit()

    # Caches of CEs that recently failed / ticketed; entries expire on their own.
    self.__failingCECache = DictCache()
    self.__ticketsCECache = DictCache()

  def configure(self, csSection, submitPool ):
    """
    Here goes common configuration for DIRAC PilotDirector
    """
    PilotDirector.configure( self, csSection, submitPool )
    self.reloadConfiguration( csSection, submitPool )

    self.__failingCECache.purgeExpired()
    self.__ticketsCECache.purgeExpired()

    # Drop from the working set any CE that is still in the failing cache.
    for ce in self.__failingCECache.getKeys():
      if ce in self.computingElementDict.keys():
        try:
          del self.computingElementDict[ce]
        except:
          # best-effort removal
          pass
    if self.computingElementDict:
      self.log.info( ' ComputingElements:', ', '.join(self.computingElementDict.keys()) )
    else:
      return

    # FIXME: this is to start testing
    # Picks an arbitrary CE as "the" CE (dict ordering); used by _listQueues.
    _ceName, computingElementDict = self.computingElementDict.items()[0]

    self.computingElement = computingElementDict['CE']
    self.log.debug( self.computingElement.getCEStatus() )

    self.log.info( ' SiteName:', self.siteName )

  def configureFromSection( self, mySection ):
    """
    reload from CS
    """
    PilotDirector.configureFromSection( self, mySection )

    self.computingElementList = gConfig.getValue( mySection+'/ComputingElements' , self.computingElementList )
    self.addComputingElement( self.computingElementList )

    self.siteName = gConfig.getValue( mySection+'/SiteName' , self.siteName )

  def addComputingElement(self, ceList):
    """
    Check if a CE object for the current CE is available,
    instantiate one if necessary
    """
    for CE in ceList:
      if CE not in self.computingElementDict:
        ceFactory = ComputingElementFactory( )
        ceInstance = ceFactory.getCE( ceName = CE )
        if not ceInstance['OK']:
          # NOTE(review): a single broken CE aborts adding the remaining
          # ones in ceList — confirm this is intended.
          self.log.error('Can not create CE object:', ceInstance['Message'])
          return
        self.computingElementDict[CE] = ceInstance['Value'].ceConfigDict
        # add the 'CE' instance at the end to avoid being overwritten
        self.computingElementDict[CE]['CE'] = ceInstance['Value']

  def _submitPilots( self, workDir, taskQueueDict, pilotOptions, pilotsToSubmit,
                     ceMask, submitPrivatePilot, privateTQ, proxy, pilotsPerJob ):
    """
    This method does the actual pilot submission to the DIRAC CE
    The logic is as follows:
    - If there are no available CE it return error
    - If there is no queue available in the CE's, it returns error
    - It creates a temp directory
    - It prepare a PilotScript
    """

    taskQueueID = taskQueueDict['TaskQueueID']
    # ownerDN = taskQueueDict['OwnerDN']

    submittedPilots = 0

    # if self.computingElement not in self.computingElementDict:
    #  # Since we can exclude CEs from the list, it may become empty
    #  return S_ERROR( ERROR_CE )

    pilotRequirements = []
    pilotRequirements.append( ( 'CPUTime', taskQueueDict['CPUTime'] ) )
    # do we need to care about anything else?
    pilotRequirementsString = str( pilotRequirements )

    # Check that there are available queues for the Jobs:
    if self.enableListMatch:
      availableQueues = []
      # now = Time.dateTime()
      # listMatchCache stores False-able results, hence the explicit == False.
      cachedAvailableQueues = self.listMatchCache.get( pilotRequirementsString )
      if cachedAvailableQueues == False:
        availableQueues = self._listQueues( pilotRequirements )
        if availableQueues != False:
          self.listMatchCache.add( pilotRequirementsString, self.listMatchDelay, availableQueues )
          self.log.verbose( 'Available Queues for TaskQueue ',  "%s: %s" % ( taskQueueID, str(availableQueues) ) )
      else:
        availableQueues = cachedAvailableQueues

      if not availableQueues:
        return S_ERROR( ERROR_CE + ' TQ: %d' % taskQueueID )

    # Work in a scratch directory; restored (and removed) before returning.
    baseDir = os.getcwd()
    workingDirectory = tempfile.mkdtemp( prefix= 'TQ_%s_' % taskQueueID, dir = workDir )
    self.log.verbose( 'Using working Directory:', workingDirectory )
    os.chdir( workingDirectory )

    # set the Site Name
    pilotOptions.append( "-n '%s'" % self.siteName)

    # submit pilots for every CE available
    for CE in self.computingElementDict.keys():
      ceName = CE
      computingElement = self.computingElementDict[CE]['CE']

      # add possible requirements from Site and CE
      for req, val in getResourceDict( ceName ).items():
        pilotOptions.append( "-o '/AgentJobRequirements/%s=%s'" % ( req, val ) )

      ceConfigDict = self.computingElementDict[CE]

      if 'ClientPlatform' in ceConfigDict:
        pilotOptions.append( "-p '%s'" % ceConfigDict['ClientPlatform'])

      if 'SharedArea' in ceConfigDict:
        pilotOptions.append( "-o '/LocalSite/SharedArea=%s'" % ceConfigDict['SharedArea'] )

      # if 'CPUScalingFactor' in ceConfigDict:
      #  pilotOptions.append( "-o '/LocalSite/CPUScalingFactor=%s'" % ceConfigDict['CPUScalingFactor'] )
      #
      # if 'CPUNormalizationFactor' in ceConfigDict:
      #  pilotOptions.append( "-o '/LocalSite/CPUNormalizationFactor=%s'" % ceConfigDict['CPUNormalizationFactor'] )

      self.log.info( "pilotOptions: ", ' '.join(pilotOptions))

      httpProxy = ''
      if 'HttpProxy' in ceConfigDict:
        httpProxy = ceConfigDict['HttpProxy']

      if 'JobExecDir' in ceConfigDict:
        pilotExecDir = ceConfigDict['JobExecDir']
      # NOTE(review): if the first CE has no 'JobExecDir', pilotExecDir is
      # used below while still unbound (NameError) — confirm the CS always
      # provides this key.

      try:
        pilotScript = self._writePilotScript( workingDirectory, pilotOptions, proxy, httpProxy, pilotExecDir )
      except:
        self.log.exception( ERROR_SCRIPT )
        try:
          os.chdir( baseDir )
          shutil.rmtree( workingDirectory )
        except:
          pass
        return S_ERROR( ERROR_SCRIPT )

      self.log.info("Pilots to submit: ", pilotsToSubmit)
      while submittedPilots < pilotsToSubmit:
        # Find out how many pilots can be submitted
        ret = computingElement.available( )
        if not ret['OK']:
          self.log.error('Can not determine if pilot should be submitted: ', ret['Message'])
          break
        maxPilotsToSubmit = ret['Value']
        self.log.info("Submit Pilots: ", maxPilotsToSubmit)
        if not maxPilotsToSubmit:
          break
        # submit the pilots and then check again
        for _i in range( min( maxPilotsToSubmit, pilotsToSubmit - submittedPilots ) ):
          submission = computingElement.submitJob(pilotScript, '', '')
          if not submission['OK']:
            self.log.error('Pilot submission failed: ', submission['Message'])
            # cleanup
            try:
              os.chdir( baseDir )
              shutil.rmtree( workingDirectory )
            except:
              pass
            return S_ERROR('Pilot submission failed after ' + str(submittedPilots) + ' pilots submitted successful')
          submittedPilots += 1
          # let the batch system some time to digest the submitted job
          time.sleep(1)
      #next CE

    try:
      os.chdir( baseDir )
      shutil.rmtree( workingDirectory )
    except:
      pass

    return S_OK(submittedPilots)

  def _listQueues( self, pilotRequirements ):
    """
    For each defined CE return the list of Queues with available, running and waiting slots,
    matching the requirements of the pilots.
    Currently only CPU time is considered
    """
    result = self.computingElement.available( pilotRequirements )
    if not result['OK']:
      self.log.error( 'Can not determine available queues', result['Message'] )
      return False
    return result['Value']

  def _writePilotScript( self, workingDirectory, pilotOptions, proxy, httpProxy, pilotExecDir ):
    """
    Prepare the script to execute the pilot
    For the moment it will do like Grid Pilots, a full DIRAC installation

    It assumes that the pilot script will have access to the submit working directory
    """
    try:
      # Ship proxy, dirac-pilot and dirac-install inline as bz2+base64 blobs.
      compressedAndEncodedProxy = base64.encodestring( bz2.compress( proxy.dumpAllToString()['Value'] ) ).replace('\n','')
      compressedAndEncodedPilot = base64.encodestring( bz2.compress( open( self.pilot, "rb" ).read(), 9 ) ).replace('\n','')
      compressedAndEncodedInstall = base64.encodestring( bz2.compress( open( self.install, "rb" ).read(), 9 ) ).replace('\n','')
    except:
      self.log.exception('Exception during file compression of proxy, dirac-pilot or dirac-install')
      return S_ERROR('Exception during file compression of proxy, dirac-pilot or dirac-install')

    # NOTE(review): the embedded script's indentation was reconstructed
    # from the DIRAC 2-space convention; confirm against upstream.
    localPilot = """#!/bin/bash
/usr/bin/env python << EOF
#
import os, tempfile, sys, shutil, base64, bz2
try:
  pilotExecDir = '%(pilotExecDir)s'
  if not pilotExecDir:
    pilotExecDir = None
  pilotWorkingDirectory = tempfile.mkdtemp( suffix = 'pilot', prefix = 'DIRAC_', dir = pilotExecDir )
  os.chdir( pilotWorkingDirectory )
  open( 'proxy', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedProxy)s" ) ) )
  open( '%(pilotScript)s', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedPilot)s" ) ) )
  open( '%(installScript)s', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedInstall)s" ) ) )
  os.chmod("proxy",0600)
  os.chmod("%(pilotScript)s",0700)
  os.chmod("%(installScript)s",0700)
  if "LD_LIBRARY_PATH" not in os.environ:
    os.environ["LD_LIBRARY_PATH"]=""
  os.environ["X509_USER_PROXY"]=os.path.join(pilotWorkingDirectory, 'proxy')
  if "%(httpProxy)s":
    os.environ["HTTP_PROXY"]="%(httpProxy)s"
  os.environ["X509_CERT_DIR"]=os.path.join(pilotWorkingDirectory, 'etc/grid-security/certificates')
  # TODO: structure the output
  print '==========================================================='
  print 'Environment of execution host'
  for key in os.environ.keys():
    print key + '=' + os.environ[key]
  print '==========================================================='
except Exception, x:
  print >> sys.stderr, x
  sys.exit(-1)
cmd = "python %(pilotScript)s %(pilotOptions)s"
print 'Executing: ', cmd
sys.stdout.flush()
os.system( cmd )
shutil.rmtree( pilotWorkingDirectory )

EOF
""" % { 'compressedAndEncodedProxy': compressedAndEncodedProxy,
        'compressedAndEncodedPilot': compressedAndEncodedPilot,
        'compressedAndEncodedInstall': compressedAndEncodedInstall,
        'httpProxy': httpProxy,
        'pilotScript': os.path.basename(self.pilot),
        'installScript': os.path.basename(self.install),
        'pilotOptions': ' '.join( pilotOptions ),
        'pilotExecDir': pilotExecDir }

    fd, name = tempfile.mkstemp( suffix = '_pilotwrapper.py', prefix = 'DIRAC_', dir=workingDirectory)
    pilotWrapper = os.fdopen(fd, 'w')
    pilotWrapper.write( localPilot )
    pilotWrapper.close()

    return name

  def _getPilotProxyFromDIRACGroup( self, ownerDN, ownerGroup, requiredTimeLeft ):
    """
    Download a limited pilot proxy with VOMS extensions depending on the group
    """
    #Assign VOMS attribute
    vomsAttr = CS.getVOMSAttributeForGroup( ownerGroup )
    if not vomsAttr:
      self.log.info( "Downloading a proxy without VOMS extensions for %s@%s" % ( ownerDN, ownerGroup ) )
      return gProxyManager.downloadProxy( ownerDN, ownerGroup, limited = True,
                                          requiredTimeLeft = requiredTimeLeft )
    else:
      self.log.info( "Downloading a proxy with '%s' VOMS extension for %s@%s" % ( vomsAttr, ownerDN, ownerGroup ) )
      return gProxyManager.downloadVOMSProxy( ownerDN,
                                              ownerGroup,
                                              limited = True,
                                              requiredTimeLeft = requiredTimeLeft,
                                              requiredVOMSAttribute = vomsAttr )
|
sposs/DIRAC
|
WorkloadManagementSystem/private/DIRACPilotDirector.py
|
Python
|
gpl-3.0
| 13,504
|
[
"DIRAC"
] |
ec9061551a2ca155f946deabb775244e96f838d1739d5a963d3b26ac759bb4d4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# freeseer - vga/presentation capture software
#
# Copyright (C) 2011, 2013 Free and Open Source Software Learning Centre
# http://fosslc.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For support, questions, suggestions or any other inquiries, visit:
# http://wiki.github.com/Freeseer/freeseer/
import os
import tempfile
from freeseer.framework.config import options
from freeseer.framework.config.core import Config
class TestConfig(Config):
    """Minimal Config subclass used as a fixture by the storage tests."""
    # Defaults asserted by ConfigStorageTest.test_load below.
    option1 = options.StringOption('hello')
    option2 = options.IntegerOption(1337)
class ConfigStorageTest(object):
    """Base class for testing filesystem-based ConfigStorage classes.

    NOTE(review): this mixin calls ``self.assertEqual``/``setUp``/``tearDown``
    — concrete subclasses are presumably also derived from
    ``unittest.TestCase``; confirm at the subclass definitions.
    """
    # Subclasses must override these with the storage class under test and
    # the expected on-disk representations.
    CONFIG_STORAGE_CLASS = None
    INITIAL_LOAD_CONFIG = ''
    AFTER_STORE_CONFIG = ''

    def setUp(self):
        # Reserve a unique temp path, then delete the file so every test
        # starts without a pre-existing config file.
        fd_int, self.filepath = tempfile.mkstemp()
        os.close(fd_int)
        os.remove(self.filepath)
        self.storage = self.CONFIG_STORAGE_CLASS(self.filepath)
        self.config = TestConfig()

    def tearDown(self):
        os.remove(self.filepath)

    def test_load(self):
        """Tests that load(...) correctly populates a TestConfig instance using a CONFIG_STORAGE_CLASS instance."""
        # Loading with no file present must leave the defaults intact.
        self.storage.load(self.config, 'this_section')
        self.assertEqual(self.config.option1, 'hello')
        self.assertEqual(self.config.option2, 1337)
        # Loading a written file must override the defaults.
        with open(self.filepath, 'w') as fd:
            fd.write(self.INITIAL_LOAD_CONFIG)
        self.storage.load(self.config, 'this_section')
        self.assertEqual(self.config.option1, 'othello')
        self.assertEqual(self.config.option2, 0)

    def test_store(self):
        """Tests that store(...) correctly persists a TestConfig instance using a CONFIG_STORAGE_CLASS instance."""
        self.config.option1 = 'something_new'
        self.config.option2 = 10
        self.storage.store(self.config, 'this_section')
        with open(self.filepath) as fd:
            self.assertEqual(fd.read(), self.AFTER_STORE_CONFIG)
|
Freeseer/freeseer
|
src/freeseer/tests/framework/config/persist/__init__.py
|
Python
|
gpl-3.0
| 2,620
|
[
"VisIt"
] |
ccb36ba990715e4eef9050d50a9c6360d1db55c6159361fbfc6dc760e59f174f
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
"""
import os.path
import math
import logging
import rmgpy.constants as constants
from rmgpy.species import Species
from copy import deepcopy
from base import Database, Entry, makeLogicNode, DatabaseError
from rmgpy.molecule import Molecule, Atom, Bond, Group, atomTypes
################################################################################
def saveEntry(f, entry):
    """
    Write a Pythonic string representation of the given `entry` in the solvation
    database to the file object `f`.
    """
    f.write('entry(\n')
    f.write('    index = {0:d},\n'.format(entry.index))
    f.write('    label = "{0}",\n'.format(entry.label))

    if isinstance(entry.item, Molecule):
        if Molecule(SMILES=entry.item.toSMILES()).isIsomorphic(entry.item):
            # The SMILES representation accurately describes the molecule, so we can save it that way.
            f.write('    molecule = "{0}",\n'.format(entry.item.toSMILES()))
        else:
            # Round-trip through SMILES is lossy; fall back to adjacency list.
            f.write('    molecule = \n')
            f.write('"""\n')
            f.write(entry.item.toAdjacencyList(removeH=False))
            f.write('""",\n')
    elif isinstance(entry.item, Group):
        f.write('    group = \n')
        f.write('"""\n')
        f.write(entry.item.toAdjacencyList())
        f.write('""",\n')
    elif entry.item is not None:
        # e.g. a logic node; serialized via its string form.
        f.write('    group = "{0}",\n'.format(entry.item))

    if isinstance(entry.data, SoluteData):
        f.write('    solute = SoluteData(\n')
        f.write('        S = {0!r},\n'.format(entry.data.S))
        f.write('        B = {0!r},\n'.format(entry.data.B))
        f.write('        E = {0!r},\n'.format(entry.data.E))
        f.write('        L = {0!r},\n'.format(entry.data.L))
        f.write('        A = {0!r},\n'.format(entry.data.A))
        if entry.data.V is not None: f.write('        V = {0!r},\n'.format(entry.data.V))
        f.write('    ),\n')
    elif isinstance(entry.data, SolventData):
        f.write('    solvent = SolventData(\n')
        f.write('        s_g = {0!r},\n'.format(entry.data.s_g))
        f.write('        b_g = {0!r},\n'.format(entry.data.b_g))
        f.write('        e_g = {0!r},\n'.format(entry.data.e_g))
        f.write('        l_g = {0!r},\n'.format(entry.data.l_g))
        f.write('        a_g = {0!r},\n'.format(entry.data.a_g))
        f.write('        c_g = {0!r},\n'.format(entry.data.c_g))
        f.write('        s_h = {0!r},\n'.format(entry.data.s_h))
        f.write('        b_h = {0!r},\n'.format(entry.data.b_h))
        f.write('        e_h = {0!r},\n'.format(entry.data.e_h))
        f.write('        l_h = {0!r},\n'.format(entry.data.l_h))
        f.write('        a_h = {0!r},\n'.format(entry.data.a_h))
        f.write('        c_h = {0!r},\n'.format(entry.data.c_h))
        f.write('        A = {0!r},\n'.format(entry.data.A))
        f.write('        B = {0!r},\n'.format(entry.data.B))
        f.write('        C = {0!r},\n'.format(entry.data.C))
        f.write('        D = {0!r},\n'.format(entry.data.D))
        f.write('        E = {0!r},\n'.format(entry.data.E))
        f.write('        alpha = {0!r},\n'.format(entry.data.alpha))
        f.write('        beta = {0!r},\n'.format(entry.data.beta))
        f.write('        eps = {0!r},\n'.format(entry.data.eps))
        f.write('    ),\n')
    elif entry.data is None:
        f.write('    solute = None,\n')
    else:
        raise DatabaseError("Not sure how to save {0!r}".format(entry.data))

    f.write('    shortDesc = u"""')
    try:
        f.write(entry.shortDesc.encode('utf-8'))
    except:
        # Fall back to ASCII if the description cannot be UTF-8 encoded.
        f.write(entry.shortDesc.strip().encode('ascii', 'ignore')+ "\n")
    f.write('""",\n')
    f.write('    longDesc = \n')
    f.write('u"""\n')
    try:
        f.write(entry.longDesc.strip().encode('utf-8') + "\n")
    except:
        f.write(entry.longDesc.strip().encode('ascii', 'ignore')+ "\n")
    f.write('""",\n')

    f.write(')\n\n')
def generateOldLibraryEntry(data):
    """
    Return a list of values used to save entries to the old-style RMG
    thermo database based on the thermodynamics object `data`.

    Not supported for solvation data; always raises ``NotImplementedError``.
    """
    raise NotImplementedError()
def processOldLibraryEntry(data):
    """
    Process a list of parameters `data` as read from an old-style RMG
    thermo database, returning the corresponding thermodynamics object.

    Not supported for solvation data; always raises ``NotImplementedError``.
    """
    raise NotImplementedError()
class SolventData():
    """
    Stores Abraham/Mintz parameters for characterizing a solvent.
    """

    def __init__(self, s_h=None, b_h=None, e_h=None, l_h=None, a_h=None,
                 c_h=None, s_g=None, b_g=None, e_g=None, l_g=None, a_g=None, c_g=None, A=None, B=None,
                 C=None, D=None, E=None, alpha=None, beta=None, eps=None):
        # Mintz coefficients (enthalpy of solvation).
        (self.s_h, self.b_h, self.e_h,
         self.l_h, self.a_h, self.c_h) = (s_h, b_h, e_h, l_h, a_h, c_h)
        # Abraham coefficients (Gibbs free energy of solvation).
        (self.s_g, self.b_g, self.e_g,
         self.l_g, self.a_g, self.c_g) = (s_g, b_g, e_g, l_g, a_g, c_g)
        # These are parameters for calculating viscosity
        (self.A, self.B, self.C, self.D, self.E) = (A, B, C, D, E)
        # These are SOLUTE parameters used for intrinsic rate correction in H-abstraction rxns
        self.alpha = alpha
        self.beta = beta
        # This is the dielectric constant
        self.eps = eps

    def getHAbsCorrection(self):
        """
        If solvation is on, this will give the log10 of the ratio of the intrinsic rate
        constants log10(k_sol/k_gas) for H-abstraction rxns
        """
        return -8.3 * self.alpha * self.beta

    def getSolventViscosity(self, T):
        """
        Returns the viscosity in Pa s, according to correlation in Perry's Handbook
        and coefficients in DIPPR
        """
        exponent = self.A
        exponent += self.B / T
        exponent += self.C * math.log(T)
        exponent += self.D * (T ** self.E)
        return math.exp(exponent)
class SolvationCorrection():
    """
    Stores corrections for enthalpy, entropy, and Gibbs free energy when a species is solvated.
    Enthalpy and Gibbs free energy is in J/mol; entropy is in J/mol/K
    """

    def __init__(self, enthalpy=None, gibbs=None, entropy=None):
        # Note: argument order is (enthalpy, gibbs, entropy).
        self.enthalpy = enthalpy
        self.gibbs = gibbs
        self.entropy = entropy
class SoluteData():
    """
    Stores Abraham parameters to characterize a solute
    """

    def __init__(self, S=None, B=None, E=None, L=None, A=None, V=None, comment=""):
        self.S = S
        self.B = B
        self.E = E
        self.L = L
        self.A = A
        self.V = V
        self.comment = comment

    def __repr__(self):
        return "SoluteData(S={0},B={1},E={2},L={3},A={4},comment={5!r})".format(self.S, self.B, self.E, self.L, self.A, self.comment)

    def getStokesDiffusivity(self, T, solventViscosity):
        """
        Get diffusivity of solute using the Stokes-Einstein sphere relation.
        Radius is found from the McGowan volume.
        solventViscosity should be given in kg/s/m which equals Pa.s
        (water is about 9e-4 Pa.s at 25C, propanol is 2e-3 Pa.s)
        Returns D in m2/s
        """
        # radius in meters; V is the McGowan volume in cm3/mol/100
        radius = math.pow((75 * self.V / constants.pi / constants.Na), (1.0 / 3.0)) / 100
        return constants.kB * T / 6 / constants.pi / solventViscosity / radius  # m2/s

    def setMcGowanVolume(self, species):
        """
        Find and store the McGowan's Volume
        Returned volumes are in cm^3/mol/100 (see note below)
        See Table 2 in Abraham & McGowan, Chromatographia Vol. 23, No. 4, p. 243. April 1987
        doi: 10.1007/BF02311772

        "V is scaled to have similar values to the other
        descriptors by division by 100 and has units of (cm3mol-1/100)."

        the contibutions in this function are in cm3/mol, and the division by 100 is done at the very end.
        """
        molecule = species.molecule[0]  # any will do, use the first.
        total = 0
        for atom in molecule.atoms:
            if atom.isCarbon():
                contribution = 16.35
            elif atom.element.number == 7:  # nitrogen, do this way if we don't have an isElement method
                contribution = 14.39
            elif atom.isOxygen():
                contribution = 12.43
            elif atom.isHydrogen():
                contribution = 8.71
            elif atom.element.number == 16:  # sulfur
                contribution = 22.91
            else:
                raise Exception()
            total += contribution
            for _bond in molecule.getBonds(atom):
                # divide contribution in half since all bonds would be counted twice this way
                total -= 6.56 / 2
        self.V = total / 100  # division by 100 to get units correct.
################################################################################
################################################################################
class SolventLibrary(Database):
    """
    A class for working with a RMG solvent library.
    """
    def __init__(self, label='', name='', shortDesc='', longDesc=''):
        Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)

    def loadEntry(self,
                  index,
                  label,
                  solvent,
                  molecule=None,
                  reference=None,
                  referenceType='',
                  shortDesc='',
                  longDesc='',
                  ):
        """Load one ``entry(...)`` item from a solvent library file."""
        spc = molecule
        if molecule is not None:
            # Accept the structure either as SMILES or as an adjacency list.
            try:
                spc = Species().fromSMILES(molecule)
            except:
                logging.debug("Solvent '{0}' does not have a valid SMILES '{1}'" .format(label, molecule))
                try:
                    spc = Species().fromAdjacencyList(molecule)
                except:
                    logging.error("Can't understand '{0}' in solute library '{1}'".format(molecule, self.name))
                    raise
            # NOTE(review): nesting reconstructed from mangled indentation —
            # resonance isomers are generated whenever a structure was
            # supplied; confirm against upstream.
            spc.generateResonanceIsomers()

        self.entries[label] = Entry(
            index = index,
            label = label,
            item = spc,
            data = solvent,
            reference = reference,
            referenceType = referenceType,
            shortDesc = shortDesc,
            longDesc = longDesc.strip(),
        )

    def load(self, path):
        """
        Load the solvent library from the given path
        """
        Database.load(self, path, local_context={'SolventData': SolventData}, global_context={})

    def saveEntry(self, f, entry):
        """
        Write the given `entry` in the solute database to the file object `f`.
        """
        return saveEntry(f, entry)

    def getSolventData(self, label):
        """
        Get a solvent's data from its name
        """
        return self.entries[label].data

    def getSolventStructure(self, label):
        """
        Get a solvent's molecular structure as SMILES or adjacency list from its name
        """
        return self.entries[label].item
class SoluteLibrary(Database):
    """
    A class for working with a RMG solute library. Not currently used.

    Each entry maps a solute label to its :class:`SoluteData` together with
    the solute's molecular structure.
    """
    def __init__(self, label='', name='', shortDesc='', longDesc=''):
        Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)
    def loadEntry(self,
                  index,
                  label,
                  molecule,
                  solute,
                  reference=None,
                  referenceType='',
                  shortDesc='',
                  longDesc='',
                  ):
        """
        Add one entry to the library, as called from an `entry(...)` statement
        in the library file.

        `molecule` is a SMILES string or adjacency list describing the solute;
        `solute` is its SoluteData object.
        """
        # Try SMILES first, then fall back to an adjacency list.  Bare
        # `except:` replaced with `except Exception:` so that
        # KeyboardInterrupt/SystemExit still propagate.
        try:
            spc = Species().fromSMILES(molecule)
        except Exception:
            logging.debug("Solute '{0}' does not have a valid SMILES '{1}'" .format(label, molecule))
            try:
                spc = Species().fromAdjacencyList(molecule)
            except Exception:
                logging.error("Can't understand '{0}' in solute library '{1}'".format(molecule,self.name))
                raise
        self.entries[label] = Entry(
            index = index,
            label = label,
            item = spc,
            data = solute,
            reference = reference,
            referenceType = referenceType,
            shortDesc = shortDesc,
            longDesc = longDesc.strip(),
        )
    def load(self, path):
        """
        Load the solute library from the given path
        """
        Database.load(self, path, local_context={'SoluteData': SoluteData}, global_context={})
    def saveEntry(self, f, entry):
        """
        Write the given `entry` in the solute database to the file object `f`.
        """
        return saveEntry(f, entry)
    def generateOldLibraryEntry(self, data):
        """
        Return a list of values used to save entries to the old-style RMG
        thermo database based on the thermodynamics object `data`.
        """
        return generateOldLibraryEntry(data)
    def processOldLibraryEntry(self, data):
        """
        Process a list of parameters `data` as read from an old-style RMG
        thermo database, returning the corresponding thermodynamics object.
        """
        return processOldLibraryEntry(data)
################################################################################
class SoluteGroups(Database):
    """
    A class for working with an RMG solute group additivity database.
    """
    def __init__(self, label='', name='', shortDesc='', longDesc=''):
        Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)
    def loadEntry(self,
                  index,
                  label,
                  group,
                  solute,
                  reference=None,
                  referenceType='',
                  shortDesc='',
                  longDesc='',
                  ):
        """
        Create an :class:`Entry` from one `entry(...)` call in a group file
        and register it under `label`.
        """
        # A group definition may be a logical combination of other nodes
        # (OR{...}, AND{...}, NOT OR{...}, NOT AND{...}) instead of an
        # adjacency list; dispatch on the (case-insensitive) prefix.
        is_logic_node = group.upper().startswith(('OR{', 'AND{', 'NOT OR{', 'NOT AND{'))
        item = makeLogicNode(group) if is_logic_node else Group().fromAdjacencyList(group)
        self.entries[label] = Entry(
            index=index,
            label=label,
            item=item,
            data=solute,
            reference=reference,
            referenceType=referenceType,
            shortDesc=shortDesc,
            longDesc=longDesc.strip(),
        )
    def saveEntry(self, f, entry):
        """Serialize `entry` of this thermo database into file object `f`."""
        return saveEntry(f, entry)
    def generateOldLibraryEntry(self, data):
        """
        Convert the thermodynamics object `data` into the list-of-values form
        used by the old-style RMG thermo database.
        """
        return generateOldLibraryEntry(data)
    def processOldLibraryEntry(self, data):
        """
        Build a thermodynamics object from the parameter list `data` read
        from an old-style RMG thermo database.
        """
        return processOldLibraryEntry(data)
################################################################################
class SolvationDatabase(object):
    """
    A class for working with the RMG solvation database.

    Holds a solvent library and a solute library plus three Platts
    group-additivity databases ('abraham', 'nonacentered', 'radical'),
    and provides solute-descriptor lookup/estimation and solvation
    thermochemistry corrections.
    """
    def __init__(self):
        # Solvent and solute libraries always exist (possibly empty);
        # the group databases are populated by loadGroups().
        self.libraries = {}
        self.libraries['solvent'] = SolventLibrary()
        self.libraries['solute'] = SoluteLibrary()
        self.groups = {}
        # Name bindings made available when the database files are executed.
        self.local_context = {
            'SoluteData': SoluteData,
            'SolventData': SolventData
        }
        self.global_context = {}
    def __reduce__(self):
        """
        A helper function used when pickling a SolvationDatabase object.
        """
        d = {
            'libraries': self.libraries,
            'groups': self.groups,
        }
        return (SolvationDatabase, (), d)
    def __setstate__(self, d):
        """
        A helper function used when unpickling a SolvationDatabase object.
        """
        self.libraries = d['libraries']
        self.groups = d['groups']
    def load(self, path, libraries=None, depository=True):
        """
        Load the solvation database from the given `path` on disk, where `path`
        points to the top-level folder of the solvation database.
        Load the solvent and solute libraries, then the solute groups.

        NOTE(review): the `libraries` and `depository` parameters are
        currently unused by this implementation.
        """
        self.libraries['solvent'].load(os.path.join(path,'libraries','solvent.py'))
        self.libraries['solute'].load(os.path.join(path,'libraries','solute.py'))
        self.loadGroups(os.path.join(path, 'groups'))
    def getSolventData(self, solvent_name):
        """
        Return the SolventData for the solvent named `solvent_name`,
        raising DatabaseError if it is not in the solvent library.
        """
        try:
            solventData = self.libraries['solvent'].getSolventData(solvent_name)
        except:
            # NOTE(review): bare except remaps any failure (normally a
            # KeyError from the library lookup) to DatabaseError.
            raise DatabaseError('Solvent {0!r} not found in database'.format(solvent_name))
        return solventData
    def getSolventStructure(self, solvent_name):
        """
        Return the molecular structure stored for the solvent named
        `solvent_name` (may be None if the library entry had no structure),
        raising DatabaseError if the solvent is not in the library.
        """
        try:
            solventStructure = self.libraries['solvent'].getSolventStructure(solvent_name)
        except:
            # NOTE(review): bare except, same remapping as getSolventData.
            raise DatabaseError('Solvent {0!r} not found in database'.format(solvent_name))
        return solventStructure
    def loadGroups(self, path):
        """
        Load the solute database from the given `path` on disk, where `path`
        points to the top-level folder of the solute database.
        Three sets of groups for additivity, atom-centered ('abraham'), non atom-centered
        ('nonacentered'), and radical corrections ('radical')
        """
        logging.info('Loading Platts additivity group database from {0}...'.format(path))
        self.groups = {}
        # NOTE(review): this assumes SoluteGroups.load() returns the loaded
        # database object -- confirm against the base Database class.
        self.groups['abraham'] = SoluteGroups(label='abraham').load(os.path.join(path, 'abraham.py' ), self.local_context, self.global_context)
        self.groups['nonacentered'] = SoluteGroups(label='nonacentered').load(os.path.join(path, 'nonacentered.py' ), self.local_context, self.global_context)
        self.groups['radical'] = SoluteGroups(label='radical').load(os.path.join(path, 'radical.py' ), self.local_context, self.global_context)
    def save(self, path):
        """
        Save the solvation database to the given `path` on disk, where `path`
        points to the top-level folder of the solvation database.
        """
        path = os.path.abspath(path)
        if not os.path.exists(path): os.mkdir(path)
        self.saveLibraries(os.path.join(path, 'libraries'))
        self.saveGroups(os.path.join(path, 'groups'))
    def saveLibraries(self, path):
        """
        Save the solute libraries to the given `path` on disk, where `path`
        points to the top-level folder of the solute libraries.
        """
        if not os.path.exists(path): os.mkdir(path)
        for library in self.libraries.keys():
            self.libraries[library].save(os.path.join(path, library+'.py'))
    def saveGroups(self, path):
        """
        Save the solute groups to the given `path` on disk, where `path`
        points to the top-level folder of the solute groups.
        """
        if not os.path.exists(path): os.mkdir(path)
        for group in self.groups.keys():
            self.groups[group].save(os.path.join(path, group+'.py'))
    def loadOld(self, path):
        """
        Load the old RMG solute database from the given `path` on disk, where
        `path` points to the top-level folder of the old RMG database.
        """
        # Each folder containing a Dictionary.txt/Library.txt pair under
        # thermo_libraries is loaded as a separate SoluteLibrary.
        for (root, dirs, files) in os.walk(os.path.join(path, 'thermo_libraries')):
            if os.path.exists(os.path.join(root, 'Dictionary.txt')) and os.path.exists(os.path.join(root, 'Library.txt')):
                library = SoluteLibrary(label=os.path.basename(root), name=os.path.basename(root))
                library.loadOld(
                    dictstr = os.path.join(root, 'Dictionary.txt'),
                    treestr = '',
                    libstr = os.path.join(root, 'Library.txt'),
                    numParameters = 5,
                    numLabels = 1,
                    pattern = False,
                )
                library.label = os.path.basename(root)
                self.libraries[library.label] = library
        self.groups = {}
        self.groups['abraham'] = SoluteGroups(label='abraham', name='Platts Group Additivity Values for Abraham Solute Descriptors').loadOld(
            dictstr = os.path.join(path, 'thermo_groups', 'Abraham_Dictionary.txt'),
            treestr = os.path.join(path, 'thermo_groups', 'Abraham_Tree.txt'),
            libstr = os.path.join(path, 'thermo_groups', 'Abraham_Library.txt'),
            numParameters = 5,
            numLabels = 1,
            pattern = True,
        )
    def saveOld(self, path):
        """
        Save the old RMG Abraham database to the given `path` on disk, where
        `path` points to the top-level folder of the old RMG database.
        """
        # Depository not used in old database, so it is not saved
        librariesPath = os.path.join(path, 'thermo_libraries')
        if not os.path.exists(librariesPath): os.mkdir(librariesPath)
        for library in self.libraries.values():
            libraryPath = os.path.join(librariesPath, library.label)
            if not os.path.exists(libraryPath): os.mkdir(libraryPath)
            library.saveOld(
                dictstr = os.path.join(libraryPath, 'Dictionary.txt'),
                treestr = '',
                libstr = os.path.join(libraryPath, 'Library.txt'),
            )
        groupsPath = os.path.join(path, 'thermo_groups')
        if not os.path.exists(groupsPath): os.mkdir(groupsPath)
        self.groups['abraham'].saveOld(
            dictstr = os.path.join(groupsPath, 'Abraham_Dictionary.txt'),
            treestr = os.path.join(groupsPath, 'Abraham_Tree.txt'),
            libstr = os.path.join(groupsPath, 'Abraham_Library.txt'),
        )
    def getSoluteData(self, species):
        """
        Return the solute descriptors for a given :class:`Species`
        object `species`. This function first searches the loaded libraries
        in order, returning the first match found, before falling back to
        estimation via Platts group additivity.
        """
        soluteData = None
        # Check the library first
        soluteData = self.getSoluteDataFromLibrary(species, self.libraries['solute'])
        if soluteData is not None:
            assert len(soluteData)==3, "soluteData should be a tuple (soluteData, library, entry)"
            soluteData[0].comment += "Data from solute library"
            soluteData = soluteData[0]
        else:
            # Solute not found in any loaded libraries, so estimate
            soluteData = self.getSoluteDataFromGroups(species)
        # No Platts group additivity for V, so set using atom sizes
        soluteData.setMcGowanVolume(species)
        # Return the resulting solute parameters S, B, E, L, A
        return soluteData
    def getAllSoluteData(self, species):
        """
        Return all possible sets of Abraham solute descriptors for a given
        :class:`Species` object `species`. The hits from the library come
        first, then the group additivity estimate. This method is useful
        for a generic search job. Right now, there should either be 1 or
        2 sets of descriptors, depending on whether or not we have a
        library entry.
        """
        soluteDataList = []
        # Data from solute library
        data = self.getSoluteDataFromLibrary(species, self.libraries['solute'])
        if data is not None:
            assert len(data) == 3, "soluteData should be a tuple (soluteData, library, entry)"
            data[0].comment += "Data from solute library"
            soluteDataList.append(data)
        # Estimate from group additivity
        # Make it a tuple
        data = (self.getSoluteDataFromGroups(species), None, None)
        soluteDataList.append(data)
        return soluteDataList
    def getSoluteDataFromLibrary(self, species, library):
        """
        Return the set of Abraham solute descriptors corresponding to a given
        :class:`Species` object `species` from the specified solute
        `library`. If `library` is a string, the list of libraries is searched
        for a library with that name. If no match is found in that library,
        ``None`` is returned. If no corresponding library is found, a
        :class:`DatabaseError` is raised.
        """
        # NOTE(review): iteritems() is Python 2 only; use items() under Python 3.
        for label, entry in library.entries.iteritems():
            if species.isIsomorphic(entry.item) and entry.data is not None:
                # Deep-copy so callers can mutate (e.g. append to .comment)
                # without altering the library entry.
                return (deepcopy(entry.data), library, entry)
        return None
    def getSoluteDataFromGroups(self, species):
        """
        Return the set of Abraham solute parameters corresponding to a given
        :class:`Species` object `species` by estimation using the Platts group
        additivity method. If no group additivity values are loaded, a
        :class:`DatabaseError` is raised.
        It averages (linearly) over the desciptors for each Molecule (resonance isomer)
        in the Species.
        """
        soluteData = SoluteData(0.0,0.0,0.0,0.0,0.0)
        count = 0
        comments = []
        # Accumulate descriptors over all resonance isomers, then average.
        for molecule in species.molecule:
            molecule.clearLabeledAtoms()
            molecule.updateAtomTypes()
            sdata = self.estimateSoluteViaGroupAdditivity(molecule)
            soluteData.S += sdata.S
            soluteData.B += sdata.B
            soluteData.E += sdata.E
            soluteData.L += sdata.L
            soluteData.A += sdata.A
            count += 1
            comments.append(sdata.comment)
        soluteData.S /= count
        soluteData.B /= count
        soluteData.E /= count
        soluteData.L /= count
        soluteData.A /= count
        # Print groups that are used for debugging purposes
        soluteData.comment = "Average of {0}".format(" and ".join(comments))
        return soluteData
    def transformLonePairs(self, molecule):
        """
        Changes lone pairs in a molecule to two radicals for purposes of finding
        solute data via group additivity. Transformed for each atom based on valency.

        Returns the transformed copy and a dict mapping each atom (of the
        copy) to the number of lone pairs converted.
        """
        saturatedStruct = molecule.copy(deep=True)
        addedToPairs = {}
        for atom in saturatedStruct.atoms:
            addedToPairs[atom] = 0
            if atom.lonePairs > 0:
                charge = atom.charge # Record this so we can conserve it when checking
                bonds = saturatedStruct.getBonds(atom)
                sumBondOrders = 0
                for key, bond in bonds.iteritems():
                    if bond.order == 'S': sumBondOrders += 1
                    if bond.order == 'D': sumBondOrders += 2
                    if bond.order == 'T': sumBondOrders += 3
                    if bond.order == 'B': sumBondOrders += 1.5 # We should always have 2 'B' bonds (but what about Cbf?)
                # Convert lone pairs to radical pairs until the atom reaches
                # the electron count expected for its valence group.
                if atomTypes['Val4'] in atom.atomType.generic: # Carbon, Silicon
                    while(atom.radicalElectrons + charge + sumBondOrders < 4):
                        atom.decrementLonePairs()
                        atom.incrementRadical()
                        atom.incrementRadical()
                        addedToPairs[atom] += 1
                if atomTypes['Val5'] in atom.atomType.generic: # Nitrogen
                    while(atom.radicalElectrons + charge + sumBondOrders < 3):
                        atom.decrementLonePairs()
                        atom.incrementRadical()
                        atom.incrementRadical()
                        addedToPairs[atom] += 1
                if atomTypes['Val6'] in atom.atomType.generic: # Oxygen, sulfur
                    while(atom.radicalElectrons + charge + sumBondOrders < 2):
                        atom.decrementLonePairs()
                        atom.incrementRadical()
                        atom.incrementRadical()
                        addedToPairs[atom] += 1
                if atomTypes['Val7'] in atom.atomType.generic: # Chlorine
                    while(atom.radicalElectrons + charge + sumBondOrders < 1):
                        atom.decrementLonePairs()
                        atom.incrementRadical()
                        atom.incrementRadical()
                        addedToPairs[atom] += 1
        saturatedStruct.update()
        saturatedStruct.updateLonePairs()
        return saturatedStruct, addedToPairs
    def removeHBonding(self, saturatedStruct, addedToRadicals, addedToPairs, soluteData):
        """
        Undo the saturation performed in estimateSoluteViaGroupAdditivity
        (remove the added H atoms and restore radicals/lone pairs) and add
        radical-group corrections to `soluteData`.
        """
        # Remove hydrogen bonds and restore the radical
        for atom in addedToRadicals:
            for H, bond in addedToRadicals[atom]:
                saturatedStruct.removeBond(bond)
                saturatedStruct.removeAtom(H)
                atom.incrementRadical()
        # Change transformed lone pairs back
        for atom in addedToPairs:
            if addedToPairs[atom] > 0:
                # NOTE(review): range(1, n) runs n-1 times, and these calls
                # are made on the structure rather than on `atom` (the
                # inverse of transformLonePairs operates per-atom) -- confirm
                # both points are intentional.
                for pair in range(1, addedToPairs[atom]):
                    saturatedStruct.decrementRadical()
                    saturatedStruct.decrementRadical()
                    saturatedStruct.incrementLonePairs()
        # Update Abraham 'A' H-bonding parameter for unsaturated struct
        for atom in saturatedStruct.atoms:
            # Iterate over heavy (non-hydrogen) atoms
            if atom.isNonHydrogen() and atom.radicalElectrons > 0:
                # NOTE(review): range(1, n) applies the radical correction
                # n-1 times for n radical electrons -- confirm off-by-one.
                for electron in range(1, atom.radicalElectrons):
                    # Get solute data for radical group
                    try:
                        self.__addGroupSoluteData(soluteData, self.groups['radical'], saturatedStruct, {'*':atom})
                    except KeyError: pass
        return soluteData
    def estimateSoluteViaGroupAdditivity(self, molecule):
        """
        Return the set of Abraham solute parameters corresponding to a given
        :class:`Molecule` object `molecule` by estimation using the Platts' group
        additivity method. If no group additivity values are loaded, a
        :class:`DatabaseError` is raised.
        """
        # For thermo estimation we need the atoms to already be sorted because we
        # iterate over them; if the order changes during the iteration then we
        # will probably not visit the right atoms, and so will get the thermo wrong
        molecule.sortAtoms()
        # Create the SoluteData object with the intercepts from the Platts groups
        soluteData = SoluteData(
            S = 0.277,
            B = 0.071,
            E = 0.248,
            L = 0.13,
            A = 0.003
        )
        addedToRadicals = {} # Dictionary of key = atom, value = dictionary of {H atom: bond}
        addedToPairs = {} # Dictionary of key = atom, value = # lone pairs changed
        saturatedStruct = molecule.copy(deep=True)
        # Convert lone pairs to radicals, then saturate with H.
        # Change lone pairs to radicals based on valency
        if sum([atom.lonePairs for atom in saturatedStruct.atoms]) > 0: # molecule contains lone pairs
            saturatedStruct, addedToPairs = self.transformLonePairs(saturatedStruct)
        # Now saturate radicals with H
        if sum([atom.radicalElectrons for atom in saturatedStruct.atoms]) > 0: # radical species
            addedToRadicals = saturatedStruct.saturate()
        # Saturated structure should now have no unpaired electrons, and only "expected" lone pairs
        # based on the valency
        for atom in saturatedStruct.atoms:
            # Iterate over heavy (non-hydrogen) atoms
            if atom.isNonHydrogen():
                # Get initial solute data from main group database. Every atom must
                # be found in the main abraham database
                try:
                    self.__addGroupSoluteData(soluteData, self.groups['abraham'], saturatedStruct, {'*':atom})
                except KeyError:
                    logging.error("Couldn't find in main abraham database:")
                    logging.error(saturatedStruct)
                    logging.error(saturatedStruct.toAdjacencyList())
                    raise
                # Get solute data for non-atom centered groups (being found in this group
                # database is optional)
                try:
                    self.__addGroupSoluteData(soluteData, self.groups['nonacentered'], saturatedStruct, {'*':atom})
                except KeyError: pass
        soluteData = self.removeHBonding(saturatedStruct, addedToRadicals, addedToPairs, soluteData)
        return soluteData
    def __addGroupSoluteData(self, soluteData, database, molecule, atom):
        """
        Determine the Platts group additivity solute data for the atom `atom`
        in the structure `structure`, and add it to the existing solute data
        `soluteData`.

        Raises KeyError if no matching node (with data) exists in `database`.
        """
        node0 = database.descendTree(molecule, atom, None)
        if node0 is None:
            raise KeyError('Node not found in database.')
        # It's possible (and allowed) that items in the tree may not be in the
        # library, in which case we need to fall up the tree until we find an
        # ancestor that has an entry in the library
        node = node0
        while node is not None and node.data is None:
            node = node.parent
        if node is None:
            raise KeyError('Node has no parent with data in database.')
        data = node.data
        comment = node.label
        # Follow string cross-references until a real data object is reached.
        # NOTE(review): `data is not None` is redundant after the isinstance
        # check, and an unresolvable label would loop forever; basestring is
        # Python 2 only.
        while isinstance(data, basestring) and data is not None:
            for entry in database.entries.values():
                if entry.label == data:
                    data = entry.data
                    comment = entry.label
                    break
        comment = '{0}({1})'.format(database.label, comment)
        # This code prints the hierarchy of the found node; useful for debugging
        #result = ''
        #while node is not None:
        #   result = ' -> ' + node + result
        #   node = database.tree.parent[node]
        #print result[4:]
        # Add solute data for each atom to the overall solute data for the molecule.
        soluteData.S += data.S
        soluteData.B += data.B
        soluteData.E += data.E
        soluteData.L += data.L
        soluteData.A += data.A
        soluteData.comment += comment + "+"
        return soluteData
    def calcH(self, soluteData, solventData):
        """
        Returns the enthalpy of solvation, at 298K, in J/mol
        """
        # Use Mintz parameters for solvents. Multiply by 1000 to go from kJ->J to maintain consistency
        delH = 1000*((soluteData.S*solventData.s_h)+(soluteData.B*solventData.b_h)+(soluteData.E*solventData.e_h)+(soluteData.L*solventData.l_h)+(soluteData.A*solventData.a_h)+solventData.c_h)
        return delH
    def calcG(self, soluteData, solventData):
        """
        Returns the Gibbs free energy of solvation, at 298K, in J/mol
        """
        # Use Abraham parameters for solvents to get log K
        logK = (soluteData.S*solventData.s_g)+(soluteData.B*solventData.b_g)+(soluteData.E*solventData.e_g)+(soluteData.L*solventData.l_g)+(soluteData.A*solventData.a_g)+solventData.c_g
        # Convert to delG with units of J/mol
        # (-RT ln K = -R * 298 * ln(10) * logK; 2.303 approximates ln(10))
        delG = -8.314*298*2.303*logK
        return delG
    def calcS(self, delG, delH):
        """
        Returns the entropy of solvation, at 298K, in J/mol/K
        """
        # From delG = delH - T*delS at T = 298 K.
        delS = (delH-delG)/298
        return delS
    def getSolvationCorrection(self, soluteData, solventData):
        """
        Given a soluteData and solventData object, calculates the enthalpy, entropy,
        and Gibbs free energy of solvation at 298 K. Returns a SolvationCorrection
        object
        """
        correction = SolvationCorrection(0.0, 0.0, 0.0)
        correction.enthalpy = self.calcH(soluteData, solventData)
        correction.gibbs = self.calcG(soluteData, solventData)
        correction.entropy = self.calcS(correction.gibbs, correction.enthalpy)
        return correction
    def checkSolventinInitialSpecies(self,rmg,solventStructure):
        """
        Given the instance of RMG class and the solventStructure, it checks whether the solvent is listed as one
        of the initial species.
        If the SMILES / adjacency list for all the solvents exist in the solvent library, it uses the solvent's
        molecular structure to determine whether the species is the solvent or not.
        If the solvent library does not have SMILES / adjacency list, then it uses the solvent's string name
        to determine whether the species is the solvent or not
        """
        for spec in rmg.initialSpecies:
            if solventStructure is not None:
                spec.isSolvent = spec.isIsomorphic(solventStructure)
            else:
                spec.isSolvent = rmg.solvent == spec.label
        if not any([spec.isSolvent for spec in rmg.initialSpecies]):
            if solventStructure is not None:
                logging.info('One of the initial species must be the solvent')
                raise Exception('One of the initial species must be the solvent')
            else:
                logging.info('One of the initial species must be the solvent with the same string name')
                raise Exception('One of the initial species must be the solvent with the same string name')
|
pierrelb/RMG-Py
|
rmgpy/data/solvation.py
|
Python
|
mit
| 39,130
|
[
"VisIt"
] |
393324c4d67a5b6fa12b15b11c8e51bdc021ba379b9c01daa1eb59d0d20bf084
|
import numpy as np
from numpy.lib.stride_tricks import as_strided
from fft_correlator import FFTCorrelator
from grid_spec import GridSpec
class DirectPIV(object):
    """
    Class for the initial Piv.
    The images are error checked and padded if needed.
    After initialization a GridSpec is set and a grid is created.
    It creates an object of the GridSpec for the images.
    By calling the function
    .. function:: piv.direct_piv.DirectPIV.correlate_frames
    the correlation is calculated.
    As a result of the process the velocities are set as attributes of the class.
    """
    def __init__(self, image_a, image_b, window_size=32, search_size=32, distance=16):
        """
        Initialization of the class.
        :param image_a: first image to be evaluated
        :param image_b: second image to be evaluated
        :param int window_size: size of the interrogation window on first image
        :param int search_size: size of the search window on second image
        :param int distance: distance between beginning of first interrogation window and second
        """
        image_a, image_b = self._check_images(image_a, image_b)
        self.grid_spec = GridSpec(image_a.shape, image_a.strides,
                                  window_size, search_size, distance)
        self._correlator = FFTCorrelator(window_size, search_size)
        self._set_images(image_a, image_b)
        # One velocity component per grid point; filled by correlate_frames().
        self.u = np.zeros(self.grid_spec.get_grid_shape())
        self.v = np.zeros_like(self.u)
        self._grid_creator()
    def _check_images(self, img_a, img_b):
        """
        Function for checking whether the images have the correct type (float64/double).
        The shape and strides are compared as well.
        parameter:
        img_a: first image
        img_b: second image
        Error:
        ValueError: shape or strides don't match
        Return:
        images in same order as input
        """
        # astype always copies, so the caller's arrays are never mutated.
        img_a, img_b = img_a.astype('float64'), img_b.astype('float64')
        if img_a.shape != img_b.shape:
            raise ValueError('Shape of the Images is not matching!')
        if img_a.strides != img_b.strides:
            raise ValueError('Stride of the Images is not matching!')
        return img_a, img_b
    def _set_images(self, img_a, img_b):
        """
        Set the correlation images of the PIV algorithm.
        If the window_size and search_size differ, the second image needs to be padded with zeros.
        :param img_a: first image
        :param img_b: second image
        """
        self.frame_a = img_a
        pad = self.grid_spec.pad
        # Zero-pad img_b by `pad` on every side; frame_b views the unpadded
        # interior so frame_a and frame_b cover the same region.
        self._padded_fb = np.pad(img_b, 2*(pad,), 'constant')
        if self.grid_spec.pad == 0:
            self.frame_b = self._padded_fb
        else:
            self.frame_b = self._padded_fb[pad:-pad, pad:-pad]
    def _grid_creator(self):
        """Creates a grid according to the GridSpec."""
        # grid_a/grid_b are zero-copy window views built with as_strided;
        # they alias frame_a and the padded frame_b, so no data is duplicated.
        # NOTE(review): correctness depends entirely on the shapes/strides
        # computed by GridSpec -- out-of-bounds strides would read garbage.
        shape_fa = self.grid_spec.get_interogation_grid_shape()
        shape_fb = self.grid_spec.get_search_grid_shape()
        strides_fa = self.grid_spec.get_interogation_grid_strides()
        strides_fb = self.grid_spec.get_search_grid_strides()
        self.grid_a = as_strided(self.frame_a, strides=strides_fa, shape=shape_fa)
        self.grid_b = as_strided(self._padded_fb, strides=strides_fb, shape=shape_fb)
    def _get_window_frames(self, i, j):
        """Return sub images of image attribute a and b at position i,j.
        :param int i: first index of grid coordinates
        :param int j: second index of grid coordinates
        :returns: sub frame as interrogation and search window for correlation
        """
        return self.grid_a[i, j], self.grid_b[i, j]
    def correlate_frames(self, method='gaussian'):
        """Correlation of all grid points, creating a velocity field.
        :param str method: Method of the peak finding algorithm
        :returns: the velocity component fields (u, v)
        """
        for i, j in np.ndindex(self.grid_spec.get_grid_shape()):
            window_a, window_b = self._get_window_frames(i, j)
            displacement = (self._correlator.get_displacement(window_a, window_b,
                                                              subpixel_method=method))
            # += accumulates onto any previously computed field rather than
            # overwriting it.
            self.u[i, j] += displacement[0]
            self.v[i, j] += displacement[1]
        return self.u, self.v
    def correlate_frames_2D(self):
        """Correlation function for two dimensional peak finder algorithm.

        Results are stored in self.u / self.v; the return value is discarded.
        """
        self.correlate_frames(method='9point')
|
jr7/pypiv
|
pypiv/piv/direct_piv.py
|
Python
|
bsd-3-clause
| 4,514
|
[
"Gaussian"
] |
5a2099efdde778e41d57668c28272393be4bed20cea539d2068ebba92d52cf9a
|
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
# the Scientific Python netCDF 3 interface
# http://dirac.cnrs-orleans.fr/ScientificPython/
#from Scientific.IO.NetCDF import NetCDFFile as Dataset
# the 'classic' version of the netCDF4 python interface
# http://code.google.com/p/netcdf4-python/
import numpy as np
from netCDF4 import Dataset
from numpy import dtype # array module from http://numpy.scipy.org
"""
This example writes some surface pressure and temperatures
The companion program sfc_pres_temp_rd.py shows how to read the netCDF
data file created by this program.
This example demonstrates the netCDF Python API.
It will work either with the Scientific Python NetCDF version 3 interface
(http://dirac.cnrs-orleans.fr/ScientificPython/)
of the 'classic' version of the netCDF4 interface.
(http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4_classic-module.html)
To switch from one to another, just comment/uncomment the appropriate
import statements at the beginning of this file.
Jeff Whitaker <jeffrey.s.whitaker@noaa.gov> 20070202
"""
# Adapted for BOUT++ by
# George Breyiannis, JAEA, Nov 2013
#
# the output array to write will be nx x ny
ny = 100; nx = ny + 4

# dy of grid.
# NOTE: np.float was a deprecated alias for the builtin float and was removed
# in NumPy 1.24, so the builtin is used throughout.
dy = old_div(1.0, float(ny))
dx = dy

# create grid (uniform spacing dx, dy)
dxarr = np.zeros((nx, ny), dtype='float32') + dx
dyarr = np.zeros((nx, ny), dtype='float32') + dy
xarr = np.arange(0., float(nx), 1., dtype='float32') * dx
yarr = np.arange(0., float(ny), 1., dtype='float32') * dy

# compute initial variables
rho = np.zeros((nx, ny), dtype='float32') + old_div(25., (36.*np.pi))
p = np.zeros((nx, ny), dtype='float32') + old_div(5., (12.*np.pi))
# NOTE(review): the two arrays above are immediately discarded because rho
# and p are rebound to scalars here; this matches the original behaviour --
# confirm the scalar initial condition is the intended one.
rho = 1.
p = old_div(rho, 3.)

# Orszag-Tang style velocity and magnetic field initial conditions.
v_x = np.zeros((nx, ny), dtype='float32')
Bx = np.zeros((nx, ny), dtype='float32')
for y in range(ny):
    v_x[:, y] = -np.sin(2.*np.pi*yarr[y])
    Bx[:, y] = -np.sin(2.*np.pi*yarr[y])
#Bx=Bx/np.sqrt(4.*np.pi)

v_y = np.zeros((nx, ny), dtype='float32')
By = np.zeros((nx, ny), dtype='float32')
for x in range(nx):
    v_y[x, :] = np.sin(2.*np.pi*xarr[x])
    By[x, :] = np.sin(4.*np.pi*xarr[x])
#By=By/np.sqrt(4.*np.pi)

# Domain inside core (periodic)
ixseps1 = nx
ixseps2 = nx

# open a new netCDF file for writing.
ncfile = Dataset('otv.grd.128.nc', 'w', format='NETCDF3_CLASSIC')

# output data.
# create the nx and ny dimensions.
ncfile.createDimension('x', nx)
ncfile.createDimension('y', ny)
ncfile.createDimension('single', 1)

# create and write nx,ny variables
nxx = ncfile.createVariable('nx', 'i4', ('single'))
nyy = ncfile.createVariable('ny', 'i4', ('single'))
nxx[:] = nx
nyy[:] = ny

# Define the coordinate variables; they hold the grid spacings dxarr/dyarr.
# Note: this rebinds dx and dy from scalars to netCDF variable objects.
dx = ncfile.createVariable('dx', dtype('float32').char, ('x', 'y'))
dy = ncfile.createVariable('dy', dtype('float32').char, ('x', 'y',))

# write data to coordinate vars.
dx[:, :] = dxarr
dy[:, :] = dyarr

# create and write ixseps* variables.
ix1 = ncfile.createVariable('ixseps1', 'i4', ('single'))
ix2 = ncfile.createVariable('ixseps2', 'i4', ('single'))
ix1[:] = ixseps1
ix2[:] = ixseps2

# create the corresponding field variables
rho0 = ncfile.createVariable('rho0', dtype('float32').char, ('x', 'y'))
p0 = ncfile.createVariable('p0', dtype('float32').char, ('x', 'y'))
v0_x = ncfile.createVariable('v0_x', dtype('float32').char, ('x', 'y'))
v0_y = ncfile.createVariable('v0_y', dtype('float32').char, ('x', 'y'))
B0x = ncfile.createVariable('B0x', dtype('float32').char, ('x', 'y'))
B0y = ncfile.createVariable('B0y', dtype('float32').char, ('x', 'y'))

# write data to variables.
rho0[:, :] = rho
p0[:, :] = p
v0_x[:, :] = v_x
v0_y[:, :] = v_y
B0x[:, :] = Bx
B0y[:, :] = By

ncfile.close()

# Report the file name actually written (message previously said otv.grd.py.nc).
print('*** SUCCESS writing file otv.grd.128.nc!')
|
kevinpetersavage/BOUT-dev
|
examples/orszag-tang/generate.py
|
Python
|
gpl-3.0
| 3,675
|
[
"DIRAC",
"NetCDF"
] |
02e13a1c177fd3cc8519dbe0b33c86752bdf50d8e204b23dad3a0cf340eff9ba
|
#
# GA-Parser v1.1
# This program carves and parses Google Analytic vales (utma utmb, utmz)
# from a file (unallocated, memory dump, pagefile, etc.)
# and outputs them into a tsv form.
#
# To read more about it, visit my blog at http://az4n6.blogspot.com/
#
# Copyright (C) 2014 Mari DeGrazia (arizona4n6@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You can view the GNU General Public License at <http://www.gnu.org/licenses/>
__author__ = 'arizona4n6@gmail.com Mari DeGrazia'
__version__ = '1.1'
__copyright__ = 'Copyright (C) 2014 Mari DeGrazia'
__license__ = 'GNU'
__init__ = [""]
# Modified by CBRYCE for MantaRay.
# minor bug and type fixes (Thanks to Ron Dormido for testing it!)
import sys
import re
import urllib
import datetime
import os
# Needed for ewf support.
import pyewf
import pytsk3
######################### Functions to process individual cookie values (utma, utmb and utmz) ##########################
#parse the utma values. Takes the utma value as input
def parse_utma(cookie_value, file_offset, host, type, toPrint=True):
    """
    Parse a carved Google Analytics __utma cookie value.

    The normal layout is:
        domainhash.visitorid.created.previous_visit.current_visit.hits
    Some domain-hash fields contain a period (these also contain a colon),
    which shifts every subsequent field up by one index.

    :param cookie_value: raw __utma value string (period-separated fields)
    :param file_offset: offset of the hit within the scanned file (reporting only)
    :param host: host/domain associated with the cookie (reporting only)
    :param type: record type label written to the report (shadows the builtin,
                 kept for backward compatibility)
    :param toPrint: when True, also write a TSV row to the global
                    `output_utma` file (uses global `current_file`)
    :returns: dict of parsed values, or 0 if the value cannot be parsed

    Bug fix vs. v1.1: in the shifted (colon) branch, "2ndRecentVisit" was
    converted from field index 3 while its epoch came from index 4.
    """
    utma_value = {}
    utma_value["Created"] = ""
    utma_value["Created_Epoch"] = ""
    utma_value["2ndRecentVisit"] = ""
    utma_value["MostRecent"] = ""
    utma_value["Hit"] = ""

    utma_values = cookie_value.split('.')

    # Wonky domain hashes include a '.' (and a ':'), shifting every field by one.
    offset = 1 if ':' in utma_values[0] else 0

    # Carved data is often truncated; bail out instead of raising IndexError
    # (0 is this module's existing "could not parse" convention).
    if len(utma_values) < 6 + offset:
        return 0

    def _fmt(epoch):
        # Format an epoch-seconds string as local time; the original code
        # reported conversion failures as a fixed string rather than raising.
        try:
            return datetime.datetime.fromtimestamp(int(epoch)).strftime("%Y-%m-%d %H:%M:%S")
        except Exception:
            return "Error on conversion"

    # cookie create time
    utma_value["Created_Epoch"] = utma_values[2 + offset]
    utma_value["Created"] = _fmt(utma_values[2 + offset])
    # second most recent visit (fix: always convert the same field as the epoch)
    utma_value["2ndRecentVisit_Epoch"] = utma_values[3 + offset]
    utma_value["2ndRecentVisit"] = _fmt(utma_values[3 + offset])
    # most recent visit
    utma_value["MostRecent_Epoch"] = utma_values[4 + offset]
    utma_value["MostRecent"] = _fmt(utma_values[4 + offset])
    # number of visits
    utma_value["Hit"] = utma_values[5 + offset]

    if toPrint == True:
        output_utma.write(current_file + "\t" + str(file_offset) + "\t" + str(type) + "\t" + str(host) + "\t" + str(utma_value['Created']) + "\t" + str(utma_value["2ndRecentVisit"]) + "\t" + str(utma_value["MostRecent"]) + "\t" + str(utma_value["Hit"].rstrip("\n")) + "\n")

    return utma_value
def parse_utmb(cookie_value,file_offset,host,c_type):
    """Parse a Google Analytics __utmb (session) cookie value and report it.

    Expected layout after splitting on '.':
        <domain hash>.<page views>.<outbound clicks>.<session start epoch>

    Appends one TSV row to the global ``output_utmb`` report file and returns
    the parsed dict, or 0 when the value has too few fields.
    """
    #create dictionary to hold utmb values
    utmb_value = {}
    #utmb_value["URL"]=""
    utmb_value["PageViews"]=""
    utmb_value["Outbound"]=""
    utmb_value["StartCurrSess"]=""
    utmb_value["StartCurrSess_Epoch"]=""
    utmb_values = cookie_value.split('.')
    if len(utmb_values) <= 1:
        #not enough fields to be a valid __utmb value
        return 0
    else:
        #some utmb domain hash values do not want to play nice and have some wonky values that include a period
        #which throws off the count. These also have a colon in them, so look for the colon, if found, advance the count by 1
        if ':' in utmb_values[1]:
            #page views
            utmb_value["PageViews"]=(utmb_values[2])
            #outbound link clicks
            utmb_value["Outbound"]=(utmb_values[3])
            #start of current session
            #if the timestamp has more than 10 digits it includes milliseconds; divide them away
            if len(utmb_values[4])<= 10:
                #utmb_value["StartCurrSess_Epoch"] = int(utmb_values[4])
                try:
                    utmb_value["StartCurrSess"]=(datetime.datetime.fromtimestamp(int(utmb_values[4])).strftime("%Y-%m-%d %H:%M:%S"))
                except:
                    utmb_value["StartCurrSess"]="Error on conversion"
            else:
                #utmb_value["StartCurrSess_Epoch"] = (int(utmb_values[4])/1000)
                try:
                    utmb_value["StartCurrSess"]=(datetime.datetime.fromtimestamp(int(utmb_values[4])/1000).strftime("%Y-%m-%d %H:%M:%S"))
                except:
                    utmb_value["StartCurrSess"]="Error on conversion"
        else:
            #page views
            utmb_value["PageViews"]=(utmb_values[1])
            #outbound link clicks
            utmb_value["Outbound"]=(utmb_values[2])
            #start of current session
            #if the timestamp has more than 10 digits it includes milliseconds; divide them away
            if len(utmb_values[3])<= 10:
                #utmb_value["StartCurrSess_Epoch"] = int(utmb_values[3])
                try:
                    utmb_value["StartCurrSess"]=(datetime.datetime.fromtimestamp(int(utmb_values[3])).strftime("%Y-%m-%d %H:%M:%S"))
                except:
                    utmb_value["StartCurrSess"]="Error on conversion"
            else:
                #utmb_value["StartCurrSess_Epoch"] = (int(utmb_values[3])/1000)
                try:
                    utmb_value["StartCurrSess"]=(datetime.datetime.fromtimestamp(int(utmb_values[3])/1000).strftime("%Y-%m-%d %H:%M:%S"))
                except:
                    utmb_value["StartCurrSess"]="Error on conversion"
        #NOTE(review): "StartCurrSess_Epoch" is initialized but never populated
        #(both assignments above are commented out).
        #write one TSV row to the report
        output_utmb.write(current_file + "\t" + str(file_offset) + "\t" + str(c_type) + "\t" + str(host) + "\t" + utmb_value["PageViews"] + "\t" + utmb_value["Outbound"] + "\t" + utmb_value["StartCurrSess"] + "\n")
        return utmb_value
def parse_utmz(cookie_value,file_offset,host,c_type):
    """Parse a Google Analytics __utmz (campaign/referral) cookie value.

    After the leading <domain hash>.<last update>... numeric fields the value
    carries 'key=value' pairs (utmcsr, utmccn, utmcmd, utmctr, utmcct)
    separated by '|', in no guaranteed order, so each is located by substring
    search.  Appends one TSV row to the global ``output_utmz`` report file and
    returns the parsed dict, or 0 for an empty value.
    """
    #create dictionary to hold utmz values
    utmz_value = {}
    utmz_value["LastUpdate"]=""
    utmz_value["LastUpdate_Epoch"]=""
    utmz_value["Source"]=""
    utmz_value["CampName"]=""
    utmz_value["AccesMethod"]=""
    utmz_value["Keyword"]=""
    utmz_values = cookie_value.split('.')
    if len(utmz_values) == 0:
        return 0
    else:
        #Last Update time: 10 digits or fewer means plain epoch seconds
        if len(utmz_values[1])<=10:
            utmz_value["LastUpdate_Epoch"] = int(utmz_values[1])
            utmz_value["LastUpdate"]=(datetime.datetime.fromtimestamp(int(utmz_values[1])).strftime("%Y-%m-%d %H:%M:%S"))
        #some utmz domain hash values do not want to play nice and have some wonky values that include a period
        #which throws off the count. These also have a colon in them, so look for the colon, if found, advance the count by 1
        else:
            if ':' in utmz_values[1]:
                utmz_value["LastUpdate_Epoch"] = int(utmz_values[2])
                utmz_value["LastUpdate"]=(datetime.datetime.fromtimestamp(int(utmz_values[2])).strftime("%Y-%m-%d %H:%M:%S"))
            else:
                #longer than 10 digits: assume milliseconds and scale down
                try:
                    utmz_value["LastUpdate_Epoch"] = int(utmz_values[1])/1000
                    utmz_value["LastUpdate"]=(datetime.datetime.fromtimestamp(int(utmz_values[1])/1000).strftime("%Y-%m-%d %H:%M:%S"))
                except:
                    print "Error converting time for:" + cookie_value
        #the utm values are not always in order. thus, we need to locate each one in the string and write them out
        #source (utmcsr)
        if "utmcsr" in cookie_value:
            utmcsr = cookie_value.split("utmcsr=")
            #partition based on |, take the first section
            utmcsr_value,temp1,temp2 = utmcsr[1].partition('|')
            utmz_value["Source"]=utmcsr_value
        else:
            utmz_value["Source"]='utmcsr not found'
        #campaign name (utmccn)
        if "utmccn" in cookie_value:
            utmccn = cookie_value.split("utmccn=")
            try:
                utmccn_value,temp1, temp2 = utmccn[1].partition('|')
                utmz_value["CampName"]=utmccn_value
            except:
                utmz_value["CampName"]="unable to process, check offset"
        else:
            utmz_value["CampName"]="utmccn not found"
        #access method (utmcmd)
        if "utmcmd" in cookie_value:
            utmcmd = cookie_value.split("utmcmd=")
            try:
                utmcmd_value,temp1, temp2 = utmcmd[1].partition('|')
                utmz_value["AccesMethod"]=utmcmd_value
            except:
                utmz_value["AccesMethod"]='unable to process, check offset'
        else:
            utmz_value["AccesMethod"]='utmcmd not found'
        #search keywords (utmctr); '%20' decoded to spaces
        if "utmctr" in cookie_value:
            utmctr = cookie_value.split("utmctr=")
            try:
                utmctr_value,temp1, temp2 = utmctr[1].partition('|')
                utmz_value["Keyword"]=utmctr_value.replace('%20', " ")
            except:
                utmz_value["Keyword"]='unable to process, check offset'
        else:
            utmz_value["Keyword"]='utmctr not found'
        #path to page on the site of the referring link (utmcct)
        if "utmcct" in cookie_value:
            utmcct = cookie_value.split("utmcct=")
            try:
                utmcct_value,temp1, temp2 = utmcct[1].partition('|')
                utmz_value["ReferringPage"]=utmcct_value.replace('%20', " ")
            except:
                utmz_value["ReferringPage"]='unable to process, check offset'
        else:
            utmz_value["ReferringPage"]='utmcct not found'
        #write one TSV row to the report
        output_utmz.write(current_file + "\t" + str(file_offset) + "\t" + str(c_type) + "\t" + str(host) + "\t" + str(utmz_value['LastUpdate']) + "\t" + str(utmz_value["Source"]) +"\t" + str(utmz_value["CampName"]) + "\t" + utmz_value["AccesMethod"] + "\t" + str(utmz_value["Keyword"]) + "\t" + utmz_value["ReferringPage"] + "\n")
        return utmz_value
def parse_utm_gif(line):
    """Extract GA values from a __utm.gif tracking-request URL string.

    Pulls the __utma fields plus the utmhn/utmdt/utmp/utmr query parameters
    out of ``line``; any parameter not present is reported as "NA".
    Returns a dict of the extracted values.
    """
    utm_gif={}
    #find utma value, e.g. _utma%3D55650728.920979037.1398568437.1398568437.1398568437.1
    pattern = r'(__utma%3D(\d+).(\d+).(\d+).(\d+).(\d+).(\d+)%3B)'
    utma = re.search(pattern,line)
    if utma != None:
        file_offset = ""
        host = ""
        type = ""
        #strip additional characters used for pattern matching so all we have are the utma values
        value = utma.group()[9:-3]
        #reuse the cookie parser; toPrint=False so nothing is written to the utma report here
        utma_v = parse_utma(value,file_offset,host,"gif",False)
        utm_gif["utma_created"] = utma_v['Created']
        utm_gif["utma_previous"] = utma_v["2ndRecentVisit"]
        utm_gif["utma_current"] = utma_v["MostRecent"]
        utm_gif["utma_hit"] = utma_v["Hit"]
    else:
        #no utma portion found in this request
        utm_gif["utma_created"] = "NA"
        utm_gif["utma_previous"] = "NA"
        utm_gif["utma_current"] = "NA"
        utm_gif["utma_hit"] = "NA"
    #utmhn - hostname, e.g. utmhn=www.reddit.com
    pattern = r'(utmhn=)(.*?)&'
    utmhn = re.search(pattern,line)
    if utmhn != None:
        utm_gif["utmhn_hostname"]=utmhn.group(2)
    else:
        utm_gif["utmhn_hostname"]="NA"
    #utmdt - page title (URL-decoded)
    pattern = r'(utmdt=)(.*?)&'
    utmdt = re.search(pattern,line)
    if utmdt != None:
        utm_gif["utmdt_page_title"] = urllib.unquote(utmdt.group(2))
    else:
        utm_gif["utmdt_page_title"] = "NA"
    #utmp - page request (URL-decoded)
    pattern = r'(utmp=)(.*?)&'
    utmp = re.search(pattern,line)
    if utmp != None:
        utm_gif["utmp_page_request"]=urllib.unquote(utmp.group(2))
    else:
        utm_gif["utmp_page_request"]="NA"
    #utmr - full referral URL (URL-decoded)
    pattern = r'(utmr=)(.*?)&'
    utmr = re.search(pattern,line)
    if utmr != None:
        utm_gif["utmr_full_referral"] = urllib.unquote(utmr.group(2))
    else:
        utm_gif["utmr_full_referral"] = "NA"
    return utm_gif
######################### Functions to grep utma, umtb, utmz and utm.gif values for various browsers ##########################
def process_chrome_utma(pattern, chunk):
    """Find Chrome __utma hits in ``chunk`` and hand each to parse_utma.

    For every regex match, walks backwards over the allowed host-name
    characters that precede the match to recover the cookie's host, then
    records the hit.  Increments the global chrome_utma_count.
    """
    global chrome_utma_count
    global loop
    count = 0
    #maximum number of host-name characters to walk back over
    max_size = 70
    for m in pattern.finditer(chunk):
        print "Chrome utma hit found at offset " + str(m.start())
        file_offset = (loop*(maxfilesize))+m.start()
        beginning_offset = m.start()-1
        #walk backwards while the bytes look like host-name characters
        while count < max_size:
            chara = chunk[beginning_offset]
            try:
                ascii_char= chara.decode('ascii')
                valid = re.match(p_allowed_chars, ascii_char) is not None
                if valid:
                    beginning_offset = beginning_offset-1
                    count = count + 1
                else:
                    break
            except:
                #non-decodable byte: the host name ends here
                break
        #reset count for the next match
        count = 0
        end_offset = m.start()
        host = chunk[beginning_offset+1:end_offset]
        #absolute offset of the start of the host name within the input file
        file_offset = (loop*(maxfilesize))+beginning_offset
        parse_utma(m.group(1),file_offset,host,"chrome")
        chrome_utma_count = chrome_utma_count + 1
def process_chrome_utmb(pattern, chunk):
    """Find Chrome __utmb hits in ``chunk`` and hand each to parse_utmb.

    NOTE(review): the ``pattern`` parameter is ignored; the global
    chrome_pattern_utmb is (re)compiled and used instead.
    """
    global chrome_utmb_count
    global loop
    count = 0
    #maximum number of host-name characters to walk back over
    max_size = 70
    #parse the chrome utmb cookies
    p = re.compile(chrome_pattern_utmb)
    for m in p.finditer(chunk):
        print "Chrome utmb Hit found at offset " + str(m.start())
        chrome_utmb_count = chrome_utmb_count + 1
        beginning_offset = m.start()-1
        #walk backwards over host-name characters preceding the match
        while count < max_size:
            chara = chunk[beginning_offset]
            try:
                ascii_char= chara.decode('ascii')
                valid = re.match(p_allowed_chars, ascii_char) is not None
                if valid:
                    beginning_offset = beginning_offset-1
                    count = count + 1
                else:
                    break
            except:
                #non-decodable byte: the host name ends here
                break
        end_offset = m.start()
        host = chunk[beginning_offset+1:end_offset]
        #reset count for the next match
        count = 0
        file_offset = (loop*(maxfilesize))+beginning_offset
        parse_utmb(m.group(2),file_offset,host,"chrome")
def process_chrome_utmz(pattern,chunk):
    """Find Chrome __utmz hits in ``chunk`` and hand each to parse_utmz.

    NOTE(review): the ``pattern`` parameter is ignored; the global
    chrome_pattern_utmz is (re)compiled and used instead.
    """
    global chrome_utmz_count
    global loop
    count = 0
    #maximum number of host-name characters to walk back over
    max_size = 70
    p = re.compile(chrome_pattern_utmz)
    for m in p.finditer(chunk):
        print "Chrome utmz Hit found at offset " + str(m.start())
        chrome_utmz_count = chrome_utmz_count + 1
        beginning_offset = m.start()-1
        #walk backwards over host-name characters preceding the match
        while count < max_size:
            chara = chunk[beginning_offset]
            try:
                ascii_char= chara.decode('ascii')
                valid = re.match(p_allowed_chars, ascii_char) is not None
                if valid:
                    beginning_offset = beginning_offset-1
                    count = count + 1
                else:
                    break
            except:
                #non-decodable byte: the host name ends here
                break
        end_offset = m.start()
        host = chunk[beginning_offset+1:end_offset]
        #reset count for the next match
        count = 0
        file_offset = (loop*(maxfilesize))+beginning_offset
        #slice off the leading '__utmz' and trailing terminator before parsing
        parse_utmz((m.group())[6:-3],file_offset,host,"chrome")
def process_firefox_utma(pattern, chunk):
    """Find Firefox __utma hits in ``chunk`` and hand each to parse_utma.

    In Firefox storage the host name follows the numeric cookie fields, so it
    is recovered by splitting the match on the numeric portion.
    """
    global ff_utma_count
    global loop
    count = 0
    for m in pattern.finditer(chunk):
        print "FireFox utma hit found at offset " + str(m.start())
        #split off the numeric cookie fields; what follows is the host
        parts = re.split('([0-9]{0,10}\.){5}[0-9]{0,200}',m.group())
        host = parts[2]
        #only record hits where a host name was actually recovered
        if host != "":
            file_offset = (loop*(maxfilesize))+m.start()
            parse_utma(m.group(),file_offset,host,"firefox")
            ff_utma_count = ff_utma_count + 1
def process_firefox_utmb(pattern,chunk):
    """Find Firefox __utmb hits in ``chunk`` and hand each to parse_utmb."""
    global ff_utmb_count
    global loop
    #count = 0
    for m in pattern.finditer(chunk):
        print "FireFox utmb hit found at offset " + str(m.start())
        #get utmb value (the four '.'-separated numeric fields)
        ff_utmb_count = ff_utmb_count + 1
        v_pattern = '([0-9]{0,10}\.){3}[0-9]{0,10}'
        v = re.search(v_pattern,m.group())
        value = v.group()
        #get __utmb host (the text that follows the numeric fields)
        parts = re.split('([0-9]{0,10}\.){3}[0-9]{0,10}',m.group())
        host = parts[2]
        file_offset = (loop*(maxfilesize))+m.start()
        parse_utmb(str(value),file_offset,host,"firefox")
def process_firefox_utmz(p,chunk):
    """Find Firefox __utmz hits in ``chunk`` and hand each to parse_utmz.

    Hits whose host name cannot be recovered are appended to the global
    ``not_processed`` list (reported at the end of the run) instead of being
    parsed; successfully parsed hits increment the global ``processed``.
    """
    global ff_utmz_count
    global loop
    global processed
    for m in p.finditer(chunk):
        ff_utmz_count = ff_utmz_count + 1
        print "FireFox utmz hit found at offset " + str(m.start())
        #the four leading numeric fields of the cookie value
        pattern = '([0-9]{0,20}\.){4}'
        value = re.search('([0-9]{0,20}\.){4}',m.group())
        split1 = re.split(pattern,m.group())
        v= value.group()
        #domain-name-shaped text followed by '/' marks the host portion
        pattern = r'((\.?[a-zA-Z\d-]{0,63}\.){1,2}[a-zA-Z]{0,4}\/[TS]?)'
        split2 = re.split(pattern,split1[2])
        h = re.search(pattern,m.group())
        if h != None:
            #drop the trailing '/T' or '/S' marker from the host
            host = h.group()[:-2]
            processed = processed + 1
            file_offset = (loop*(maxfilesize))+m.start()
            parse_utmz(m.group(2),file_offset,host,"firefox")
        else:
            #host not recoverable; remember the raw hit for the summary report
            file_offset = (loop*(maxfilesize))+m.start()
            info = str(file_offset) + " " + str(m.group())
            not_processed.append(info)
def process_ie_utma(p,chunk):
    """Find IE __utma cookie records in ``chunk`` and hand each to parse_utma.

    Parameters:
        p     -- compiled IE __utma regex (ie_utma_pattern)
        chunk -- raw bytes read from the current input file

    Fix: the passed-in compiled pattern was previously shadowed by a
    redundant ``re.compile(ie_utma_pattern)``; it is now used directly,
    consistent with process_ie_utmb and process_ie_utmz.
    """
    global ie_utma_count
    for m in p.finditer(chunk):
        #translate the chunk-relative match offset to an absolute file offset
        file_offset = (loop*(maxfilesize))+m.start()
        print("IE utma hit found at offset " + str(file_offset))
        ie_utma_count = ie_utma_count+1
        #group 5 is the host-name line that follows the cookie value
        host = str(m.group(5).rstrip('\n'))
        parse_utma(m.group(2),file_offset,host, "ie")
def process_ie_utmb(p,chunk):
    """Find IE __utmb cookie records in ``chunk`` and hand each to parse_utmb."""
    global ie_utmb_count
    for m in p.finditer(chunk):
        #translate the chunk-relative match offset to an absolute file offset
        file_offset = (loop*(maxfilesize))+m.start()
        print "IE utmb hit found at offset " + str(file_offset)
        ie_utmb_count = ie_utmb_count + 1
        #group 5 is the host-name line that follows the cookie value
        host = m.group(5).rstrip("\n")
        file_offset = (loop*(maxfilesize))+m.start()
        parse_utmb(m.group(2).rstrip("\n"),file_offset,host,"ie")
def process_ie_utmz(p,chunk):
    """Find IE __utmz cookie records in ``chunk`` and hand each to parse_utmz."""
    global ie_utmz_count
    for m in p.finditer(chunk):
        #translate the chunk-relative match offset to an absolute file offset
        file_offset = (loop*(maxfilesize))+m.start()
        print "IE utmz hit found at offset " + str(file_offset)
        ie_utmz_count = ie_utmz_count + 1
        #group 4 is the host-name line that follows the cookie value
        host = m.group(4).rstrip("\n")
        file_offset = (loop*(maxfilesize))+m.start()
        parse_utmz(m.group(2).rstrip("\n"),file_offset,host,"ie")
def process_apple_utma(pattern,chunk):
    """Find Safari __utma cookie records in ``chunk`` and hand each to parse_utma."""
    global loop
    global apple_utma_count
    for m in pattern.finditer(chunk):
        #translate the chunk-relative match offset to an absolute file offset
        file_offset = (loop*(maxfilesize))+m.start()
        print "apple utma hit found at offset " + str(file_offset)
        #group 4 is the NUL-delimited host-name field
        host = m.group(4)
        parse_utma(m.group(2),file_offset,host,"apple")
        apple_utma_count = apple_utma_count + 1
def process_apple_utmb(pattern, chunk):
    """Find Safari __utmb cookie records in ``chunk`` and hand each to parse_utmb."""
    global loop
    global apple_utmb_count
    for m in pattern.finditer(chunk):
        #translate the chunk-relative match offset to an absolute file offset
        file_offset = (loop*(maxfilesize))+m.start()
        print "apple utmb hit found at offset " + str(file_offset)
        apple_utmb_count = apple_utmb_count + 1
        #group 4 is the NUL-delimited host-name field
        host = m.group(4)
        parse_utmb(m.group(2),file_offset,host,"apple")
        #NOTE(review): this recomputation is dead code; file_offset is not used again
        file_offset = (loop*(maxfilesize))+m.start()
def process_apple_utmz(pattern,chunk):
    """Find Safari __utmz cookie records in ``chunk`` and hand each to parse_utmz."""
    global apple_utmz_count
    global loop
    for m in pattern.finditer(chunk):
        #translate the chunk-relative match offset to an absolute file offset
        file_offset = (loop*(maxfilesize))+m.start()
        print "apple utmz hit found at offset " + str(file_offset)
        apple_utmz_count = apple_utmz_count + 1
        #group 4 is the NUL-delimited host-name field
        host = m.group(4)
        parse_utmz(m.group(2),file_offset,host,"apple")
def process_utm_gif_UTF16(pattern,chunk):
    """Find UTF-16 encoded __utm.gif? requests in ``chunk`` and report them.

    From each hit, decodes up to max_size bytes forward as UTF-16 to recover
    the request URL, extracts the GA parameters with parse_utm_gif, and
    writes one TSV row to the global ``output_utm_gif`` report.

    NOTE(review): the ``pattern`` parameter is ignored; the global
    gif_cache_pattern_UTF16 is (re)compiled and used instead.
    """
    global gif__UTF16_count
    count = 0
    line = ""
    #maximum number of bytes to decode after a hit
    max_size = 2000
    #gif_cache_pattern
    p = re.compile(gif_cache_pattern_UTF16)
    for m in p.finditer(chunk):
        count =0
        gif__UTF16_count = gif__UTF16_count + 1
        file_offset = (loop*(maxfilesize))+m.start()
        print "UTF16 gif hit found at offset " + str(file_offset)
        line = ""
        beginning_offset = m.start()
        #decode two bytes at a time until an undecodable pair or max_size is hit
        while count < max_size:
            chara = chunk[beginning_offset] + chunk[beginning_offset+1]
            try:
                ascii_char= chara.decode('utf-16','ignore')
                line = line + str(ascii_char)
                beginning_offset +=2
                count = count +2
            except:
                break
        if line:
            utm_gif = parse_utm_gif(line)
            #write the utma values
            output_utm_gif.write(current_file + "\t"+ str(file_offset) + "\t" + "utm.gif UTF16\t" + (utm_gif['utma_created']) + "\t" + str(utm_gif["utma_previous"]) +"\t" + str(utm_gif["utma_current"]) + "\t" + str(utm_gif["utma_hit"]) + "\t")
            #utmhn hostname
            output_utm_gif.write(utm_gif["utmhn_hostname"]+"\t")
            #utmdt page title
            output_utm_gif.write(utm_gif["utmdt_page_title"]+ "\t")
            #utmp page request
            output_utm_gif.write(utm_gif["utmp_page_request"] + "\t")
            #utmr full referral URL
            output_utm_gif.write(utm_gif["utmr_full_referral"]+ "\n")
def process_utm_gif_ASCII(pattern, chunk):
    """Find ASCII __utm.gif? requests in ``chunk`` and report them.

    From each hit, decodes up to max_size bytes forward as ASCII to recover
    the request URL, extracts the GA parameters with parse_utm_gif, and
    writes one TSV row to the global ``output_utm_gif`` report.  Hits that
    run past the end of the chunk are appended to ``not_processed``.

    NOTE(review): the ``pattern`` parameter is ignored; the global
    gif_cache_pattern_ASCII is (re)compiled and used instead.
    """
    count = 0
    line = ""
    #maximum number of bytes to decode after a hit
    max_size = 2000
    global gif_ASCII_count
    p = re.compile(gif_cache_pattern_ASCII)
    for m in p.finditer(chunk):
        count =0
        gif_ASCII_count = gif_ASCII_count + 1
        file_offset = (loop*(maxfilesize))+m.start()
        print "ASCII gif hit found at offset " + str(file_offset)
        line = ""
        beginning_offset = m.start()
        #decode one byte at a time until a non-ascii byte, end of chunk, or max_size
        while count < max_size:
            try:
                chara = chunk[beginning_offset]
            except:
                #ran off the end of the chunk; remember the hit for the summary
                not_processed.append(str(file_offset) + m.group())
                break
            try:
                ascii_char= chara.decode('ascii')
                line = line + str(ascii_char)
                beginning_offset +=1
                count = count +1
            except:
                break
        if line:
            utm_gif = parse_utm_gif(line)
            #write the utma values
            output_utm_gif.write(current_file + "\t"+ str(file_offset) + "\tutm.gif ASCII\t" + (utm_gif['utma_created']) + "\t" + str(utm_gif["utma_previous"]) +"\t" + str(utm_gif["utma_current"]) + "\t" + str(utm_gif["utma_hit"]) + "\t")
            #utmhn hostname
            output_utm_gif.write(utm_gif["utmhn_hostname"]+"\t")
            #utmdt page title
            output_utm_gif.write(utm_gif["utmdt_page_title"]+ "\t")
            #utmp page request
            output_utm_gif.write(utm_gif["utmp_page_request"] + "\t")
            #utmr full referral URL
            output_utm_gif.write(utm_gif["utmr_full_referral"]+ "\n")
# From https://code.google.com/p/libewf/wiki/pyewf
# Added by chapinb
class EwfImgInfo(pytsk3.Img_Info):
    """pytsk3 Img_Info adapter that reads from an EWF (Expert Witness) image via pyewf."""
    def __init__(self, filename):
        """Open the EWF image, including all of its segment files (E01, E02, ...)."""
        self.ewf_handle = pyewf.handle()
        # pyewf.glob collects every segment file belonging to this image.
        file_names = pyewf.glob(filename)
        self.ewf_handle.open(file_names)
        super(EwfImgInfo, self).__init__()
    def read(self, offset, size):
        """Read ``size`` bytes of decompressed image data starting at ``offset``."""
        self.ewf_handle.seek(offset)
        return self.ewf_handle.read(size)
    def get_size(self):
        """Return the size of the media stored inside the EWF image."""
        return self.ewf_handle.get_media_size()
if __name__ == '__main__':
    ######################### Main ##########################
    # Command-line driver: parse options, open the TSV report files, compile
    # the per-browser regex patterns, then scan the input file(s) chunk by
    # chunk for GA cookie and __utm.gif artifacts.
    usage = "\n\nThis program parses a file (think unallocated, memdump, pagefile, dd image, iTunes backup etc.) to " \
            "locate Google Analytic cookies. In addition to the cookies, it can also locate Google Analytic values in " \
            "gif cache entries. It then exports out the cookies into TSV files. Supported browsers are Chrome,Firefox, " \
            "IE and Safari. Select at least one browser and it's recommended to try the --gif option with it, unless " \
            "you're Ron Dormido, then you are required to sing Baby Got Back first.\n\n Example:\n\ GA-parser.py " \
            "--chrome --firefox --gif -f /home/sansforensics/pagefile.sys -o /home/sansforensics/reports\n\n\
GA-parser.py --chrome --firefox --gif -d /home/sansforensics/allcookies -o /home/sansforensics/reports"
    from optparse import OptionParser
    input_parser = OptionParser(usage=usage)
    #full path to a single file to carve
    input_parser.add_option("-f", dest = "input_file", metavar="memdump.mem", help = "file to carve for cookies")
    #directory of files to process, if selected
    input_parser.add_option("-d", dest = "directory", metavar="/document/allcookies",
                            help = "directory holding files to parse")
    #path to the directory that receives the TSV reports
    input_parser.add_option("-o", dest = "output", help = "name of directory file for output",
                            metavar = "/home/documents/report")
    #browser / artifact selection flags
    input_parser.add_option("--chrome", action="store_true", dest="chrome", help = "choose one or more")
    input_parser.add_option("--firefox", action="store_true", dest="firefox", help = "choose one or more")
    input_parser.add_option("--ie", action="store_true", dest="ie", help = "choose one or more")
    input_parser.add_option("--gif",action = "store_true", dest="gif_cache", help = "choose one or more")
    input_parser.add_option("--apple", action="store_true", dest="apple", help = "choose one or more")
    input_parser.add_option("--ewf", action="store_true", dest="ewf", help = "Select this is the input file is EWF "
                                                                             "format. Libewf and PyTSK3 must be "
                                                                             "installed")
    # input_parser.add_option("--threads", dest="threads", help="Number of threads to use in Dir Scan", metavar="4",
    #                         type="int", default=1)
    input_parser.add_option("--sigs", action="store_true", dest="sigs", help="Enable scanning for ONLY common file extensions for cookies")
    (options, args) = input_parser.parse_args()
    chrome = options.chrome
    firefox = options.firefox
    gif_cache = options.gif_cache
    ie = options.ie
    apple = options.apple
    output_folder = options.output
    directory = options.directory
    ewf_file = options.ewf
    # threads = options.threads
    sigs = options.sigs
    #no arguments given by user: show help and exit
    if len(sys.argv) == 1:
        input_parser.print_help()
        exit(0)
    #at least one browser/artifact option needs to be selected
    if chrome is not True and firefox is not True and gif_cache is not True and ie is not True and apple is not True:
        print "Please select at least one of the following: --chrome, --firefox --ie --apple or --gif"
        exit(0)
    print options.output
    #detect Windows vs. POSIX style output paths to pick a separator
    #NOTE(review): 'seperator' stays undefined if the output path contains
    #neither '\\' nor '/', which raises NameError at the open() calls below.
    if "\\" in options.output:
        seperator = "\\"
    if '/' in options.output:
        seperator = "/"
    #open report files for output (append mode) and write their headers
    if chrome or firefox or ie or apple:
        output_utmz = open(output_folder + seperator + "utmz.tsv", "a+")
        output_utma = open(output_folder + seperator + "utma.tsv", "a+")
        output_utmb = open(output_folder + seperator + "utmb.tsv","a+")
        #write out file headers
        output_utma.write("File\tOffset\tType\tHost\tCreated\t2ndRecentVisit\tMostRecent\tHits\n")
        output_utmb.write("File\tOffset\tType\tHost\tPage Views\tOutbound Links\tStart Current Session\n")
        output_utmz.write("File\tOffset\tType\tHost\tLast Update\tSource\tCampaign name\tAccess method\tKeyword\tReferringPage\n")
    if gif_cache:
        output_utm_gif = open(output_folder + seperator + "utm_gif.tsv","a")
        #matches UTF-16LE encoded "__utm.gif?"
        gif_cache_pattern_UTF16 = re.compile(r'\x5F\x00\x5F\x00\x75\x00\x74\x00\x6D\x00\x2E\x00\x67\x00\x69\x00\x66\x00\x3F\\?')
        #matches ascii __utm.gif found in URL strings
        gif_cache_pattern_ASCII = re.compile(r'__utm.gif?')
        #write out file header
        output_utm_gif.write("File\tOffset\tType\tutma_first\tutma_previous\tutma_last\tutma_hit\tutmhn_hostname\tudmt_page_title\tutmp_page_request\ttutmr_full_referral URL\n")
    if chrome:
        #chrome utm grep patterns
        chrome_pattern_utma = re.compile(r'__utma(([0-9]{0,10}\.){5}([0-9])*)(\/)')
        chrome_pattern_utmz = re.compile(r'(__utmz)(([0-9]{0,10}\.){4}utm(.*?)\s*\x2F\x00\x2E)')
        chrome_pattern_utmb = re.compile(r'(__utmb)(([0-9]{0,10}\.){3}[0-9]{0,10})')
    if firefox:
        #firefox utm grep patterns
        moz_pattern_utma = re.compile(r'(__utma(([0-9]{0,10}\.){5}[0-9]{1,5}))[^\/](\.?[a-zA-Z\d-]{0,63}){0,4}')
        moz_pattern_utmb = re.compile(r'__utmb([0-9]{0,10}\.){3}[0-9]{0,10}([a-zA-Z\d-]{,63}\.[a-zA-Z\d-]{,63}){1,4}')
        moz_pattern_utmz = re.compile(r'(__utmz)(([0-9]{0,20}\.){4}[ -~]*?)([a-zA-Z\d-]{,63}\.[a-zA-Z\d-]{,63}){0,4}\/[TS]{1}')
    if ie:
        #ie patterns (fields separated by \x0a newlines)
        ie_utma_pattern = re.compile(r'(__utma)(\x0a(([0-9]{0,10}\.){5})[0-9]{1,10}\n)([ -~]{1,64}\x0a)')
        ie_utmb_pattern = re.compile(r'(__utmb)(\x0a(([0-9]{0,10}\.){3})[0-9]{1,10}\n)([ -~]{1,64}\x0a)')
        ie_utmz_pattern = re.compile(r'(__utmz)\x0a(([0-9]{0,10}\.){4}[ -~]{1,200}\x0a)([ -~]{1,64}\x0a)')
    if apple:
        #safari patterns (fields separated by NUL bytes)
        apple_utma_pattern = re.compile(r'(__utma)\x00(([0-9]{0,10}\.){5}[0-9]{1,5})\x00([ -~]{1,64}\x00\/)')
        apple_utmb_pattern = re.compile(r'(__utmb)\x00(([0-9]{0,10}\.){3}[0-9]{10})\x00([ -~]{1,64})\x00\/')
        apple_utmz_pattern = re.compile(r'(__utmz)\x00(([0-9]{0,10}\.){4}[ -~]{1,200}\x00)([ -~]{1,64}\x00\/)')
    #count vars to keep track of total records processed
    chrome_utma_count = 0
    chrome_utmb_count = 0
    chrome_utmz_count = 0
    ff_utma_count = 0
    ff_utmb_count = 0
    ff_utmz_count = 0
    ie_utma_count=0
    ie_utmb_count=0
    ie_utmz_count=0
    apple_utma_count = 0
    apple_utmb_count = 0
    apple_utmz_count = 0
    gif__UTF16_count = 0
    gif_ASCII_count = 0
    processed = 0
    not_processed= []
    #chunk size; can be increased if more ram is available
    try:
        # Using free, gather the size of available RAM and use half of it as the maxfilesize to process
        import subprocess
        maxfilesize = subprocess.check_output("free -o | awk 'NR==2{print $4}'", shell=True)
        maxfilesize = int(maxfilesize.strip())/2
        print "Using Available_RAM/2 for maxfilesize: ", maxfilesize
    except:
        # If RAM cannot be calculated (e.g. not on Linux), use the default
        print "Using default chunk size for files"
        maxfilesize = 400000
    #printable chars allowed in urls and domain names
    allowed_chars = r'[\.a-zA-Z\d-]'
    p_allowed_chars = re.compile(allowed_chars)
    #loop tracks how many chunks of the current file have been read so far
    global loop
    loop = 0
    #process all files in a directory
    if directory != None:
        #check to see if the directory exists
        if os.path.isdir(options.directory) == False:
            print ("Could not locate directory. Please check path and try again")
            exit (0)
        #check to see if the path is a Windows or Linux path
        #NOTE(review): the backslash branch assigns 'separator' (typo) while
        #the rest of the script reads 'seperator'.
        if '\\' in options.directory:
            separator = "\\"
        if '/' in options.directory:
            seperator = "/"
        #collect all candidate files
        file_array=[]
        print "Scanning for Files..."
        for subdir, dirs, files in os.walk(options.directory):
            for fname in files:
                if sigs:
                    #--sigs: only keep files with common cookie-bearing extensions
                    if fname.endswith(".txt") or fname.endswith(".js") or fname.endswith(".html") or fname.endswith(".htm")\
                            or fname.endswith(".php") or fname.endswith(".sqlite") or fname.lower().endswith("cookies") or \
                            fname.endswith(".gif"):
                        file_array.append(os.path.join(subdir,fname))
                    elif fname.lower().endswith("pagefile.sys") or fname.lower().endswith("hiberfil.sys"):
                        file_array.append(os.path.join(subdir,fname))
                else:
                    file_array.append(os.path.join(subdir,fname))
        print "Completed scan. Will process ", len(file_array), " files."
        for current_file in file_array:
            try:
                size = os.path.getsize(current_file)
            except:
                #file vanished or is unreadable; skip it
                continue
            print("Processing " + current_file + ", " + str(size))
            #reset chunk counter for each new file
            loop = 0
            try:
                file_object = open(current_file, 'rb')
            except:
                #NOTE(review): 'pass' does not actually skip this file; the
                #loop below still references the previous file_object.
                print("Error: Cannot Open " + current_file + "...skipping")
                pass
            #read the file in chunks so we can process large files
            while 1:
                try:
                    if os.path.getsize(current_file) > maxfilesize:
                        chunk = file_object.read(maxfilesize)
                    else:
                        chunk = file_object.read()
                except:
                    print("Could not read chunk from: " + current_file)
                    pass
                if not chunk:
                    break
                #run every selected scanner over this chunk
                if chrome:
                    process_chrome_utma(chrome_pattern_utma, chunk)
                    process_chrome_utmb(chrome_pattern_utmb, chunk)
                    process_chrome_utmz(chrome_pattern_utmz, chunk)
                if firefox:
                    process_firefox_utma(moz_pattern_utma, chunk)
                    process_firefox_utmb(moz_pattern_utmb, chunk)
                    process_firefox_utmz(moz_pattern_utmz, chunk)
                if ie:
                    process_ie_utma(ie_utma_pattern, chunk)
                    process_ie_utmb(ie_utmb_pattern, chunk)
                    process_ie_utmz(ie_utmz_pattern, chunk)
                if apple:
                    process_apple_utma(apple_utma_pattern, chunk)
                    process_apple_utmb(apple_utmb_pattern, chunk)
                    process_apple_utmz(apple_utmz_pattern, chunk)
                if gif_cache:
                    process_utm_gif_UTF16(gif_cache_pattern_UTF16, chunk)
                    process_utm_gif_ASCII(gif_cache_pattern_ASCII, chunk)
                loop += 1
            file_object.close()
    #just one file was selected, process that
    if options.input_file:
        file_object = open(options.input_file, 'rb')
        current_file = options.input_file
        # Handle ewf files.
        if ewf_file:
            file_object = EwfImgInfo(options.input_file)
            off = 0
        while 1:
            if ewf_file:
                #NOTE(review): 'off' is advanced before the first read, so the
                #first maxfilesize bytes of the image are never scanned.
                off += maxfilesize
                chunk = file_object.read(off, maxfilesize)
            else:
                chunk = file_object.read(maxfilesize)
            if not chunk:
                break
            #run every selected scanner over this chunk
            if chrome:
                process_chrome_utma(chrome_pattern_utma, chunk)
                process_chrome_utmb(chrome_pattern_utmb, chunk)
                process_chrome_utmz(chrome_pattern_utmz, chunk)
            if firefox:
                process_firefox_utma(moz_pattern_utma,chunk)
                process_firefox_utmb(moz_pattern_utmb,chunk)
                process_firefox_utmz(moz_pattern_utmz,chunk)
            if ie:
                process_ie_utma(ie_utma_pattern,chunk)
                process_ie_utmb(ie_utmb_pattern,chunk)
                process_ie_utmz(ie_utmz_pattern,chunk)
            if apple:
                process_apple_utma(apple_utma_pattern,chunk)
                process_apple_utmb(apple_utmb_pattern,chunk)
                process_apple_utmz(apple_utmz_pattern,chunk)
            if gif_cache:
                process_utm_gif_UTF16(gif_cache_pattern_UTF16, chunk)
                process_utm_gif_ASCII(gif_cache_pattern_ASCII ,chunk)
            loop += 1
        file_object.close()
    #print the per-scanner summary counts
    print "\r\r************ Summary ************"
    if chrome:
        print "Chrome __utma count processed: " + str(chrome_utma_count)
        print "Chrome __utmb count processed: " + str(chrome_utmb_count)
        print "Chrome __utmz count found: " + str(chrome_utmz_count)
    if firefox:
        print "FireFox __utma count processed: " + str(ff_utma_count)
        print "FireFox __utmb count processed: " + str(ff_utmb_count)
        print "FireFox __utmz count found: " + str(ff_utmz_count)
        print "FireFox __utmz count processed: " + str(processed)
    if ie:
        print "IE __utma count processed: " + str(ie_utma_count)
        print "IE __utmb count processed: " + str(ie_utmb_count)
        print "IE __utmz count processed: " + str(ie_utmz_count)
    if apple:
        print "apple __utma count processed: " + str(apple_utma_count)
        print "apple __utmb count processed: " + str(apple_utmb_count)
        print "apple __utmz count processed: " + str(apple_utmz_count)
    if gif_cache:
        print "utm_gif? UTF16 cache processed: " + str(gif__UTF16_count)
        print "utm_gif? ASCII cache processed: " + str(gif_ASCII_count)
        output_utm_gif.close()
    #NOTE(review): 'apple' is missing from this condition even though the
    #reports are opened when only --apple is selected, so Safari-only runs
    #never explicitly close the report files.
    if chrome or firefox or ie:
        output_utma.close()
        output_utmb.close()
        output_utmz.close()
    #list any hits that could not be parsed
    if not_processed:
        print "Unable to process:"
        for item in not_processed:
            print item
    print "Total Entries Found: " + str(chrome_utma_count + chrome_utmb_count + chrome_utmz_count + ff_utma_count +
                                        ff_utmb_count + ff_utmz_count + processed + ie_utma_count + ie_utmb_count +
                                        ie_utmz_count + apple_utma_count + apple_utmb_count + apple_utmz_count +
                                        gif__UTF16_count + gif_ASCII_count)
|
mantarayforensics/mantaray
|
Tools/Python/ga_parser.py
|
Python
|
gpl-3.0
| 41,010
|
[
"VisIt"
] |
ac7db979c30d5b7892b562a5e350d3661354934f94c86792b5bd08768d3f9f7d
|
"""Support for package tracking sensors from 17track.net."""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, ATTR_LOCATION, CONF_PASSWORD, CONF_SCAN_INTERVAL,
CONF_USERNAME)
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle, slugify
_LOGGER = logging.getLogger(__name__)
ATTR_DESTINATION_COUNTRY = 'destination_country'
ATTR_FRIENDLY_NAME = 'friendly_name'
ATTR_INFO_TEXT = 'info_text'
ATTR_ORIGIN_COUNTRY = 'origin_country'
ATTR_PACKAGES = 'packages'
ATTR_PACKAGE_TYPE = 'package_type'
ATTR_STATUS = 'status'
ATTR_TRACKING_INFO_LANGUAGE = 'tracking_info_language'
ATTR_TRACKING_NUMBER = 'tracking_number'
CONF_SHOW_ARCHIVED = 'show_archived'
CONF_SHOW_DELIVERED = 'show_delivered'
DATA_PACKAGES = 'package_data'
DATA_SUMMARY = 'summary_data'
DEFAULT_ATTRIBUTION = 'Data provided by 17track.net'
DEFAULT_SCAN_INTERVAL = timedelta(minutes=10)
ENTITY_ID_TEMPLATE = 'package_{0}_{1}'
NOTIFICATION_DELIVERED_ID_SCAFFOLD = 'package_delivered_{0}'
NOTIFICATION_DELIVERED_TITLE = 'Package Delivered'
NOTIFICATION_DELIVERED_URL_SCAFFOLD = 'https://t.17track.net/track#nums={0}'
VALUE_DELIVERED = 'Delivered'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SHOW_ARCHIVED, default=False): cv.boolean,
vol.Optional(CONF_SHOW_DELIVERED, default=False): cv.boolean,
})
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Configure the platform and add the sensors.

    Logs in to 17track.net with the configured credentials, performs an
    initial data refresh, then creates one summary sensor per package status
    plus one sensor per tracked package.  Returns without adding entities if
    the login fails.
    """
    # Imported here so the dependency is only loaded when the platform is used.
    from py17track import Client
    from py17track.errors import SeventeenTrackError
    websession = aiohttp_client.async_get_clientsession(hass)
    client = Client(websession)
    try:
        login_result = await client.profile.login(
            config[CONF_USERNAME], config[CONF_PASSWORD])
        if not login_result:
            # The API call succeeded but the credentials were rejected.
            _LOGGER.error('Invalid username and password provided')
            return
    except SeventeenTrackError as err:
        _LOGGER.error('There was an error while logging in: %s', err)
        return
    scan_interval = config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
    # Shared data coordinator for all sensors of this platform.
    data = SeventeenTrackData(
        hass, client, async_add_entities, scan_interval,
        config[CONF_SHOW_ARCHIVED], config[CONF_SHOW_DELIVERED])
    # Prime the data before creating entities so the first state is populated.
    await data.async_update()
    sensors = []
    for status, quantity in data.summary.items():
        sensors.append(SeventeenTrackSummarySensor(data, status, quantity))
    for package in data.packages:
        sensors.append(SeventeenTrackPackageSensor(data, package))
    async_add_entities(sensors, True)
class SeventeenTrackSummarySensor(Entity):
    """Define a summary sensor.

    One instance exists per package status (e.g. "In Transit"); its state is
    the number of packages currently in that status, and the matching
    packages are exposed through the state attributes.
    """
    def __init__(self, data, status, initial_state):
        """Initialize."""
        self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
        # Shared SeventeenTrackData coordinator.
        self._data = data
        # Package count for this status, as provided by the initial fetch.
        self._state = initial_state
        # The package status this sensor summarizes.
        self._status = status

    @property
    def available(self):
        """Return whether the entity is available."""
        return self._state is not None

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        return self._attrs

    @property
    def icon(self):
        """Return the icon."""
        return 'mdi:package'

    @property
    def name(self):
        """Return the name."""
        return 'Seventeentrack Packages {0}'.format(self._status)

    @property
    def state(self):
        """Return the state."""
        return self._state

    @property
    def unique_id(self):
        """Return a unique, HASS-friendly identifier for this entity."""
        return 'summary_{0}_{1}'.format(
            self._data.account_id, slugify(self._status))

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return 'packages'

    async def async_update(self):
        """Update the sensor."""
        await self._data.async_update()
        # Collect the packages currently in this sensor's status so they can
        # be exposed as a state attribute.
        package_data = []
        for package in self._data.packages:
            if package.status != self._status:
                continue
            package_data.append({
                ATTR_FRIENDLY_NAME: package.friendly_name,
                ATTR_INFO_TEXT: package.info_text,
                ATTR_STATUS: package.status,
                ATTR_TRACKING_NUMBER: package.tracking_number,
            })
        if package_data:
            self._attrs[ATTR_PACKAGES] = package_data
        # Refresh the count from the coordinator's summary.
        self._state = self._data.summary.get(self._status)
class SeventeenTrackPackageSensor(Entity):
    """Define an individual package sensor."""

    def __init__(self, data, package):
        """Initialize from the shared data handler and one package record."""
        self._attrs = {
            ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,
            ATTR_DESTINATION_COUNTRY: package.destination_country,
            ATTR_INFO_TEXT: package.info_text,
            ATTR_LOCATION: package.location,
            ATTR_ORIGIN_COUNTRY: package.origin_country,
            ATTR_PACKAGE_TYPE: package.package_type,
            ATTR_TRACKING_INFO_LANGUAGE: package.tracking_info_language,
            ATTR_TRACKING_NUMBER: package.tracking_number,
        }
        self._data = data
        self._friendly_name = package.friendly_name
        self._state = package.status
        self._tracking_number = package.tracking_number

    @property
    def available(self):
        """Return whether the package is still present in the data set."""
        return bool([
            p for p in self._data.packages
            if p.tracking_number == self._tracking_number
        ])

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        return self._attrs

    @property
    def icon(self):
        """Return the icon."""
        return 'mdi:package'

    @property
    def name(self):
        """Return the name, preferring the friendly name when set."""
        name = self._friendly_name
        if not name:
            name = self._tracking_number
        return 'Seventeentrack Package: {0}'.format(name)

    @property
    def state(self):
        """Return the state (the package's last known status)."""
        return self._state

    @property
    def unique_id(self):
        """Return a unique, HASS-friendly identifier for this entity."""
        return ENTITY_ID_TEMPLATE.format(
            self._data.account_id, self._tracking_number)

    async def async_update(self):
        """Update the sensor from the shared data handler."""
        await self._data.async_update()

        if not self._data.packages:
            return

        try:
            package = next((
                p for p in self._data.packages
                if p.tracking_number == self._tracking_number))
        except StopIteration:
            # If the package no longer exists in the data, log a message and
            # delete this entity:
            _LOGGER.info(
                'Deleting entity for stale package: %s', self._tracking_number)
            reg = await self.hass.helpers.entity_registry.async_get_registry()
            self.hass.async_create_task(reg.async_remove(self.entity_id))
            self.hass.async_create_task(self.async_remove())
            return

        # If the user has elected to not see delivered packages and one gets
        # delivered, post a notification:
        if package.status == VALUE_DELIVERED and not self._data.show_delivered:
            _LOGGER.info('Package delivered: %s', self._tracking_number)
            self.hass.components.persistent_notification.create(
                # Fixed typo in user-facing text: "infomation" -> "information"
                'Package Delivered: {0}<br />'
                'Visit 17.track for more information: {1}'
                ''.format(
                    self._tracking_number,
                    NOTIFICATION_DELIVERED_URL_SCAFFOLD.format(
                        self._tracking_number)),
                title=NOTIFICATION_DELIVERED_TITLE,
                notification_id=NOTIFICATION_DELIVERED_ID_SCAFFOLD.format(
                    self._tracking_number))
            return

        self._attrs.update({
            ATTR_INFO_TEXT: package.info_text,
            ATTR_LOCATION: package.location,
        })
        self._state = package.status
class SeventeenTrackData:
    """Define a data handler for 17track.net."""

    def __init__(
            self, hass, client, async_add_entities, scan_interval,
            show_archived, show_delivered):
        """Initialize."""
        self._async_add_entities = async_add_entities
        self._client = client
        self._hass = hass
        self._scan_interval = scan_interval
        self._show_archived = show_archived
        self.account_id = client.profile.account_id
        self.packages = []
        self.show_delivered = show_delivered
        self.summary = {}
        # Throttle the shared update so multiple sensors using this handler
        # do not hit the 17track API more often than scan_interval allows.
        self.async_update = Throttle(self._scan_interval)(self._async_update)

    async def _async_update(self):
        """Get updated data from 17track.net."""
        from py17track.errors import SeventeenTrackError

        try:
            packages = await self._client.profile.packages(
                show_archived=self._show_archived)
            _LOGGER.debug('New package data received: %s', packages)
            if not self.show_delivered:
                packages = [p for p in packages if p.status != VALUE_DELIVERED]
            # Add new packages:
            # self.packages is empty on the very first update (platform setup
            # creates the initial sensors), so entities are only added here on
            # subsequent refreshes.
            to_add = set(packages) - set(self.packages)
            if self.packages and to_add:
                self._async_add_entities([
                    SeventeenTrackPackageSensor(self, package)
                    for package in to_add
                ], True)
            # Remove archived packages from the entity registry:
            to_remove = set(self.packages) - set(packages)
            reg = await self._hass.helpers.entity_registry.async_get_registry()
            for package in to_remove:
                entity_id = reg.async_get_entity_id(
                    'sensor', 'seventeentrack',
                    ENTITY_ID_TEMPLATE.format(
                        self.account_id, package.tracking_number))
                if not entity_id:
                    continue
                self._hass.async_create_task(reg.async_remove(entity_id))
            self.packages = packages
        except SeventeenTrackError as err:
            _LOGGER.error('There was an error retrieving packages: %s', err)
            self.packages = []

        try:
            self.summary = await self._client.profile.summary(
                show_archived=self._show_archived)
            _LOGGER.debug('New summary data received: %s', self.summary)
        except SeventeenTrackError as err:
            _LOGGER.error('There was an error retrieving the summary: %s', err)
            self.summary = {}
|
jnewland/home-assistant
|
homeassistant/components/seventeentrack/sensor.py
|
Python
|
apache-2.0
| 10,914
|
[
"VisIt"
] |
a71dd8af7f48f77e7c4dea91934a1b0f44b2d4ab1639ae4a2c23c8747cce7e13
|
#!/usr/bin/env python
# Copyright 2014-2018,2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Timothy Berkelbach <tim.berkelbach@gmail.com>
#
'''
parse CP2K format
'''
import re
from pyscf.gto.basis import parse_nwchem
from pyscf import __config__
DISABLE_EVAL = getattr(__config__, 'DISABLE_EVAL', False)
MAXL = 8
def parse(string, optimize=False):
    '''Convert basis text in CP2K format into the internal basis format
    which can be assigned to :attr:`Mole.basis`.

    Text after a ``#`` is treated as a comment and discarded; lines
    beginning with ``END`` or ``BASIS`` are skipped.
    '''
    stripped = (raw.split('#')[0].strip() for raw in string.splitlines())
    bastxt = [
        line for line in stripped
        if line and not line.startswith('END') and not line.startswith('BASIS')
    ]
    return _parse(bastxt, optimize)
def load(basisfile, symb, optimize=False):
    """Load and parse the CP2K basis set for element *symb* from *basisfile*."""
    return _parse(search_seg(basisfile, symb), optimize)
def _parse(blines, optimize=False):
    '''Convert pre-cleaned CP2K basis lines into the internal basis format:
    a list of ``[l, (exp, c1, ...), ...]`` shells ordered by angular
    momentum ``l``.

    Note: ``blines`` is consumed destructively via ``pop(0)``.
    '''
    # First line is the element/basis-name header; intentionally unused.
    header_ln = blines.pop(0)  # noqa: F841
    # Number of basis-set blocks that follow.
    nsets = int(blines.pop(0))
    basis = []
    for n in range(nsets):
        # Block header of integers; the first field is not used here.
        comp = [int(p) for p in blines.pop(0).split()]
        lmin, lmax, nexps, ncontractions = comp[1], comp[2], comp[3], comp[4:]
        # One shell accumulator per angular momentum in [lmin, lmax].
        basis_n = [[l] for l in range(lmin,lmax+1)]
        for nexp in range(nexps):
            line = blines.pop(0)
            dat = line.split()
            try:
                bfun = [float(x) for x in dat]
            except ValueError:
                if DISABLE_EVAL:
                    raise ValueError('Failed to parse basis %s' % line)
                else:
                    # SECURITY: eval on basis-file text; only trusted basis
                    # files should be parsed with DISABLE_EVAL unset.
                    # NOTE(review): eval of a comma-joined row yields a tuple,
                    # which has no pop() — verify this fallback against real
                    # inputs that actually reach it.
                    bfun = eval(','.join(dat))
            # First column is the exponent; remaining columns are the
            # contraction coefficients, grouped per angular momentum.
            exp = bfun.pop(0)
            for i,l in enumerate(range(lmin,lmax+1)):
                cl = [exp]
                for c in range(ncontractions[i]):
                    cl.append(bfun.pop(0))
                basis_n[i].append(tuple(cl))
        basis.extend(basis_n)
    # Regroup shells so that all l=0 come first, then l=1, etc.
    basis_sorted = []
    for l in range(MAXL):
        basis_sorted.extend([b for b in basis if b[0] == l])
    if optimize:
        basis_sorted = parse_nwchem.optimize_contraction(basis_sorted)
    basis_sorted = parse_nwchem.remove_zero(basis_sorted)
    return basis_sorted
BASIS_SET_DELIMITER = re.compile('# *BASIS SET.*\n')

def search_seg(basisfile, symb):
    """Return the non-blank, non-END lines of the basis entry for *symb*."""
    with open(basisfile, 'r') as fin:
        segments = BASIS_SET_DELIMITER.split(fin.read())
    # Text before the first "# BASIS SET" header is not a basis entry.
    for segment in segments[1:]:
        tokens = segment.split(None, 1)
        if not tokens or tokens[0] != symb:
            continue
        # Strip each line, dropping blanks and END markers.
        return [ln.strip() for ln in segment.splitlines()
                if ln.strip() and 'END' not in ln]
    raise RuntimeError('Basis not found for %s in %s' % (symb, basisfile))
|
sunqm/pyscf
|
pyscf/pbc/gto/basis/parse_cp2k.py
|
Python
|
apache-2.0
| 3,189
|
[
"CP2K",
"PySCF"
] |
e65d282fb1bf60b4ec82fc2dd09a99557ea1426c70c8a5c59ca42afd6eb46531
|
## Without order, everything goes to chaos
## Some applications that can be made much easier by pre-sorting
import math
## INTERVALS PROBLEM
## Suppose that n closed intervals [a[i], b[i]] on the real line are given.
## Find the maximal k such that there exists a point covered by k intervals,
## i.e., the maximal number of layers. The complexity should be of nlogn
## e.g. for inputs [(0, 1), (0.1, 0.7), (0.3, 0.5)], k = 3
## Inspirations: Think of intervals as layers of an onion,
## when you come across a starting point, you enter another new layer,
## when you come across a ending point, you leave one previous layer.
## The layers could have overlaps, overlaps happen when you come across
## another layer before leaving the current one
## SORT all starting/ending points together, come across them in the order
## and count the current layers during the process. The max counting is
## the max overlaps
def max_intervals(intervals):
    """Return the maximal number of closed intervals covering one point.

    All endpoints are swept in sorted order; an 'L' (left) endpoint enters
    a layer, an 'R' (right) endpoint leaves one.  The running layer count's
    maximum is the answer.  O(n log n) due to the sort.
    """
    points = sum([[(L, 'L'), (R, 'R')] for L, R in intervals], [])
    # Sort by coordinate only.  The Python-2-only tuple-parameter lambda
    # (``lambda (pt, label): pt``) was replaced with an indexing key that is
    # valid on both Python 2 and 3 and keeps the same stable tie order.
    labels = [label for pt, label in sorted(points, key=lambda p: p[0])]
    max_overlap = overlap = 0
    for label in labels:
        overlap += 1 if label == 'L' else -1
        if overlap > max_overlap:
            max_overlap = overlap
    return max_overlap
## POLYGONAL ARC PROBLEM
## Assume that n points in the plane are given. Find a polygonal arc
## with n-1 sides whose vertices are the given points, and whose sides
## do not intersect. Adjacent sides may form a 180 degree angle.
## The number of operations should be of order nlogn
## SOLUTION: sort all the points with respect to the x-coordinate; when
## x-coordinates are equal, take the y-coordinate into account, then
## connect all vertices by line segments (essentially in the lexicographic order)
## THE CONSTRAINT is: never go back along x axis
def arc(points):
    """Order the points so that joining consecutive ones gives a
    non-self-intersecting polygonal arc (lexicographic x, then y order)."""
    return sorted(points)
## POLYGON PROBLEM (NOT NECESSARILY CONVEX)
## For a given set of points in the plane, find a polygon having these
## points as vertices.
## SOLUTION: Same as for the other similar problems, FIND a specific order,
## and connect the points in that order
## For Polygon problem: take the leftmost point (with minimal x-coordinate),
## consider all the rays starting from this point and going through all other points.
## Sort these rays according to their (slopes, distances to initial point).
## The polygon goes from the initial point along the ray with minimal slope,
## then visits all the points in the order chosen, returning via the ray
## with maximal slope (where points are visited in the reversed order).
## In other words, the polygon is formed in counter-clockwise order
def polygon(points):
    """Order the points so that connecting them in sequence (and closing
    back to the first) traces a simple polygon.

    The leftmost point is the origin; remaining points are visited by
    increasing slope of the ray from the origin, ties broken by distance,
    which walks the polygon counter-clockwise.
    """
    def slope(p1, p2):
        # Slope of the ray p1 -> p2; vertical rays sort last via +inf.
        x1, y1 = p1
        x2, y2 = p2
        return float(y2-y1) / (x2-x1) if x1 != x2 else float('inf')

    def dist(p1, p2):
        # Euclidean distance between p1 and p2.
        x1, y1 = p1
        x2, y2 = p2
        return math.sqrt((x2-x1)**2 + (y2-y1)**2)

    # The Python-2-only tuple-parameter lambda (``lambda (x, y): x``) was
    # replaced with an indexing key valid on Python 2 and 3; min() still
    # returns the first point having the minimal x-coordinate.
    origin = min(points, key=lambda p: p[0])
    points = sorted(points, key=lambda pt: (slope(origin, pt), dist(origin, pt)))
    return points
## CONVEX HULL PROBLEM
## Assume n points in the plane. Find their convex hull, i.e., the minimal convex
## polygon that contains all the points. e.g., A rubber band put on
## the nails is the convex hull of the nails inside it. The number of operations should
## be nlogn.
## A major difference between a convex hull of a set of points and a polygon of them is that
## for a convex hull, not all the points need to be the vertexes of the hull (they could be inside)
## HINT: Order all the points according to one of the orderings (i.e., (x,y) or (slope, distance))
## in the above two problems. Then construct the convex hull considering the points one by one
## To maintain information about the current convex hull, it is useful to use a deque. However,
## when the points are ordered according to their slopes, it is not necessary.
############################################################################################
####################################### Algorithms for Convex Hull #########################
## Several Convex Hull Algorithms from WIKI Page
## http://en.wikipedia.org/wiki/Convex_hull_algorithms
## 1. Akl-Toussaint heuristic (not the minimal convex hull):
## (1) find the convex quadrilateral by finding the 4 points with minx, maxx, miny, maxy
## that defines the boundary for other points except these four
## (2) The boundary can be defined as irregular shape by adding points with smallest and largest
## sums of x- and y- coordinates as well as smallest and largest differences of x- and y- coords
## the inside can be safely discarded
#############################################################################################
## Implementation of Andrew's monotone chain convex hull algorithm
## (1) sort points in lexicographic order according to x- and y- coords
## (2) construct upper and lower hulls of the points each in O(n) time in COUNTER-CLOCKWISE order
## The upper and lower hulls must be found separately because they are using different
## lexigraphic order of the points (lower uses the left->right order, and upper uses the right->left orders)
## even the convex hull boundary can be found in the same counter-clockwise order
def convex_hull(points):
    """
    Input: iterable sequence of (x, y) pairs representing 2D points
    Output: list of vertices of the convex hull in counter-clockwise order,
    starting from the vertex with the lexicographically smallest coord.
    Implements Andrew's monotone chain algorithm in O(n log n).
    """
    def turn(o, a, b):
        # z-component of the 3D cross product of vectors OA and OB:
        # positive for a counter-clockwise turn, negative for clockwise,
        # zero when O, A, B are collinear (right-hand rule).
        return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])

    def half_hull(pts):
        # Build one monotone chain, discarding any vertex that would make
        # a clockwise (or collinear) turn with the next point.
        chain = []
        for p in pts:
            while len(chain) >= 2 and turn(chain[-2], chain[-1], p) <= 0:
                chain.pop()
            chain.append(p)
        return chain

    # Deduplicate and sort lexicographically.
    points = sorted(set(points))
    if len(points) <= 1:
        return points

    # Lower hull scans left-to-right, upper hull right-to-left; both keep
    # counter-clockwise orientation.
    lower_hull = half_hull(points)
    upper_hull = half_hull(reversed(points))

    # The last vertex of each chain is the first vertex of the other,
    # so drop it from both before concatenating.
    return lower_hull[:-1] + upper_hull[:-1]
## Test
if __name__ == '__main__':
    ## test max_intervals
    assert max_intervals([(0, 1), (0.1, 0.7), (0.3, 0.5)]) == 3
    assert max_intervals([(0, 2), (1, 5), (3, 6), (4, 7)]) == 3
    ## test arc
    line_points = [(0, 1), (0, 3), (0, 2)]
    circle_points = [(math.cos(i*math.pi/4), math.sin(i*math.pi/4)) for i in range(8)]
    circle_with_origin_points = circle_points + [(0, 0)]
    assert arc(line_points) == [(0, 1), (0, 2), (0, 3)]
    assert arc(circle_points) == sorted(circle_points)
    ## test polygon
    assert polygon(line_points) == [(0, 1), (0, 2), (0, 3)]
    #a whole pie
    assert polygon(circle_points) == [
        (-0.7071067811865477, -0.7071067811865475),
        (-1.8369701987210297e-16, -1.0),
        (0.7071067811865475, -0.7071067811865477),
        (1.0, 0.0),
        (0.7071067811865476, 0.7071067811865475),
        (6.123233995736766e-17, 1.0),
        (-0.7071067811865475, 0.7071067811865476),
        (-1.0, 1.2246467991473532e-16)]
    #a bitten pie - one piece missing
    assert polygon(circle_with_origin_points) == [
        (-0.7071067811865477, -0.7071067811865475),
        (-1.8369701987210297e-16, -1.0),
        (0.7071067811865475, -0.7071067811865477),
        (0, 0),
        (1.0, 0.0),
        (0.7071067811865476, 0.7071067811865475),
        (6.123233995736766e-17, 1.0),
        (-0.7071067811865475, 0.7071067811865476),
        (-1.0, 1.2246467991473532e-16)]
    ## test convex_hull
    assert convex_hull(line_points) == [(0, 1), (0, 3)]
    assert convex_hull(circle_points) == [
        (-1.0, 1.2246467991473532e-16),
        (-0.7071067811865477, -0.7071067811865475),
        (-1.8369701987210297e-16, -1.0),
        (0.7071067811865475, -0.7071067811865477),
        (1.0, 0.0),
        (0.7071067811865476, 0.7071067811865475),
        (6.123233995736766e-17, 1.0),
        (-0.7071067811865475, 0.7071067811865476)]
    assert convex_hull(circle_with_origin_points) == [
        (-1.0, 1.2246467991473532e-16),
        (-0.7071067811865477, -0.7071067811865475),
        (-1.8369701987210297e-16, -1.0),
        (0.7071067811865475, -0.7071067811865477),
        (1.0, 0.0),
        (0.7071067811865476, 0.7071067811865475),
        (6.123233995736766e-17, 1.0),
        (-0.7071067811865475, 0.7071067811865476)]
    # The Python-2-only print statement was replaced with the call form,
    # which is valid on both Python 2 and 3.
    print('all tests pass')
|
bharcode/Coursera
|
python_algorithms/sorting/sort_applications.py
|
Python
|
gpl-2.0
| 9,440
|
[
"VisIt"
] |
4a0513f3853ce9d0562766ba4fbcf4b7e996328c7a0a828a182869c591b57da9
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RFdbInfiniummethylationHg18(RPackage):
    """Compiled HumanMethylation27 and HumanMethylation450 annotations"""

    # This is a bioconductor package but there is no available git repository
    homepage = "http://bioconductor.org/packages/release/data/annotation/html/FDb.InfiniumMethylation.hg18.html"
    url      = "http://bioconductor.org/packages/release/data/annotation/src/contrib/FDb.InfiniumMethylation.hg18_2.2.0.tar.gz"

    # Release tarball published on Bioconductor, pinned by checksum.
    version('2.2.0', sha256='4a9028ac03c11fffbab731ea750bc7f9b0884fc43c6a8dac6eb2c644e4c79f6f')

    # R runtime/build dependencies.
    depends_on('r@2.10:', type=('build', 'run'))
    depends_on('r-genomicfeatures@1.7.22:', type=('build', 'run'))
    depends_on('r-txdb-hsapiens-ucsc-hg18-knowngene', type=('build', 'run'))
    depends_on('r-org-hs-eg-db', type=('build', 'run'))
    depends_on('r-annotationdbi', type=('build', 'run'))
    depends_on('r-biostrings', type=('build', 'run'))
|
iulian787/spack
|
var/spack/repos/builtin/packages/r-fdb-infiniummethylation-hg18/package.py
|
Python
|
lgpl-2.1
| 1,117
|
[
"Bioconductor"
] |
e53e32de0fca6aa374f9018d3c1b4e733d9302fe09bd96ec48b961678012e856
|
# Copyright (c) 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import mock
from six.moves.urllib.parse import urlparse
from swift.common.swob import Request, Response, HTTPUnauthorized
from swift.common.middleware import staticweb
# Canned per-container info used by mock_get_container_info(): maps the
# container name (the third path component of /v1/a/<container>) to a dict
# with an optional 'status' and the staticweb 'meta' settings
# (web-index, web-error, web-listings, web-listings-css, ...).
meta_map = {
    'c1': {'status': 401},
    'c2': {},
    'c3': {'meta': {'web-index': 'index.html',
                    'web-listings': 't'}},
    'c3b': {'meta': {'web-index': 'index.html',
                     'web-listings': 't'}},
    'c4': {'meta': {'web-index': 'index.html',
                    'web-error': 'error.html',
                    'web-listings': 't',
                    'web-listings-css': 'listing.css',
                    'web-directory-type': 'text/dir'}},
    'c5': {'meta': {'web-index': 'index.html',
                    'web-error': 'error.html',
                    'web-listings': 't',
                    'web-listings-css': 'listing.css'}},
    'c6': {'meta': {'web-listings': 't',
                    'web-error': 'error.html'}},
    'c6b': {'meta': {'web-listings': 't',
                     'web-listings-label': 'foo'}},
    'c7': {'meta': {'web-listings': 'f',
                    'web-error': 'error.html'}},
    'c8': {'meta': {'web-error': 'error.html',
                    'web-listings': 't',
                    'web-listings-css':
                    'http://localhost/stylesheets/listing.css'}},
    'c9': {'meta': {'web-error': 'error.html',
                    'web-listings': 't',
                    'web-listings-css':
                    '/absolute/listing.css'}},
    'c10': {'meta': {'web-listings': 't'}},
    'c11': {'meta': {'web-index': 'index.html'}},
    'c11a': {'meta': {'web-index': 'index.html',
                      'web-directory-type': 'text/directory'}},
    'c12': {'meta': {'web-index': 'index.html',
                     'web-error': 'error.html'}},
    'c13': {'meta': {'web-listings': 'f',
                     'web-listings-css': 'listing.css'}},
}
def mock_get_container_info(env, app, swift_source='SW'):
    """Stand-in for staticweb.get_container_info, backed by meta_map."""
    # The container name is the fourth path component: /v1/<account>/<cont>.
    path_parts = env['PATH_INFO'].rstrip('/').split('/')
    info = meta_map[path_parts[3]]
    # Fill in defaults without clobbering values a test set explicitly.
    info.setdefault('status', 200)
    info.setdefault('read_acl', '.r:*')
    return info
class FakeApp(object):
    """WSGI stub returning canned responses, keyed on PATH_INFO, for the
    staticweb middleware tests."""

    def __init__(self, status_headers_body_iter=None):
        # Count of requests seen; tests inspect this to assert how many
        # backend calls staticweb made.
        self.calls = 0
        # Flag flipped when the c4 container listing is requested.
        self.get_c4_called = False

    def __call__(self, env, start_response):
        """Dispatch on PATH_INFO and return the scripted response."""
        self.calls += 1
        # Honor an installed authorize callback (see FakeAuthFilter).
        if 'swift.authorize' in env:
            resp = env['swift.authorize'](Request(env))
            if resp:
                return resp(env, start_response)
        if env['PATH_INFO'] == '/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1':
            return Response(
                status='412 Precondition Failed')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a':
            return Response(status='401 Unauthorized')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c1':
            return Response(status='401 Unauthorized')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c2':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c2/one.txt':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/index.html':
            return Response(status='200 Ok', body='''
<html>
<body>
<h1>Test main index.html file.</h1>
<p>Visit <a href="subdir">subdir</a>.</p>
<p>Don't visit <a href="subdir2/">subdir2</a> because it doesn't really
exist.</p>
<p>Visit <a href="subdir3">subdir3</a>.</p>
<p>Visit <a href="subdir3/subsubdir">subdir3/subsubdir</a>.</p>
</body>
</html>
''')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3b':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3b/index.html':
            # Index object that exists but has an empty body (204).
            resp = Response(status='204 No Content')
            resp.app_iter = iter([])
            return resp(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir3/subsubdir':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir3/subsubdir/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir3/subsubdir/index.html':
            return Response(status='200 Ok', body='index file')(env,
                                                                start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdirx/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdirx/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdiry/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdiry/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdirz':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdirz/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/unknown':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/unknown/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4':
            self.get_c4_called = True
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/one.txt':
            return Response(
                status='200 Ok',
                headers={'x-object-meta-test': 'value'},
                body='1')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/two.txt':
            return Response(status='503 Service Unavailable')(env,
                                                              start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/subdir/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/subdir/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/unknown':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/unknown/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/404error.html':
            return Response(status='200 Ok', body='''
<html>
<body style="background: #000000; color: #ffaaaa">
<p>Chrome's 404 fancy-page sucks.</p>
</body>
</html>
'''.strip())(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c5':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c5/index.html':
            return Response(status='503 Service Unavailable')(env,
                                                              start_response)
        elif env['PATH_INFO'] == '/v1/a/c5/503error.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c5/unknown':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c5/unknown/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c5/404error.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c6':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c6b':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c6/subdir':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c6/401error.html':
            return Response(status='200 Ok', body='''
<html>
<body style="background: #000000; color: #ffaaaa">
<p>Hey, you're not authorized to see this!</p>
</body>
</html>
'''.strip())(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c7', '/v1/a/c7/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c7/404error.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c7/401error.html':
            return Response(status='200 Ok', body='''
<html>
<body style="background: #000000; color: #ffaaaa">
<p>Hey, you're not authorized to see this!</p>
</body>
</html>
'''.strip())(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c8', '/v1/a/c8/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c8/subdir/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c9', '/v1/a/c9/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c9/subdir/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c10', '/v1/a/c10/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c10/\xe2\x98\x83/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c10/\xe2\x98\x83/\xe2\x98\x83/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c11', '/v1/a/c11/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11/subdir/':
            return Response(status='200 Ok', headers={
                'Content-Type': 'application/directory'})(
                    env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11/subdir/index.html':
            return Response(status='200 Ok', body='''
<html>
<body>
<h2>c11 subdir index</h2>
</body>
</html>
'''.strip())(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11/subdir2/':
            return Response(status='200 Ok', headers={'Content-Type':
                            'application/directory'})(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11/subdir2/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c11a', '/v1/a/c11a/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir/':
            return Response(status='200 Ok', headers={'Content-Type':
                            'text/directory'})(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir2/':
            return Response(status='200 Ok', headers={'Content-Type':
                            'application/directory'})(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir2/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir3/':
            return Response(status='200 Ok', headers={'Content-Type':
                            'not_a/directory'})(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir3/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c12/index.html':
            return Response(status='200 Ok', body='index file')(env,
                                                                start_response)
        elif env['PATH_INFO'] == '/v1/a/c12/200error.html':
            return Response(status='200 Ok', body='error file')(env,
                                                                start_response)
        else:
            # Any path a test has not scripted is a test bug: fail loudly.
            raise Exception('Unknown path %r' % env['PATH_INFO'])

    def listing(self, env, start_response):
        """Return a canned JSON container listing, varying with the
        container path and the query string (prefix/delimiter/limit)."""
        headers = {'x-container-read': '.r:*'}
        if ((env['PATH_INFO'] in (
                '/v1/a/c3', '/v1/a/c4', '/v1/a/c8', '/v1/a/c9'))
                and (env['QUERY_STRING'] ==
                     'delimiter=/&prefix=subdir/')):
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'X-Container-Read': '.r:*',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '''
[{"name":"subdir/1.txt",
"hash":"5f595114a4b3077edfac792c61ca4fe4", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.709100"},
{"name":"subdir/2.txt",
"hash":"c85c1dcd19cf5cbac84e6043c31bb63e", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.734140"},
{"subdir":"subdir3/subsubdir/"}]
'''.strip()
        elif env['PATH_INFO'] == '/v1/a/c3' and env['QUERY_STRING'] == \
                'delimiter=/&prefix=subdiry/':
            # Empty listing for a prefix with no matching objects.
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'X-Container-Read': '.r:*',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '[]'
        elif env['PATH_INFO'] == '/v1/a/c3' and env['QUERY_STRING'] == \
                'limit=1&delimiter=/&prefix=subdirz/':
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'X-Container-Read': '.r:*',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '''
[{"name":"subdirz/1.txt",
"hash":"5f595114a4b3077edfac792c61ca4fe4", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.709100"}]
'''.strip()
        elif env['PATH_INFO'] == '/v1/a/c6' and env['QUERY_STRING'] == \
                'limit=1&delimiter=/&prefix=subdir/':
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'X-Container-Read': '.r:*',
                            'X-Container-Web-Listings': 't',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '''
[{"name":"subdir/1.txt",
"hash":"5f595114a4b3077edfac792c61ca4fe4", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.709100"}]
'''.strip()
        elif env['PATH_INFO'] == '/v1/a/c10' and (
                env['QUERY_STRING'] ==
                'delimiter=/&prefix=%E2%98%83/' or
                env['QUERY_STRING'] ==
                'delimiter=/&prefix=%E2%98%83/%E2%98%83/'):
            # Non-ASCII (snowman) prefixes, URL-encoded in the query string.
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'X-Container-Read': '.r:*',
                            'X-Container-Web-Listings': 't',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '''
[{"name":"\u2603/\u2603/one.txt",
"hash":"73f1dd69bacbf0847cc9cffa3c6b23a1", "bytes":22,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.709100"},
{"subdir":"\u2603/\u2603/"}]
'''.strip()
        elif 'prefix=' in env['QUERY_STRING']:
            # Any other prefix request: nothing matches.
            return Response(status='204 No Content')(env, start_response)
        else:
            # Full (unfiltered) container listing.
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '''
[{"name":"401error.html",
"hash":"893f8d80692a4d3875b45be8f152ad18", "bytes":110,
"content_type":"text/html",
"last_modified":"2011-03-24T04:27:52.713710"},
{"name":"404error.html",
"hash":"62dcec9c34ed2b347d94e6ca707aff8c", "bytes":130,
"content_type":"text/html",
"last_modified":"2011-03-24T04:27:52.720850"},
{"name":"index.html",
"hash":"8b469f2ca117668a5131fe9ee0815421", "bytes":347,
"content_type":"text/html",
"last_modified":"2011-03-24T04:27:52.683590"},
{"name":"listing.css",
"hash":"7eab5d169f3fcd06a08c130fa10c5236", "bytes":17,
"content_type":"text/css",
"last_modified":"2011-03-24T04:27:52.721610"},
{"name":"one.txt", "hash":"73f1dd69bacbf0847cc9cffa3c6b23a1",
"bytes":22, "content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.722270"},
{"name":"subdir/1.txt",
"hash":"5f595114a4b3077edfac792c61ca4fe4", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.709100"},
{"name":"subdir/2.txt",
"hash":"c85c1dcd19cf5cbac84e6043c31bb63e", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.734140"},
{"name":"subdir/\u2603.txt",
"hash":"7337d028c093130898d937c319cc9865", "bytes":72981,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.735460"},
{"name":"subdir2", "hash":"d41d8cd98f00b204e9800998ecf8427e",
"bytes":0, "content_type":"text/directory",
"last_modified":"2011-03-24T04:27:52.676690"},
{"name":"subdir3/subsubdir/index.html",
"hash":"04eea67110f883b1a5c97eb44ccad08c", "bytes":72,
"content_type":"text/html",
"last_modified":"2011-03-24T04:27:52.751260"},
{"name":"two.txt", "hash":"10abb84c63a5cff379fdfd6385918833",
"bytes":22, "content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.825110"},
{"name":"\u2603/\u2603/one.txt",
"hash":"73f1dd69bacbf0847cc9cffa3c6b23a1", "bytes":22,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.935560"}]
'''.strip()
        return Response(status='200 Ok', headers=headers,
                        body=body)(env, start_response)
class FakeAuthFilter(object):
    """Minimal stand-in for an auth middleware.

    Installs a ``swift.authorize`` callback into the WSGI environment that
    can be configured to reject object requests (paths deeper than the
    container) and/or container listings (paths exactly at the container).
    """

    def __init__(self, app, deny_objects=False, deny_listing=False):
        self.app = app
        self.deny_objects = deny_objects
        self.deny_listing = deny_listing

    def authorize(self, req):
        """Return an HTTPUnauthorized for denied paths, None (allow) otherwise."""
        depth = len(req.path.strip('/').split('/'))
        denied = ((self.deny_objects and depth > 3)
                  or (self.deny_listing and depth == 3))
        if denied:
            return HTTPUnauthorized()

    def __call__(self, env, start_response):
        # Expose the authorize hook the way real auth middleware would.
        env['swift.authorize'] = self.authorize
        return self.app(env, start_response)
class TestStaticWeb(unittest.TestCase):
    """Functional tests for the staticweb middleware against FakeApp's
    canned containers (c1..c13), wrapped in FakeAuthFilter so every
    request carries a swift.authorize callback, as it would behind a
    real auth middleware.
    """
    def setUp(self):
        # Filter chain: FakeAuthFilter -> staticweb -> FakeApp; container
        # metadata lookups are stubbed with the canned mock values.
        self.app = FakeApp()
        self.test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({})(self.app))
        self._orig_get_container_info = staticweb.get_container_info
        staticweb.get_container_info = mock_get_container_info
    def tearDown(self):
        # Undo the monkey-patch installed in setUp().
        staticweb.get_container_info = self._orig_get_container_info
    def test_app_set(self):
        app = FakeApp()
        sw = staticweb.filter_factory({})(app)
        self.assertEqual(sw.app, app)
    def test_conf_set(self):
        conf = {'blah': 1}
        sw = staticweb.filter_factory(conf)(FakeApp())
        self.assertEqual(sw.conf, conf)
    def test_root(self):
        resp = Request.blank('/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
    def test_version(self):
        resp = Request.blank('/v1').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 412)
    def test_account(self):
        resp = Request.blank('/v1/a').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 401)
    def test_container1(self):
        # c1 has no staticweb metadata, so an anonymous request needs auth.
        resp = Request.blank('/v1/a/c1').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 401)
    def test_container1_web_mode_explicitly_off(self):
        resp = Request.blank('/v1/a/c1',
                             headers={'x-web-mode': 'false'}).get_response(
                                 self.test_staticweb)
        self.assertEqual(resp.status_int, 401)
    def test_container1_web_mode_explicitly_on(self):
        resp = Request.blank('/v1/a/c1',
                             headers={'x-web-mode': 'true'}).get_response(
                                 self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
    def test_container2(self):
        # c2 has no index/listing config: pass-through to a JSON listing.
        resp = Request.blank('/v1/a/c2').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(len(json.loads(resp.body)),
                         int(resp.headers['x-container-object-count']))
    def test_container2_web_mode_explicitly_off(self):
        resp = Request.blank(
            '/v1/a/c2',
            headers={'x-web-mode': 'false'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(len(json.loads(resp.body)),
                         int(resp.headers['x-container-object-count']))
    def test_container2_web_mode_explicitly_on(self):
        resp = Request.blank(
            '/v1/a/c2',
            headers={'x-web-mode': 'true'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
    def test_container2onetxt(self):
        resp = Request.blank(
            '/v1/a/c2/one.txt').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
    def test_container2json(self):
        resp = Request.blank(
            '/v1/a/c2').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(len(json.loads(resp.body)),
                         int(resp.headers['x-container-object-count']))
    def test_container2json_web_mode_explicitly_off(self):
        resp = Request.blank(
            '/v1/a/c2',
            headers={'x-web-mode': 'false'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(len(json.loads(resp.body)),
                         int(resp.headers['x-container-object-count']))
    def test_container2json_web_mode_explicitly_on(self):
        resp = Request.blank(
            '/v1/a/c2',
            headers={'x-web-mode': 'true'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
    def test_container3(self):
        # Requests for a web-enabled container without trailing slash
        # redirect to the slash-terminated URL.
        resp = Request.blank('/v1/a/c3').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
        self.assertEqual(resp.headers['location'],
                         'http://localhost/v1/a/c3/')
    def test_container3indexhtml(self):
        resp = Request.blank('/v1/a/c3/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertIn(b'Test main index.html file.', resp.body)
    def test_container3subsubdir(self):
        resp = Request.blank(
            '/v1/a/c3/subdir3/subsubdir').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
    def test_container3subsubdircontents(self):
        resp = Request.blank(
            '/v1/a/c3/subdir3/subsubdir/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body, b'index file')
    def test_container3subdir(self):
        # c3 has no listings CSS configured: inline <style>, no <link>.
        resp = Request.blank(
            '/v1/a/c3/subdir/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertIn(b'Listing of /v1/a/c3/subdir/', resp.body)
        self.assertIn(b'</style>', resp.body)
        self.assertNotIn(b'<link', resp.body)
        self.assertNotIn(b'listing.css', resp.body)
    def test_container3subdirx(self):
        resp = Request.blank(
            '/v1/a/c3/subdirx/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
    def test_container3subdiry(self):
        resp = Request.blank(
            '/v1/a/c3/subdiry/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
    def test_container3subdirz(self):
        resp = Request.blank(
            '/v1/a/c3/subdirz').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
    def test_container3unknown(self):
        # No custom error page configured on c3.
        resp = Request.blank(
            '/v1/a/c3/unknown').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
        self.assertNotIn(b"Chrome's 404 fancy-page sucks.", resp.body)
    def test_container3bindexhtml(self):
        resp = Request.blank('/v1/a/c3b/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.body, b'')
    def test_container4indexhtml(self):
        resp = Request.blank('/v1/a/c4/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertIn(b'Listing of /v1/a/c4/', resp.body)
        self.assertIn(b'href="listing.css"', resp.body)
    def test_container4indexhtmlauthed(self):
        # Anonymous: redirected; authed: served directly; authed with
        # web-mode forced on: redirected again.
        resp = Request.blank('/v1/a/c4').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
        resp = Request.blank(
            '/v1/a/c4',
            environ={'REMOTE_USER': 'authed'}).get_response(
                self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        resp = Request.blank(
            '/v1/a/c4', headers={'x-web-mode': 't'},
            environ={'REMOTE_USER': 'authed'}).get_response(
                self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
    def test_container4unknown(self):
        # c4 has a custom 404 error page.
        resp = Request.blank(
            '/v1/a/c4/unknown').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
        self.assertIn(b"Chrome's 404 fancy-page sucks.", resp.body)
    def test_container4subdir(self):
        # c4 configures an external listings CSS: <link>, no inline style.
        resp = Request.blank(
            '/v1/a/c4/subdir/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertIn(b'Listing of /v1/a/c4/subdir/', resp.body)
        self.assertNotIn(b'</style>', resp.body)
        self.assertIn(b'<link', resp.body)
        self.assertIn(b'href="../listing.css"', resp.body)
        self.assertEqual(resp.headers['content-type'],
                         'text/html; charset=UTF-8')
    def test_container4onetxt(self):
        resp = Request.blank(
            '/v1/a/c4/one.txt').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
    def test_container4twotxt(self):
        resp = Request.blank(
            '/v1/a/c4/two.txt').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 503)
    def test_container5indexhtml(self):
        resp = Request.blank('/v1/a/c5/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 503)
    def test_container5unknown(self):
        resp = Request.blank(
            '/v1/a/c5/unknown').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
        self.assertNotIn(b"Chrome's 404 fancy-page sucks.", resp.body)
    def test_container6subdir(self):
        resp = Request.blank(
            '/v1/a/c6/subdir').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
    def test_container6listing(self):
        # container6 has web-listings = t, web-error=error.html
        resp = Request.blank('/v1/a/c6/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        # expect custom 401 if request is not auth'd for listing but is auth'd
        # to GET objects
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({})(self.app), deny_listing=True)
        resp = Request.blank('/v1/a/c6/').get_response(test_staticweb)
        self.assertEqual(resp.status_int, 401)
        self.assertIn(b"Hey, you're not authorized to see this!", resp.body)
        # expect default 401 if request is not auth'd for listing or object GET
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({})(self.app), deny_listing=True,
            deny_objects=True)
        resp = Request.blank('/v1/a/c6/').get_response(test_staticweb)
        self.assertEqual(resp.status_int, 401)
        self.assertNotIn(b"Hey, you're not authorized to see this!", resp.body)
    def test_container6blisting(self):
        # c6b overrides the listing label via web-listings-label metadata.
        label = 'Listing of {0}/'.format(
            meta_map['c6b']['meta']['web-listings-label'])
        resp = Request.blank('/v1/a/c6b/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertIn(label.encode('utf-8'), resp.body)
    def test_container7listing(self):
        # container7 has web-listings = f, web-error=error.html
        resp = Request.blank('/v1/a/c7/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
        self.assertIn(b"Web Listing Disabled", resp.body)
        # expect 301 if auth'd but no trailing '/'
        resp = Request.blank('/v1/a/c7').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
        # expect default 401 if request is not auth'd and no trailing '/'
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({})(self.app), deny_listing=True,
            deny_objects=True)
        resp = Request.blank('/v1/a/c7').get_response(test_staticweb)
        self.assertEqual(resp.status_int, 401)
        self.assertNotIn(b"Hey, you're not authorized to see this!", resp.body)
        # expect custom 401 if request is not auth'd for listing
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({})(self.app), deny_listing=True)
        resp = Request.blank('/v1/a/c7/').get_response(test_staticweb)
        self.assertEqual(resp.status_int, 401)
        self.assertIn(b"Hey, you're not authorized to see this!", resp.body)
        # expect default 401 if request is not auth'd for listing or object GET
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({})(self.app), deny_listing=True,
            deny_objects=True)
        resp = Request.blank('/v1/a/c7/').get_response(test_staticweb)
        self.assertEqual(resp.status_int, 401)
        self.assertNotIn(b"Hey, you're not authorized to see this!", resp.body)
    def test_container8listingcss(self):
        # Full-URL listings CSS is passed through untouched.
        resp = Request.blank(
            '/v1/a/c8/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertIn(b'Listing of /v1/a/c8/', resp.body)
        self.assertIn(b'<link', resp.body)
        self.assertIn(b'href="http://localhost/stylesheets/listing.css"',
                      resp.body)
    def test_container8subdirlistingcss(self):
        resp = Request.blank(
            '/v1/a/c8/subdir/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertIn(b'Listing of /v1/a/c8/subdir/', resp.body)
        self.assertIn(b'<link', resp.body)
        self.assertIn(b'href="http://localhost/stylesheets/listing.css"',
                      resp.body)
    def test_container9listingcss(self):
        # Absolute-path listings CSS is passed through untouched.
        resp = Request.blank(
            '/v1/a/c9/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertIn(b'Listing of /v1/a/c9/', resp.body)
        self.assertIn(b'<link', resp.body)
        self.assertIn(b'href="/absolute/listing.css"', resp.body)
    def test_container9subdirlistingcss(self):
        resp = Request.blank(
            '/v1/a/c9/subdir/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertIn(b'Listing of /v1/a/c9/subdir/', resp.body)
        self.assertIn(b'<link', resp.body)
        self.assertIn(b'href="/absolute/listing.css"', resp.body)
    def test_container10unicodesubdirlisting(self):
        # UTF-8 (snowman) path segments must list correctly at every depth.
        resp = Request.blank(
            '/v1/a/c10/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertIn(b'Listing of /v1/a/c10/', resp.body)
        resp = Request.blank(
            '/v1/a/c10/\xe2\x98\x83/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertIn(b'Listing of /v1/a/c10/\xe2\x98\x83/', resp.body)
        resp = Request.blank(
            '/v1/a/c10/\xe2\x98\x83/\xe2\x98\x83/'
        ).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertIn(
            b'Listing of /v1/a/c10/\xe2\x98\x83/\xe2\x98\x83/', resp.body)
    def test_container11subdirmarkerobjectindex(self):
        resp = Request.blank('/v1/a/c11/subdir/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertIn(b'<h2>c11 subdir index</h2>', resp.body)
    def test_container11subdirmarkermatchdirtype(self):
        resp = Request.blank('/v1/a/c11a/subdir/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
        self.assertIn(b'Index File Not Found', resp.body)
    def test_container11subdirmarkeraltdirtype(self):
        resp = Request.blank('/v1/a/c11a/subdir2/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
    def test_container11subdirmarkerinvaliddirtype(self):
        resp = Request.blank('/v1/a/c11a/subdir3/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
    def test_container12unredirectedrequest(self):
        resp = Request.blank('/v1/a/c12/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertIn(b'index file', resp.body)
    def test_container_404_has_css(self):
        resp = Request.blank('/v1/a/c13/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
        self.assertIn(b'listing.css', resp.body)
    def test_container_404_has_no_css(self):
        resp = Request.blank('/v1/a/c7/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
        self.assertNotIn(b'listing.css', resp.body)
        self.assertIn(b'<style', resp.body)
    def test_subrequest_once_if_possible(self):
        # An object GET must hit the backend exactly once (no extra
        # subrequests when the first response suffices).
        resp = Request.blank(
            '/v1/a/c4/one.txt').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['x-object-meta-test'], 'value')
        self.assertEqual(resp.body, b'1')
        self.assertEqual(self.app.calls, 1)
    def test_no_auth_middleware(self):
        resp = Request.blank('/v1/a/c3').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
        # Test without an authentication middleware before staticweb
        # This is no longer handled by staticweb middleware, thus not returning
        # a 301 redirect
        self.test_staticweb = staticweb.filter_factory({})(self.app)
        resp = Request.blank('/v1/a/c3').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
    def test_subrequest_not_override_auth(self):
        # staticweb must never inject swift.authorize_override into the
        # environment of its internal subrequests.
        app_call = \
            'swift.common.middleware.staticweb._StaticWebContext._app_call'
        orig_app_call = staticweb._StaticWebContext._app_call
        _fail = self.fail
        def hook_app_call(self, env):
            if 'swift.authorize_override' in env:
                _fail('staticweb must not create authorize info by itself')
            return orig_app_call(self, env)
        with mock.patch(app_call, hook_app_call):
            # testing for _listing container
            resp = Request.blank('/v1/a/c4/').get_response(self.test_staticweb)
            self.assertEqual(resp.status_int, 200)  # sanity
            # testing for _listing object subdir
            resp = Request.blank(
                '/v1/a/c4/unknown').get_response(self.test_staticweb)
            self.assertEqual(resp.status_int, 404)
            # testing for _error_response
            resp = Request.blank('/v1/a/c5/').get_response(self.test_staticweb)
            self.assertEqual(resp.status_int, 503)  # sanity
class TestStaticWebUrlBase(unittest.TestCase):
    """Tests for the optional url_base filter setting, which overrides
    the scheme and/or netloc used in redirect Location headers.
    """
    def setUp(self):
        self.app = FakeApp()
        # Stub out container-info lookups with the canned metadata.
        self._orig_get_container_info = staticweb.get_container_info
        staticweb.get_container_info = mock_get_container_info
    def tearDown(self):
        staticweb.get_container_info = self._orig_get_container_info
    def test_container3subdirz_scheme(self):
        # url_base of 'https://' should force only the scheme.
        path = '/v1/a/c3/subdirz'
        scheme = 'https'
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({'url_base': 'https://'})(self.app))
        resp = Request.blank(path).get_response(test_staticweb)
        self.assertEqual(resp.status_int, 301)
        parsed = urlparse(resp.location)
        self.assertEqual(parsed.scheme, scheme)
        # We omit comparing netloc here, because swob is free to add port.
        self.assertEqual(parsed.path, path + '/')
    def test_container3subdirz_host(self):
        # url_base of '//host' should force only the netloc.
        path = '/v1/a/c3/subdirz'
        netloc = 'example.com'
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({
                'url_base': '//%s' % (netloc,)})(self.app))
        resp = Request.blank(path).get_response(test_staticweb)
        self.assertEqual(resp.status_int, 301)
        parsed = urlparse(resp.location)
        # We compare scheme with the default. This may change, but unlikely.
        self.assertEqual(parsed.scheme, 'http')
        self.assertEqual(parsed.netloc, netloc)
        self.assertEqual(parsed.path, path + '/')
    def test_container3subdirz_both(self):
        # A full url_base forces both scheme and netloc.
        path = '/v1/a/c3/subdirz'
        scheme = 'http'
        netloc = 'example.com'
        test_staticweb = FakeAuthFilter(
            staticweb.filter_factory({
                'url_base': 'http://example.com'})(self.app))
        resp = Request.blank(path).get_response(test_staticweb)
        self.assertEqual(resp.status_int, 301)
        parsed = urlparse(resp.location)
        self.assertEqual(parsed.scheme, scheme)
        self.assertEqual(parsed.netloc, netloc)
        self.assertEqual(parsed.path, path + '/')
# Allow running this test module directly with the stdlib runner.
if __name__ == '__main__':
    unittest.main()
|
openstack/swift
|
test/unit/common/middleware/test_staticweb.py
|
Python
|
apache-2.0
| 41,104
|
[
"VisIt"
] |
5c0183dcc69bb7b1b0c5fcbc2a9cfabd66473b68a6b5ebd298edd9ea0842fa17
|
"""
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
# Toy 1-D target f(x) = x*sin(x), sampled at training points X; X2 holds
# off-grid locations used to check predictions between the samples.
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
            random_start=10, beta0=None):
    """MLE fit of a 1-D GP with random-start optimization.

    The fitted model must interpolate the training data exactly (zero MSE
    at X) and stay reasonably accurate at the off-grid points X2.
    """
    model = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                            theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                            random_start=random_start, verbose=False).fit(X, y)
    pred_train, mse_train = model.predict(X, eval_MSE=True)
    pred_test, mse_test = model.predict(X2, eval_MSE=True)
    interpolates = np.allclose(pred_train, y) and np.allclose(mse_train, 0.)
    assert_true(interpolates and np.allclose(mse_test, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
            random_start=10, beta0=None):
    """MLE fit of an anisotropic 2-D GP with random-start optimization.

    Checks the interpolating property and that the optimized thetas end up
    inside their [thetaL, thetaU] bounds (up to floating-point eps).
    """
    b, kappa, e = 5., .5, .1
    g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
    points = np.array([[-4.61611719, -6.00099547],
                       [4.10469096, 5.32782448],
                       [0.00000000, -0.50000000],
                       [-6.17289014, -4.6984743],
                       [1.3109306, -6.93271427],
                       [-5.03823144, 3.10584743],
                       [-2.87600388, 6.74310541],
                       [5.21301203, 4.26386883]])
    targets = g(points).ravel()
    lower = [1e-4] * 2
    upper = [1e-1] * 2
    model = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                            theta0=[1e-2] * 2, thetaL=lower, thetaU=upper,
                            random_start=random_start, verbose=False)
    model.fit(points, targets)
    predictions, mse = model.predict(points, eval_MSE=True)
    assert_true(np.allclose(predictions, targets) and np.allclose(mse, 0.))
    tol = np.finfo(model.theta_.dtype).eps
    assert_true(np.all(model.theta_ >= lower - tol))  # lower bounds hold
    assert_true(np.all(model.theta_ <= upper + tol))  # upper bounds hold
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
               random_start=10, beta0=None):
    """MLE fit of an anisotropic 2-D GP with a 2-column output.

    Both (identical) output columns must be interpolated exactly.
    """
    b, kappa, e = 5., .5, .1
    g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
    f = lambda x: np.vstack((g(x), g(x))).T
    points = np.array([[-4.61611719, -6.00099547],
                       [4.10469096, 5.32782448],
                       [0.00000000, -0.50000000],
                       [-6.17289014, -4.6984743],
                       [1.3109306, -6.93271427],
                       [-5.03823144, 3.10584743],
                       [-2.87600388, 6.74310541],
                       [5.21301203, 4.26386883]])
    targets = f(points)
    model = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                            theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
                            thetaU=[1e-1] * 2,
                            random_start=random_start, verbose=False)
    model.fit(points, targets)
    predictions, mse = model.predict(points, eval_MSE=True)
    assert_true(np.allclose(predictions, targets) and np.allclose(mse, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
    """Fitting with mismatched X/y lengths must raise ValueError."""
    GaussianProcess().fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
    """Repeat the 1-D and 2-D checks for every string-named built-in
    correlation model."""
    for corr in ('absolute_exponential', 'squared_exponential', 'cubic',
                 'linear'):
        test_1d(regr='constant', corr=corr, random_start=random_start)
        test_2d(regr='constant', corr=corr, random_start=random_start)
        test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
    """Repeat the interpolation checks with fixed regression weights
    (beta0), i.e. Ordinary Kriging, for several regression models."""
    quadratic_beta = [0., 0.5, 0.5, 0.5, 0.5, 0.5]
    test_1d(regr='linear', beta0=[0., 0.5])
    test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
    for check in (test_2d, test_2d_2d):
        check(regr='linear', beta0=[0., 0.5, 0.5])
        check(regr='quadratic', beta0=quadratic_beta)
def test_no_normalize():
    """Fitting without output normalization must still interpolate X."""
    model = GaussianProcess(normalize=False).fit(X, y)
    assert_true(np.allclose(model.predict(X), y))
def test_random_starts():
    """An increasing number of random starts of the GP fit may only
    increase (never decrease) the reduced likelihood of the chosen theta."""
    n_samples, n_features = 50, 3
    np.random.seed(0)
    rng = np.random.RandomState(0)
    features = rng.randn(n_samples, n_features) * 2 - 1
    targets = np.sin(features).sum(axis=1) + np.sin(3 * features).sum(axis=1)
    best = -np.inf
    for starts in range(1, 5):
        model = GaussianProcess(regr="constant", corr="squared_exponential",
                                theta0=[1e-0] * n_features,
                                thetaL=[1e-4] * n_features,
                                thetaU=[1e+1] * n_features,
                                random_start=starts, random_state=0,
                                verbose=False).fit(features, targets)
        likelihood = model.reduced_likelihood_function()[0]
        # Allow a float32-eps slack for numerically identical optima.
        assert_greater(likelihood, best - np.finfo(np.float32).eps)
        best = likelihood
def test_mse_solving():
    """The MSE estimate must stay sane.

    Non-regression test for ignoring off-diagonals of the feature
    covariance: a nugget renders the covariance useless, so only the mean
    function matters, on data with low effective rank.
    """
    model = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
                            thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
                            optimizer='Welch', regr="linear", random_state=0)
    features, targets = make_regression(n_informative=3, n_features=60,
                                        noise=50, random_state=0,
                                        effective_rank=1)
    model.fit(features, targets)
    assert_greater(1000, model.predict(features, eval_MSE=True)[1].mean())
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/sklearn/gaussian_process/tests/test_gaussian_process.py
|
Python
|
gpl-2.0
| 6,813
|
[
"Gaussian"
] |
292232045670d8ed7f9502cf6f01ffee4c2a7cd26d2d31650593f82309b75d0c
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Sławek Piotrowski <sentinel@atteo.org>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
class Decoder(srd.Decoder):
api_version = 3
id = 'rfm12'
name = 'RFM12'
longname = 'HopeRF RFM12'
desc = 'HopeRF RFM12 wireless transceiver control protocol.'
license = 'gplv2+'
inputs = ['spi']
outputs = []
tags = ['Wireless/RF']
annotations = (
('cmd', 'Command'),
('param', 'Command parameter'),
('disabled', 'Disabled bit'),
('return', 'Returned value'),
('disabled_return', 'Disabled returned value'),
('interpretation', 'Interpretation'),
)
annotation_rows = (
('commands', 'Commands', (0, 1, 2)),
('returns', 'Returns', (3, 4)),
('interpretations', 'Interpretations', (5,)),
)
    def __init__(self):
        """Create the decoder and initialize all mutable state."""
        self.reset()
    def reset(self):
        """Reset all decoder state between decode runs."""
        # Raw SPI bytes/bit-samples of the transfer currently being decoded.
        self.mosi_bytes, self.miso_bytes = [], []
        self.mosi_bits, self.miso_bits = [], []
        # Current bit cursor per annotation row (commands/returns/notes).
        self.row_pos = [0, 0, 0]
        # Maps annotation class index -> annotation row index.
        self.ann_to_row = [0, 0, 0, 1, 1, 2]
        # Initialize with Power-On-Reset values.
        self.last_status = [0x00, 0x00]
        self.last_config = 0x08
        self.last_power = 0x08
        self.last_freq = 0x680
        self.last_data_rate = 0x23
        self.last_fifo_and_reset = 0x80
        self.last_afc = 0xF7
        self.last_transceiver = 0x00
        self.last_pll = 0x77
def advance_ann(self, ann, length):
row = self.ann_to_row[ann]
self.row_pos[row] += length
    def putx(self, ann, length, description):
        """Annotate the next 'length' bits with class 'ann' and advance
        that class's row cursor.

        'description' may be a single string or a list of alternative
        strings (long to short forms).
        """
        if not isinstance(description, list):
            description = [description]
        row = self.ann_to_row[ann]
        bit = self.row_pos[row]
        # Sample positions come from the recorded MOSI bit ranges.
        self.put(self.mosi_bits[bit][1], self.mosi_bits[bit + length - 1][2],
                 self.out_ann, [ann, description])
        bit += length
        self.row_pos[row] = bit
def describe_bits(self, data, names):
i = 0x01 << len(names) - 1
bit = 0
while i != 0:
if names[bit] != '':
self.putx(1 if (data & i) else 2, 1, names[bit])
i >>= 1
bit += 1
def describe_return_bits(self, data, names):
i = 0x01 << len(names) - 1
bit = 0
while i != 0:
if names[bit] != '':
self.putx(3 if (data & i) else 4, 1, names[bit])
else:
self.advance_ann(3, 1)
i >>= 1
bit += 1
def describe_changed_bits(self, data, old_data, names):
changes = data ^ old_data
i = 0x01 << (len(names) - 1)
bit = 0
while i != 0:
if names[bit] != '' and changes & i:
s = ['+', 'Turning on'] if (data & i) else ['-', 'Turning off']
self.putx(5, 1, s)
else:
self.advance_ann(5, 1)
i >>= 1
bit += 1
    def start(self):
        """Register the annotation output (called before decoding starts)."""
        self.out_ann = self.register(srd.OUTPUT_ANN)
    def handle_configuration_cmd(self, cmd, ret):
        """Decode the configuration command: el/ef bits, frequency band,
        crystal load capacitance; note which fields changed."""
        self.putx(0, 8, ['Configuration command', 'Configuration'])
        NAMES = [['Internal data register', 'el'], ['FIFO mode', 'ef']]
        bits = (cmd[1] & 0xC0) >> 6
        old_bits = (self.last_config & 0xC0) >> 6
        self.describe_bits(bits, NAMES)
        self.describe_changed_bits(bits, old_bits, NAMES)
        FREQUENCIES = ['315', '433', '868', '915']
        f = FREQUENCIES[(cmd[1] & 0x30) >> 4] + 'MHz'
        self.putx(1, 2, ['Frequency: ' + f, f])
        # NOTE(review): unlike later handlers there is no advance_ann(5, ...)
        # on the unchanged path here -- verify interpretation-row alignment.
        if cmd[1] & 0x30 != self.last_config & 0x30:
            self.putx(5, 2, ['Changed', '~'])
        c = '%.1fpF' % (8.5 + (cmd[1] & 0xF) * 0.5)
        self.putx(1, 4, ['Capacitance: ' + c, c])
        if cmd[1] & 0xF != self.last_config & 0xF:
            self.putx(5, 4, ['Changed', '~'])
        self.last_config = cmd[1]
    def handle_power_management_cmd(self, cmd, ret):
        """Decode the power management command: per-block enable bits,
        and note which effective blocks changed state."""
        self.putx(0, 8, ['Power management', 'Power'])
        NAMES = [['Receiver chain', 'er'], ['Baseband circuit', 'ebb'],
                 ['Transmission', 'et'], ['Synthesizer', 'es'],
                 ['Crystal oscillator', 'ex'], ['Low battery detector', 'eb'],
                 ['Wake-up timer', 'ew'], ['Clock output off switch', 'dc']]
        self.describe_bits(cmd[1], NAMES)
        power = cmd[1]
        # Some bits imply other, even if they are set to 0.
        if power & 0x80:
            power |= 0x58
        if power & 0x20:
            power |= 0x18
        # Diff against the effective (implied) previous state, not the raw byte.
        self.describe_changed_bits(power, self.last_power, NAMES)
        self.last_power = power
    def handle_frequency_setting_cmd(self, cmd, ret):
        """Decode the frequency setting command: 12-bit value F from the
        low nibble of byte 1 plus byte 2."""
        self.putx(0, 4, ['Frequency setting', 'Frequency'])
        f = ((cmd[1] & 0xF) << 8) + cmd[2]
        self.putx(0, 12, ['F = %3.4f' % f])
        # NOTE(review): rewinds the interpretation-row cursor by the 4
        # command bits, presumably so 'Changing' overlays the 12 value
        # bits annotated above -- verify against the row bookkeeping.
        self.row_pos[2] -= 4
        if self.last_freq != f:
            self.putx(5, 12, ['Changing', '~'])
        self.last_freq = f
def handle_data_rate_cmd(self, cmd, ret):
self.putx(0, 8, ['Data rate command', 'Data rate'])
r = cmd[1] & 0x7F
cs = (cmd[1] & 0x80) >> 7
rate = 10000 / 29.0 / (r + 1) / (1 + 7 * cs)
self.putx(0, 8, ['%3.1fkbps' % rate])
if self.last_data_rate != cmd[1]:
self.putx(5, 8, ['Changing', '~'])
self.last_data_rate = cmd[1]
    def handle_receiver_control_cmd(self, cmd, ret):
        """Decode the receiver control command: pin16 role, VDI response
        time, baseband bandwidth, LNA gain and RSSI threshold."""
        self.putx(0, 5, ['Receiver control command'])
        s = 'interrupt input' if (cmd[0] & 0x04) else 'VDI output'
        self.putx(0, 1, ['pin16 = ' + s])
        VDI_NAMES = ['Fast', 'Medium', 'Slow', 'Always on']
        vdi_speed = VDI_NAMES[cmd[0] & 0x3]
        self.putx(0, 2, ['VDI: %s' % vdi_speed])
        BANDWIDTH_NAMES = ['Reserved', '400kHz', '340kHz', '270kHz', '200kHz',
                           '134kHz', '67kHz', 'Reserved']
        bandwidth = BANDWIDTH_NAMES[(cmd[1] & 0xE0) >> 5]
        self.putx(0, 3, ['Bandwidth: %s' % bandwidth])
        LNA_GAIN_NAMES = [0, -6, -14, -20]
        lna_gain = LNA_GAIN_NAMES[(cmd[1] & 0x18) >> 3]
        self.putx(0, 2, ['LNA gain: %ddB' % lna_gain])
        RSSI_THRESHOLD_NAMES = ['-103', '-97', '-91', '-85', '-79', '-73',
                                'Reserved', 'Reserved']
        rssi_threshold = RSSI_THRESHOLD_NAMES[cmd[1] & 0x7]
        self.putx(0, 3, ['RSSI threshold: %s' % rssi_threshold])
def handle_data_filter_cmd(self, cmd, ret):
self.putx(0, 8, ['Data filter command'])
if cmd[1] & 0x80:
clock_recovery = 'auto'
elif cmd[1] & 0x40:
clock_recovery = 'fast'
else:
clock_recovery = 'slow'
self.putx(0, 2, ['Clock recovery: %s mode' % clock_recovery])
self.advance_ann(0, 1) # Should always be 1.
s = 'analog' if (cmd[1] & 0x10) else 'digital'
self.putx(0, 1, ['Data filter: ' + s])
self.advance_ann(0, 1) # Should always be 1.
self.putx(0, 3, ['DQD threshold: %d' % (cmd[1] & 0x7)])
    def handle_fifo_and_reset_cmd(self, cmd, ret):
        """Decode the FIFO and reset mode command: trigger level, synchron
        length, FIFO fill condition and reset sensitivity; mark changes."""
        self.putx(0, 8, ['FIFO and reset command'])
        fifo_level = (cmd[1] & 0xF0) >> 4
        self.putx(0, 4, ['FIFO trigger level: %d' % fifo_level])
        last_fifo_level = (self.last_fifo_and_reset & 0xF0) >> 4
        if fifo_level != last_fifo_level:
            self.putx(5, 4, ['Changing', '~'])
        else:
            self.advance_ann(5, 4)
        s = 'one byte' if (cmd[1] & 0x08) else 'two bytes'
        self.putx(0, 1, ['Synchron length: ' + s])
        if (cmd[1] & 0x08) != (self.last_fifo_and_reset & 0x08):
            self.putx(5, 1, ['Changing', '~'])
        else:
            self.advance_ann(5, 1)
        # Fill condition: bit 2 = always, bit 1 = after sync, else never.
        if cmd[1] & 0x04:
            fifo_fill = 'Always'
        elif cmd[1] & 0x02:
            fifo_fill = 'After synchron pattern'
        else:
            fifo_fill = 'Never'
        self.putx(0, 2, ['FIFO fill: %s' % fifo_fill])
        if (cmd[1] & 0x06) != (self.last_fifo_and_reset & 0x06):
            self.putx(5, 2, ['Changing', '~'])
        else:
            self.advance_ann(5, 2)
        s = 'non-sensitive' if (cmd[1] & 0x01) else 'sensitive'
        self.putx(0, 1, ['Reset mode: ' + s])
        if (cmd[1] & 0x01) != (self.last_fifo_and_reset & 0x01):
            self.putx(5, 1, ['Changing', '~'])
        else:
            self.advance_ann(5, 1)
        self.last_fifo_and_reset = cmd[1]
def handle_synchron_pattern_cmd(self, cmd, ret):
self.putx(0, 8, ['Synchron pattern command'])
if self.last_fifo_and_reset & 0x08:
self.putx(0, 8, ['Pattern: 0x2D%02X' % pattern])
else:
self.putx(0, 8, ['Pattern: %02X' % pattern])
    def handle_fifo_read_cmd(self, cmd, ret):
        """Decode the FIFO read command; the received byte is returned on
        MISO in the second transfer byte."""
        self.putx(0, 8, ['FIFO read command', 'FIFO read'])
        self.putx(3, 8, ['Data: %02X' % ret[1]])
    def handle_afc_cmd(self, cmd, ret):
        """Decode the AFC command: mode, range limit (scaled by the band
        set in the last configuration command) and the four flag bits."""
        self.putx(0, 8, ['AFC command'])
        MODES = ['Off', 'Once', 'During receiving', 'Always']
        mode = (cmd[1] & 0xC0) >> 6
        self.putx(0, 2, ['Mode: %s' % MODES[mode]])
        if (cmd[1] & 0xC0) != (self.last_afc & 0xC0):
            self.putx(5, 2, ['Changing', '~'])
        else:
            self.advance_ann(5, 2)
        range_limit = (cmd[1] & 0x30) >> 4
        # Per-band frequency step; index comes from the stored config band.
        FREQ_TABLE = [0.0, 2.5, 5.0, 7.5]
        freq_delta = FREQ_TABLE[(self.last_config & 0x30) >> 4]
        if range_limit == 0:
            self.putx(0, 2, ['Range: No limit'])
        elif range_limit == 1:
            self.putx(0, 2, ['Range: +/-%dkHz' % (15 * freq_delta)])
        elif range_limit == 2:
            self.putx(0, 2, ['Range: +/-%dkHz' % (7 * freq_delta)])
        elif range_limit == 3:
            self.putx(0, 2, ['Range: +/-%dkHz' % (3 * freq_delta)])
        if (cmd[1] & 0x30) != (self.last_afc & 0x30):
            self.putx(5, 2, ['Changing', '~'])
        else:
            self.advance_ann(5, 2)
        NAMES = ['Strobe edge', 'High accuracy mode', 'Enable offset register',
                 'Enable offset calculation']
        self.describe_bits(cmd[1] & 0xF, NAMES)
        self.describe_changed_bits(cmd[1] & 0xF, self.last_afc & 0xF, NAMES)
        self.last_afc = cmd[1]
def handle_transceiver_control_cmd(self, cmd, ret):
self.putx(0, 8, ['Transceiver control command'])
self.putx(0, 4, ['FSK frequency delta: %dkHz' % (15 * ((cmd[1] & 0xF0) >> 4))])
if cmd[1] & 0xF0 != self.last_transceiver & 0xF0:
self.putx(5, 4, ['Changing', '~'])
else:
self.advance_ann(5, 4)
POWERS = [0, -2.5, -5, -7.5, -10, -12.5, -15, -17.5]
self.advance_ann(0, 1)
self.advance_ann(5, 1)
self.putx(0,3, ['Relative power: %dB' % (cmd[1] & 0x07)])
if (cmd[1] & 0x07) != (self.last_transceiver & 0x07):
self.putx(5, 3, ['Changing', '~'])
else:
self.advance_ann(5, 3)
self.last_transceiver = cmd[1]
    def handle_pll_setting_cmd(self, cmd, ret):
        # PLL setting command (0xCCxx).
        self.putx(0, 8, ['PLL setting command'])
        self.advance_ann(0, 1)
        self.putx(0, 2, ['Clock buffer rise and fall time'])
        self.advance_ann(0, 1)
        self.advance_ann(5, 4)
        NAMES = [['Delay in phase detector', 'dly'], ['Disable dithering', 'ddit']]
        self.describe_bits((cmd[1] & 0xC) >> 2, NAMES)
        self.describe_changed_bits((cmd[1] & 0xC) >> 2, (self.last_pll & 0xC) >> 2, NAMES)
        s = '256kbps, high' if (cmd[1] & 0x01) else '86.2kbps, low'
        self.putx(0, 1, ['Max bit rate: %s noise' % s])
        # NOTE(review): unlike the other handlers, advance_ann() here runs
        # unconditionally *before* the 'Changing' check below -- confirm the
        # annotation rows still line up when bit 0 has changed.
        self.advance_ann(5, 1)
        if (cmd[1] & 0x01) != (self.last_pll & 0x01):
            self.putx(5, 1, ['Changing', '~'])
        self.last_pll = cmd[1]
    def handle_transmitter_register_cmd(self, cmd, ret):
        # Transmitter register write command (0xB8xx): queue one byte for
        # transmission.
        self.putx(0, 8, ['Transmitter register command', 'Transmit'])
        self.putx(0, 8, ['Data: %s' % cmd[1], '%s' % cmd[1]])
    def handle_software_reset_cmd(self, cmd, ret):
        # Software reset command (0xFExx): annotate the full 16-bit word.
        self.putx(0, 16, ['Software reset command'])
    def handle_wake_up_timer_cmd(self, cmd, ret):
        # Wake-up timer command (0xE000 | R<<8 | M): the wake-up period is
        # T = 1.03 * M * 2^R + 0.5 -- presumably milliseconds per the
        # datasheet formula; units are not shown in the annotation (TODO
        # confirm).
        self.putx(0, 3, ['Wake-up timer command', 'Timer'])
        r = cmd[0] & 0x1F
        m = cmd[1]
        time = 1.03 * m * pow(2, r) + 0.5
        self.putx(0, 13, ['Time: %7.2f' % time])
    def handle_low_duty_cycle_cmd(self, cmd, ret):
        # Low duty-cycle command (0xC8xx): annotate the full 16-bit word.
        self.putx(0, 16, ['Low duty cycle command'])
    def handle_low_battery_detector_cmd(self, cmd, ret):
        # Low battery detector / clock divider command (0xC0xx).
        self.putx(0, 8, ['Low battery detector command'])
        # Bits 5-7 select the microcontroller clock output divider.
        NAMES = ['1', '1.25', '1.66', '2', '2.5', '3.33', '5', '10']
        clock = NAMES[(cmd[1] & 0xE0) >> 5]
        self.putx(0, 3, ['Clock output: %sMHz' % clock, '%sMHz' % clock])
        self.advance_ann(0, 1)
        # Detector threshold: 2.25V base plus 0.1V per LSB of bits 0-3.
        v = 2.25 + (cmd[1] & 0x0F) * 0.1
        self.putx(0, 4, ['Low battery voltage: %1.2fV' % v, '%1.2fV' % v])
def handle_status_read_cmd(self, cmd, ret):
self.putx(0, 8, ['Status read command', 'Status'])
NAMES = ['RGIT/FFIT', 'POR', 'RGUR/FFOV', 'WKUP', 'EXT', 'LBD',
'FFEM', 'RSSI/ATS', 'DQD', 'CRL', 'ATGL']
status = (ret[0] << 3) + (ret[1] >> 5)
self.row_pos[1] -= 8
self.row_pos[2] -= 8
self.describe_return_bits(status, NAMES)
receiver_enabled = (self.last_power & 0x80) >> 7
if ret[0] & 0x80:
if receiver_enabled:
s = 'Received data in FIFO'
else:
s = 'Transmit register ready'
self.putx(5, 1, s)
else:
self.advance_ann(5, 1)
if ret[0] & 0x40:
self.putx(5, 1, 'Power on Reset')
else:
self.advance_ann(5, 1)
if ret[0] & 0x20:
if receiver_enabled:
s = 'RX FIFO overflow'
else:
s = 'Transmit register under run'
self.putx(5, 1, s)
else:
self.advance_ann(5, 1)
if ret[0] & 0x10:
self.putx(5, 1, 'Wake-up timer')
else:
self.advance_ann(5, 1)
if ret[0] & 0x08:
self.putx(5, 1, 'External interrupt')
else:
self.advance_ann(5, 1)
if ret[0] & 0x04:
self.putx(5, 1, 'Low battery')
else:
self.advance_ann(5, 1)
if ret[0] & 0x02:
self.putx(5, 1, 'FIFO is empty')
else:
self.advance_ann(5, 1)
if ret[0] & 0x01:
if receiver_enabled:
s = 'Incoming signal above limit'
else:
s = 'Antenna detected RF signal'
self.putx(5, 1, s)
else:
self.advance_ann(5, 1)
if ret[1] & 0x80:
self.putx(5, 1, 'Data quality detector')
else:
self.advance_ann(5, 1)
if ret[1] & 0x40:
self.putx(5, 1, 'Clock recovery locked')
else:
self.advance_ann(5, 1)
self.advance_ann(5, 1)
self.putx(3, 5, ['AFC offset'])
if (self.last_status[1] & 0x1F) != (ret[1] & 0x1F):
self.putx(5, 5, ['Changed', '~'])
self.last_status = ret
def handle_cmd(self, cmd, ret):
if cmd[0] == 0x80:
self.handle_configuration_cmd(cmd, ret)
elif cmd[0] == 0x82:
self.handle_power_management_cmd(cmd, ret)
elif cmd[0] & 0xF0 == 0xA0:
self.handle_frequency_setting_cmd(cmd, ret)
elif cmd[0] == 0xC6:
self.handle_data_rate_cmd(cmd, ret)
elif cmd[0] & 0xF8 == 0x90:
self.handle_receiver_control_cmd(cmd, ret)
elif cmd[0] == 0xC2:
self.handle_data_filter_cmd(cmd, ret)
elif cmd[0] == 0xCA:
self.handle_fifo_and_reset_cmd(cmd, ret)
elif cmd[0] == 0xCE:
self.handle_synchron_pattern_cmd(cmd, ret)
elif cmd[0] == 0xB0:
self.handle_fifo_read_cmd(cmd, ret)
elif cmd[0] == 0xC4:
self.handle_afc_cmd(cmd, ret)
elif cmd[0] & 0xFE == 0x98:
self.handle_transceiver_control_cmd(cmd, ret)
elif cmd[0] == 0xCC:
self.handle_pll_setting_cmd(cmd, ret)
elif cmd[0] == 0xB8:
self.handle_transmitter_register_cmd(cmd, ret)
elif cmd[0] == 0xFE:
self.handle_software_reset_cmd(cmd, ret)
elif cmd[0] & 0xE0 == 0xE0:
self.handle_wake_up_timer_cmd(cmd, ret)
elif cmd[0] == 0xC8:
self.handle_low_duty_cycle_cmd(cmd, ret)
elif cmd[0] == 0xC0:
self.handle_low_battery_detector_cmd(cmd, ret)
elif cmd[0] == 0x00:
self.handle_status_read_cmd(cmd, ret)
else:
c = '%02x %02x' % tuple(cmd)
r = '%02x %02x' % tuple(ret)
self.putx(0, 16, ['Unknown command: %s (reply: %s)!' % (c, r)])
    def decode(self, ss, es, data):
        # Entry point for packets coming from the SPI decoder stacked below.
        ptype, mosi, miso = data
        # For now, only use DATA and BITS packets.
        if ptype not in ('DATA', 'BITS'):
            return
        # Store the individual bit values and ss/es numbers. The next packet
        # is guaranteed to be a 'DATA' packet belonging to this 'BITS' one.
        if ptype == 'BITS':
            if mosi is not None:
                self.mosi_bits.extend(reversed(mosi))
            if miso is not None:
                self.miso_bits.extend(reversed(miso))
            return
        # Append new bytes.
        self.mosi_bytes.append(mosi)
        self.miso_bytes.append(miso)
        # All commands consist of 2 bytes.
        if len(self.mosi_bytes) < 2:
            return
        # Reset the per-command annotation row positions, decode the
        # complete command/reply pair, then clear the accumulators.
        self.row_pos = [0, 8, 8]
        self.handle_cmd(self.mosi_bytes, self.miso_bytes)
        self.mosi_bytes, self.miso_bytes = [], []
        self.mosi_bits, self.miso_bits = [], []
|
sigrokproject/libsigrokdecode
|
decoders/rfm12/pd.py
|
Python
|
gpl-3.0
| 18,375
|
[
"CRYSTAL"
] |
ce4a9d8a02c39a20ab479038056b416344cdc31841ad092097b6941e512b3028
|
import numpy as np
import logging
from collections import namedtuple
#from bfgs import lineSearch, BFGS
from optimization_exceptions import LineSearchError
from pele.optimize import Result
__all__ = ["LBFGS"]
_logger = logging.getLogger("pele.optimize")
class LBFGS(object):
"""
minimize a function using the LBFGS routine
Parameters
----------
X : array
the starting configuration for the minimization
pot :
the potential object
nsteps : int
the maximum number of iterations
tol : float
the minimization will stop when the rms grad is less than tol
iprint : int
how often to print status information
maxstep : float
the maximum step size
maxErise : float
the maximum the energy is alowed to rise during a step.
The step size will be reduced until this condition is satisfied.
M : int
the number of previous iterations to use in determining the optimal step
rel_energy : bool
if True, then maxErise the the *relative* maximum the energy is allowed
to rise during a step
H0 : float
the initial guess for the inverse diagonal Hessian. This particular
implementation of LBFGS takes all the inverse diagonal components to be the same.
events : list of callables
these are called after each iteration. events can also be added using
attachEvent()
alternate_stop_criterion : callable
this criterion will be used rather than rms gradiant to determine when
to stop the iteration
debug :
print debugging information
logger : logger object
messages will be passed to this logger rather than the default
energy, gradient : float, float array
The initial energy and gradient. If these are both not None then the
energy and gradient of the initial point will not be calculated, saving
one potential call.
armijo : bool
Use the Armijo criterion instead of maxErise as the criterion for
accepting a step size. The Armijo criterion is the first wolfe criterion
and is a condition that the energy decrease sufficiently
armijo_c : float
This adjusts how strong the armijo rule is. 0 < armijo_c < 1. Default
1e-4
fortran : bool
use the fortran version of the LBFGS. Only the step which computes
the step size and direction from the memory is in fortran.
Notes
-----
This each iteration of this minimization routine is composed of the
following parts
1. determine a step size and direction using the LBFGS algorithm
2. ensure the step size is appropriate (see maxErise and maxstep).
Reduce the step size until conditions are satisfied.
3. take step
http://dx.doi.org/10.1007/BF01589116
See Also
--------
MYLBFGS : this implemented in a compiled language
lbfgs_py : a function wrapper
"""
def __init__(self, X, pot, maxstep=0.1, maxErise=1e-4, M=4,
rel_energy=False, H0=0.1, events=None,
alternate_stop_criterion=None, debug=False,
iprint=-1, nsteps=10000, tol=1e-5, logger=None,
energy=None, gradient=None, armijo=False,
armijo_c=1e-4,
fortran=False,
):
X = X.copy()
self.X = X
self.N = len(X)
self.M = M
self.pot = pot
self._use_wolfe = False # this didn't work very well. should probably remove
self._armijo = bool(armijo)
self._wolfe1 = armijo_c
self._wolfe2 = 0.99
self._cython = False # we could make this passable
self._fortran = bool(fortran)
self.funcalls = 0
if energy is not None and gradient is not None:
self.energy = energy
self.G = gradient
else:
self.energy, self.G = self.pot.getEnergyGradient(self.X)
self.funcalls =+ 1
self.rms = np.linalg.norm(self.G) / np.sqrt(self.N)
self.maxstep = maxstep
self.maxErise = maxErise
self.rel_energy = rel_energy #use relative energy comparison for maxErise
self.events = events #a list of events to run during the optimization
if self.events is None: self.events = []
self.iprint = iprint
self.nsteps = nsteps
self.tol = tol
if logger is None:
self.logger = _logger
else:
self.logger = logger
self.alternate_stop_criterion = alternate_stop_criterion
self.debug = debug #print debug messages
self.s = np.zeros([self.M, self.N]) # position updates
self.y = np.zeros([self.M, self.N]) # gradient updates
# self.a = np.zeros(self.M) #approximation for the inverse hessian
#self.beta = np.zeros(M) #working space
if H0 is None:
self.H0 = 0.1
else:
self.H0 = H0
if self.H0 < 1e-10:
self.logger.warning("initial guess for inverse Hessian diagonal is negative or too small %s %s",
self.H0, "resetting it to 1.")
self.H0 = 1.
self.rho = np.zeros(M)
self.k = 0
self.s[0,:] = self.X
self.y[0,:] = self.G
self.rho[0] = 0. #1. / np.dot(X,G)
self.dXold = np.zeros(self.X.size)
self.dGold = np.zeros(self.X.size)
self._have_dXold = False
self.nfailed = 0
self.iter_number = 0
self.result = Result()
self.result.message = []
    def get_state(self):
        """return the state of the LBFGS memory"""
        # Arrays are copied so the snapshot is not mutated by later
        # iterations; set_state() adopts them by reference.
        State = namedtuple("State", "s y rho k H0 dXold dGold have_dXold")
        state = State(s=self.s.copy(), y=self.y.copy(),
                      rho=self.rho.copy(), k=self.k, H0=self.H0,
                      dXold=self.dXold.copy(), dGold=self.dGold.copy(),
                      have_dXold=self._have_dXold)
        return state
    def set_state(self, state):
        """set the LBFGS memory from the passed state"""
        # Adopted by reference (get_state() already copied).
        self.s = state.s
        self.y = state.y
        self.rho = state.rho
        self.k = state.k
        self.H0 = state.H0
        self.dXold = state.dXold
        self.dGold = state.dGold
        self._have_dXold = state.have_dXold
        # Sanity-check the restored memory matches this minimizer's shape.
        assert self.s.shape == (self.M, self.N)
        assert self.y.shape == (self.M, self.N)
        assert self.rho.shape == (self.M,)
        assert self.dXold.shape == (self.N,)
        assert self.dGold.shape == (self.N,)
    def update_coords(self, X, E, G):
        """change the location of the minimizer manually
        If the position change is too great the LBFGS memory will not accurately
        represent the local curvature.
        """
        self.X = X.copy()
        self.energy = float(E)
        self.G = G.copy()
        # rms gradient is the convergence measure used by run().
        self.rms = np.linalg.norm(self.G) / np.sqrt(self.G.size)
    def _add_step_to_memory(self, dX, dG):
        """
        add a step to the LBFGS memory
        Parameters
        ----------
        dX : ndarray
            the step: X - Xold
        dG : ndarray
            the change in gradient along the step: G - Gold
        """
        klocal = (self.k + self.M) % self.M #=k cyclical
        self.s[klocal,:] = dX
        self.y[klocal,:] = dG
        # the local curvature along direction s is np.dot(y, s) / norm(s)
        YS = np.dot(dX, dG)
        # Guard against a zero dot product so rho stays finite.
        if YS == 0.:
            self.logger.warning("resetting YS to 1 in lbfgs %s", YS)
            YS = 1.
        self.rho[klocal] = 1. / YS
        # update the approximation for the diagonal inverse hessian
        # scale H0 according to
        # H_k = YS/YY * H_0
        # this is described in Liu and Nocedal 1989
        # http://dx.doi.org/10.1007/BF01589116
        # note: for this step we assume H0 is always the identity
        # js850: This ability to update H0 is what sets LBFGS apart from BFGS
        # and makes it such a superior algorithm in my opinion. This is why
        # LBFGS gets away with not using a more complicated linesearch algorithm
        # and why BFGS (which can't have this step) gives nonsensical results without
        # a linesearch.
        YY = np.dot(dG, dG)
        if YY == 0.:
            self.logger.warning("warning: resetting YY to 1 in lbfgs %s", YY)
            YY = 1.
        self.H0 = YS / YY
        # increment k
        self.k += 1
    def _get_LBFGS_step_cython(self, G):
        # Optional compiled backend; only reached when self._cython is set.
        import _cython_lbfgs
        return _cython_lbfgs._compute_LBFGS_step(G, self.s, self.y, self.rho,
                                    self.k, self.H0)
    def _get_LBFGS_step_fortran(self, G):
        # Optional fortran backend; only reached when self._fortran is set.
        import mylbfgs_updatestep
        ret = mylbfgs_updatestep.lbfgs_get_step_wrapper(G, self.s.reshape(-1), self.y.reshape(-1), self.rho,
                                    self.k, self.H0)
        return ret
    def _get_LBFGS_step(self, G):
        """use the LBFGS algorithm to compute a suggested step from the memory
        """
        if self._cython:
            return self._get_LBFGS_step_cython(G)
        elif self._fortran:
            return self._get_LBFGS_step_fortran(G)
        s = self.s
        y = self.y
        rho = self.rho
        k = self.k
        # Standard LBFGS two-loop recursion over the stored (s, y) pairs.
        q = G.copy()
        a = np.zeros(self.M)
        # Indices of the valid memory slots, oldest first (cyclic buffer).
        myrange = [ i % self.M for i in range(max([0, k - self.M]), k, 1) ]
        assert len(myrange) == min(self.M, k)
        for i in reversed(myrange):
            a[i] = rho[i] * np.dot( s[i,:], q )
            q -= a[i] * y[i,:]
        z = q #q is not used anymore after this, so we can use it as workspace
        z *= self.H0
        for i in myrange:
            beta = rho[i] * np.dot( y[i,:], z )
            z += s[i,:] * (a[i] - beta)
        stp = -z
        if k == 0:
            #make first guess for the step length cautious
            gnorm = np.linalg.norm(G)
            stp *= min(gnorm, 1. / gnorm)
        return stp
    def getStep(self, X, G):
        """update the LBFGS memory and compute a step direction and size
        http://en.wikipedia.org/wiki/Limited-memory_BFGS
        Liu and Nocedal 1989
        http://dx.doi.org/10.1007/BF01589116
        """
        # Fold the previously accepted step into the memory before
        # computing a new proposal.
        if self._have_dXold:
            self._add_step_to_memory(self.dXold, self.dGold)
        stp = self._get_LBFGS_step(G)
        return stp
def adjustStepSize(self, X, E, G, stp):
"""
We now have a proposed step. This function will make sure it is
a good step and then take it. This is known as a Backtracking linesearch
http://en.wikipedia.org/wiki/Backtracking_line_search
1) if the step is not anti-aligned with the gradient (i.e. downhill),
then reverse the step
2) if the step is larger than maxstep, then rescale the step
3) calculate the energy and gradient of the new position
4) if the step increases the energy by more than maxErise,
then reduce the step size and go to 3)
5) if the step is reduced more than 10 times and the energy is still
not acceptable, then increment nfail, reset the lbfgs optimizer and
continue
6) if nfail is greater than 5 abort the quench
"""
f = 1.
X0 = X.copy()
G0 = G.copy()
E0 = E
if np.dot(G, stp) > 0:
if self.debug:
overlap = np.dot(G, stp) / np.linalg.norm(G) / np.linalg.norm(stp)
self.logger.warn("LBFGS returned uphill step, reversing step direction: overlap %g" % (overlap))
stp = -stp
stepsize = np.linalg.norm(stp)
if f * stepsize > self.maxstep:
f = self.maxstep / stepsize
#print "dot(grad, step)", np.dot(G0, stp) / np.linalg.norm(G0)/ np.linalg.norm(stp)
#self.nfailed = 0
nincrease = 0
while True:
X = X0 + f * stp
E, G = self.pot.getEnergyGradient(X)
self.funcalls += 1
# get the increase in energy
if self.rel_energy:
if E == 0: E = 1e-100
dE = (E - E0)/abs(E)
#print dE
else:
dE = E - E0
# if the increase is greater than maxErise reduce the step size
if self._accept_step(E, E0, G, G0, f * stp):
break
else:
if self.debug:
self.logger.warn("energy increased, trying a smaller step %s %s %s %s", E, E0, f*stepsize, nincrease)
f /= 10.
nincrease += 1
if nincrease > 10:
break
if nincrease > 10:
self.nfailed += 1
if self.nfailed > 10:
raise(LineSearchError("lbfgs: too many failures in adjustStepSize, exiting"))
# abort the linesearch, reset the memory and reset the coordinates
#print "lbfgs: having trouble finding a good step size. dot(grad, step)", np.dot(G0, stp) / np.linalg.norm(G0)/ np.linalg.norm(stp)
self.logger.warning("lbfgs: having trouble finding a good step size. %s %s", f*stepsize, stepsize)
self.reset()
E = E0
G = G0
X = X0
f = 0.
self.stepsize = f * stepsize
return X, E, G
    def _accept_step(self, Enew, Eold, Gnew, Gold, step, strong=False):
        """determine whether the step is acceptable"""
        if self._use_wolfe:
            return self._wolfe_conditions(Enew, Eold, Gnew, Gold, step, strong)
        elif self._armijo:
            return self._armijo_condition(Enew, Eold, Gold, step)
        else:
            # Default criterion: the (possibly relative) energy increase
            # must not exceed maxErise.
            # get the increase in energy
            if self.rel_energy:
                if Enew == 0:
                    Enew = 1e-100
                dE = (Enew - Eold) / abs(Eold)
                #print dE
            else:
                dE = Enew - Eold
            # if the increase is greater than maxErise reduce the step size
            return dE <= self.maxErise
    def _armijo_condition(self, Enew, Eold, Gold, step, return_overlap=False):
        """test if the armijo condition is satisfied
        The energy cannot rise more than an amount dependent on the
        dot product of the gradient and the step
        """
        overlap_old = np.dot(Gold, step)
        armijo = Enew <= Eold + overlap_old * self._wolfe1
        # NOTE: Python 2 print statement below -- this module is py2-only.
        if not armijo and self.debug:
            stepsize = np.linalg.norm(step)
            print self.iter_number, "rejecting step due to energy", Enew, Enew-Eold, overlap_old * self._wolfe1, "stepsize", stepsize
        if return_overlap:
            return armijo, overlap_old
        return armijo
    def _wolfe_conditions(self, Enew, Eold, Gnew, Gold, step, strong=False):
        """return True if the Wolfe conditions are satisfied, False otherwise
        wolfe1 : the energy cannot rise more than an amount dependent on the
            dot product of the gradient and the step
        wolfe2 : the overlap of the gradient with the step direction cannot
            decrease by more than a given factor
        """
        armijo, overlap_old = self._armijo_condition(Enew, Eold, Gold, step, return_overlap=True)
        # Short-circuit: wolfe2 is only tested if wolfe1 (armijo) holds.
        if not armijo:
            return False
        overlap_new = np.dot(Gnew, step)
        if strong:
            wolfe2 = np.abs(overlap_new) <= np.abs(overlap_old) * self._wolfe2
        else:
            wolfe2 = overlap_new >= overlap_old * self._wolfe2
        if not wolfe2 and self.debug:
            stepsize = np.linalg.norm(step)
            print self.iter_number, "rejecting step due to gradient", overlap_new, overlap_old, self._wolfe2, "stepsize", stepsize
        return armijo and wolfe2
    def reset(self):
        """reset the LBFGS memory and H0"""
        # Setting k to 0 invalidates all stored (s, y) pairs.
        self.H0 = 0.1
        self.k = 0
        self._have_dXold = False
    def attachEvent(self, event):
        # Register a callback invoked after every iteration with
        # (coords, energy, rms) keyword arguments.
        self.events.append(event)
    def one_iteration(self):
        """do one iteration of the LBFGS loop
        """
        stp = self.getStep(self.X, self.G)
        Xnew, self.energy, Gnew = self.adjustStepSize(self.X, self.energy, self.G, stp)
        # Remember the accepted displacement so the next getStep() call can
        # fold it into the LBFGS memory.
        self.dXold = Xnew - self.X
        self.dGold = Gnew - self.G
        self._have_dXold = True
        self.X = Xnew
        self.G = Gnew
        self.rms = np.linalg.norm(self.G) / np.sqrt(self.N)
        if self.iprint > 0 and self.iter_number % self.iprint == 0:
            self.logger.info("lbfgs: %s %s %s %s %s %s %s %s %s", self.iter_number, "E", self.energy,
                             "rms", self.rms, "funcalls", self.funcalls, "stepsize", self.stepsize)
        for event in self.events:
            event(coords=self.X, energy=self.energy, rms=self.rms)
        self.iter_number += 1
        return True
    def stop_criterion_satisfied(self):
        """test the stop criterion"""
        # Default criterion: rms gradient below tol.
        if self.alternate_stop_criterion is None:
            return self.rms < self.tol
        else:
            return self.alternate_stop_criterion(energy=self.energy, gradient=self.G,
                                                 tol=self.tol, coords=self.X)
    def run(self):
        """run the LBFGS minimizer
        stop when the stop criterion is satisfied or when the maximum number
        of steps is reached
        Returns
        -------
        return a results object
        """
        while self.iter_number < self.nsteps and not self.stop_criterion_satisfied():
            try:
                self.one_iteration()
            except LineSearchError:
                # The backtracking linesearch gave up repeatedly; record the
                # failure and return the best result found so far.
                self.logger.error("problem with adjustStepSize, ending quench")
                self.rms = np.linalg.norm(self.G) / np.sqrt(self.N)
                self.logger.error("    on failure: quench step %s %s %s %s", self.iter_number, self.energy, self.rms, self.funcalls)
                self.result.message.append( "problem with adjustStepSize" )
                break
        return self.get_result()
    def get_result(self):
        """return a results object"""
        # success reflects the stop criterion, not merely loop exhaustion.
        res = self.result
        res.nsteps = self.iter_number
        res.nfev = self.funcalls
        res.coords = self.X
        res.energy = self.energy
        res.rms = self.rms
        res.grad = self.G
        res.H0 = self.H0
        res.success = self.stop_criterion_satisfied()
        return res
#
# only testing stuff below here
#
class PrintEvent:
    """Iteration callback that appends each coordinate set to an xyz file.
    NOTE(review): the file handle is opened here and never closed; fine for
    the ad-hoc tests below, not for library use.
    """
    def __init__(self, fname):
        self.fout = open(fname, "w")
        self.coordslist = []
    def __call__(self, coords, **kwargs):
        from pele.utils.xyz import write_xyz
        write_xyz(self.fout, coords)
        self.coordslist.append( coords.copy() )
def test(pot, natoms = 100, iprint=-1):
    # Build a random dilute configuration and run the comparison test.
    #X = bfgs.getInitialCoords(natoms, pot)
    #X += np.random.uniform(-1,1,[3*natoms]) * 0.3
    X = np.random.uniform(-1,1,[natoms*3])*(1.*natoms)**(1./3)*.5
    runtest(X, pot, natoms, iprint)
def runtest(X, pot, natoms = 100, iprint=-1):
    # Minimize X with this module's LBFGS and compare against scipy's
    # implementation. NOTE(review): the natoms argument is never used in
    # this body; confirm it can be dropped.
    tol = 1e-5
    Xinit = np.copy(X)
    e, g = pot.getEnergyGradient(X)
    print "energy", e
    lbfgs = LBFGS(X, pot, maxstep = 0.1, tol=tol, iprint=iprint, nsteps=10000)
    printevent = PrintEvent( "debugout.xyz")
    lbfgs.attachEvent(printevent)
    ret = lbfgs.run()
    print "done", ret
    print "now do the same with scipy lbfgs"
    from pele.optimize import lbfgs_scipy as quench
    ret = quench(Xinit, pot, tol = tol)
    print ret
    if False:
        print "now do the same with scipy bfgs"
        from pele.optimize import bfgs as oldbfgs
        ret = oldbfgs(Xinit, pot, tol = tol)
        print ret
    if False:
        print "now do the same with gradient + linesearch"
        import _bfgs
        gpl = _bfgs.GradientPlusLinesearch(Xinit, pot, maxstep = 0.1)
        ret = gpl.run(1000, tol = 1e-6)
        print ret
    if False:
        print "calling from wrapper function"
        from pele.optimize import lbfgs_py
        ret = lbfgs_py(Xinit, pot, tol = tol)
        print ret
    # Optionally visualize the trajectory in pymol if available.
    try:
        import pele.utils.pymolwrapper as pym
        pym.start()
        for n, coords in enumerate(printevent.coordslist):
            coords = coords.reshape([-1, 3])
            pym.draw_spheres(coords, "A", n)
    except ImportError:
        print "error loading pymol"
if __name__ == "__main__":
    # Ad-hoc smoke test using the Axilrod-Teller + Lennard-Jones potential.
    from pele.potentials.lj import LJ
    from pele.potentials.ATLJ import ATLJ
    pot = ATLJ()
    #test(pot, natoms=3, iprint=1)
    # coords = np.loadtxt("coords")
    natoms = 10
    coords = np.random.uniform(-1,1,natoms*3)
    print coords.size
    coords = np.reshape(coords, coords.size)
    print coords
    # NOTE(review): natoms=3 disagrees with the 10-atom coords built above;
    # harmless since runtest() ignores natoms, but confirm the intent.
    runtest(coords, pot, natoms=3, iprint=1)
|
js850/pele
|
pele/optimize/_lbfgs_py.py
|
Python
|
gpl-3.0
| 21,527
|
[
"PyMOL"
] |
9516779a76e5c83ec2385dc2719a4bd6f81fb119d5742b9a0910245a8cbe2879
|
import numpy as np
def logsumexp(arr, axis=0):
    """Stably compute log(sum(exp(arr))) along *axis*.
    The per-slice maximum is subtracted before exponentiating and added
    back afterwards, which minimizes the chance of over/underflow.
    """
    work = np.rollaxis(arr, axis)
    shift = work.max(axis=0)
    # A non-finite maximum would poison the shift; treat it as zero so the
    # normalization becomes a no-op there.
    if shift.ndim > 0:
        shift[~np.isfinite(shift)] = 0
    elif not np.isfinite(shift):
        shift = 0
    with np.errstate(divide="ignore"):
        result = np.log(np.exp(work - shift).sum(axis=0))
    result += shift
    return result
def log_multivariate_normal_density_diag(X, means, covars):
    """Compute Gaussian log-density at X for a diagonal model
    Args:
        X: array like, shape (n_observations, n_features)
        means: array like, shape (n_components, n_features)
        covars: array like, shape (n_components, n_features)
    Output:
        lpr: array like, shape (n_observations, n_components)
    From scikit-learn/sklearn/mixture/gmm.py
    """
    n_observations, n_features = X.shape
    # Expand (x - mu)^2 / sigma^2 into three dot-product terms so the full
    # (n_observations, n_components) matrix is built without Python loops.
    constant = n_features * np.log(2 * np.pi)
    log_det = np.sum(np.log(covars), 1)
    mahalanobis = (np.sum((means ** 2) / covars, 1)
                   - 2 * np.dot(X, (means / covars).T)
                   + np.dot(X ** 2, (1.0 / covars).T))
    return -0.5 * (constant + log_det + mahalanobis)
|
rendanim/HMM_Speech_Recognition
|
tools2.py
|
Python
|
gpl-3.0
| 1,326
|
[
"Gaussian"
] |
aeb6928440d0df3748b4051a70438288ecb6d43a7faaac0703afbb8d5623fe0d
|
# -*- coding: utf-8 -*-
from __future__ import division
import os
from collections import Counter
import numpy as np
from scipy import sparse
from pygsp import utils
from .fourier import FourierMixIn
from .difference import DifferenceMixIn
from ._io import IOMixIn
from ._layout import LayoutMixIn
class Graph(FourierMixIn, DifferenceMixIn, IOMixIn, LayoutMixIn):
r"""Base graph class.
* Instantiate it to construct a graph from a (weighted) adjacency matrix.
* Provide a common interface (and implementation) for graph objects.
* Initialize attributes for derived classes.
Parameters
----------
adjacency : sparse matrix or array_like
The (weighted) adjacency matrix of size n_vertices by n_vertices that
encodes the graph.
The data is copied except if it is a sparse matrix in CSR format.
lap_type : {'combinatorial', 'normalized'}
The kind of Laplacian to be computed by :meth:`compute_laplacian`.
coords : array_like
A matrix of size n_vertices by d that represents the coordinates of the
vertices in a d-dimensional embedding space.
plotting : dict
Plotting parameters.
Attributes
----------
n_vertices or N : int
The number of vertices (nodes) in the graph.
n_edges or Ne : int
The number of edges (links) in the graph.
W : :class:`scipy.sparse.csr_matrix`
The adjacency matrix that contains the weights of the edges.
It is represented as an n_vertices by n_vertices matrix, where
:math:`W_{i,j}` is the weight of the edge :math:`(v_i, v_j)` from
vertex :math:`v_i` to vertex :math:`v_j`. :math:`W_{i,j} = 0` means
that there is no direct connection.
L : :class:`scipy.sparse.csr_matrix`
The graph Laplacian, an N-by-N matrix computed from W.
lap_type : 'normalized', 'combinatorial'
The kind of Laplacian that was computed by :func:`compute_laplacian`.
signals : dict (string -> :class:`numpy.ndarray`)
Signals attached to the graph.
coords : :class:`numpy.ndarray`
Vertices coordinates in 2D or 3D space. Used for plotting only.
plotting : dict
Plotting parameters.
Examples
--------
Define a simple graph.
>>> graph = graphs.Graph([
... [0., 2., 0.],
... [2., 0., 5.],
... [0., 5., 0.],
... ])
>>> graph
Graph(n_vertices=3, n_edges=2)
>>> graph.n_vertices, graph.n_edges
(3, 2)
>>> graph.W.toarray()
array([[0., 2., 0.],
[2., 0., 5.],
[0., 5., 0.]])
>>> graph.d
array([1, 2, 1], dtype=int32)
>>> graph.dw
array([2., 7., 5.])
>>> graph.L.toarray()
array([[ 2., -2., 0.],
[-2., 7., -5.],
[ 0., -5., 5.]])
Add some coordinates to plot it.
>>> import matplotlib.pyplot as plt
>>> graph.set_coordinates([
... [0, 0],
... [0, 1],
... [1, 0],
... ])
>>> fig, ax = graph.plot()
"""
def __init__(self, adjacency, lap_type='combinatorial', coords=None,
plotting={}):
self.logger = utils.build_logger(__name__)
if not sparse.isspmatrix(adjacency):
adjacency = np.asanyarray(adjacency)
if (adjacency.ndim != 2) or (adjacency.shape[0] != adjacency.shape[1]):
raise ValueError('Adjacency: must be a square matrix.')
# CSR sparse matrices are the most efficient for matrix multiplication.
# They are the sole sparse matrix type to support eliminate_zeros().
self._adjacency = sparse.csr_matrix(adjacency, copy=False)
if np.isnan(self._adjacency.sum()):
raise ValueError('Adjacency: there is a Not a Number (NaN).')
if np.isinf(self._adjacency.sum()):
raise ValueError('Adjacency: there is an infinite value.')
if self.has_loops():
self.logger.warning('Adjacency: there are self-loops '
'(non-zeros on the diagonal). '
'The Laplacian will not see them.')
if (self._adjacency < 0).nnz != 0:
self.logger.warning('Adjacency: there are negative edge weights.')
self.n_vertices = self._adjacency.shape[0]
# Don't keep edges of 0 weight. Otherwise n_edges will not correspond
# to the real number of edges. Problematic when plotting.
self._adjacency.eliminate_zeros()
self._directed = None
self._connected = None
# Don't count edges two times if undirected.
# Be consistent with the size of the differential operator.
if self.is_directed():
self.n_edges = self._adjacency.nnz
else:
diagonal = np.count_nonzero(self._adjacency.diagonal())
off_diagonal = self._adjacency.nnz - diagonal
self.n_edges = off_diagonal // 2 + diagonal
if coords is not None:
# TODO: self.coords should be None if unset.
self.coords = np.asanyarray(coords)
self.plotting = {
'vertex_size': 100,
'vertex_color': (0.12, 0.47, 0.71, 0.5),
'edge_color': (0.5, 0.5, 0.5, 0.5),
'edge_width': 2,
'edge_style': '-',
'highlight_color': 'C1',
'normalize_intercept': .25,
}
self.plotting.update(plotting)
self.signals = dict()
# Attributes that are lazily computed.
self._A = None
self._d = None
self._dw = None
self._lmax = None
self._lmax_method = None
self._U = None
self._e = None
self._coherence = None
self._D = None
# self._L = None
# TODO: what about Laplacian? Lazy as Fourier, or disallow change?
self.lap_type = lap_type
self.compute_laplacian(lap_type)
# TODO: kept for backward compatibility.
self.Ne = self.n_edges
self.N = self.n_vertices
    def _get_extra_repr(self):
        # Overridden by subclasses to expose extra parameters in repr().
        return dict()
    def __repr__(self, limit=None):
        # Build "Graph(n_vertices=..., n_edges=..., extras...)". At most
        # limit - 2 extra key=value pairs are shown before eliding with '...'.
        s = ''
        for attr in ['n_vertices', 'n_edges']:
            s += '{}={}, '.format(attr, getattr(self, attr))
        for i, (key, value) in enumerate(self._get_extra_repr().items()):
            if (limit is not None) and (i == limit - 2):
                s += '..., '
                break
            s += '{}={}, '.format(key, value)
        # s[:-2] strips the trailing ", ".
        return '{}({})'.format(self.__class__.__name__, s[:-2])
    def set_signal(self, signal, name):
        r"""Attach a signal to the graph.
        Attached signals can be accessed (and modified or deleted) through the
        :attr:`signals` dictionary.
        Parameters
        ----------
        signal : array_like
            A sequence that assigns a value to each vertex.
            The value of the signal at vertex `i` is ``signal[i]``.
        name : String
            Name of the signal used as a key in the :attr:`signals` dictionary.
        Examples
        --------
        >>> graph = graphs.Sensor(10)
        >>> signal = np.arange(graph.n_vertices)
        >>> graph.set_signal(signal, 'mysignal')
        >>> graph.signals
        {'mysignal': array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}
        """
        # _check_signal presumably validates the length against n_vertices;
        # it is defined outside this chunk (TODO confirm).
        signal = self._check_signal(signal)
        self.signals[name] = signal
    def subgraph(self, vertices):
        r"""Create a subgraph from a list of vertices.
        Parameters
        ----------
        vertices : list
            Vertices to keep.
            Either a list of indices or an indicator function.
        Returns
        -------
        subgraph : :class:`Graph`
            Subgraph.
        Examples
        --------
        >>> graph = graphs.Graph([
        ...     [0., 3., 0., 0.],
        ...     [3., 0., 4., 0.],
        ...     [0., 4., 0., 2.],
        ...     [0., 0., 2., 0.],
        ... ])
        >>> graph = graph.subgraph([0, 2, 1])
        >>> graph.W.toarray()
        array([[0., 0., 3.],
               [0., 0., 4.],
               [3., 4., 0.]])
        """
        adjacency = self.W[vertices, :][:, vertices]
        # coords is only set when the graph has an embedding (see __init__).
        try:
            coords = self.coords[vertices]
        except AttributeError:
            coords = None
        graph = Graph(adjacency, self.lap_type, coords, self.plotting)
        # Restrict every attached signal to the kept vertices as well.
        for name, signal in self.signals.items():
            graph.set_signal(signal[vertices], name)
        return graph
    def is_weighted(self):
        r"""Check if the graph is weighted.
        A graph is unweighted (binary) if and only if all the entries in the
        adjacency matrix are either zero or one.
        Returns
        -------
        weighted : bool
            True if the graph is weighted, False otherwise.
        Examples
        --------
        Unweighted (binary) graph:
        >>> graph = graphs.Graph([
        ...     [0, 1, 0],
        ...     [1, 0, 1],
        ...     [0, 1, 0],
        ... ])
        >>> graph.is_weighted()
        False
        Weighted graph:
        >>> graph = graphs.Graph([
        ...     [0, 2, 0],
        ...     [2, 0, 1],
        ...     [0, 1, 0],
        ... ])
        >>> graph.is_weighted()
        True
        """
        # W.data holds only the stored (non-zero) entries, so this tests
        # "all edge weights equal one".
        return not np.all(self.W.data == 1)
    def is_connected(self):
        r"""Check if the graph is connected (cached).
        A graph is connected if and only if there exists a (directed) path
        between any two vertices.
        Returns
        -------
        connected : bool
            True if the graph is connected, False otherwise.
        Notes
        -----
        For undirected graphs, starting at a vertex and trying to visit all the
        others is enough.
        For directed graphs, one needs to check that a vertex can both be
        visited by all the others and visit all the others.
        Examples
        --------
        Connected graph:
        >>> graph = graphs.Graph([
        ...     [0, 3, 0, 0],
        ...     [3, 0, 4, 0],
        ...     [0, 4, 0, 2],
        ...     [0, 0, 2, 0],
        ... ])
        >>> graph.is_connected()
        True
        Disconnected graph:
        >>> graph = graphs.Graph([
        ...     [0, 3, 0, 0],
        ...     [3, 0, 4, 0],
        ...     [0, 0, 0, 2],
        ...     [0, 0, 2, 0],
        ... ])
        >>> graph.is_connected()
        False
        """
        if self._connected is not None:
            return self._connected
        # For directed graphs, run the traversal on both W and W.T: the
        # graph is connected iff vertex 0 reaches everyone and everyone
        # reaches vertex 0.
        adjacencies = [self.W]
        if self.is_directed():
            adjacencies.append(self.W.T)
        for adjacency in adjacencies:
            visited = np.zeros(self.n_vertices, dtype=bool)
            # NOTE(review): seeding the stack with vertex 0 raises an
            # IndexError for a 0-vertex graph -- confirm whether that case
            # can reach here.
            stack = set([0])
            while stack:
                vertex = stack.pop()
                if visited[vertex]:
                    continue
                visited[vertex] = True
                neighbors = adjacency[vertex].nonzero()[1]
                stack.update(neighbors)
            if not np.all(visited):
                self._connected = False
                return self._connected
        self._connected = True
        return self._connected
def is_directed(self):
r"""Check if the graph has directed edges (cached).
In this framework, we consider that a graph is directed if and
only if its weight matrix is not symmetric.
Returns
-------
directed : bool
True if the graph is directed, False otherwise.
Examples
--------
Directed graph:
>>> graph = graphs.Graph([
... [0, 3, 0],
... [3, 0, 4],
... [0, 0, 0],
... ])
>>> graph.is_directed()
True
Undirected graph:
>>> graph = graphs.Graph([
... [0, 3, 0],
... [3, 0, 4],
... [0, 4, 0],
... ])
>>> graph.is_directed()
False
"""
if self._directed is None:
self._directed = (self.W != self.W.T).nnz != 0
return self._directed
def has_loops(self):
r"""Check if any vertex is connected to itself.
A graph has self-loops if and only if the diagonal entries of its
adjacency matrix are not all zero.
Returns
-------
loops : bool
True if the graph has self-loops, False otherwise.
Examples
--------
Without self-loops:
>>> graph = graphs.Graph([
... [0, 3, 0],
... [3, 0, 4],
... [0, 0, 0],
... ])
>>> graph.has_loops()
False
With a self-loop:
>>> graph = graphs.Graph([
... [1, 3, 0],
... [3, 0, 4],
... [0, 0, 0],
... ])
>>> graph.has_loops()
True
"""
return np.any(self.W.diagonal() != 0)
    def extract_components(self):
        r"""Split the graph into connected components.
        See :func:`is_connected` for the method used to determine
        connectedness.
        Returns
        -------
        graphs : list
            A list of graph structures. Each having its own node list and
            weight matrix. If the graph is directed, add into the info
            parameter the information about the source nodes and the sink
            nodes.
        Examples
        --------
        >>> from scipy import sparse
        >>> W = sparse.rand(10, 10, 0.2)
        >>> W = utils.symmetrize(W)
        >>> G = graphs.Graph(W)
        >>> components = G.extract_components()
        >>> has_sinks = 'sink' in components[0].info
        >>> sinks_0 = components[0].info['sink'] if has_sinks else []
        """
        if self.A.shape[0] != self.A.shape[1]:
            self.logger.error('Inconsistent shape to extract components. '
                              'Square matrix required.')
            return None
        if self.is_directed():
            raise NotImplementedError('Directed graphs not supported yet.')
        graphs = []
        # Track which vertices have already been assigned to a component.
        visited = np.zeros(self.A.shape[0], dtype=bool)
        # indices = [] # Assigned but never used
        while not visited.all():
            # Pick one not-yet-visited vertex as the seed of a new component.
            stack = set(np.nonzero(~visited)[0][[0]])
            comp = []
            # Graph traversal: grow the component until no frontier is left.
            while len(stack):
                v = stack.pop()
                if not visited[v]:
                    comp.append(v)
                    visited[v] = True
                    # Add indices of nodes not visited yet and accessible from
                    # v.
                    stack.update(set([idx for idx in self.A[v, :].nonzero()[1]
                                      if not visited[idx]]))
            comp = sorted(comp)
            self.logger.info(('Constructing subgraph for component of '
                              'size {}.').format(len(comp)))
            G = self.subgraph(comp)
            # Remember the original vertex indices of this component.
            G.info = {'orig_idx': comp}
            graphs.append(G)
        return graphs
def compute_laplacian(self, lap_type='combinatorial'):
r"""Compute a graph Laplacian.
For undirected graphs, the combinatorial Laplacian is defined as
.. math:: L = D - W,
where :math:`W` is the weighted adjacency matrix and :math:`D` the
weighted degree matrix. The normalized Laplacian is defined as
.. math:: L = I - D^{-1/2} W D^{-1/2},
where :math:`I` is the identity matrix.
For directed graphs, the Laplacians are built from a symmetrized
version of the weighted adjacency matrix that is the average of the
weighted adjacency matrix and its transpose. As the Laplacian is
defined as the divergence of the gradient, it is not affected by the
orientation of the edges.
For both Laplacians, the diagonal entries corresponding to disconnected
nodes (i.e., nodes with degree zero) are set to zero.
Once computed, the Laplacian is accessible by the attribute :attr:`L`.
Parameters
----------
lap_type : {'combinatorial', 'normalized'}
The kind of Laplacian to compute. Default is combinatorial.
Examples
--------
Combinatorial and normalized Laplacians of an undirected graph.
>>> graph = graphs.Graph([
... [0, 2, 0],
... [2, 0, 1],
... [0, 1, 0],
... ])
>>> graph.compute_laplacian('combinatorial')
>>> graph.L.toarray()
array([[ 2., -2., 0.],
[-2., 3., -1.],
[ 0., -1., 1.]])
>>> graph.compute_laplacian('normalized')
>>> graph.L.toarray()
array([[ 1. , -0.81649658, 0. ],
[-0.81649658, 1. , -0.57735027],
[ 0. , -0.57735027, 1. ]])
Combinatorial and normalized Laplacians of a directed graph.
>>> graph = graphs.Graph([
... [0, 2, 0],
... [2, 0, 1],
... [0, 0, 0],
... ])
>>> graph.compute_laplacian('combinatorial')
>>> graph.L.toarray()
array([[ 2. , -2. , 0. ],
[-2. , 2.5, -0.5],
[ 0. , -0.5, 0.5]])
>>> graph.compute_laplacian('normalized')
>>> graph.L.toarray()
array([[ 1. , -0.89442719, 0. ],
[-0.89442719, 1. , -0.4472136 ],
[ 0. , -0.4472136 , 1. ]])
The Laplacian is defined as the divergence of the gradient.
See :meth:`compute_differential_operator` for details.
>>> graph = graphs.Path(20)
>>> graph.compute_differential_operator()
>>> L = graph.D.dot(graph.D.T)
>>> np.all(L.toarray() == graph.L.toarray())
True
The Laplacians have a bounded spectrum.
>>> G = graphs.Sensor(50)
>>> G.compute_laplacian('combinatorial')
>>> G.compute_fourier_basis()
>>> -1e-10 < G.e[0] < 1e-10 < G.e[-1] < 2*np.max(G.dw)
True
>>> G.compute_laplacian('normalized')
>>> G.compute_fourier_basis()
>>> -1e-10 < G.e[0] < 1e-10 < G.e[-1] < 2
True
"""
if lap_type != self.lap_type:
# Those attributes are invalidated when the Laplacian is changed.
# Alternative: don't allow the user to change the Laplacian.
self._lmax = None
self._U = None
self._e = None
self._coherence = None
self._D = None
self.lap_type = lap_type
if not self.is_directed():
W = self.W
else:
W = utils.symmetrize(self.W, method='average')
if lap_type == 'combinatorial':
D = sparse.diags(self.dw)
self.L = D - W
elif lap_type == 'normalized':
d = np.zeros(self.n_vertices)
disconnected = (self.dw == 0)
np.power(self.dw, -0.5, where=~disconnected, out=d)
D = sparse.diags(d)
self.L = sparse.identity(self.n_vertices) - D * W * D
self.L[disconnected, disconnected] = 0
self.L.eliminate_zeros()
else:
raise ValueError('Unknown Laplacian type {}'.format(lap_type))
def _check_signal(self, s):
r"""Check if signal is valid."""
s = np.asanyarray(s)
if s.shape[0] != self.n_vertices:
raise ValueError('First dimension must be the number of vertices '
'G.N = {}, got {}.'.format(self.N, s.shape))
return s
def dirichlet_energy(self, x):
r"""Compute the Dirichlet energy of a signal defined on the vertices.
The Dirichlet energy of a signal :math:`x` is defined as
.. math:: x^\top L x = \| \nabla_\mathcal{G} x \|_2^2
= \frac12 \sum_{i,j} W[i, j] (x[j] - x[i])^2
for the combinatorial Laplacian, and
.. math:: x^\top L x = \| \nabla_\mathcal{G} x \|_2^2
= \frac12 \sum_{i,j} W[i, j]
\left( \frac{x[j]}{d[j]} - \frac{x[i]}{d[i]} \right)^2
for the normalized Laplacian, where :math:`d` is the weighted degree
:attr:`dw`, :math:`\nabla_\mathcal{G} x = D^\top x` and :math:`D` is
the differential operator :attr:`D`. See :meth:`grad` for the
definition of the gradient :math:`\nabla_\mathcal{G}`.
Parameters
----------
x : array_like
Signal of length :attr:`n_vertices` living on the vertices.
Returns
-------
energy : float
The Dirichlet energy of the graph signal.
See Also
--------
grad : compute the gradient of a vertex signal
Examples
--------
Non-directed graph:
>>> graph = graphs.Path(5, directed=False)
>>> signal = [0, 2, 2, 4, 4]
>>> graph.dirichlet_energy(signal)
8.0
>>> # The Dirichlet energy is indeed the squared norm of the gradient.
>>> graph.compute_differential_operator()
>>> graph.grad(signal)
array([2., 0., 2., 0.])
Directed graph:
>>> graph = graphs.Path(5, directed=True)
>>> signal = [0, 2, 2, 4, 4]
>>> graph.dirichlet_energy(signal)
4.0
>>> # The Dirichlet energy is indeed the squared norm of the gradient.
>>> graph.compute_differential_operator()
>>> graph.grad(signal)
array([1.41421356, 0. , 1.41421356, 0. ])
"""
x = self._check_signal(x)
return x.T.dot(self.L.dot(x))
    @property
    def W(self):
        r"""Weighted adjacency matrix of the graph."""
        # Read-only view of the private _adjacency attribute.
        return self._adjacency
    @W.setter
    def W(self, value):
        # TODO: user can still do G.W[0, 0] = 1, or modify the passed W.
        # Rebinding W is forbidden so that cached quantities derived from the
        # adjacency (Laplacian, degrees, Fourier basis) cannot go stale.
        raise AttributeError('In-place modification of the graph is not '
                             'supported. Create another Graph object.')
@property
def A(self):
r"""Graph adjacency matrix (the binary version of W).
The adjacency matrix defines which edges exist on the graph.
It is represented as an N-by-N matrix of booleans.
:math:`A_{i,j}` is True if :math:`W_{i,j} > 0`.
"""
if self._A is None:
self._A = self.W > 0
return self._A
@property
def d(self):
r"""The degree (number of neighbors) of vertices.
For undirected graphs, the degree of a vertex is the number of vertices
it is connected to.
For directed graphs, the degree is the average of the in and out
degrees, where the in degree is the number of incoming edges, and the
out degree the number of outgoing edges.
In both cases, the degree of the vertex :math:`v_i` is the average
between the number of non-zero values in the :math:`i`-th column (the
in degree) and the :math:`i`-th row (the out degree) of the weighted
adjacency matrix :attr:`W`.
Examples
--------
Undirected graph:
>>> graph = graphs.Graph([
... [0, 1, 0],
... [1, 0, 2],
... [0, 2, 0],
... ])
>>> print(graph.d) # Number of neighbors.
[1 2 1]
>>> print(graph.dw) # Weighted degree.
[1 3 2]
Directed graph:
>>> graph = graphs.Graph([
... [0, 1, 0],
... [0, 0, 2],
... [0, 2, 0],
... ])
>>> print(graph.d) # Number of neighbors.
[0.5 1.5 1. ]
>>> print(graph.dw) # Weighted degree.
[0.5 2.5 2. ]
"""
if self._d is None:
if not self.is_directed():
# Shortcut for undirected graphs.
self._d = self.W.getnnz(axis=1)
# axis=1 faster for CSR (https://stackoverflow.com/a/16391764)
else:
degree_in = self.W.getnnz(axis=0)
degree_out = self.W.getnnz(axis=1)
self._d = (degree_in + degree_out) / 2
return self._d
@property
def dw(self):
r"""The weighted degree of vertices.
For undirected graphs, the weighted degree of the vertex :math:`v_i` is
defined as
.. math:: d[i] = \sum_j W[j, i] = \sum_j W[i, j],
where :math:`W` is the weighted adjacency matrix :attr:`W`.
For directed graphs, the weighted degree of the vertex :math:`v_i` is
defined as
.. math:: d[i] = \frac12 (d^\text{in}[i] + d^\text{out}[i])
= \frac12 (\sum_j W[j, i] + \sum_j W[i, j]),
i.e., as the average of the in and out degrees.
Examples
--------
Undirected graph:
>>> graph = graphs.Graph([
... [0, 1, 0],
... [1, 0, 2],
... [0, 2, 0],
... ])
>>> print(graph.d) # Number of neighbors.
[1 2 1]
>>> print(graph.dw) # Weighted degree.
[1 3 2]
Directed graph:
>>> graph = graphs.Graph([
... [0, 1, 0],
... [0, 0, 2],
... [0, 2, 0],
... ])
>>> print(graph.d) # Number of neighbors.
[0.5 1.5 1. ]
>>> print(graph.dw) # Weighted degree.
[0.5 2.5 2. ]
"""
if self._dw is None:
if not self.is_directed():
# Shortcut for undirected graphs.
self._dw = np.ravel(self.W.sum(axis=0))
else:
degree_in = np.ravel(self.W.sum(axis=0))
degree_out = np.ravel(self.W.sum(axis=1))
self._dw = (degree_in + degree_out) / 2
return self._dw
@property
def lmax(self):
r"""Largest eigenvalue of the graph Laplacian.
Can be exactly computed by :func:`compute_fourier_basis` or
approximated by :func:`estimate_lmax`.
"""
if self._lmax is None:
self.logger.warning('The largest eigenvalue G.lmax is not '
'available, we need to estimate it. '
'Explicitly call G.estimate_lmax() or '
'G.compute_fourier_basis() '
'once beforehand to suppress the warning.')
self.estimate_lmax()
return self._lmax
def estimate_lmax(self, method='lanczos'):
r"""Estimate the Laplacian's largest eigenvalue (cached).
The result is cached and accessible by the :attr:`lmax` property.
Exact value given by the eigendecomposition of the Laplacian, see
:func:`compute_fourier_basis`. That estimation is much faster than the
eigendecomposition.
Parameters
----------
method : {'lanczos', 'bounds'}
Whether to estimate the largest eigenvalue with the implicitly
restarted Lanczos method, or to return an upper bound on the
spectrum of the Laplacian.
Notes
-----
Runs the implicitly restarted Lanczos method (as implemented in
:func:`scipy.sparse.linalg.eigsh`) with a large tolerance, then
increases the calculated largest eigenvalue by 1 percent. For much of
the PyGSP machinery, we need to approximate filter kernels on an
interval that contains the spectrum of L. The only cost of using a
larger interval is that the polynomial approximation over the larger
interval may be a slightly worse approximation on the actual spectrum.
As this is a very mild effect, it is not necessary to obtain very tight
bounds on the spectrum of L.
A faster but less tight alternative is to use known algebraic bounds on
the graph Laplacian.
Examples
--------
>>> G = graphs.Logo()
>>> G.compute_fourier_basis() # True value.
>>> print('{:.2f}'.format(G.lmax))
13.78
>>> G.estimate_lmax(method='lanczos') # Estimate.
>>> print('{:.2f}'.format(G.lmax))
13.92
>>> G.estimate_lmax(method='bounds') # Upper bound.
>>> print('{:.2f}'.format(G.lmax))
18.58
"""
if method == self._lmax_method:
return
self._lmax_method = method
if method == 'lanczos':
try:
# We need to cast the matrix L to a supported type.
# TODO: not good for memory. Cast earlier?
lmax = sparse.linalg.eigsh(self.L.asfptype(), k=1, tol=5e-3,
ncv=min(self.N, 10),
return_eigenvectors=False)
lmax = lmax[0]
assert lmax <= self._get_upper_bound() + 1e-12
lmax *= 1.01 # Increase by 1% to be robust to errors.
self._lmax = lmax
except sparse.linalg.ArpackNoConvergence:
raise ValueError('The Lanczos method did not converge. '
'Try to use bounds.')
elif method == 'bounds':
self._lmax = self._get_upper_bound()
else:
raise ValueError('Unknown method {}'.format(method))
def _get_upper_bound(self):
r"""Return an upper bound on the eigenvalues of the Laplacian."""
if self.lap_type == 'normalized':
return 2 # Equal iff the graph is bipartite.
elif self.lap_type == 'combinatorial':
bounds = []
# Equal for full graphs.
bounds += [self.n_vertices * np.max(self.W)]
# Gershgorin circle theorem. Equal for regular bipartite graphs.
# Special case of the below bound.
bounds += [2 * np.max(self.dw)]
# Anderson, Morley, Eigenvalues of the Laplacian of a graph.
# Equal for regular bipartite graphs.
if self.n_edges > 0:
sources, targets, _ = self.get_edge_list()
bounds += [np.max(self.dw[sources] + self.dw[targets])]
# Merris, A note on Laplacian graph eigenvalues.
if not self.is_directed():
W = self.W
else:
W = utils.symmetrize(self.W, method='average')
m = W.dot(self.dw) / self.dw # Mean degree of adjacent vertices.
bounds += [np.max(self.dw + m)]
# Good review: On upper bounds for Laplacian graph eigenvalues.
return min(bounds)
else:
raise ValueError('Unknown Laplacian type '
'{}'.format(self.lap_type))
def get_edge_list(self):
r"""Return an edge list, an alternative representation of the graph.
Each edge :math:`e_k = (v_i, v_j) \in \mathcal{E}` from :math:`v_i` to
:math:`v_j` is associated with the weight :math:`W[i, j]`. For each
edge :math:`e_k`, the method returns :math:`(i, j, W[i, j])` as
`(sources[k], targets[k], weights[k])`, with :math:`i \in [0,
|\mathcal{V}|-1], j \in [0, |\mathcal{V}|-1], k \in [0,
|\mathcal{E}|-1]`.
Returns
-------
sources : vector of int
Source node indices.
targets : vector of int
Target node indices.
weights : vector of float
Edge weights.
Notes
-----
The weighted adjacency matrix is the canonical form used in this
package to represent a graph as it is the easiest to work with when
considering spectral methods.
Edge orientation (i.e., which node is the source or the target) is
arbitrary for undirected graphs.
The implementation uses the upper triangular part of the adjacency
matrix, hence :math:`i \leq j \ \forall k`.
Examples
--------
Edge list of a directed graph.
>>> graph = graphs.Graph([
... [0, 3, 0],
... [3, 0, 4],
... [0, 0, 0],
... ])
>>> sources, targets, weights = graph.get_edge_list()
>>> list(sources), list(targets), list(weights)
([0, 1, 1], [1, 0, 2], [3, 3, 4])
Edge list of an undirected graph.
>>> graph = graphs.Graph([
... [0, 3, 0],
... [3, 0, 4],
... [0, 4, 0],
... ])
>>> sources, targets, weights = graph.get_edge_list()
>>> list(sources), list(targets), list(weights)
([0, 1], [1, 2], [3, 4])
"""
if self.is_directed():
W = self.W.tocoo()
else:
W = sparse.triu(self.W, format='coo')
sources = W.row
targets = W.col
weights = W.data
assert self.n_edges == sources.size == targets.size == weights.size
return sources, targets, weights
def plot(self, vertex_color=None, vertex_size=None, highlight=[],
edges=None, edge_color=None, edge_width=None,
indices=False, colorbar=True, limits=None, ax=None,
title=None, backend=None):
r"""Docstring overloaded at import time."""
from pygsp.plotting import _plot_graph
return _plot_graph(self, vertex_color=vertex_color,
vertex_size=vertex_size, highlight=highlight,
edges=edges, indices=indices, colorbar=colorbar,
edge_color=edge_color, edge_width=edge_width,
limits=limits, ax=ax, title=title, backend=backend)
    def plot_signal(self, *args, **kwargs):
        r"""Deprecated, use plot() instead."""
        # Backward-compatible alias: forwards everything to plot().
        return self.plot(*args, **kwargs)
    def plot_spectrogram(self, node_idx=None):
        r"""Docstring overloaded at import time."""
        # Imported here rather than at module level, presumably to avoid a
        # circular import with pygsp.plotting -- confirm before moving.
        from pygsp.plotting import _plot_spectrogram
        _plot_spectrogram(self, node_idx=node_idx)
|
epfl-lts2/pygsp
|
pygsp/graphs/graph.py
|
Python
|
bsd-3-clause
| 33,936
|
[
"VisIt"
] |
0fe2e54541059b29bec126a4f1af267f1461ebb7f79d96ad0b21b3c52f4b43f3
|
'''This script demonstrates how to build a variational autoencoder with Keras.
Reference: "Auto-Encoding Variational Bayes" https://arxiv.org/abs/1312.6114
'''
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from keras.layers import Input, Dense, Lambda, Layer, concatenate
from keras.models import Model
from keras import backend as K
from keras import metrics
from keras.datasets import mnist
from scipy.misc import imsave as ims
from utils import merge
# Training hyper-parameters.
batch_size = 100
original_dim = 784  # flattened 28x28 MNIST image
latent_dim = 2
intermediate_dim = 256
epochs = 50
epsilon_std = 1.0
load=False  # when True, weights are loaded from disk instead of training
# Encoder: x -> hidden -> (z_mean, z_log_var).
x = Input(batch_shape=(batch_size, original_dim))
h = Dense(intermediate_dim, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)
def sampling(args):
    """Reparameterization trick: draw z = mean + exp(log_var / 2) * eps.
    Expressing the sample as a deterministic function of (z_mean, z_log_var)
    plus unit-Gaussian noise lets gradients flow through the sampling step.
    """
    mean, log_var = args
    noise = K.random_normal(shape=(batch_size, latent_dim), mean=0.,
                            stddev=epsilon_std)
    return mean + K.exp(log_var / 2) * noise
# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# we instantiate these layers separately so as to reuse them later
# (the same layer objects are reused below to build the generator model)
# inter_h = Dense(intermediate_dim, activation='relu')
# inter_h_2= Dense(latent_dim,activation='relu')
decoder_h = Dense(intermediate_dim, activation='relu')
decoder_mean = Dense(original_dim, activation='sigmoid')
# What if you don't sample and just pass on the mean and log variance?
# concat=concatenate([z_mean,z_log_var])
# h_inter=inter_h(z)
# z_analog=inter_h_2(h_inter)
h_decoded = decoder_h(z)
x_decoded_mean = decoder_mean(h_decoded)
# Custom loss layer
class CustomVariationalLayer(Layer):
    """Terminal layer that attaches the VAE loss via Keras add_loss().
    The layer passes its input through unchanged; it exists only so the
    loss can be registered on the model. It reads z_mean and z_log_var
    from the enclosing module scope.
    """
    def __init__(self, **kwargs):
        # Marks the layer as an input placeholder for Keras bookkeeping.
        self.is_placeholder = True
        super(CustomVariationalLayer, self).__init__(**kwargs)
    def vae_loss(self, x, x_decoded_mean):
        # Reconstruction term: per-pixel binary crossentropy scaled by the
        # number of pixels, so it is comparable to the KL term.
        xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
        # KL divergence of q(z|x) from the unit Gaussian prior.
        kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(xent_loss + kl_loss)
    def call(self, inputs):
        x = inputs[0]
        x_decoded_mean = inputs[1]
        loss = self.vae_loss(x, x_decoded_mean)
        self.add_loss(loss, inputs=inputs)
        # We won't actually use the output.
        return x_decoded_mean
y = CustomVariationalLayer()([x, x_decoded_mean])
vae = Model(x, y)
# The loss is attached inside CustomVariationalLayer, hence loss=None here.
vae.compile(optimizer='rmsprop', loss=None)
# train the VAE on MNIST digits
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
if load==False:
    vae.fit(x_train,
            shuffle=True,
            epochs=epochs,
            batch_size=batch_size,
            validation_data=(x_test, x_test))
    vae.save_weights("sampled.hdf5")
if load==True:
    vae.load_weights("sampled.hdf5")
# build a model to project inputs on the latent space
encoder = Model(x, z_mean)
# display a 2D plot of the digit classes in the latent space
x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
# plt.show()
# build a digit generator that can sample from the learned distribution
decoder_input = Input(shape=(latent_dim,)) # concat
_h_decoded = decoder_h(decoder_input)
_x_decoded_mean = decoder_mean(_h_decoded)
generator = Model(decoder_input, _x_decoded_mean)
# Save 8x8 grids of original and reconstructed test digits.
# NOTE(review): scipy.misc.imsave was removed from modern SciPy releases;
# this script presumably requires an old SciPy -- confirm, or switch to
# an imageio-based writer.
visual=x_test[:64]
reshaped=visual.reshape(64,28,28)
ims("sampled_results/base.jpg", merge(reshaped[:64], [8, 8]))
recons=vae.predict(x_test,batch_size=batch_size)
reshaped=recons[:64].reshape(64,28,28)
ims("sampled_results/final.jpg", merge(reshaped[:64], [8, 8]))
# # display a 2D grid of the digits
# n = 15 # figure with 15x15 digits
# digit_size = 28
# figure = np.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates on the unit square were transformed through the inverse CDF (ppf) of the Gaussian
# to produce values of the latent variables z, since the prior of the latent space is Gaussian
# Hold on, is this right? You should probably sample from posterior instead of prior?
# OR use another metrics, such as recovery authenticity.
# grid_x = norm.ppf(np.linspace(0.05,0.95, n))
# grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
#
# for i, yi in enumerate(grid_x):
# for j, xi in enumerate(grid_y):
# z_unsampled = np.array([[xi, yi]])
# x_decoded = generator.predict(z_unsampled)
# digit = x_decoded[0].reshape(digit_size, digit_size)
# figure[i * digit_size: (i + 1) * digit_size,
# j * digit_size: (j + 1) * digit_size] = digit
#
# plt.figure(figsize=(10, 10))
# plt.imshow(figure, cmap='Greys_r')
# plt.show()
# Sampling latent space form prior is fine. The latent space distribution is by KL similar to prior.
# But that's useful only if latent vector is fed into decoder.
# What if I feed parameters directly to decoder? How do I sample parameters to see what latent space
# looks like?
# This means I need another metrics.
# The nature of encoder is that the code must be compact. So they used one real number to represent the
# image. But a single real number loses some important information that can help classification.
# Think about embedded vector representation and reconstruction. I need to change this architecture,
# reevaluate the performance and see if VAE can bring me hope.
|
Fuchai/Philosophy-Machine
|
vae_mining/vae.py
|
Python
|
apache-2.0
| 5,617
|
[
"Gaussian"
] |
38d9b7d099bd4b1b9e306b9b2c49762ad81cdf05241429636936c4b161c87c23
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os, common
from proton import *
from threading import Thread
class Test(common.Test):
    """Base fixture: a Messenger echo server running on a daemon thread,
    plus a client Messenger. Subclasses implement run() as the server loop.
    """
    def setup(self):
        # Server listens on localhost:12345; run() is executed on a
        # background thread started by start().
        self.server = Messenger("server")
        self.server.timeout=10000
        self.server.start()
        self.server.subscribe("amqp://~0.0.0.0:12345")
        self.thread = Thread(name="server-thread", target=self.run)
        self.thread.daemon = True
        self.running = True
        self.client = Messenger("client")
        self.client.timeout=1000
    def start(self):
        # Launch the server thread first, then the client.
        self.thread.start()
        self.client.start()
    def teardown(self):
        if self.running:
            self.running = False
            # Send one last message so the server's blocking recv() wakes
            # up and notices that running is now False.
            msg = Message()
            msg.address="amqp://0.0.0.0:12345"
            self.client.put(msg)
            self.client.send()
        self.client.stop()
        self.thread.join()
        self.client = None
        self.server = None
# Sentinel message body: the test server rejects any message whose body
# equals this string (see MessengerTest.process_incoming).
REJECT_ME = "*REJECT-ME*"
class MessengerTest(Test):
    """Send/receive, windowing, and accept/reject tests against the echo
    server provided by the Test base fixture.
    """
    def run(self):
        """Server loop: receive and process messages until told to stop."""
        msg = Message()
        try:
            while self.running:
                self.server.recv(10)
                self.process_incoming(msg)
        except Timeout:
            print "server timed out"
        self.server.stop()
        self.running = False
    def process_incoming(self, msg):
        """Drain the incoming queue, rejecting REJECT_ME bodies and
        accepting everything else, then echo each message back."""
        while self.server.incoming:
            self.server.get(msg)
            if msg.body == REJECT_ME:
                self.server.reject()
            else:
                self.server.accept()
            self.dispatch(msg)
    def dispatch(self, msg):
        """Echo the message back to its reply-to address, if it has one."""
        if msg.reply_to:
            msg.address = msg.reply_to
            self.server.put(msg)
            self.server.settle()
    def _testSendReceive(self, size=None):
        """Round-trip one message; size (if given) sets the body length."""
        self.start()
        msg = Message()
        msg.address="amqp://0.0.0.0:12345"
        msg.subject="Hello World!"
        body = "First the world, then the galaxy!"
        if size is not None:
            # Double the body until long enough, then trim to exact size.
            while len(body) < size:
                body = 2*body
            body = body[:size]
        msg.load(body)
        self.client.put(msg)
        self.client.send()
        reply = Message()
        self.client.recv(1)
        assert self.client.incoming == 1
        self.client.get(reply)
        assert reply.subject == "Hello World!"
        rbod = reply.save()
        assert rbod == body, (rbod, body)
    def testSendReceive(self):
        self._testSendReceive()
    def testSendReceive1K(self):
        self._testSendReceive(1024)
    def testSendReceive2K(self):
        self._testSendReceive(2*1024)
    def testSendReceive4K(self):
        self._testSendReceive(4*1024)
    def testSendReceive10K(self):
        self._testSendReceive(10*1024)
    def testSendReceive100K(self):
        self._testSendReceive(100*1024)
    def testSendReceive1M(self):
        self._testSendReceive(1024*1024)
    def testSendBogus(self):
        """An unresolvable address must raise a MessengerException."""
        self.start()
        msg = Message()
        msg.address="totally-bogus-address"
        try:
            self.client.put(msg)
        except MessengerException, exc:
            err = str(exc)
            assert "unable to send to address: totally-bogus-address" in err, err
    def testOutgoingWindow(self):
        """Delivery status is only tracked for the last outgoing_window
        messages; untracked trackers report None."""
        self.server.incoming_window = 10
        self.start()
        msg = Message()
        msg.address="amqp://0.0.0.0:12345"
        msg.subject="Hello World!"
        trackers = []
        for i in range(10):
            trackers.append(self.client.put(msg))
        self.client.send()
        # No outgoing window configured: no status is retained.
        for t in trackers:
            assert self.client.status(t) is None
        self.client.outgoing_window = 5
        trackers = []
        for i in range(10):
            trackers.append(self.client.put(msg))
        # Only the 5 most recent puts fall inside the window.
        for i in range(5):
            t = trackers[i]
            assert self.client.status(t) is None, (t, self.client.status(t))
        for i in range(5, 10):
            t = trackers[i]
            assert self.client.status(t) is PENDING, (t, self.client.status(t))
        self.client.send()
        for i in range(5):
            t = trackers[i]
            assert self.client.status(t) is None
        for i in range(5, 10):
            t = trackers[i]
            assert self.client.status(t) is ACCEPTED
    def testReject(self, process_incoming=None):
        """Messages with the REJECT_ME body must report REJECTED status."""
        if process_incoming:
            # Allow a subclass/test to substitute the server-side handler.
            self.process_incoming = process_incoming
        self.server.incoming_window = 10
        self.start()
        msg = Message()
        msg.address="amqp://0.0.0.0:12345"
        msg.subject="Hello World!"
        self.client.outgoing_window = 10
        trackers = []
        rejected = []
        for i in range(10):
            if i == 5:
                msg.body = REJECT_ME
            else:
                msg.body = "Yay!"
            trackers.append(self.client.put(msg))
            if msg.body == REJECT_ME:
                rejected.append(trackers[-1])
        self.client.send()
        for t in trackers:
            if t in rejected:
                assert self.client.status(t) is REJECTED, (t, self.client.status(t))
            else:
                assert self.client.status(t) is ACCEPTED, (t, self.client.status(t))
    def testRejectIndividual(self):
        """Same as testReject, but rejecting by explicit tracker."""
        self.testReject(self.reject_individual)
    def reject_individual(self, msg):
        """Server handler: wait for all 10 messages, reject by tracker,
        then accept the remainder in one batch."""
        if self.server.incoming < 10:
            return
        while self.server.incoming:
            t = self.server.get(msg)
            if msg.body == REJECT_ME:
                self.server.reject(t)
            self.dispatch(msg)
        self.server.accept()
    def testIncomingWindow(self):
        """Explicitly accept received messages within an incoming window."""
        self.server.incoming_window = 10
        self.server.outgoing_window = 10
        self.start()
        msg = Message()
        msg.address="amqp://0.0.0.0:12345"
        msg.subject="Hello World!"
        self.client.outgoing_window = 10
        trackers = []
        for i in range(10):
            trackers.append(self.client.put(msg))
        self.client.send()
        for t in trackers:
            assert self.client.status(t) is ACCEPTED, (t, self.client.status(t))
        self.client.incoming_window = 10
        remaining = 10
        trackers = []
        while remaining:
            self.client.recv(remaining)
            while self.client.incoming:
                t = self.client.get()
                trackers.append(t)
                self.client.accept(t)
                remaining -= 1
        for t in trackers:
            assert self.client.status(t) is ACCEPTED, (t, self.client.status(t))
|
chirino/proton
|
tests/proton_tests/messenger.py
|
Python
|
apache-2.0
| 6,448
|
[
"Galaxy"
] |
2d88ac23dce00a72f55b57d9e9cc69717e9deb54573a84a16e108d70d042bb63
|
#!/usr/bin/env python
"""
Script to run the generation of phoSim instance catalogs
"""
from __future__ import with_statement, absolute_import, division, print_function
import time
import os
import argparse
import pandas as pd
from sqlalchemy import create_engine
from lsst.utils import getPackageDir
from lsst.sims.catUtils.utils import ObservationMetaDataGenerator
from desc.twinkles import TwinklesSky
def phoSimInputFileName(obsHistID,
                        prefix='InstanceCatalogs/PhoSim_input',
                        suffix='.txt',
                        location='./'):
    """
    function to return the absolute path to a filename for writing the phoSim
    input corresponding to obsHistID.
    Parameters
    ----------
    obsHistID : int, mandatory
        OpSim observation history ID identifying the visit; embedded in the
        file name.
    prefix : string, optional, defaults to 'InstanceCatalogs/PhoSim_input'
        file-name prefix; may contain a directory component.
    suffix : string, optional, defaults to '.txt'
        file extension appended after the obsHistID.
    location : string, optional, defaults to './'
        directory the returned path is rooted at.
    Returns
    -------
    string : path of the form `location/prefix_<obsHistID><suffix>`.
    """
    # Assemble the file name first, then root it at `location`.
    filename = '{0}_{1}{2}'.format(prefix, obsHistID, suffix)
    return os.path.join(location, filename)
def _sql_constraint(obsHistIDList):
"""
sql constraint to get OpSim pointing records for a list of obsHistID
Parameters
----------
obsHistIDList : list of integers, mandatory
list of obsHistIDs of interest
"""
sql_string = 'SELECT * FROM Summary WHERE ObsHistID in ('
sql_string += ', '.join(map(str, obsHistIDList))
sql_string += ')'
return sql_string
def generateSinglePointing(obs_metaData, availableConns, sntable,
                           fname,
                           sn_sed_file_dir,
                           db_config,
                           cache_dir):
    """
    Write the phoSim instance catalog (and SN SEDs) for one OpSim pointing.
    Parameters
    ----------
    obs_metaData : instance of `lsst.sims.utils.ObservationMetaData`
        observation metadata corresponding to an OpSim pointing
    availableConns : available connections to fatboy
    sntable : Table for SN on the fatboy database
    fname : output file for phoSim instance Catalog
    sn_sed_file_dir : directory to which the SN seds corresponding to this
        phoSim pointing are written
    db_config : the name of a file overriding the fatboy connection information
    cache_dir : the directory containing the source data of astrophysical objects
    """
    tstart = time.time()
    # Field-of-view radius (units per ObservationMetaData convention --
    # presumably degrees; confirm against lsst.sims.utils docs).
    obs_metaData.boundLength = 0.3
    print(obs_metaData.summary)
    obsHistID = obs_metaData._OpsimMetaData['obsHistID']
    # all but first two are default values of optional parameters
    # Kept in script to emphasize inputs
    tSky = TwinklesSky(obs_metadata=obs_metaData,
                       availableConnections=availableConns,
                       brightestStar_gmag_inCat=11.0,
                       brightestGal_gmag_inCat=11.0,
                       sntable=sntable,
                       sn_sedfile_prefix=os.path.join(sn_sed_file_dir, 'specFile_'),
                       db_config=db_config,
                       cache_dir=cache_dir)
    # fname = phoSimInputFileName(obsHistID)
    # if not os.path.exists(os.path.dirname(fname)):
    #     os.makedirs(os.path.dirname(fname))
    if not os.path.exists(sn_sed_file_dir):
        os.makedirs(sn_sed_file_dir)
    tSky.writePhoSimCatalog(fname)
    availConns = tSky.availableConnections
    tend = time.time()
    # Report wall-clock time spent on this pointing.
    print (obsHistID, tend - tstart)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Write phoSim Instance Catalogs'
                                     'and SN spectra to disk '
                                     'example : '
                                     'python generatePhosimInput.py 230\ '
                                     '--OpSimDBDir ~/data/LSST/OpSimData/\ '
                                     '--seddir "./"\ '
                                     '--outfile phosim_instance_catalog_220.txt')
    parser.add_argument('--opsimDB',
                        type=str,
                        help='OpSim database sqlite filename',
                        default='minion_1016_sqlite.db')
    parser.add_argument('visit',
                        type=int,
                        help='Visit number (obsHistID)')
    parser.add_argument('--OpSimDBDir',
                        help='absolute path to dir with the opsimBD',
                        type=str,
                        default='./')
    parser.add_argument('--outfile', type=str, default=None,
                        help='output filename for instance catalog')
    parser.add_argument('--seddir',
                        type=str,
                        default='.',
                        help='directory to contain SED files')
    parser.add_argument('--db_config', type=str, default=None,
                        help='config file overriding CatSim database connection information')
    parser.add_argument('--cache_dir', type=str,
                        default=os.path.join(getPackageDir('twinkles'), 'data'),
                        help='directory containing the source data for the InstanceCatalogs')
    args = parser.parse_args()
    # set the filename default to a sensible value using the obsHistID
    if args.outfile is None:
        args.outfile = phoSimInputFileName(args.visit,
                                           prefix='phosim_input',
                                           suffix='.txt',
                                           location='./')
    # Set up OpSim database
    opSimDBPath = os.path.join(args.OpSimDBDir, args.opsimDB)
    engine = create_engine('sqlite:///' + opSimDBPath)
    obs_gen = ObservationMetaDataGenerator(database=opSimDBPath)
    # Fetch the single OpSim pointing record for the requested visit and
    # convert it into an ObservationMetaData instance.
    sql_query = 'SELECT * FROM Summary WHERE ObsHistID == {}'.format(args.visit)
    df = pd.read_sql_query(sql_query, engine)
    recs = df.to_records()
    obsMetaDataResults = obs_gen.ObservationMetaDataFromPointingArray(recs)
    obs_metaData = obsMetaDataResults[0]
    # SN spectra are written alongside the catalog under spectra_files/.
    sn_sed_file_dir = os.path.join(args.seddir, 'spectra_files')
    availConns = None
    print('will generate pointing for {0} and write to filename {1}'.format(
        obs_metaData._OpsimMetaData['obsHistID'], args.outfile))
    generateSinglePointing(obs_metaData,
                           availableConns=availConns,
                           sntable='TwinkSN_run3',
                           fname=args.outfile,
                           sn_sed_file_dir=sn_sed_file_dir,
                           db_config=args.db_config,
                           cache_dir=args.cache_dir)
|
LSSTDESC/Twinkles
|
bin/generatePhosimInput.py
|
Python
|
mit
| 6,329
|
[
"VisIt"
] |
759a1a2b4378017131df9bf2015047640eaa497630def147d0509db7db69f23a
|
#!/nfs/farm/g/lsst/u1/software/redhat6-x86_64-64bit-gcc44/anaconda/2.3.0/bin/python
## runPhoSim.py - Run phoSim (one visit)
##
## Pipeline task script (Python 2).  Stages phoSim inputs into scratch
## space, runs phosim.py for a single sensor/visit, then exits with a
## return code the workflow engine interprets (rc=3 => checkpointing is
## active and more segments remain).
import os,sys
import subprocess,shlex
print '\n\n=====================================================================\n Entering runPhoSim.py\n=====================================================================\n'
sys.stdout.flush()
################## DEBUG #######################
################## DEBUG #######################
################## DEBUG #######################
## print 'Exiting without doing anything...'
## sys.exit(0)
################## DEBUG #######################
################## DEBUG #######################
################## DEBUG #######################
## Insert task config area for python modules (insert as 2nd element in sys.path)
sys.path.insert(1,os.getenv('TW_CONFIGDIR'))
## NOTE(review): 'log' and the PHOSIM* constants below presumably come from
## this star import of the task config module -- confirm against config.py.
from config import *
from setupPhoSimInput import setupPhoSimInput
log.info('Entering runPhoSim.py')
print 'PHOSIMPSCRATCH = ',PHOSIMPSCRATCH
## ################################# DEBUG ############################
#sys.exit(1)
debug=False
if debug:
    print "\n\n######################## RUNNING IN DEBUG MODE ######################\n\n"
    ## log.info("DEBUG: Exiting!")
    ## sys.exit(1)
    # In debug mode, only allow the first few substreams to run
    if int(os.environ['PIPELINE_STREAMPATH'].split('.')[1]) > 4:
        print 'PIPELINE_STREAMPATH too large'
        sys.exit(1)
        pass
    pass
## ################################# DEBUG ############################
## In a weak attempt to prevent resource overload, offset the
## starttime by N minutes, where N is the substream number (0-188)
if not debug:
    min2delay = os.environ['PIPELINE_STREAMPATH'].split('.')[1]
    print 'min2delay = ',min2delay
    sec2delay = int(min2delay)*60
    cmd = "sleep "+str(sec2delay)
    print 'cmd = ',cmd
    os.system(cmd)
    pass
## Prepare for phoSim: inputs and work directory in _SCRATCH_ space
log.info('Prepare phoSim input area')
icFile = os.path.basename(os.environ['TW_INSTANCE_CATALOG'])
log.info('icFile = '+icFile)
prep = setupPhoSimInput(icFile) ###########
#prep.inputRoot = PHOSIMIN
prep.inputRoot = PHOSIMPSCRATCH
prep.phosimInstDir = PHOSIMINST
prep.SEDlib = PHOSIMSEDS ## production SEDs
prep.sedFile = 'spectra_files.tar.gz' ## sprinkled SEDs
prep.refCF = PHOSIMCF ## cmd file template (may require editing)
prep.persistentScratch = True ## dynamically generated instance catalog + SEDs
prep.cleanupFlag = False ## DEBUG - keep contents of scratch
# prep.run() stages everything and returns the scratch work dir, the
# instance catalog path, the SED dir, and the phoSim command file.
(work1,ic,seds,cFile) = prep.run()
print 'Return from prep.run:'
print ' work1 = ',work1
print ' ic = ',ic
print ' seds = ',seds
print ' cFile = ',cFile
print
## Which sensor is being simulated?
sensor = os.environ['TW_SENSOR']
## Prepare output directory in permanent NFS space
outDir = os.path.join(os.environ['TW_PHOSIMOUT'],sensor,'output')
if not os.access(outDir,os.W_OK):
    log.info('Creating output dir '+outDir)
    os.makedirs(outDir)
    pass
## Prepare work directory in permanent NFS space (but may not use it)
work2 = os.path.join(os.environ['TW_PHOSIMOUT'],sensor,'work')
if not os.access(work2,os.W_OK):
    log.info('Creating NFS workdir '+work2)
    os.makedirs(work2)
    pass
## Select either the scratch (work1) or permanent (work2) version of the workDir
workDir = work1
## Assess whether checkpointing is in progress and prepare, if necessary
prep.cpPrep(workDir)
## DEBUG stuff
prep.dump()
## Prepare phoSim command
log.info('Prepare phoSim command')
cmd = PHOSIMINST+'/phosim.py '+ic+' -c '+cFile+' -s '+sensor+' '+PHOSIMOPTS+' --sed '+seds+' -w '+workDir+' -o '+outDir
#cmd = 'time '+PHOSIMINST+'/phosim.py '+ic+' -c '+cFile+' -s '+sensor+' '+PHOSIMOPTS+' --sed '+seds+' -w '+workDir+' -o '+outDir
#cmd = 'time '+PHOSIMINST+'/phosim.py '+ic+' -c '+cFile+' -s '+sensor+' '+PHOSIMOPTS+' -w '+workDir+' -o '+outDir
print 'cmd = ',cmd
sys.stdout.flush()
## Execute phoSim
if prep.checkpoint:
    log.info('Execute phoSim at checkpoint '+str(prep.nextCP)+' in [0,'+str(prep.reqCP)+']')
else:
    log.info('Execute phoSim\n\n\n')
    pass
sys.stdout.flush()
## New way to execute phoSim, buffers stderr in case phosim does not adjust the return code properly
cmdList = shlex.split(cmd)
xphosim = subprocess.Popen(cmdList, stderr=subprocess.PIPE)
yak = xphosim.communicate()
rc = xphosim.returncode
print 'phoSim rc = ',rc,', type(rc) = ',type(rc)
# Any stderr output is treated as fatal, even if the return code was 0
if len(yak[1]) > 0:
    print '\n\n$WARNING: phoSim stderr output:\n',yak[1]
    print 'TERMINATING...'
    sys.exit(1)
#    if rc == 0:
#        print '\nphoSim returned error output but a zero return code...setting rc=1'
#        rc = 1
#        pass
    pass
## Original way of executing phoSim
#rc = os.system(cmd)
sys.stdout.flush()
## phoSim complete
print '\n\n\n******************************************************************'
print ' phoSim exited'
print '******************************************************************\n'
print 'phoSim rc = ',rc
if rc > 255: ## phoSim can return rc=256 which maps to '0' :(
    rc = rc % 255
    print 'Converting illegal return code to range [0,255]. New rc = ',rc
    pass
## Take a look at phoSim work and output directories
cmd = 'ls -ltraF '+workDir
print 'Contents of /work, ',workDir
sys.stdout.flush()
os.system(cmd)
sys.stdout.flush()
print '******************************************************************\n'
cmd = 'ls -ltraF '+outDir
print 'Contents of /output, ',outDir
sys.stdout.flush()
os.system(cmd)
sys.stdout.flush()
print '******************************************************************\n'
## Special handling of checkpointing
if prep.checkpoint:
    log.info('Checkpointing active')
    print 'prep.reqCP = ',int(prep.reqCP)
    print 'prep.nextCP = ',int(prep.nextCP)
    if int(prep.reqCP) == int(prep.nextCP):
        log.info("Final checkpoint segment complete: "+str(prep.reqCP))
    else:
        log.info("Completed checkpoint "+str(prep.nextCP)+" in [0,"+str(prep.reqCP)+']')
        ## rc = special value for Pipeline
        rc=3
        pass
    pass
## Clean up scratch area
log.info('Clean up scratch area')
prep.clean()
log.info('Exit with rc='+str(rc))
sys.exit(rc)
""" From phoSim v3.5.2:
Usage: phosim.py instance_catalog [<arg1> <arg2> ...]
Options:
  -h, --help            show this help message and exit
  -c EXTRACOMMANDS, --command=EXTRACOMMANDS
                        command file to modify the default physics
  -p NUMPROC, --proc=NUMPROC
                        number of processors
  -o OUTPUTDIR, --output=OUTPUTDIR
                        output image directory
  -w WORKDIR, --work=WORKDIR
                        temporary work directory
  -b BINDIR, --bin=BINDIR
                        binary file directory
  -d DATADIR, --data=DATADIR
                        data directory
  --sed=SEDDIR          SED file directory
  --image=IMAGEDIR      postage stamp image directory
  -s SENSOR, --sensor=SENSOR
                        sensor chip specification (e.g., all, R22_S11,
                        "R22_S11|R22_S12")
  -i INSTRUMENT, --instrument=INSTRUMENT
                        instrument site directory
  -g GRID, --grid=GRID  execute remotely (no, condor, cluster, diagrid)
  -u UNIVERSE, --universe=UNIVERSE
                        condor universe (standard, vanilla)
  -e E2ADC, --e2adc=E2ADC
                        whether to generate amplifier images (1 = true, 0 =
                        false)
  --keepscreens=KEEPSCREENS
                        whether to keep atmospheric phase screens (0 = false,
                        1 = true)
  --checkpoint=CHECKPOINT
                        number of checkpoints (condor only)
  -v, --version         prints the version
"""
|
LSSTDESC/Twinkles
|
workflows/TW-phoSim-r3/runPhoSim.py
|
Python
|
mit
| 7,723
|
[
"VisIt"
] |
3a8ed1dd5be67cf2d541ecb9ef6a70345913bd9ab13ccf2390172f90ee87ed3c
|
# Copyright 2000 by Bertrand Frottier . All rights reserved.
# Revisions 2005-2006 copyright Michiel de Hoon
# Revisions 2006-2009 copyright Peter Cock
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""This module provides code to work with the BLAST XML output
following the DTD available on the NCBI FTP
ftp://ftp.ncbi.nlm.nih.gov/blast/documents/xml/NCBI_BlastOutput.dtd
Classes:
BlastParser Parses XML output from BLAST (direct use discouraged).
This (now) returns a list of Blast records.
Historically it returned a single Blast record.
You are expected to use this via the parse or read functions.
_XMLParser Generic SAX parser (private).
Functions:
parse Incremental parser, this is an iterator that returns
Blast records. It uses the BlastParser internally.
read Returns a single Blast record. Uses the BlastParser internally.
"""
from Bio.Blast import Record
import xml.sax
from xml.sax.handler import ContentHandler
class _XMLparser(ContentHandler):
    """Generic SAX Parser (PRIVATE).

    Just a very basic SAX parser.  Subclasses supply the per-tag behaviour
    by defining methods named _start_<tag> and _end_<tag> (with '-'
    replaced by '_'); startElement/endElement dispatch to them by name.
    """
    def __init__(self, debug=0):
        """Constructor

        debug - integer, amount of debug information to print
        """
        self._tag = []                 # stack of tag names seen so far
        self._value = ''               # character data for the current tag
        self._debug = debug
        self._debug_ignore_list = []   # tags with no handler, reported once

    def _secure_name(self, name):
        """Removes 'dangerous' from tag names

        name -- name to be 'secured'
        """
        # Replace '-' with '_' in XML tag names so that they form valid
        # Python identifiers (e.g. Hsp_bit-score -> Hsp_bit_score)
        return name.replace('-', '_')

    def startElement(self, name, attr):
        """Found XML start tag

        No real need of attr, BLAST DTD doesn't use them

        name -- name of the tag

        attr -- tag attributes
        """
        self._tag.append(name)

        # Try to call a method (defined in subclasses).
        # getattr is both safer and faster than the old
        # eval("self.%s()" % method) string-based dispatch.
        method = self._secure_name('_start_' + name)
        #Note could use try / except AttributeError
        #BUT I found often triggered by nested errors...
        if hasattr(self, method):
            getattr(self, method)()
            if self._debug > 4:
                print("NCBIXML: Parsed: " + method)
        else:
            # Doesn't exist (yet) - report each missing handler only once
            if method not in self._debug_ignore_list:
                if self._debug > 3:
                    print("NCBIXML: Ignored: " + method)
                self._debug_ignore_list.append(method)

        #We don't care about white space in parent tags like Hsp,
        #but that white space doesn't belong to child tags like Hsp_midline
        if self._value.strip():
            raise ValueError("What should we do with %s before the %s tag?" \
                             % (repr(self._value), name))
        self._value = ""

    def characters(self, ch):
        """Found some text

        ch -- characters read
        """
        self._value += ch # You don't ever get the whole string

    def endElement(self, name):
        """Found XML end tag

        name -- tag name
        """
        # DON'T strip any white space, we may need it e.g. the hsp-midline

        # Try to call a method (defined in subclasses) - getattr dispatch,
        # see startElement for rationale.
        method = self._secure_name('_end_' + name)
        #Note could use try / except AttributeError
        #BUT I found often triggered by nested errors...
        if hasattr(self, method):
            getattr(self, method)()
            if self._debug > 2:
                print("NCBIXML: Parsed: %s %s" % (method, self._value))
        else:
            # Doesn't exist (yet) - report each missing handler only once
            if method not in self._debug_ignore_list:
                if self._debug > 1:
                    print("NCBIXML: Ignored: %s %s" % (method, self._value))
                self._debug_ignore_list.append(method)

        # Reset character buffer
        self._value = ''
class BlastParser(_XMLparser):
    """Parse XML BLAST data into a Record.Blast object

    All XML 'action' methods are private methods and may be:
    _start_TAG   called when the start tag is found
    _end_TAG     called when the end tag is found
    """

    def __init__(self, debug=0):
        """Constructor

        debug - integer, amount of debug information to print
        """
        # Calling superclass method
        _XMLparser.__init__(self, debug)

        self._parser = xml.sax.make_parser()
        self._parser.setContentHandler(self)

        # To avoid ValueError: unknown url type: NCBI_BlastOutput.dtd
        self._parser.setFeature(xml.sax.handler.feature_validation, 0)
        self._parser.setFeature(xml.sax.handler.feature_namespaces, 0)
        self._parser.setFeature(xml.sax.handler.feature_external_pes, 0)
        self._parser.setFeature(xml.sax.handler.feature_external_ges, 0)

        self.reset()

    def reset(self):
        """Reset all the data allowing reuse of the BlastParser() object"""
        self._records = []
        self._header = Record.Header()
        self._parameters = Record.Parameters()
        self._parameters.filter = None #Maybe I should update the class?

    def _start_Iteration(self):
        # One Iteration element per query sequence
        self._blast = Record.Blast()
        pass

    def _end_Iteration(self):
        # We stored a lot of generic "top level" information
        # in self._header (an object of type Record.Header)
        self._blast.reference = self._header.reference
        self._blast.date = self._header.date
        self._blast.version = self._header.version
        self._blast.database = self._header.database
        self._blast.application = self._header.application

        # These are required for "old" pre 2.2.14 files
        # where only <BlastOutput_query-ID>, <BlastOutput_query-def>
        # and <BlastOutput_query-len> were used.  Now they
        # are suplemented/replaced by <Iteration_query-ID>,
        # <Iteration_query-def> and <Iteration_query-len>
        if not hasattr(self._blast, "query") \
        or not self._blast.query:
            self._blast.query = self._header.query
        if not hasattr(self._blast, "query_id") \
        or not self._blast.query_id:
            self._blast.query_id = self._header.query_id
        if not hasattr(self._blast, "query_letters") \
        or not self._blast.query_letters:
            self._blast.query_letters = self._header.query_letters

        # Hack to record the query length as both the query_letters and
        # query_length properties (as in the plain text parser, see
        # Bug 2176 comment 12):
        self._blast.query_length = self._blast.query_letters
        # Perhaps in the long term we should deprecate one, but I would
        # prefer to drop query_letters - so we need a transition period
        # with both.

        # Hack to record the claimed database size as database_length
        # (as well as in num_letters_in_database, see Bug 2176 comment 13):
        self._blast.database_length = self._blast.num_letters_in_database
        # TODO? Deprecate database_letters next?

        # Hack to record the claimed database sequence count as database_sequences
        self._blast.database_sequences = self._blast.num_sequences_in_database

        # Apply the "top level" parameter information
        self._blast.matrix = self._parameters.matrix
        self._blast.num_seqs_better_e = self._parameters.num_seqs_better_e
        self._blast.gap_penalties = self._parameters.gap_penalties
        self._blast.filter = self._parameters.filter
        self._blast.expect = self._parameters.expect
        self._blast.sc_match = self._parameters.sc_match
        self._blast.sc_mismatch = self._parameters.sc_mismatch

        #Add to the list
        self._records.append(self._blast)
        #Clear the object (a new empty one is create in _start_Iteration)
        self._blast = None
        if self._debug:
            # Bug fix: this used to be a bare string expression (a no-op),
            # so the debug message was never actually emitted.
            print("NCBIXML: Added Blast record to results")

    # Header
    def _end_BlastOutput_program(self):
        """BLAST program, e.g., blastp, blastn, etc.

        Save this to put on each blast record object
        """
        self._header.application = self._value.upper()

    def _end_BlastOutput_version(self):
        """version number and date of the BLAST engine.

        e.g. "BLASTX 2.2.12 [Aug-07-2005]" but there can also be
        variants like "BLASTP 2.2.18+" without the date.

        Save this to put on each blast record object
        """
        parts = self._value.split()
        #TODO - Check the first word starts with BLAST?

        #The version is the second word (field one)
        self._header.version = parts[1]

        #Check there is a third word (the date)
        if len(parts) >= 3:
            if parts[2][0] == "[" and parts[2][-1] == "]":
                self._header.date = parts[2][1:-1]
            else:
                #Assume this is still a date, but without the
                #square brackets
                self._header.date = parts[2]

    def _end_BlastOutput_reference(self):
        """a reference to the article describing the algorithm

        Save this to put on each blast record object
        """
        self._header.reference = self._value

    def _end_BlastOutput_db(self):
        """the database(s) searched

        Save this to put on each blast record object
        """
        self._header.database = self._value

    def _end_BlastOutput_query_ID(self):
        """the identifier of the query

        Important in old pre 2.2.14 BLAST, for recent versions
        <Iteration_query-ID> is enough
        """
        self._header.query_id = self._value

    def _end_BlastOutput_query_def(self):
        """the definition line of the query

        Important in old pre 2.2.14 BLAST, for recent versions
        <Iteration_query-def> is enough
        """
        self._header.query = self._value

    def _end_BlastOutput_query_len(self):
        """the length of the query

        Important in old pre 2.2.14 BLAST, for recent versions
        <Iteration_query-len> is enough
        """
        self._header.query_letters = int(self._value)

    def _end_Iteration_query_ID(self):
        """the identifier of the query
        """
        self._blast.query_id = self._value

    def _end_Iteration_query_def(self):
        """the definition line of the query
        """
        self._blast.query = self._value

    def _end_Iteration_query_len(self):
        """the length of the query
        """
        self._blast.query_letters = int(self._value)

##    def _end_BlastOutput_query_seq(self):
##        """the query sequence
##        """
##        pass # XXX Missing in Record.Blast ?

##    def _end_BlastOutput_iter_num(self):
##        """the psi-blast iteration number
##        """
##        pass # XXX TODO PSI

    def _end_BlastOutput_hits(self):
        """hits to the database sequences, one for every sequence
        """
        self._blast.num_hits = int(self._value)

##    def _end_BlastOutput_message(self):
##        """error messages
##        """
##        pass # XXX What to do ?

    # Parameters
    def _end_Parameters_matrix(self):
        """matrix used (-M)
        """
        self._parameters.matrix = self._value

    def _end_Parameters_expect(self):
        """expect values cutoff (-e)
        """
        # NOTE: In old text output there was a line:
        #    Number of sequences better than 1.0e-004: 1
        # As far as I can see, parameters.num_seqs_better_e
        # would take the value of 1, and the expectation
        # value was not recorded.
        #
        # Anyway we should NOT record this against num_seqs_better_e
        self._parameters.expect = self._value

##    def _end_Parameters_include(self):
##        """inclusion threshold for a psi-blast iteration (-h)
##        """
##        pass # XXX TODO PSI

    def _end_Parameters_sc_match(self):
        """match score for nucleotide-nucleotide comparaison (-r)
        """
        self._parameters.sc_match = int(self._value)

    def _end_Parameters_sc_mismatch(self):
        """mismatch penalty for nucleotide-nucleotide comparaison (-r)
        """
        self._parameters.sc_mismatch = int(self._value)

    def _end_Parameters_gap_open(self):
        """gap existence cost (-G)
        """
        self._parameters.gap_penalties = int(self._value)

    def _end_Parameters_gap_extend(self):
        """gap extension cose (-E)
        """
        self._parameters.gap_penalties = (self._parameters.gap_penalties,
                                          int(self._value))

    def _end_Parameters_filter(self):
        """filtering options (-F)
        """
        self._parameters.filter = self._value

##    def _end_Parameters_pattern(self):
##        """pattern used for phi-blast search
##        """
##        pass # XXX TODO PSI

##    def _end_Parameters_entrez_query(self):
##        """entrez query used to limit search
##        """
##        pass # XXX TODO PSI

    # Hits
    def _start_Hit(self):
        self._blast.alignments.append(Record.Alignment())
        self._blast.descriptions.append(Record.Description())
        self._blast.multiple_alignment = []
        self._hit = self._blast.alignments[-1]
        self._descr = self._blast.descriptions[-1]
        self._descr.num_alignments = 0

    def _end_Hit(self):
        #Cleanup
        self._blast.multiple_alignment = None
        self._hit = None
        self._descr = None

    def _end_Hit_id(self):
        """identifier of the database sequence
        """
        self._hit.hit_id = self._value
        self._hit.title = self._value + ' '

    def _end_Hit_def(self):
        """definition line of the database sequence
        """
        self._hit.hit_def = self._value
        self._hit.title += self._value
        self._descr.title = self._hit.title

    def _end_Hit_accession(self):
        """accession of the database sequence
        """
        self._hit.accession = self._value
        self._descr.accession = self._value

    def _end_Hit_len(self):
        self._hit.length = int(self._value)

    # HSPs
    def _start_Hsp(self):
        #Note that self._start_Hit() should have been called
        #to setup things like self._blast.multiple_alignment
        self._hit.hsps.append(Record.HSP())
        self._hsp = self._hit.hsps[-1]
        self._descr.num_alignments += 1
        self._blast.multiple_alignment.append(Record.MultipleAlignment())
        self._mult_al = self._blast.multiple_alignment[-1]

    # Hsp_num is useless
    def _end_Hsp_score(self):
        """raw score of HSP
        """
        self._hsp.score = float(self._value)
        # First HSP provides the description-level score
        if self._descr.score is None:
            self._descr.score = float(self._value)

    def _end_Hsp_bit_score(self):
        """bit score of HSP
        """
        self._hsp.bits = float(self._value)
        if self._descr.bits is None:
            self._descr.bits = float(self._value)

    def _end_Hsp_evalue(self):
        """expect value value of the HSP
        """
        self._hsp.expect = float(self._value)
        if self._descr.e is None:
            self._descr.e = float(self._value)

    def _end_Hsp_query_from(self):
        """offset of query at the start of the alignment (one-offset)
        """
        self._hsp.query_start = int(self._value)

    def _end_Hsp_query_to(self):
        """offset of query at the end of the alignment (one-offset)
        """
        self._hsp.query_end = int(self._value)

    def _end_Hsp_hit_from(self):
        """offset of the database at the start of the alignment (one-offset)
        """
        self._hsp.sbjct_start = int(self._value)

    def _end_Hsp_hit_to(self):
        """offset of the database at the end of the alignment (one-offset)
        """
        self._hsp.sbjct_end = int(self._value)

##    def _end_Hsp_pattern_from(self):
##        """start of phi-blast pattern on the query (one-offset)
##        """
##        pass # XXX TODO PSI

##    def _end_Hsp_pattern_to(self):
##        """end of phi-blast pattern on the query (one-offset)
##        """
##        pass # XXX TODO PSI

    def _end_Hsp_query_frame(self):
        """frame of the query if applicable
        """
        self._hsp.frame = (int(self._value),)

    def _end_Hsp_hit_frame(self):
        """frame of the database sequence if applicable
        """
        self._hsp.frame += (int(self._value),)

    def _end_Hsp_identity(self):
        """number of identities in the alignment
        """
        self._hsp.identities = int(self._value)

    def _end_Hsp_positive(self):
        """number of positive (conservative) substitutions in the alignment
        """
        self._hsp.positives = int(self._value)

    def _end_Hsp_gaps(self):
        """number of gaps in the alignment
        """
        self._hsp.gaps = int(self._value)

    def _end_Hsp_align_len(self):
        """length of the alignment
        """
        self._hsp.align_length = int(self._value)

##    def _en_Hsp_density(self):
##        """score density
##        """
##        pass # XXX ???

    def _end_Hsp_qseq(self):
        """alignment string for the query
        """
        self._hsp.query = self._value

    def _end_Hsp_hseq(self):
        """alignment string for the database
        """
        self._hsp.sbjct = self._value

    def _end_Hsp_midline(self):
        """Formatting middle line as normally seen in BLAST report
        """
        self._hsp.match = self._value # do NOT strip spaces!
        assert len(self._hsp.match)==len(self._hsp.query)
        assert len(self._hsp.match)==len(self._hsp.sbjct)

    # Statistics
    def _end_Statistics_db_num(self):
        """number of sequences in the database
        """
        self._blast.num_sequences_in_database = int(self._value)

    def _end_Statistics_db_len(self):
        """number of letters in the database
        """
        self._blast.num_letters_in_database = int(self._value)

    def _end_Statistics_hsp_len(self):
        """the effective HSP length
        """
        self._blast.effective_hsp_length = int(self._value)

    def _end_Statistics_eff_space(self):
        """the effective search space
        """
        self._blast.effective_search_space = float(self._value)

    def _end_Statistics_kappa(self):
        """Karlin-Altschul parameter K
        """
        self._blast.ka_params = float(self._value)

    def _end_Statistics_lambda(self):
        """Karlin-Altschul parameter Lambda
        """
        # Lambda arrives after kappa; combine into a (lambda, kappa) tuple
        self._blast.ka_params = (float(self._value),
                                 self._blast.ka_params)

    def _end_Statistics_entropy(self):
        """Karlin-Altschul parameter H
        """
        # Append H, giving the final (lambda, kappa, entropy) tuple
        self._blast.ka_params = self._blast.ka_params + (float(self._value),)
def read(handle, debug=0):
    """Returns a single Blast record (assumes just one query).

    This function is for use when there is one and only one BLAST
    result in your XML file.

    Use the Bio.Blast.NCBIXML.parse() function if you expect more than
    one BLAST record (i.e. if you have more than one query sequence).

    Raises ValueError if the handle contains no records, or more than one.
    """
    iterator = parse(handle, debug)
    # next(iterator, default) works on Python 2.6+ and Python 3, unlike
    # the old Python 2 only iterator.next() idiom, and also replaces the
    # explicit try/except StopIteration blocks.
    first = next(iterator, None)
    if first is None:
        raise ValueError("No records found in handle")
    second = next(iterator, None)
    if second is not None:
        raise ValueError("More than one record found in handle")
    return first
def parse(handle, debug=0):
    """Returns an iterator a Blast record for each query.

    handle - file handle to and XML file to parse
    debug - integer, amount of debug information to print

    This is a generator function that returns multiple Blast records
    objects - one for each query sequence given to blast.  The file
    is read incrementally, returning complete records as they are read
    in.

    Should cope with new BLAST 2.2.14+ which gives a single XML file
    for mutliple query records.

    Should also cope with XML output from older versions BLAST which
    gave multiple XML files concatenated together (giving a single file
    which strictly speaking wasn't valid XML)."""
    from xml.parsers import expat
    # Read size per increment; MARGIN extra bytes are peeked ahead so a
    # "<?xml" header straddling a block boundary is still detected.
    BLOCK = 1024
    MARGIN = 10 # must be at least length of newline + XML start
    XML_START = "<?xml"

    text = handle.read(BLOCK)
    pending = ""

    if not text:
        #NO DATA FOUND!
        raise ValueError("Your XML file was empty")

    # Outer loop: one iteration per concatenated XML file (old BLAST
    # wrote one XML document per query).
    while text:
        #We are now starting a new XML file
        if not text.startswith(XML_START):
            raise ValueError("Your XML file did not start with %s... "
                             "but instead %s" \
                             % (XML_START, repr(text[:20])))

        # Fresh expat + BlastParser pair for each XML document; the SAX
        # events are routed into the BlastParser handler methods.
        expat_parser = expat.ParserCreate()
        blast_parser = BlastParser(debug)
        expat_parser.StartElementHandler = blast_parser.startElement
        expat_parser.EndElementHandler = blast_parser.endElement
        expat_parser.CharacterDataHandler = blast_parser.characters

        expat_parser.Parse(text, False)
        # Yield any records completed by the first block
        while blast_parser._records:
            record = blast_parser._records[0]
            blast_parser._records = blast_parser._records[1:]
            yield record

        # Inner loop: feed the rest of this XML document block by block
        while True:
            #Read in another block of the file...
            text, pending = pending + handle.read(BLOCK), ""
            if not text:
                #End of the file!
                expat_parser.Parse("", True) # End of XML record
                break

            #Now read a little bit more so we can check for the
            #start of another XML file...
            pending = handle.read(MARGIN)

            if (text+pending).find("\n" + XML_START) == -1:
                # Good - still dealing with the same XML file
                expat_parser.Parse(text, False)
                while blast_parser._records:
                    yield blast_parser._records.pop(0)
            else:
                # This is output from pre 2.2.14 BLAST,
                # one XML file for each query!

                # Finish the old file:
                text, pending = (text+pending).split("\n" + XML_START,1)
                pending = XML_START + pending

                expat_parser.Parse(text, True) # End of XML record
                while blast_parser._records:
                    yield blast_parser._records.pop(0)

                #Now we are going to re-loop, reset the
                #parsers and start reading the next XML file
                text, pending = pending, ""
                break

            #this was added because it seems that the Jython expat parser
            #was adding records later then the Python one
            while blast_parser._records:
                yield blast_parser._records.pop(0)

        #At this point we have finished the first XML record.
        #If the file is from an old version of blast, it may
        #contain more XML records (check if text=="").
        assert pending==""
        assert len(blast_parser._records) == 0

    #We should have finished the file!
    assert text==""
    assert pending==""
    assert len(blast_parser._records) == 0
if __name__ == '__main__':
    # Self-test / demo (Python 2 only: uses print statements and the
    # builtin reduce).  Parses the BLAST XML file named on the command
    # line and prints a summary of each record.
    import sys
    import os  # NOTE(review): imported but unused in this demo block
    handle = open(sys.argv[1])
    r_list = parse(handle)
    for r in r_list:
        # Small test
        print 'Blast of', r.query
        print 'Found %s alignments with a total of %s HSPs' % (len(r.alignments),
               reduce(lambda a,b: a+b,
                      [len(a.hsps) for a in r.alignments]))
        for al in r.alignments:
            print al.title[:50], al.length, 'bp', len(al.hsps), 'HSPs'

        # Cookbook example
        E_VALUE_THRESH = 0.04
        for alignment in r.alignments:
            for hsp in alignment.hsps:
                if hsp.expect < E_VALUE_THRESH:
                    print '*****'
                    print 'sequence', alignment.title
                    print 'length', alignment.length
                    print 'e value', hsp.expect
                    print hsp.query[:75] + '...'
                    print hsp.match[:75] + '...'
                    print hsp.sbjct[:75] + '...'
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/Blast/NCBIXML.py
|
Python
|
gpl-2.0
| 24,483
|
[
"BLAST",
"Biopython"
] |
679e8a53711004d0e58ed46a986b15f827ec19cdf7a6d4673ee93b07dbcbd30d
|
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit as u

# ----- run configuration ------------------------------------------------
# Which integrator/barostat combination to run this time.  Other options:
# "langevin2fs", "langevin1fs", "langevin0.5fs".
integrator_type = "langevin2fs_barostat12"

n_steps = 500000000           # production MD steps
output_frequency = 500        # state-data log interval (steps)
dcd_frequency = 5000          # trajectory frame interval (steps)

friction = 1.0 / u.picoseconds
temperature = 300. * u.kelvin
pressure = 1.0 * u.atmospheres
barostat_frequency = 12       # MC barostat attempt interval (was 25)
cutoff = 1.2 * u.nanometers   # towards the high end to avoid cutoff artifacts

ffxml_filename = "tip3p.xml"
force_field = app.ForceField(ffxml_filename)

dcd_filename = "./water/production_%s.dcd" % integrator_type
log_filename = "./water/production_%s.log" % integrator_type

# ----- system setup -----------------------------------------------------
pdb_file = app.PDBFile("./tip3p.pdb")
topology = pdb_file.topology
positions = pdb_file.positions

system = force_field.createSystem(topology, nonbondedMethod=app.PME,
                                  nonbondedCutoff=cutoff, constraints=app.HBonds)

available_integrators = {
    "langevin2fs": mm.LangevinIntegrator(temperature, friction, 2.0 * u.femtoseconds),
    "langevin1fs": mm.LangevinIntegrator(temperature, friction, 1.0 * u.femtoseconds),
    "langevin0.5fs": mm.LangevinIntegrator(temperature, friction, 0.5 * u.femtoseconds),
}
# Same integrator as "langevin2fs", but paired with a different barostat frequency.
available_integrators["langevin2fs_barostat12"] = available_integrators["langevin2fs"]
integrator = available_integrators[integrator_type]

system.addForce(mm.MonteCarloBarostat(pressure, temperature, barostat_frequency))

# ----- minimize, then run production ------------------------------------
simulation = app.Simulation(topology, system, integrator)
simulation.context.setPositions(positions)
simulation.minimizeEnergy()
simulation.context.setVelocitiesToTemperature(temperature)

simulation.reporters.append(app.DCDReporter(dcd_filename, dcd_frequency))
simulation.reporters.append(app.StateDataReporter(open(log_filename, 'w'), output_frequency,
                                                  step=True, time=True, temperature=True,
                                                  speed=True, density=True))

print(integrator_type)
simulation.step(n_steps)
|
kyleabeauchamp/DensityTest
|
src/production_water.py
|
Python
|
gpl-2.0
| 1,884
|
[
"OpenMM"
] |
d167e8cf206a9dc873d5bbf568f77bc3ee2335b7b5eb4cc2c8956e109820d585
|
import os
import orca
import pandas as pd
from activitysim import activitysim as asim
from activitysim.cdap import cdap
"""
CDAP stands for Coordinated Daily Activity Pattern, which is a choice of
high-level activity pattern for each person, in a coordinated way with other
members of a person's household.
Because Python requires vectorization of computation, there are some specialized
routines in the cdap directory of activitysim for this purpose. This module
simply applies those utilities using the simulation framework.
"""
@orca.injectable()
def cdap_1_person_spec(configs_dir):
    """Model spec for one-person households, read from configs (NaNs -> 0)."""
    spec_path = os.path.join(configs_dir, 'configs', "cdap_1_person.csv")
    return asim.read_model_spec(spec_path).fillna(0)
@orca.injectable()
def cdap_2_person_spec(configs_dir):
    """Model spec for two-person households, read from configs (NaNs -> 0)."""
    spec_path = os.path.join(configs_dir, 'configs', "cdap_2_person.csv")
    return asim.read_model_spec(spec_path).fillna(0)
@orca.injectable()
def cdap_3_person_spec(configs_dir):
    """Model spec for three-person households, read from configs (NaNs -> 0)."""
    spec_path = os.path.join(configs_dir, 'configs', "cdap_3_person.csv")
    return asim.read_model_spec(spec_path).fillna(0)
@orca.injectable()
def cdap_final_rules(configs_dir):
    """Final CDAP rule spec, read from configs (NaNs -> 0)."""
    spec_path = os.path.join(configs_dir, 'configs', "cdap_final_rules.csv")
    return asim.read_model_spec(spec_path).fillna(0)
@orca.injectable()
def cdap_all_people(configs_dir):
    """All-people CDAP spec, read from configs (NaNs -> 0)."""
    spec_path = os.path.join(configs_dir, 'configs', "cdap_all_people.csv")
    return asim.read_model_spec(spec_path).fillna(0)
@orca.step()
def cdap_simulate(set_random_seed, persons_merged,
                  cdap_1_person_spec, cdap_2_person_spec, cdap_3_person_spec,
                  cdap_final_rules, cdap_all_people):
    """Run the Coordinated Daily Activity Pattern model.

    Orca injects the merged persons table plus the five CSV-derived spec
    tables; the chosen activity per person is written back to the
    'persons' table as the 'cdap_activity' column.

    NOTE(review): set_random_seed is unused here and presumably injected
    for its seeding side effect -- confirm.
    """
    choices = cdap.run_cdap(persons_merged.to_frame(),
                            "household_id",
                            "ptype",
                            cdap_1_person_spec,
                            cdap_2_person_spec,
                            cdap_3_person_spec,
                            cdap_final_rules,
                            cdap_all_people)
    # Python 2 print statement (module predates Python 3 support)
    print "Choices:\n", choices.value_counts()
    orca.add_column("persons", "cdap_activity", choices)
|
bhargavasana/activitysim
|
activitysim/defaults/models/cdap.py
|
Python
|
agpl-3.0
| 2,056
|
[
"ORCA"
] |
db454916a3ec8ef7e89a70bce736dad2843d080c64e00baffda92bb6d794a54b
|
'''
Implementation of Gaussian Mixture Models.
Author : Aleyna Kara(@karalleyna)
'''
import superimport
import jax.numpy as jnp
from jax import vmap, value_and_grad, jit
from jax.lax import scan
from jax.random import PRNGKey, uniform, split, permutation
from jax.nn import softmax
import distrax
from distrax._src.utils import jittable
import tensorflow_probability as tfp
from mixture_lib import MixtureSameFamily
import matplotlib.pyplot as plt
import itertools
from jax.experimental import optimizers
opt_init, opt_update, get_params = optimizers.adam(5e-2)
class GMM(jittable.Jittable):
    def __init__(self, mixing_coeffs, means, covariances):
        '''
        Initializes Gaussian Mixture Model

        Parameters
        ----------
        mixing_coeffs : array
            Mixture weights of the components
        means : array
            Mean of each component
        covariances : array
            Full covariance matrix of each component
        '''
        # Assigning through the `model` property setter builds the
        # underlying distrax MixtureSameFamily distribution.
        self.model = (mixing_coeffs, means, covariances)
    @property
    def mixing_coeffs(self):
        '''Mixture weights, from the wrapped categorical distribution.'''
        return self._model.mixture_distribution.probs
    @property
    def means(self):
        '''Component means, from the wrapped components distribution.'''
        return self._model.components_distribution.loc
    @property
    def covariances(self):
        '''Component covariance matrices, computed by the distribution.'''
        return self._model.components_distribution.covariance()
    @property
    def model(self):
        '''The underlying MixtureSameFamily distribution object.'''
        return self._model
    @model.setter
    def model(self, value):
        '''Rebuild the mixture from a (mixing_coeffs, means, covariances) tuple.'''
        mixing_coeffs, means, covariances = value
        # Multivariate normal components with full covariance matrices;
        # validate_args turns on parameter checking in TFP.
        components_distribution = distrax.as_distribution(
            tfp.substrates.jax.distributions.MultivariateNormalFullCovariance(loc=means,
                                                                              covariance_matrix=covariances,
                                                                              validate_args=True))
        # Categorical over components + shared component family = GMM
        self._model = MixtureSameFamily(mixture_distribution=distrax.Categorical(probs=mixing_coeffs),
                                        components_distribution=components_distribution)
def expected_log_likelihood(self, observations):
'''
Calculates expected log likelihood
Parameters
----------
observations : array(N, seq_len)
Dataset
Returns
-------
* int
Log likelihood
'''
return jnp.sum(self._model.log_prob(observations))
def responsibility(self, observations, comp_dist_idx):
'''
Computes responsibilities, or posterior probability p(z_{comp_dist_idx}|x)
Parameters
----------
observations : array(N, seq_len)
Dataset
comp_dist_idx : int
Index which specifies the specific mixing distribution component
Returns
-------
* array
Responsibilities
'''
return self._model.posterior_marginal(observations).prob(comp_dist_idx)
def responsibilities(self, observations):
'''
Computes responsibilities, or posterior probability p(z|x)
Parameters
----------
observations : array(N, seq_len)
Dataset
Returns
-------
* array
Responsibilities
'''
return self.model.posterior_marginal(observations).probs
def _m_step(self, observations, S, eta):
'''
Maximization step
Parameters
----------
observations : array(N, seq_len)
Dataset
S : array
A prior p(theta) is defined over the parameters to find MAP solutions
eta : int
Returns
-------
* array
Mixing coefficients
* array
Means
* array
Covariances
'''
n_obs, n_comp = observations.shape
def m_step_per_gaussian(responsibility):
effective_prob = responsibility.sum()
mean = (responsibility[:, None] * observations).sum(axis=0) / effective_prob
centralized_observations = (observations - mean)
covariance = responsibility[:, None, None] * jnp.einsum("ij, ik->ikj",
centralized_observations,
centralized_observations)
covariance = covariance.sum(axis=0)
if eta is None:
covariance = covariance / effective_prob
else:
covariance = (S + covariance) / (eta + effective_prob + n_comp + 2)
mixing_coeff = effective_prob / n_obs
return (mixing_coeff, mean, covariance)
mixing_coeffs, means, covariances = vmap(m_step_per_gaussian, in_axes=(1))(self.responsibilities(observations))
return mixing_coeffs, means, covariances
def _add_final_values_to_history(self, history, observations):
'''
Appends the final values of log likelihood, mixing coefficients, means, variances and responsibilities into the
history
Parameters
----------
history : tuple
Consists of values of log likelihood, mixing coefficients, means, variances and responsibilities, which are
found per iteration
observations : array(N, seq_len)
Dataset
Returns
-------
* array
Mean loss values found per iteration
* array
Mixing coefficients found per iteration
* array
Means of Gaussian distribution found per iteration
* array
Covariances of Gaussian distribution found per iteration
* array
Responsibilites found per iteration
'''
ll_hist, mix_dist_probs_hist, comp_dist_loc_hist, comp_dist_cov_hist, responsibility_hist = history
ll_hist = jnp.append(ll_hist, self.expected_log_likelihood(observations))
mix_dist_probs_hist = jnp.vstack([mix_dist_probs_hist, self.mixing_coeffs])
comp_dist_loc_hist = jnp.vstack([comp_dist_loc_hist, self.means[None, :]])
comp_dist_cov_hist = jnp.vstack([comp_dist_cov_hist, self.covariances[None, :]])
responsibility_hist = jnp.vstack([responsibility_hist, jnp.array([self.responsibility(observations, 0)])])
history = (ll_hist, mix_dist_probs_hist, comp_dist_loc_hist, comp_dist_cov_hist, responsibility_hist)
return history
def fit_em(self, observations, num_of_iters, S=None, eta=None):
'''
Fits the model using em algorithm.
Parameters
----------
observations : array(N, seq_len)
Dataset
num_of_iters : int
The number of iterations the training process takes place
S : array
A prior p(theta) is defined over the parameters to find MAP solutions
eta : int
Returns
-------
* array
Mean loss values found per iteration
* array
Mixing coefficients found per iteration
* array
Means of Gaussian distribution found per iteration
* array
Covariances of Gaussian distribution found per iteration
* array
Responsibilites found per iteration
'''
initial_mixing_coeffs = self.mixing_coeffs
initial_means = self.means
initial_covariances = self.covariances
iterations = jnp.arange(num_of_iters)
def train_step(params, i):
self.model = params
log_likelihood = self.expected_log_likelihood(observations)
responsibility = self.responsibility(observations, 0)
mixing_coeffs, means, covariances = self._m_step(observations, S, eta)
return (mixing_coeffs, means, covariances), (log_likelihood, *params, responsibility)
initial_params = (initial_mixing_coeffs,
initial_means,
initial_covariances)
final_params, history = scan(train_step, initial_params, iterations)
self.model = final_params
history = self._add_final_values_to_history(history, observations)
return history
def _make_minibatches(self, observations, batch_size, rng_key):
'''
Creates minibatches consists of the random permutations of the
given observation sequences
Parameters
----------
observations : array(N, seq_len)
Dataset
batch_size : int
The number of observation sequences that will be included in
each minibatch
rng_key : array
Random key of shape (2,) and dtype uint32
Returns
-------
* array(num_batches, batch_size, max_len)
Minibatches
'''
num_train = len(observations)
perm = permutation(rng_key, num_train)
def create_mini_batch(batch_idx):
return observations[batch_idx]
num_batches = num_train // batch_size
batch_indices = perm.reshape((num_batches, -1))
minibatches = vmap(create_mini_batch)(batch_indices)
return minibatches
def _transform_to_covariance_matrix(self, sq_mat):
'''
Takes the upper triangular matrix of the given matrix and then multiplies it by its transpose
https://ericmjl.github.io/notes/stats-ml/estimating-a-multivariate-gaussians-parameters-by-gradient-descent/
Parameters
----------
sq_mat : array
Square matrix
Returns
-------
* array
'''
U = jnp.triu(sq_mat)
U_T = jnp.transpose(U)
return jnp.dot(U_T, U)
def loss_fn(self, params, batch):
"""
Calculates expected mean negative loglikelihood.
Parameters
----------
params : tuple
Consists of mixing coefficients' logits, means and variances of the Gaussian distributions respectively.
batch : array
The subset of observations
Returns
-------
* int
Negative log likelihood
"""
mixing_coeffs, means, untransormed_cov = params
cov_matrix = vmap(self._transform_to_covariance_matrix)(untransormed_cov)
self.model = (softmax(mixing_coeffs), means, cov_matrix)
return -self.expected_log_likelihood(batch) / len(batch)
def update(self, i, opt_state, batch):
'''
Updates the optimizer state after taking derivative
i : int
The current iteration
opt_state : jax.experimental.optimizers.OptimizerState
The current state of the parameters
batch : array
The subset of observations
Returns
-------
* jax.experimental.optimizers.OptimizerState
The updated state
* int
Loss value calculated on the current batch
'''
params = get_params(opt_state)
loss, grads = value_and_grad(self.loss_fn)(params, batch)
return opt_update(i, grads, opt_state), loss
def fit_sgd(self, observations, batch_size, rng_key=None, optimizer=None, num_epochs=3):
'''
Finds the parameters of Gaussian Mixture Model using gradient descent algorithm with the given hyperparameters.
Parameters
----------
observations : array
The observation sequences which Bernoulli Mixture Model is trained on
batch_size : int
The size of the batch
rng_key : array
Random key of shape (2,) and dtype uint32
optimizer : jax.experimental.optimizers.Optimizer
Optimizer to be used
num_epochs : int
The number of epoch the training process takes place
Returns
-------
* array
Mean loss values found per epoch
* array
Mixing coefficients found per epoch
* array
Means of Gaussian distribution found per epoch
* array
Covariances of Gaussian distribution found per epoch
* array
Responsibilites found per epoch
'''
global opt_init, opt_update, get_params
if rng_key is None:
rng_key = PRNGKey(0)
if optimizer is not None:
opt_init, opt_update, get_params = optimizer
opt_state = opt_init((softmax(self.mixing_coeffs), self.means, self.covariances))
itercount = itertools.count()
def epoch_step(opt_state, key):
def train_step(opt_state, batch):
opt_state, loss = self.update(next(itercount), opt_state, batch)
return opt_state, loss
batches = self._make_minibatches(observations, batch_size, key)
opt_state, losses = scan(train_step, opt_state, batches)
params = get_params(opt_state)
mixing_coeffs, means, untransormed_cov = params
cov_matrix = vmap(self._transform_to_covariance_matrix)(untransormed_cov)
self.model = (softmax(mixing_coeffs), means, cov_matrix)
responsibilities = self.responsibilities(observations)
return opt_state, (losses.mean(), *params, responsibilities)
epochs = split(rng_key, num_epochs)
opt_state, history = scan(epoch_step, opt_state, epochs)
params = get_params(opt_state)
mixing_coeffs, means, untransormed_cov = params
cov_matrix = vmap(self._transform_to_covariance_matrix)(untransormed_cov)
self.model = (softmax(mixing_coeffs), means, cov_matrix)
return history
def plot(self, observations, means=None, covariances=None, responsibilities=None,
step=0.01, cmap="viridis", colors=None, ax=None):
'''
Plots Gaussian Mixture Model.
Parameters
----------
observations : array
Dataset
means : array
covariances : array
responsibilities : array
step: float
Step size of the grid for the density contour.
cmap : str
ax : array
'''
means = self.means if means is None else means
covariances = self.covariances if covariances is None else covariances
responsibilities = self.model.posterior_marginal(observations).probs if responsibilities is None \
else responsibilities
colors = uniform(PRNGKey(100), (means.shape[0], 3)) if colors is None else colors
ax = ax if ax is not None else plt.subplots()[1]
min_x, min_y = observations.min(axis=0)
max_x, max_y = observations.max(axis=0)
xs, ys = jnp.meshgrid(jnp.arange(min_x, max_x, step), jnp.arange(min_y, max_y, step))
grid = jnp.vstack([xs.ravel(), ys.ravel()]).T
def multivariate_normal(mean, cov):
'''
Initializes multivariate normal distribution with the given mean and covariance.
Note that the pdf has the same precision with its parameters' dtype.
'''
return tfp.substrates.jax.distributions.MultivariateNormalFullCovariance(loc=mean,
covariance_matrix=cov)
for (means, cov), color in zip(zip(means, covariances), colors):
normal_dist = multivariate_normal(means, cov)
density = normal_dist.prob(grid).reshape(xs.shape)
ax.contour(xs, ys, density, levels=1, colors=color, linewidths=5)
ax.scatter(*observations.T, alpha=0.7, c=responsibilities, cmap=cmap, s=10)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_y, max_y)
|
probml/pyprobml
|
scripts/mix_gauss_lib.py
|
Python
|
mit
| 15,633
|
[
"Gaussian"
] |
abf803323380e234efdb55b0c18ecf3913c64313e6082298fceb5cd63c1453ed
|
# -*- coding: utf-8 -*-
"""
==========================================
Brainstorm Elekta phantom dataset tutorial
==========================================
Here we compute the evoked from raw for the Brainstorm Elekta phantom
tutorial dataset. For comparison, see [1]_ and:
http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta
References
----------
.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.
Brainstorm: A User-Friendly Application for MEG/EEG Analysis.
Computational Intelligence and Neuroscience, vol. 2011, Article ID
879716, 13 pages, 2011. doi:10.1155/2011/879716
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import find_events, fit_dipole
from mne.datasets.brainstorm import bst_phantom_elekta
from mne.io import read_raw_fif
from mayavi import mlab
print(__doc__)

###############################################################################
# The data were collected with an Elekta Neuromag VectorView system at 1000 Hz
# and low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data
# are read to construct instances of :class:`mne.io.Raw`.
data_path = bst_phantom_elekta.data_path()

raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')
raw = read_raw_fif(raw_fname)

###############################################################################
# Data channel array consisted of 204 MEG planar gradiometers,
# 102 axial magnetometers, and 3 stimulus channels. Let's get the events
# for the phantom, where each dipole (1-32) gets its own event:
events = find_events(raw, 'STI201')
raw.plot(events=events)
raw.info['bads'] = ['MEG2421']

###############################################################################
# The data have strong line frequency (60 Hz and harmonics) and cHPI coil
# noise (five peaks around 300 Hz). Here we plot only out to 60 seconds
# to save memory:
raw.plot_psd(tmax=60., average=False)

###############################################################################
# Let's use Maxwell filtering to clean the data a bit.
# Ideally we would have the fine calibration and cross-talk information
# for the site of interest, but we don't, so we just do:
raw.fix_mag_coil_types()
raw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))

###############################################################################
# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.
raw.filter(None, 40., fir_design='firwin')
raw.plot(events=events)

###############################################################################
# Now we epoch our data, average it, and look at the first dipole response.
# The first peak appears around 3 ms. Because we low-passed at 40 Hz,
# we can also decimate our data to save memory.
tmin, tmax = -0.1, 0.1
event_id = list(range(1, 33))
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01),
                    decim=3, preload=True)
epochs['1'].average().plot(time_unit='s')

###############################################################################
# Let's use a sphere head geometry model and let's see the coordinate
# alignment and the sphere location. The phantom is properly modeled by
# a single-shell sphere with origin (0., 0., 0.).
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)
mne.viz.plot_alignment(raw.info, subject='sample',
                       meg='helmet', bem=sphere, dig=True,
                       surfaces=['brain'])

###############################################################################
# Let's do some dipole fits. We first compute the noise covariance,
# then do the fits for each event_id taking the time instant that maximizes
# the global field power.
cov = mne.compute_covariance(epochs, tmax=0)
data = []
for ii in event_id:
    # For each dipole, crop the evoked response to its GFP-peak sample.
    evoked = epochs[str(ii)].average()
    idx_peak = np.argmax(evoked.copy().pick_types(meg='grad').data.std(axis=0))
    t_peak = evoked.times[idx_peak]
    evoked.crop(t_peak, t_peak)
    data.append(evoked.data[:, 0])
evoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.)
del epochs, raw
dip = fit_dipole(evoked, cov, sphere, n_jobs=1)[0]

###############################################################################
# Now we can compare to the actual locations, taking the difference in mm:
actual_pos, actual_ori = mne.dipole.get_phantom_dipoles()
actual_amp = 100.  # nAm

fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, figsize=(6, 7))

diffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))
print('mean(position error) = %s' % (np.mean(diffs),))
ax1.bar(event_id, diffs)
ax1.set_xlabel('Dipole index')
ax1.set_ylabel('Loc. error (mm)')

# Angle error: arccos of |dot product| of unit orientation vectors.
angles = np.arccos(np.abs(np.sum(dip.ori * actual_ori, axis=1)))
print('mean(angle error) = %s' % (np.mean(angles),))
ax2.bar(event_id, angles)
ax2.set_xlabel('Dipole index')
ax2.set_ylabel('Angle error (rad)')

# Amplitude error in nAm (dip.amplitude is in Am).
amps = actual_amp - dip.amplitude / 1e-9
print('mean(abs amplitude error) = %s' % (np.mean(np.abs(amps)),))
ax3.bar(event_id, amps)
ax3.set_xlabel('Dipole index')
ax3.set_ylabel('Amplitude error (nAm)')

fig.tight_layout()
plt.show()

###############################################################################
# Let's plot the positions and the orientations of the actual and the estimated
# dipoles
def plot_pos_ori(pos, ori, color=(0., 0., 0.)):
    """Render dipole positions (points) and orientations (arrows) in mayavi.

    pos and ori are (n, 3) arrays; color is an RGB tuple applied to both.
    """
    xs, ys, zs = pos[:, 0], pos[:, 1], pos[:, 2]
    mlab.points3d(xs, ys, zs, scale_factor=0.005, color=color)
    mlab.quiver3d(xs, ys, zs,
                  ori[:, 0], ori[:, 1], ori[:, 2],
                  scale_factor=0.03,
                  color=color)
# Empty scene with the sphere model only; dipoles are overlaid below.
mne.viz.plot_alignment(evoked.info, bem=sphere, surfaces=[])

# Plot the position and the orientation of the actual dipole (red)
plot_pos_ori(actual_pos, actual_ori, color=(1., 0., 0.))
# Plot the position and the orientation of the estimated dipole (blue)
plot_pos_ori(dip.pos, dip.ori, color=(0., 0., 1.))
|
mne-tools/mne-tools.github.io
|
0.16/_downloads/plot_brainstorm_phantom_elekta.py
|
Python
|
bsd-3-clause
| 6,071
|
[
"Mayavi"
] |
efa27c4664cfbabdfb045036e8d164abbbb8c3b23f546b83efc7c55d6a6f866a
|
# encoding: utf-8
"""nanotube.py - Window for setting up Graphene sheets and ribbons.
"""
import gtk
from ase.gui.widgets import pack, cancel_apply_ok, oops
from ase.gui.setupwindow import SetupWindow
from ase.gui.pybutton import PyButton
from ase.structure import graphene_nanoribbon
import ase
import numpy as np
# Text shown at the top of the setup window.
introtext = """\
Set up a graphene sheet or a graphene nanoribbon. A nanoribbon may
optionally be saturated with hydrogen (or another element).\
"""

# NOTE(review): this template refers to `nanotube`, not graphene — it looks
# copied from nanotube.py. It is currently unused because the PyButton that
# would consume it is commented out in SetupGraphene.__init__.
py_template = """
from ase.structure import nanotube
atoms = nanotube(%(n)i, %(m)i, length=%(length)i, bond=%(bl).3f, symbol=%(symb)s)
"""
class SetupGraphene(SetupWindow):
    "Window for setting up a graphene sheet or nanoribbon."

    def __init__(self, gui):
        # Build the GTK window: structure/orientation choosers, element and
        # bond-length entries, size and vacuum spinners, and action buttons.
        SetupWindow.__init__(self)
        self.set_title("Graphene")
        vbox = gtk.VBox()
        # Intoductory text
        self.packtext(vbox, introtext)
        # Choose structure
        label = gtk.Label("Structure: ")
        self.struct = gtk.combo_box_new_text()
        # Index 0: sheet, 1: unsaturated ribbon, 2: saturated ribbon.
        for s in ("Infinite sheet", "Unsaturated ribbon", "Saturated ribbon"):
            self.struct.append_text(s)
        self.struct.set_active(0)
        self.struct.connect('changed', self.update_gui)
        pack(vbox, [label, self.struct])
        # Orientation
        label = gtk.Label("Orientation: ")
        self.orient = gtk.combo_box_new_text()
        self.orient_text = []
        for s in ("zigzag", "armchair"):
            self.orient.append_text(s)
            self.orient_text.append(s)
        self.orient.set_active(0)
        self.orient.connect('changed', self.update_gui)
        pack(vbox, [label, self.orient])
        pack(vbox, gtk.Label(""))
        # Choose the element and bond length
        label1 = gtk.Label("Element: ")
        #label.set_alignment(0.0, 0.2)
        self.element = gtk.Entry(max=3)
        self.element.set_text("C")
        self.element.connect('activate', self.update_element)
        # Default C-C bond length in Angstrom.
        self.bondlength = gtk.Adjustment(1.42, 0.0, 1000.0, 0.01)
        label2 = gtk.Label(" Bond length: ")
        label3 = gtk.Label("Å")
        bond_box = gtk.SpinButton(self.bondlength, 10.0, 3)
        pack(vbox, [label1, self.element, label2, bond_box, label3])
        # Choose the saturation element and bond length
        self.sat_label1 = gtk.Label("Saturation: ")
        #label.set_alignment(0.0, 0.2)
        self.element2 = gtk.Entry(max=3)
        self.element2.set_text("H")
        self.element2.connect('activate', self.update_element)
        # Default C-H bond length in Angstrom.
        self.bondlength2 = gtk.Adjustment(1.12, 0.0, 1000.0, 0.01)
        self.sat_label2 = gtk.Label(" Bond length: ")
        self.sat_label3 = gtk.Label("Å")
        self.bond_box = gtk.SpinButton(self.bondlength2, 10.0, 3)
        pack(vbox, [self.sat_label1, self.element2, self.sat_label2,
                    self.bond_box, self.sat_label3])
        # Red label used to report invalid element input.
        self.elementinfo = gtk.Label("")
        self.elementinfo.modify_fg(gtk.STATE_NORMAL,
                                   gtk.gdk.color_parse('#FF0000'))
        pack(vbox, [self.elementinfo])
        pack(vbox, gtk.Label(""))
        # Size
        label1 = gtk.Label("Width: ")
        label2 = gtk.Label(" Length: ")
        self.n = gtk.Adjustment(1, 1, 100, 1)
        self.m = gtk.Adjustment(1, 1, 100, 1)
        spinn = gtk.SpinButton(self.n, 0, 0)
        spinm = gtk.SpinButton(self.m, 0, 0)
        pack(vbox, [label1, spinn, label2, spinm])
        # Vacuum
        label1 = gtk.Label("Vacuum: ")
        self.vacuum = gtk.Adjustment(5.0, 0.0, 1000.0, 0.1)
        label2 = gtk.Label("Å")
        vac_box = gtk.SpinButton(self.vacuum, 10.0, 2)
        pack(vbox, [label1, vac_box, label2])
        pack(vbox, gtk.Label(""))
        # Buttons
        # NOTE(review): self.pybut is left commented out here, but makeatoms
        # still assigns to self.pybut.python in its failure branch — that path
        # would raise AttributeError. Confirm before enabling that branch.
        #self.pybut = PyButton("Creating a nanoparticle.")
        #self.pybut.connect('clicked', self.makeatoms)
        buts = cancel_apply_ok(cancel=lambda widget: self.destroy(),
                               apply=self.apply,
                               ok=self.ok)
        pack(vbox, [buts], end=True, bottom=True)
        # Finalize setup
        self.update_gui()
        self.add(vbox)
        vbox.show()
        self.show()
        self.gui = gui

    def update_element(self, *args):
        "Called when a new element may have been entered."
        # Assumes the element widget is self.element and that a label
        # for errors is self.elementinfo. The chemical symbol is
        # placed in self.legalelement - or None if the element is
        # invalid.
        symb = []
        if self.struct.get_active() == 2:
            # Saturated nanoribbon
            elements = (self.element.get_text(), self.element2.get_text())
        else:
            elements = (self.element.get_text(), )
        for elem in elements:
            if not elem:
                self.invalid_element("  No element specified!")
                return False
            try:
                # Accept either an atomic number ...
                z = int(elem)
            except ValueError:
                # Probably a symbol
                try:
                    z = ase.data.atomic_numbers[elem]
                except KeyError:
                    self.invalid_element()
                    return False
            try:
                symb.append(ase.data.chemical_symbols[z])
            except KeyError:
                self.invalid_element()
                return False
        self.elementinfo.set_text("")
        self.legal_element = symb[0]
        if len(symb) == 2:
            self.legal_element2 = symb[1]
        else:
            self.legal_element2 = None
        return True

    def update_gui(self, *args):
        # Saturation element is only relevant for saturated nanoribbons
        satur = self.struct.get_active() == 2
        for w in (self.element2, self.bond_box):
            w.set_sensitive(satur)
        # Infinite zigzag sheets must have even width
        if self.struct.get_active() == 0 and self.orient.get_active() == 0:
            if self.n.value % 2 == 1:
                self.n.value += 1
            self.n.lower = 2
            self.n.step_increment = 2
        else:
            self.n.lower = 1
            self.n.step_increment = 1

    def makeatoms(self, *args):
        # Build self.atoms from the current widget state, or set it to None
        # if the element entries are invalid.
        self.update_element()
        if self.legal_element is None or (self.struct.get_active() == 2 and
                                          self.legal_element2 is None):
            self.atoms = None
            self.pybut.python = None
        else:
            n = int(self.n.value)
            m = int(self.m.value)
            CC = self.bondlength.value
            vacuum = self.vacuum.value
            orient = self.orient_text[self.orient.get_active()]
            elem = self.legal_element
            # NOTE(review): the sheet/unsaturated branches pass `vacc=` while
            # the saturated branch passes `vacuum=` — verify which keyword
            # ase.structure.graphene_nanoribbon actually accepts.
            if self.struct.get_active() == 0:
                # Extended sheet
                self.atoms = graphene_nanoribbon(n, m, type=orient, C_C=CC,
                                                 vacc=vacuum, sheet=True,
                                                 main_element=elem)
            elif self.struct.get_active() == 1:
                # Unsaturated nanoribbon
                self.atoms = graphene_nanoribbon(n, m, type=orient, C_C=CC,
                                                 vacc=vacuum,
                                                 main_element=elem)
            elif self.struct.get_active() == 2:
                # Saturated nanoribbon
                elem2 = self.legal_element2
                self.atoms = graphene_nanoribbon(n, m, type=orient, C_C=CC,
                                                 C_H=self.bondlength2.value,
                                                 vacuum=vacuum,
                                                 saturated=True,
                                                 main_element=elem,
                                                 saturate_element=elem2)
            else:
                raise RuntimeError("Unknown structure in SetupGraphene!")
            # Now, rotate into the xy plane (ase.gui's default view plane)
            pos = self.atoms.get_positions()
            cell = self.atoms.get_cell()
            pbc = self.atoms.get_pbc()
            # Swap the y and z axes of cell, positions and periodicity.
            cell[1,1], cell[2,2] = cell[2,2], cell[1,1]
            x = pos[:,1].copy()
            z = pos[:,2].copy()
            pos[:,1] = z
            pos[:,2] = x
            self.atoms.set_cell(cell)
            self.atoms.set_positions(pos)
            self.atoms.set_pbc([pbc[0], pbc[2], pbc[1]])

    def apply(self, *args):
        # Build the atoms and hand them to the main GUI; report otherwise.
        self.makeatoms()
        if self.atoms is not None:
            self.gui.new_atoms(self.atoms)
            return True
        else:
            oops("No valid atoms.",
                 "You have not (yet) specified a consistent set of parameters.")
            return False

    def ok(self, *args):
        # Apply and close the window on success.
        if self.apply():
            self.destroy()
|
slabanja/ase
|
ase/gui/graphene.py
|
Python
|
gpl-2.0
| 8,902
|
[
"ASE"
] |
c6337de2338bc929da7331488ded07b5283d9dff9e4038b6363d3981e48dea55
|
import os
srcdir = os.path.dirname(os.path.abspath(__file__))
import sys
sys.path.append(os.path.join(srcdir, '..', '..', 'python', 'lib'))
import copy
import numpy as np
from giss import giutil
import icebin
from icebin import ibgrid
from giss.ncutil import copy_nc
import netCDF4
import argparse
#from modele.constants import SHI,LHM,RHOI,RHOS,UI_ICEBIN,UI_NOTHING
# Sample program that tries out IceBin's update_topo() and writes the output to NetCDF.
# The filder ../make_topo must have been run first, for the file z1qx1n_bs1-nogr.nc
# Input files: TOPOO comes from the ModelE file path; the IceBin config and
# elevation/mask files are expected in the working directory.
file_path = os.environ['MODELE_FILE_PATH'].split(os.pathsep)
TOPOO_IN = giutil.search_file('z1qx1n_bs1-nogr.nc', file_path)
ICEBINO_IN = 'pismsheet_g20_icebin_in.nc'
ELEVMASK_IN = 'pismsheet_elev_mask.nc'

# ---------------------------------------
# Read dimensions
with netCDF4.Dataset(ICEBINO_IN) as nc:
    indexingO = ibgrid.Indexing(nc, 'm.gridA.indexing')
    indexingHC = ibgrid.Indexing(nc, 'm.indexingHC')
    hcdefs_ice = nc.variables['m.hcdefs'][:]
    nhc_ice = len(nc.dimensions['m.nhc'])

imO = indexingO.extent[0]
jmO = indexingO.extent[1]
im = imO // 2    # Indices in alphabetical order
jm = jmO // 2    # /2 to convert Ocean to Atmosphere grid

segments = 'legacy,sealand,ec'
# Three extra GCM elevation classes beyond the ice-sheet classes
# (one per segment in `segments`).
nhc_gcm = nhc_ice + 3

# ---------------------------------------
# Create gcmO and wrap to get gcmA. Not yet usable, we haven't done
# anything about foceanAOp or foceanAOm yet.
mmA = icebin.GCMRegridder(ICEBINO_IN).to_modele()

# Allocate arryas for update_topo to write into.
# In ModelE, these are allocated by the GCM.
fhc = np.zeros((nhc_gcm,jm,im), dtype='d')
underice = np.zeros((nhc_gcm,jm,im), dtype='i')
elevE = np.zeros((nhc_gcm,jm,im), dtype='d')
focean = np.zeros((jm,im), dtype='d')
flake = np.zeros((jm,im), dtype='d')
fgrnd = np.zeros((jm,im), dtype='d')
fgice = np.zeros((jm,im), dtype='d')
zatmo = np.zeros((jm,im), dtype='d')
foceanOm0 = np.zeros((jmO,imO), dtype='d')

# ---------------------------------------------------------
# Read ice thickness, bed topography and mask; surface elevation is bed+ice.
with netCDF4.Dataset(ELEVMASK_IN) as nc:
    thkI = nc.variables['thk'][:].reshape(-1)
    topgI = nc.variables['topg'][:].reshape(-1)
    maskI = nc.variables['mask'][:].reshape(-1)
elevI = topgI + thkI

# ---------------------------------------------------------
# Smoothing length scales (m) for the regridding, per ice sheet.
sigma = (50000., 50000., 1000.)
elevmask_sigmas = {'greenland' : (elevI, maskI, sigma)}
mmA.update_topo(
    TOPOO_IN, elevmask_sigmas, True,
    segments, 'ec',
    fhc, underice, elevE,
    focean, flake, fgrnd, fgice, zatmo, foceanOm0)

# ---------------------------------------------------------
# Write everything update_topo produced to TOPOA.nc (compressed).
with netCDF4.Dataset('TOPOA.nc', 'w') as nc:
    nc.createDimension('nhc_gcm', nhc_gcm)
    nc.createDimension('jm', jm)
    nc.createDimension('im', im)
    nc.createDimension('jmO', jmO)
    nc.createDimension('imO', imO)

    fhc_v = nc.createVariable('fhc', 'd', ('nhc_gcm','jm','im'), zlib=True)
    underice_v = nc.createVariable('underice', 'i', ('nhc_gcm','jm','im'), zlib=True)
    elevE_v = nc.createVariable('elevE', 'd', ('nhc_gcm','jm','im'), zlib=True)
    focean_v = nc.createVariable('focean', 'd', ('jm','im'), zlib=True)
    flake_v = nc.createVariable('flake', 'd', ('jm','im'), zlib=True)
    fgrnd_v = nc.createVariable('fgrnd', 'd', ('jm','im'), zlib=True)
    fgice_v = nc.createVariable('fgice', 'd', ('jm','im'), zlib=True)
    zatmo_v = nc.createVariable('zatmo', 'd', ('jm','im'), zlib=True)
    foceanOm0_v = nc.createVariable('foceanOm0', 'd', ('jmO','imO'), zlib=True)

    fhc_v[:] = fhc
    underice_v[:] = underice
    elevE_v[:] = elevE
    focean_v[:] = focean
    flake_v[:] = flake
    fgrnd_v[:] = fgrnd
    fgice_v[:] = fgice
    zatmo_v[:] = zatmo
    foceanOm0_v[:] = foceanOm0
|
citibeth/twoway
|
landice2/update_topo.py
|
Python
|
gpl-3.0
| 3,677
|
[
"NetCDF"
] |
6522b94645a30d2d639df92d93bbf39f55b0a9c32d59c7d911f3258fdd45cd64
|
#!/usr/bin/env
"""
BS_Winds_NARR_6hr.py
Compare NARR Winds with NCEP V2 (with Mooring Winds) for 6hr intervals. Uses 3hr NARR and 6hr NCEP
Using Anaconda packaged Python
"""
#System Stack
import datetime
#Science Stack
import numpy as np
from scipy import stats
from netCDF4 import Dataset
# User Stack
import general_utilities.haversine as sphered
from utilities import ncutilities as ncutil
# Visual Stack
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, shiftgrid
from matplotlib.dates import MonthLocator, DateFormatter, DayLocator
# Module metadata.
# NOTE: the original used leading-zero literals (03) in the date constructors,
# which are invalid syntax in Python 3; the values are unchanged here.
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2014, 3, 25)
__modified__ = datetime.datetime(2014, 3, 25)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'NARR', 'NCEP V2', 'Bering2', 'power law', 'user defined time comparison', 'Winds', 'Bering Sea'
"""------------------------General Modules-------------------------------------------"""
def from_netcdf(infile):
    """Read every variable of a netCDF file into a dictionary.

    Returns (data_dict, variable_names) using the ncutilities helpers.
    """
    handle = ncutil.ncopen(infile)
    varnames = ncutil.get_vars(handle)  # all variables in the file
    data = ncutil.ncreadfile_dic(handle, varnames)
    ncutil.ncclose(handle)
    return (data, varnames)
def from_netcdf_mf(infiles):
    """Read every variable of a multi-file netCDF dataset into a dictionary.

    Returns (data_dict, variable_names) using the ncutilities helpers.
    """
    handle = ncutil.mf_ncopen(infiles)
    varnames = ncutil.get_vars(handle)  # all variables in the dataset
    data = ncutil.ncreadfile_dic(handle, varnames)
    ncutil.ncclose(handle)
    return (data, varnames)
def from_netcdf_1dsplice(infile, height_ind, lat_ind, lon_ind):
    """ Uses ncreadfile_dic_slice which returns a dictionary of data sliced
    at the given height/lat/lon indices from netcdf"""
    ###nc readin/out
    nchandle = ncutil.ncopen(infile)
    params = ncutil.get_vars(nchandle) #gets all of them
    # Python 2 print statements: list available parameters for the user.
    print "Parameters available: "
    print params
    ncdata = ncutil.ncreadfile_dic_slice(nchandle, params, height_ind=height_ind, lat_ind=lat_ind, lon_ind=lon_ind)
    ncutil.ncclose(nchandle)
    return ncdata
def missing2nan(MooringMetData, Mooring_params, fill_value=np.nan):
    """Replace the known 1e35 missing-data placeholder with fill_value.

    Parameters
    ----------
    MooringMetData : dict of array
        Mooring data keyed by parameter name; modified in place.
    Mooring_params : iterable of str
        Parameter names (keys of MooringMetData) to scan.
    fill_value : scalar, optional
        Value substituted for 1e35 entries (default NaN).
        BUGFIX: the original ignored this parameter and always wrote np.nan.

    Returns
    -------
    dict
        The same (mutated) MooringMetData dictionary.
    """
    for v in Mooring_params:
        missing_ind = (MooringMetData[v] == 1e35)
        try:
            MooringMetData[v][missing_ind] = fill_value
        except ValueError:
            # e.g. integer-dtype array cannot hold NaN; leave it untouched
            # (matches the original best-effort behavior).
            pass
    return MooringMetData
def latlon_grid(infile):
    """Return the geographic coordinates (lat/lon) of a netCDF file."""
    handle = ncutil.ncopen(infile)
    coords = ncutil.get_geocoords(handle)
    ncutil.ncclose(handle)
    return coords
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
    """Convert EPIC or NCEP/NARR time values to python ordinal dates.

    Parameters
    ----------
    file_time : scalar or array-like
        EPIC julian day (file_flag='EPIC') or hours since the reference
        epoch (NARR/NCEP flags).
    file_time2 : scalar or array-like, optional
        EPIC milliseconds-within-day; required when file_flag='EPIC'.
    file_flag : str
        One of 'EPIC', 'NARR', 'NCEP', 'NCEPV1'.

    Returns
    -------
    numpy.ndarray
        Python ordinal date(s) (days since 0001-01-01, proleptic Gregorian).

    Raises
    ------
    ValueError
        If file_flag is not recognized.
        BUGFIX: the original printed a message and called sys.exit(), but
        `sys` was never imported, so that path raised NameError instead.
    """
    if file_flag == 'EPIC':
        # EPIC epoch (julian day 2440000) corresponds to 1968-05-23.
        ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
        ref_time_epic = 2440000
        offset = ref_time_epic - ref_time_py
        try:  # array-like input
            python_time = [None] * len(file_time)
            for i, val in enumerate(file_time):
                pyday = file_time[i] - offset
                pyfrac = file_time2[i] / (1000. * 60. * 60. * 24.)  # ms in a day
                python_time[i] = (pyday + pyfrac)
        except TypeError:  # scalar input (len() not supported)
            pyday = file_time - offset
            pyfrac = file_time2 / (1000. * 60. * 60. * 24.)  # ms in a day
            python_time = (pyday + pyfrac)
    elif file_flag == 'NARR':
        # Hours since 1800-01-01.
        base_date = datetime.datetime.strptime('1800-01-01', '%Y-%m-%d').toordinal()
        python_time = file_time / 24. + base_date
    elif file_flag == 'NCEP':
        # Hours since 1800-01-01.
        base_date = datetime.datetime.strptime('1800-01-01', '%Y-%m-%d').toordinal()
        python_time = file_time / 24. + base_date
    elif file_flag == 'NCEPV1':
        # Hours since 0001-01-01.
        base_date = datetime.datetime.strptime('0001-01-01', '%Y-%m-%d').toordinal()
        python_time = file_time / 24. + base_date
        # hack correction: accounts for python date discrepancy? and 6hr
        # lead/lag of dataset (carried over from the original).
        python_time = python_time - 1.75
    else:
        raise ValueError("time flag not recognized: %r" % (file_flag,))
    return np.array(python_time)
def etopo5_data(filename='/Users/bell/Data_Local/MapGrids/etopo5.nc'):
    """Read etopo5 topography/bathymetry from a netCDF file.

    Parameters
    ----------
    filename : str, optional
        Path to the etopo5 netCDF file. Defaults to the original
        hard-coded local path, so existing callers are unchanged.

    Returns
    -------
    (topoin, lats, lons) : tuple of 2-D arrays
        Bathymetry grid and the matching meshgridded latitudes/longitudes.
    """
    etopodata = Dataset(filename)
    try:
        topoin = etopodata.variables['bath'][:]
        lons = etopodata.variables['X'][:]
        lats = etopodata.variables['Y'][:]
    finally:
        # Always close the dataset, even if a variable is missing.
        etopodata.close()
    topoin, lons = shiftgrid(0., topoin, lons, start=False)  # -360 -> 0
    lons, lats = np.meshgrid(lons, lats)
    return (topoin, lats, lons)
"""------------------------------- Stats/Math Modules --------------------------------------"""
def lin_fit(x, y):
    """Least-squares linear regression via scipy.stats.linregress.

    Returns (slope, intercept, r_value, p_value, std_err).
    """
    result = stats.linregress(x, y)
    return tuple(result)
def comp_corr( x, y):
"""
Complex Correlations
Parameters:
-----------
x: complex vector 1
y: complex vector 2
Outputs:
--------
complex correlation vector between x and y (orientation independent)
complex correlation angle (ccw rotation of y with respect to x)
Reference:
----------
Kundu, Pijush K., 1976: Ekman Veering Observed near the Ocean Bottom. J. Phys. Oceanogr., 6, 238-242
"""
x = x[0] + 1j* x[1]
y = y[0] + 1j* y[1]
# From equation 3.3
corr = np.inner(np.conjugate(x),y) \
/ (np.sqrt(np.inner(np.conjugate(x),x)) * np.sqrt(np.inner(np.conjugate(y),y)))
corr_mag = np.sqrt(corr.real**2 +corr.imag**2)
corr_angle = np.rad2deg(np.arctan2(corr.imag, corr.real))
"""
# From equation 3.6 and 3.7
# what is the innerproduct of <u1u2 + v1v2> ???
real_c = (x[0]*y[0] + x[1]*y[1]) / (np.sqrt(x[0]**2. + y[0]**2.) * np.sqrt(x[1]**2. + y[1]**2.))
imag_c = 1j * (x[0]*y[1] - x[1]*y[0]) / (np.sqrt(x[0]**2. + y[0]**2.) * np.sqrt(x[1]**2. + y[1]**2.))
corr_angle = np.arctan2((x[0]*y[1] - x[1]*y[0]), (x[0]*y[0] + x[1]*y[1]))
"""
return (corr_mag, corr_angle)
def wind_power_law(comp_orig, height_obs=3., height_interp=10., correction=False, exponent=0.143):
    """Adjust a wind component between heights with the wind-profile power law.

    Parameters:
    -----------
    comp_orig : wind component observed at height_obs
    height_obs : observation height in meters (default 3 m)
    height_interp : target height in meters (default 10 m)
    correction : if False (default), return the input unchanged
    exponent : power-law exponent alpha (default 0.143, i.e. the commonly
        used neutral-stability ~1/7 value; now a parameter instead of a
        hard-coded constant, backward compatible)

    Returns:
    --------
    Wind component scaled to height_interp, or unchanged if correction=False.
    """
    if not correction:
        return comp_orig
    # u2 = u1 * (z2 / z1) ** alpha
    return comp_orig * (height_interp / height_obs) ** exponent
def hourly_2_ave(ltbound, utbound, time, data, time_base=6.):
    """Bin-average a time series into time_base-hour intervals.

    Returns a dict with per-bin 'mean' and 'std' arrays plus 'time'
    (the left edges of the bins, in the same units as `time`), covering
    [ltbound, utbound) in steps of time_base hours.
    """
    interval = time_base / 24.  # bin width expressed in days
    bin_edges = np.arange(ltbound, utbound, interval)
    bin_mean = np.zeros_like(bin_edges)
    bin_std = np.zeros_like(bin_edges)
    for idx, left_edge in enumerate(bin_edges):
        in_bin = (time >= left_edge) & (time < left_edge + interval)
        bin_mean[idx] = data[in_bin].mean()
        bin_std[idx] = data[in_bin].std()
    return ({'mean': bin_mean, 'std': bin_std, 'time': bin_edges})
def cart2wind(cart_angle):
    """Convert a math-convention angle (0 deg = East, ccw positive) to the
    meteorological convention (0 deg = North, clockwise positive)."""
    # reflect about the NE diagonal, then wrap into [0, 360)
    return (90. - cart_angle) % 360.
def rotate_coord(angle_rot, mag, dir):
    """Rotate math-convention (magnitude, direction) vectors into
    along-/cross-shelf components.

    + onshore / along coast with land to right (right handed)
    - offshore / along coast with land to left
    Todo: convert met standard for winds (left handed coordinate system
    """
    rotated = np.deg2rad(dir - angle_rot)
    along_component = mag * np.sin(rotated)
    cross_component = mag * np.cos(rotated)
    return (along_component, cross_component)
"""---------------------------- Plotting Modules --------------------------------------"""
def quiver_timeseries(time,ucomp,vcomp,magnitude,data_source,station_name):
    """Two-panel wind plot: stick (quiver) timeseries over a magnitude fill,
    plus u/v component traces below, sharing a date axis.

    Saves images/<station>_<data_source>_timeseries.png and closes the figure.
    NOTE(review): mutates ucomp/vcomp/magnitude in place (bad points zeroed).
    """
    # zero out records that are NaN or implausibly large (>= 100 m/s)
    t_ind = ~(~np.isnan(magnitude) & (magnitude < 100))
    ucomp[t_ind] = 0.
    vcomp[t_ind] = 0.
    magnitude[t_ind] = 0.
    fig1, (ax1, ax2) = plt.subplots(2,1)
    # Plot quiver
    fill1 = ax1.fill_between(time, magnitude, 0, color='k', alpha=0.1)
    # Fake 'box' to be able to insert a legend for 'Magnitude'
    p = ax1.add_patch(plt.Rectangle((1,1),1,1,fc='k',alpha=0.1))
    leg1 = ax1.legend([p], ["Wind magnitude [m/s]"],loc='lower right')
    leg1._drawFrame=False
    # 1D Quiver plot
    q = ax1.quiver(time,0,ucomp,vcomp,color='r',units='y',scale_units='y',
                   scale = 1,headlength=1,headaxislength=1,width=0.04,alpha=.95)
    qk = plt.quiverkey(q,0.2, 0.05, 5,r'$5 \frac{m}{s}$',labelpos='W',
                       fontproperties={'weight': 'bold'})
    # Plot u and v components
    #ax1.set_ylim(-magnitude.max(), magnitude.max())
    ax1.set_ylim(-25, 25)
    ax1.axes.get_xaxis().set_visible(False)
    ax1.set_xlim(time.min(),time.max()+0.5)
    ax1.set_ylabel("Velocity (m/s)")
    ax2.plot(time, vcomp, 'b-')
    ax2.plot(time, ucomp, 'g-')
    ax2.set_xlim(time.min(),time.max()+0.5)
    ax2.set_ylim(-25, 25)
    ax2.set_xlabel("Date (UTC)")
    ax2.set_ylabel("Velocity (m/s)")
    # monthly major ticks / daily minor ticks on the shared date axis
    ax2.xaxis.set_major_locator(MonthLocator())
    ax2.xaxis.set_major_formatter(DateFormatter('%b %Y'))
    ax2.xaxis.set_minor_locator(DayLocator())
    # hide the touching spines so the two panels read as one figure
    ax1.spines['bottom'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    ax2.xaxis.set_ticks_position('bottom')
    ax2.yaxis.set_ticks_position('both')
    #fig1.autofmt_xdate()
    # Set legend location - See: http://matplotlib.org/users/legend_guide.html#legend-location
    leg2 = plt.legend(['v','u'],loc='upper left')
    leg2._drawFrame=False
    # widen the figure to 2x default for the long time axis
    DefaultSize = fig1.get_size_inches()
    fig1.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]) )
    fig1.suptitle("Ave Wind data for: " + data_source, fontsize=12)
    # Save figure (without 'white' borders)
    plt.savefig('images/'+ station_name.lower() +'_' + data_source + '_timeseries.png', bbox_inches='tight', dpi = (100))
    plt.close(fig1)
def dir_mag_hist(data1, data2, data3):
    """Wind-direction histograms for three (magnitude, direction) datasets,
    one subplot per magnitude band.

    Each dataN is indexable as dataN[0] -> magnitude and dataN[1] ->
    direction (degrees); bars use 45-degree bins.  Bands are 0-2, 2-8 and
    8-100 (units as supplied, presumably m/s -- TODO confirm).
    NOTE(review): plt.show() is called after each subplot and the figure is
    never saved.
    """
    #set mag bounds
    upper_lim = [2,8,100]
    lower_lim = [0,2,8]
    fig = plt.figure()
    ax = plt.subplot(311)
    # band 1: magnitudes in (0, 2]
    plt.hist([data1[1][(data1[0] > lower_lim[0]) & (data1[0] <= upper_lim[0])], \
              data2[1][(data2[0] > lower_lim[0]) & (data2[0] <= upper_lim[0])], \
              data3[1][(data3[0] > lower_lim[0]) & (data3[0] <= upper_lim[0])]], np.arange(0,405,45), color=[(0,0,0),(.5,.5,.5),(1,1,1)])
    plt.show()
    ax = plt.subplot(312)
    # band 2: magnitudes in (2, 8]
    plt.hist([data1[1][(data1[0] > lower_lim[1]) & (data1[0] <= upper_lim[1])], \
              data2[1][(data2[0] > lower_lim[1]) & (data2[0] <= upper_lim[1])], \
              data3[1][(data3[0] > lower_lim[1]) & (data3[0] <= upper_lim[1])]], np.arange(0,405,45), color=[(0,0,0),(.5,.5,.5),(1,1,1)])
    plt.show()
    ax = plt.subplot(313)
    # band 3: magnitudes in (8, 100]
    plt.hist([data1[1][(data1[0] > lower_lim[2]) & (data1[0] <= upper_lim[2])], \
              data2[1][(data2[0] > lower_lim[2]) & (data2[0] <= upper_lim[2])], \
              data3[1][(data3[0] > lower_lim[2]) & (data3[0] <= upper_lim[2])]], np.arange(0,405,45), color=[(0,0,0),(.5,.5,.5),(1,1,1)])
    plt.show()
"""---------------------------- Main Routine-------------------------------------------"""
"""------Ingest 1D Data--------"""
### NARR Data has the following boundary corners:
# Lambert Conformal
# 12.2N;133.5W, 54.5N; 152.9W, 57.3N; 49.4W ,14.3N;65.1W
year_long = '1997'
year_short = year_long[2:]
moor_sta_long = 'inner1'
moor_sta_short = 'ifm1'
NARR = '/Users/bell/Data_Local/Reanalysis_Files/NARR/3hourly/'
NCEP = '/Users/bell/Data_Local/Reanalysis_Files/NCEPV2/6hourly/'
infile_narr = [NARR + 'uwnd.10m.'+year_long+'.nc', NARR + 'vwnd.10m.'+year_long+'.nc']
infile_ncep = [NCEP + 'uwnd.10m.gauss.'+year_long+'.nc', NCEP + 'vwnd.10m.gauss.'+year_long+'.nc']
### Grab grid points for future slicing - assume grid is same in all model output
narrlat_lon = latlon_grid(infile_narr[0])
nceplat_lon = latlon_grid(infile_ncep[0])
multifile=False
if multifile:
try: #two possible file types amp and wpak
MooringFile = '/Users/bell/Data_Local/FOCI/Mooring/' + year_long + '/' + moor_sta_long.lower() + \
'/' + year_short + moor_sta_short +'*_amp.nc'
MooringMetData, Mooring_params = from_netcdf_mf(MooringFile)
MooringMetData = missing2nan(MooringMetData, Mooring_params)
except RuntimeError:
MooringFile = '/Users/bell/Data_Local/FOCI/Mooring/' + year_long + '/' + moor_sta_long.lower() + \
'/' + year_short + moor_sta_short +'*_wpak.nc'
MooringMetData, Mooring_params = from_netcdf_mf(MooringFile)
MooringMetData = missing2nan(MooringMetData, Mooring_params)
else:
try: #two possible file types amp and wpak
MooringFile = '/Users/bell/Data_Local/FOCI/Mooring/' + year_long + '/' + moor_sta_long.lower() + \
'/' + year_short + moor_sta_short +'a_amp.nc'
MooringMetData, Mooring_params = from_netcdf(MooringFile)
MooringMetData = missing2nan(MooringMetData, Mooring_params)
except RuntimeError:
MooringFile = '/Users/bell/Data_Local/FOCI/Mooring/' + year_long + '/' + moor_sta_long.lower() + \
'/' + year_short + moor_sta_short +'a_wpak.nc'
MooringMetData, Mooring_params = from_netcdf(MooringFile)
MooringMetData = missing2nan(MooringMetData, Mooring_params)
MooringTime = date2pydate(MooringMetData['time'], MooringMetData['time2'], file_flag='EPIC')
sta_lat = MooringMetData['latitude'][0]
sta_long = MooringMetData['longitude'][0]
### force comparison location
#sta_lat = 56.
#sta_long = 165.
#Find NCEP and NARR nearest point to mooring
narrpt = sphered.nearest_point([sta_lat,-1 * sta_long],narrlat_lon['lat'],narrlat_lon['lon'], '2d')
nceppt = sphered.nearest_point([sta_lat,-1 * sta_long],nceplat_lon['lat'],nceplat_lon['lon']-360., '1d') #grid shift too
#Read in NARR and NCEP data for location chosen
NARR_uwind = from_netcdf_1dsplice(infile_narr[0], None, narrpt[3], narrpt[4])
NARR_vwind = from_netcdf_1dsplice(infile_narr[1], None, narrpt[3], narrpt[4])
NARRTime = date2pydate(NARR_uwind['time'], file_flag='NARR')
NCEP_uwind = from_netcdf_1dsplice(infile_ncep[0], 0, nceppt[3], nceppt[4])
NCEP_vwind = from_netcdf_1dsplice(infile_ncep[1], 0, nceppt[3], nceppt[4])
NCEPTime = date2pydate(NCEP_uwind['time'], file_flag='NCEP')
### calculate 6hr averages for all datasets using NARR time base
time_bin = 6.
time_str = str(time_bin) + 'hr'
NARRDaily_uwnd = hourly_2_ave(NARRTime.min(),NARRTime.max(), NARRTime, NARR_uwind['uwnd'], time_base=time_bin)
NARRDaily_vwnd = hourly_2_ave(NARRTime.min(),NARRTime.max(), NARRTime, NARR_vwind['vwnd'], time_base=time_bin)
NCEPDaily_uwnd = hourly_2_ave(NARRTime.min(),NARRTime.max(), NCEPTime, NCEP_uwind['uwnd'], time_base=time_bin)
NCEPDaily_vwnd = hourly_2_ave(NARRTime.min(),NARRTime.max(), NCEPTime, NCEP_vwind['vwnd'], time_base=time_bin)
MooringDaily_uwnd = hourly_2_ave(NARRTime.min(),NARRTime.max(), MooringTime, \
wind_power_law(MooringMetData['WU_422'][:,0,0,0], correction=True), time_base=time_bin)
MooringDaily_vwnd = hourly_2_ave(NARRTime.min(),NARRTime.max(), MooringTime, \
wind_power_law(MooringMetData['WV_423'][:,0,0,0], correction=True), time_base=time_bin)
"""---------------------------- Data Manipulation Routines-----------------------------"""
NARR_wind_mag = np.sqrt(NARRDaily_vwnd['mean']**2. + NARRDaily_uwnd['mean']**2.)
NARR_wind_dir_math = np.rad2deg(np.arctan2(NARRDaily_vwnd['mean'] , NARRDaily_uwnd['mean']))
NCEP_wind_mag = np.sqrt(NCEPDaily_vwnd['mean']**2. + NCEPDaily_uwnd['mean']**2.)
NCEP_wind_dir_math = np.rad2deg(np.arctan2(NCEPDaily_vwnd['mean'] , NCEPDaily_uwnd['mean']))
Mooring_wind_mag = np.sqrt(MooringDaily_uwnd['mean']**2. + MooringDaily_vwnd['mean']**2.)
Mooring_wind_dir_math = np.rad2deg(np.arctan2(MooringDaily_vwnd['mean'] , MooringDaily_uwnd['mean']))
# mask when mooring wasn't available
t_ind = ~np.isnan(Mooring_wind_mag)
### Calculate +-flow and x-flow rotating along coast (~0 degrees bearing for no coordinate shift)
(NARRalong, NARRcross) = rotate_coord(0., NARR_wind_mag, NARR_wind_dir_math)
(NCEPalong, NCEPcross) = rotate_coord(0., NCEP_wind_mag, NCEP_wind_dir_math)
(MOORalong, MOORcross) = rotate_coord(0., Mooring_wind_mag, Mooring_wind_dir_math)
### normalize data
normalize = False
if normalize:
NARRalong = (NARRalong ) / (NARR_wind_mag)
NCEPalong = (NCEPalong ) / (NCEP_wind_mag)
MOORalong = (MOORalong ) / (Mooring_wind_mag)
NARRcross = (NARRcross ) / (NARR_wind_mag)
NCEPcross = (NCEPcross ) / (NCEP_wind_mag)
MOORcross = (MOORcross ) / (Mooring_wind_mag)
"""---------------------------- Plotting Routines--------------------------------------"""
### standard wind / time plots
# NARR
quiver_timeseries(NARRDaily_uwnd['time'],NARRDaily_uwnd['mean'],NARRDaily_vwnd['mean'],NARR_wind_mag,'NARR', moor_sta_long.lower() )
quiver_timeseries(NCEPDaily_uwnd['time'],NCEPDaily_uwnd['mean'],NCEPDaily_vwnd['mean'],NCEP_wind_mag,'NCEP', moor_sta_long.lower() )
quiver_timeseries(MooringDaily_uwnd['time'],MooringDaily_uwnd['mean'],MooringDaily_vwnd['mean'],Mooring_wind_mag,'' + moor_sta_long.lower() + '', moor_sta_long.lower() )
###split timeseries
# 56 represents 2 weeks for 12 hour ave data
len2split = 56
len2split = int(len( MOORalong[t_ind] ))
split_int = np.ceil( len( MOORalong[t_ind] ) / len2split )
print split_int
for add_ind in range(0,int(split_int)):
split_ind = range(add_ind*len2split,(add_ind*len2split)+len2split)
if (split_ind[-1] > len( MOORalong[t_ind] )):
split_ind = range(add_ind*len2split,len( MOORalong[t_ind] ))
print "The last group has the following number of datapoints: " + np.str( len(split_ind) )
print "Group " + np.str(add_ind)
""" Most relevant plots below... along/U-comp coorelations"""
### Along/Cross Shore comparisons Mooring vs NARR/NCEP
# for entire year (mark mooring specific times)
fig = plt.figure(6)
#text locations
right = 0.05
top = .95
(slope, intercept, r_value, p_value, std_err) = lin_fit(MOORalong[t_ind][split_ind], NARRalong[t_ind][split_ind])
print "Regression stats for V-comp (along) Mooring v NARR are: %s %s " % (slope, r_value**2)
(coor_mag, coor_angle) = comp_corr((MOORcross[t_ind][split_ind],MOORalong[t_ind][split_ind]),(NARRcross[t_ind][split_ind],NARRalong[t_ind][split_ind]))
print "NARR Complex correlation mag - %s and dir - %s" % (coor_mag, coor_angle)
ax = plt.subplot(221)
p1 = ax.plot(MOORalong[t_ind][split_ind], NARRalong[t_ind][split_ind])
plt.setp(p1,color='r', marker='+', linestyle='')
p2 = ax.plot(np.arange(-15,16,5),np.arange(-15,16,5))
plt.setp(p2,'color','k','linestyle','--')
p3 = ax.plot(np.arange(-15,16,5),(slope * np.arange(-15,16,5) + intercept) )
plt.setp(p3,'color','k','linestyle','-.')
ax.text(right, top, r"${r^2}$: %0.2f" % (r_value**2.),
horizontalalignment='left',
verticalalignment='top',
transform=ax.transAxes, size=10)
ax.set_xticks(np.arange(-15,16,5))
ax.set_yticks(np.arange(-15,16,5))
ax.set_xlim((-15,15))
ax.set_ylim((-15,15))
ax.set_xlabel(time_str + ' ' + moor_sta_long.lower() + ' V-comp Flow (m/s)')
ax.set_ylabel(time_str + ' NARR V-comp Flow (m/s)')
(slope, intercept, r_value, p_value, std_err) = lin_fit(MOORalong[t_ind][split_ind], NCEPalong[t_ind][split_ind])
print "Regression stats for V-comp (along) Mooring v NCEP are: %s %s " % (slope, r_value**2)
(coor_mag, coor_angle) = comp_corr((MOORcross[t_ind][split_ind],MOORalong[t_ind][split_ind]),(NCEPcross[t_ind][split_ind],NCEPalong[t_ind][split_ind]))
print "NCEP Complex correlation mag - %s and dir - %s" % (coor_mag, coor_angle)
ax = plt.subplot(223)
p1 = ax.plot(MOORalong[t_ind][split_ind], NCEPalong[t_ind][split_ind])
plt.setp(p1,color='r', marker='+', linestyle='')
p2 = ax.plot(np.arange(-15,16,5),np.arange(-15,16,5))
plt.setp(p2,'color','k','linestyle','--')
p3 = ax.plot(np.arange(-15,16,5),(slope * np.arange(-15,16,5) + intercept) )
plt.setp(p3,'color','k','linestyle','-.')
ax.text(right, top, r"${r^2}$: %0.2f" % (r_value**2.),
horizontalalignment='left',
verticalalignment='top',
transform=ax.transAxes, size=10)
ax.set_yticks(np.arange(-15,16,5))
ax.set_xticks(np.arange(-15,16,5))
ax.set_xlim((-15,15))
ax.set_ylim((-15,15))
ax.set_xlabel(time_str + ' ' + moor_sta_long.lower() + ' V-comp Flow (m/s)')
ax.set_ylabel(time_str + ' NCEP V-comp Flow (m/s)')
(slope, intercept, r_value, p_value, std_err) = lin_fit(MOORcross[t_ind][split_ind], NARRcross[t_ind][split_ind])
print "Regression stats for U-comp Mooring (across) v NARR are: %s %s " % (slope, r_value**2)
ax = plt.subplot(222)
p1 = ax.plot(MOORcross[t_ind][split_ind], NARRcross[t_ind][split_ind])
plt.setp(p1,color='r', marker='+', linestyle='')
p2 = ax.plot(np.arange(-15,16,5),np.arange(-15,16,5))
plt.setp(p2,'color','k','linestyle','--')
p3 = ax.plot(np.arange(-15,16,5),(slope * np.arange(-15,16,5) + intercept) )
plt.setp(p3,'color','k','linestyle','-.')
ax.text(right, top, r"${r^2}$: %0.2f" % (r_value**2.),
horizontalalignment='left',
verticalalignment='top',
transform=ax.transAxes, size=10)
ax.set_xticks(np.arange(-15,16,5))
ax.set_yticks(np.arange(-15,16,5))
ax.set_xlim((-15,15))
ax.set_ylim((-15,15))
ax.set_xlabel(time_str + ' ' + moor_sta_long.lower() + ' U-comp Flow (m/s)')
ax.set_ylabel(time_str + ' NARR U-comp Flow (m/s)')
(slope, intercept, r_value, p_value, std_err) = lin_fit(MOORcross[t_ind][split_ind], NCEPcross[t_ind][split_ind])
print "Regression stats for U-comp Mooring (across) v NCEP are: %s %s " % (slope, r_value**2)
ax = plt.subplot(224)
p1 = ax.plot(MOORcross[t_ind][split_ind], NCEPcross[t_ind][split_ind])
plt.setp(p1,color='r', marker='+', linestyle='')
p2 = ax.plot(np.arange(-15,16,5),np.arange(-15,16,5))
plt.setp(p2,'color','k','linestyle','--')
p3 = ax.plot(np.arange(-15,16,5),(slope * np.arange(-15,16,5) + intercept) )
plt.setp(p3,'color','k','linestyle','-.')
ax.text(right, top, r"${r^2}$: %0.2f" % (r_value**2.),
horizontalalignment='left',
verticalalignment='top',
transform=ax.transAxes, size=10)
ax.set_xticks(np.arange(-15,16,5))
ax.set_yticks(np.arange(-15,16,5))
ax.set_xlim((-15,15))
ax.set_ylim((-15,15))
ax.set_xlabel(time_str + ' ' + moor_sta_long.lower() + ' U-comp Flow (m/s)')
ax.set_ylabel(time_str + ' NCEP U-comp Flow (m/s)')
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*1.5, DefaultSize[1]*1.5) )
plt.savefig('images/' + moor_sta_long.lower() + '_alongacross_comp.png', bbox_inches='tight', dpi = (100))
plt.close()
### Plot geolocations of datasets
plot_loc = True
if plot_loc:
    (topoin, elats, elons) = etopo5_data()
    fig = plt.figure()
    ax = plt.subplot(111)
    # Mercator map covering the mooring and both reanalysis grids
    m = Basemap(resolution='i',projection='merc', llcrnrlat=46, \
        urcrnrlat=62,llcrnrlon=-180,urcrnrlon=-140, lat_ts=45)
    # NCEP gaussian grid: 1-D lat/lon vectors -> 2-D mesh (lon sign flipped)
    lon_ncep, lat_ncep = np.meshgrid(-1. * nceplat_lon['lon'],nceplat_lon['lat'])
    x, y = m(lon_ncep, lat_ncep)
    #NARR - array given to define grid
    x_narr, y_narr = m(narrlat_lon['lon'],narrlat_lon['lat'])
    # Mooring Data
    x_moor, y_moor = m(-1. * sta_long,sta_lat)
    #ETOPO 5 contour data
    ex, ey = m(elons, elats)
    # filled topography plus labeled contour lines (and a few isobaths)
    CS = m.contourf(ex,ey,topoin, levels=range(250,5000,250), cmap='gray_r', alpha=.75) #colors='black'
    CS = m.contour(ex,ey,topoin, levels=range(250,5000,250), linewidths=0.2, colors='black', alpha=.75) #
    CS = m.contour(ex,ey,topoin, levels=[-1000, -200, -100], linestyle='--', linewidths=0.2, colors='black', alpha=.75) #
    plt.clabel(CS, inline=1, fontsize=8, fmt='%1.0f')
    #plot points
    m.scatter(x,y,20,marker='+',color='r', alpha=.75)
    m.scatter(x_narr,y_narr,20,marker='x',color='b', alpha=.75)
    m.scatter(x_moor,y_moor,20,marker='o',color='g', alpha=.75)
    m.drawcountries(linewidth=0.5)
    m.drawcoastlines(linewidth=0.5)
    m.drawparallels(np.arange(46,66,4.),labels=[1,0,0,0],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw parallels
    m.drawmeridians(np.arange(-180,-140,5.),labels=[0,0,0,1],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw meridians
    #m.fillcontinents(color='black')
    DefaultSize = fig.get_size_inches()
    fig.set_size_inches( (DefaultSize[0]*1.5, DefaultSize[1]*1.5) )
    plt.savefig('images/' + moor_sta_long+ '_region.png', bbox_inches='tight', dpi = (100))
    plt.close()
|
shaunwbell/FOCI_Analysis
|
ReanalysisRetreival_orig/BS_Winds/BS_Winds_NARR_hr.py
|
Python
|
mit
| 24,994
|
[
"NetCDF"
] |
4569c938372743afa91a882f0f51391f9cc5dfc5ba0e33bda9cc6fe896026091
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
from os import path
import urllib
import numpy as np
import ocw.data_source.local as local
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Two Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "wrf_bias_compared_to_knmi"
# Download the two model files into /tmp unless already cached.
# NOTE(review): urllib.urlretrieve is Python 2 only
# (urllib.request.urlretrieve in Python 3) -- confirm target interpreter.
FILE_1_PATH = path.join('/tmp', FILE_1)
FILE_2_PATH = path.join('/tmp', FILE_2)
if not path.exists(FILE_1_PATH):
    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1_PATH)
if not path.exists(FILE_2_PATH):
    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2_PATH)
""" Step 1: Load Local NetCDF Files into OCW Dataset Objects """
print("Loading %s into an OCW Dataset Object" % (FILE_1_PATH,))
knmi_dataset = local.load_file(FILE_1_PATH, "tasmax")
print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" % (knmi_dataset.values.shape,))
print("Loading %s into an OCW Dataset Object" % (FILE_2_PATH,))
wrf_dataset = local.load_file(FILE_2_PATH, "tasmax")
print("WRF_Dataset.values shape: (times, lats, lons) - %s \n" % (wrf_dataset.values.shape,))
""" Step 2: Temporally Rebin the Data into an Annual Timestep """
print("Temporally Rebinning the Datasets to an Annual Timestep")
knmi_dataset = dsp.temporal_rebin(knmi_dataset, datetime.timedelta(days=365))
wrf_dataset = dsp.temporal_rebin(wrf_dataset, datetime.timedelta(days=365))
print("KNMI_Dataset.values shape: %s" % (knmi_dataset.values.shape,))
print("WRF_Dataset.values shape: %s \n\n" % (wrf_dataset.values.shape,))
""" Step 3: Spatially Regrid the Dataset Objects to a 1 degree grid """
# The spatial_boundaries() function returns the spatial extent of the dataset
print("The KNMI_Dataset spatial bounds (min_lat, max_lat, min_lon, max_lon) are: \n"
      "%s\n" % (knmi_dataset.spatial_boundaries(), ))
print("The KNMI_Dataset spatial resolution (lat_resolution, lon_resolution) is: \n"
      "%s\n\n" % (knmi_dataset.spatial_resolution(), ))
min_lat, max_lat, min_lon, max_lon = knmi_dataset.spatial_boundaries()
# Using the bounds we will create a new set of lats and lons on 1 degree step
new_lons = np.arange(min_lon, max_lon, 1)
new_lats = np.arange(min_lat, max_lat, 1)
# Spatially regrid datasets using the new_lats, new_lons numpy arrays
print("Spatially Regridding the KNMI_Dataset...")
knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
print("Final shape of the KNMI_Dataset: \n"
      "%s\n" % (knmi_dataset.values.shape, ))
print("Spatially Regridding the WRF_Dataset...")
wrf_dataset = dsp.spatial_regrid(wrf_dataset, new_lats, new_lons)
print("Final shape of the WRF_Dataset: \n"
      "%s\n" % (wrf_dataset.values.shape, ))
""" Step 4: Build a Metric to use for Evaluation - Bias for this example """
# You can build your own metrics, but OCW also ships with some common metrics
print("Setting up a Bias metric to use for evaluation")
bias = metrics.Bias()
""" Step 5: Create an Evaluation Object using Datasets and our Metric """
# The Evaluation Class Signature is:
# Evaluation(reference, targets, metrics, subregions=None)
# Evaluation can take in multiple targets and metrics, so we need to convert
# our examples into Python lists.  Evaluation will iterate over the lists
print("Making the Evaluation definition")
bias_evaluation = evaluation.Evaluation(knmi_dataset, [wrf_dataset], [bias])
print("Executing the Evaluation using the object's run() method")
bias_evaluation.run()
""" Step 6: Make a Plot from the Evaluation.results """
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# The Evaluation results docs say:
# The shape of results is (num_metrics, num_target_datasets) if no subregion
# Accessing the actual results when we have used 1 metric and 1 dataset is
# done this way:
print("Accessing the Results of the Evaluation run")
results = bias_evaluation.results[0][0]
print("The results are of type: %s" % type(results))
# From the bias output I want to make a Contour Map of the region
print("Generating a contour map using ocw.plotter.draw_contour_map()")
lats = new_lats
lons = new_lons
fname = OUTPUT_PLOT
# NOTE(review): trailing comment says "20 rows in 1 column" but the
# gridshape is 4x5 -- the comment appears stale; layout is 4 rows x 5 cols.
gridshape = (4, 5) # 20 Years worth of plots. 20 rows in 1 column
plot_title = "TASMAX Bias of WRF Compared to KNMI (1989 - 2008)"
sub_titles = range(1989, 2009, 1)
plotter.draw_contour_map(results, lats, lons, fname,
                         gridshape=gridshape, ptitle=plot_title,
                         subtitles=sub_titles)
|
MJJoyce/climate
|
examples/simple_model_to_model_bias.py
|
Python
|
apache-2.0
| 5,505
|
[
"NetCDF"
] |
db7f988b55ff47a0019edbe9908d9c888c192d9daa9400867dc1d49fe2cd0e2e
|
# coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SEED agent using Keras for continuous control tasks."""
import collections
import gin
from seed_rl.agents.policy_gradient.modules import input_normalization
from seed_rl.agents.policy_gradient.modules import running_statistics
from seed_rl.common import utils
import tensorflow as tf
# Per-step agent output: sampled action, the distribution parameters
# ("policy_logits") and the value-function estimate ("baseline").
AgentOutput = collections.namedtuple('AgentOutput',
                                     'action policy_logits baseline')
@gin.configurable
class ContinuousControlAgent(tf.Module):
"""Agent for continuous control tasks."""
def __init__(self,
parametric_action_distribution,
observation_normalizer=None,
num_layers_policy=3,
num_layers_value=3,
num_layers_rnn=0,
num_units_policy=256,
num_units_value=256,
num_units_rnn=256,
layer_normalizer=None,
shared=False,
residual_connections=False,
activation=None,
kernel_init='glorot_uniform',
last_kernel_init_value=None,
last_kernel_init_value_scaling=None,
last_kernel_init_policy=None,
last_kernel_init_policy_scaling=None,
correct_observations=False,
std_independent_of_input=False,
input_clipping=None):
"""Creates the ContinuousControlAgent.
Args:
parametric_action_distribution: SEED distribution used for the actions.
observation_normalizer: InputNormalization instance used to normalize
observations or None for no normalization.
num_layers_policy: Integer with the number of hidden layers in the policy
MLP. Needs to be the same as `num_layers_value` if shared=True.
num_layers_value: Integer with the number of hidden layers in the value
MLP. If None, the number of layers is the same as in the policy.
Needs to be the same as `num_layers_policy` if shared=True.
num_layers_rnn: Number of RNN layers.
num_units_policy: Integer with the number of hidden units in the policy
MLP. Needs to be the same as `num_units_value` if shared=True.
num_units_value: Integer with the number of hidden units in the value
MLP. If None, the number of units is the same as in the policy.
Needs to be the same as `num_units_policy` if shared=True.
num_units_rnn: Integer with the number of hidden units in the RNN.
layer_normalizer: Function that returns a tf.keras.Layer instance used to
normalize observations or None for no layer normalization.
shared: Boolean indicating whether the MLPs (except the heads) should be
shared for the value and the policy networks.
residual_connections: Boolean indicating whether residual connections
should be added to all the layers except the first and last ones in the
MLPs.
activation: Activation function to be passed to the dense layers in the
MLPs or None (in which case the swish activation function is used).
kernel_init: tf.keras.initializers.Initializer instance used to initialize
the dense layers of the MLPs.
last_kernel_init_value: tf.keras.initializers.Initializer instance used to
initialize the last dense layers of the value MLP or None (in which case
`kernel_init` is used).
last_kernel_init_value_scaling: None or a float that is used to rescale
the initial weights of the value network.
last_kernel_init_policy: tf.keras.initializers.Initializer instance used
to initialize the last dense layers of the policy MLP or None (in which
case `kernel_init` is used).
last_kernel_init_policy_scaling: None or a float that is used to rescale
the initial weights of the policy network.
correct_observations: Boolean indicating if changes in the
`observation_normalizer` due to updates should be compensated in
trainable compensation variables.
std_independent_of_input: If a Gaussian action distribution is used,
this parameter makes the standard deviation trainable but independent
of the policy input.
input_clipping: None or float that is used to clip input values to range
[-input_clipping, input_clipping] after (potential) input normalization.
"""
super(ContinuousControlAgent, self).__init__(name='continuous_control')
# Default values.
if observation_normalizer is None:
# No input normalization.
observation_normalizer = input_normalization.InputNormalization(
running_statistics.FixedMeanStd())
if activation is None:
activation = swish
if last_kernel_init_value is None:
last_kernel_init_value = kernel_init
last_kernel_init_value = _rescale_initializer(
last_kernel_init_value, last_kernel_init_value_scaling)
if last_kernel_init_policy is None:
last_kernel_init_policy = kernel_init
last_kernel_init_policy = _rescale_initializer(
last_kernel_init_policy, last_kernel_init_policy_scaling)
if layer_normalizer is None:
layer_normalizer = lambda: (lambda x: x)
# Parameters and layers for unroll.
self._parametric_action_distribution = parametric_action_distribution
self.observation_normalizer = observation_normalizer
self._correct_observations = correct_observations
# Build the required submodules.
self._shared = tf.keras.Sequential()
self._policy = tf.keras.Sequential()
self._value = tf.keras.Sequential()
# Build the torso(s).
num_layers_value = num_layers_value or num_layers_policy
num_units_value = num_units_value or num_units_policy
if shared:
if num_layers_value != num_layers_policy:
raise ValueError('If shared=True, num_layers_value needs to be equal to'
' num_layers_policy')
if num_units_value != num_units_policy:
raise ValueError('If shared=True, num_units_value needs to be equal to'
' num_units_policy')
_add_layers(self._shared, num_layers_value, num_units_value, kernel_init,
activation, layer_normalizer, residual_connections)
else:
_add_layers(self._policy,
num_layers_policy, num_units_policy, kernel_init, activation,
layer_normalizer, residual_connections)
_add_layers(self._value, num_layers_value, num_units_value, kernel_init,
activation, layer_normalizer, residual_connections)
# Build the recurrent layers (if needed).
if num_layers_rnn:
lstm_sizes = [num_units_rnn] * num_layers_rnn
lstm_cells = [tf.keras.layers.LSTMCell(size) for size in lstm_sizes]
self._rnn = tf.keras.layers.StackedRNNCells(lstm_cells)
else:
self._rnn = None
# Build the policy head.
normalizer_policy = layer_normalizer()
policy_output_size = self._parametric_action_distribution.param_size
if std_independent_of_input:
policy_output_size //= 2
self._policy.add(
_Layer(policy_output_size,
last_kernel_init_policy, lambda x: x, normalizer_policy, False))
if std_independent_of_input:
self._policy.add(_ConcatTrainableTensor(tf.zeros(policy_output_size,
tf.float32)))
# Build the value head.
normalizer_value = normalizer_policy if shared else layer_normalizer()
self._value.add(
_Layer(1, last_kernel_init_value, lambda x: x, normalizer_value, False))
self._input_clipping = input_clipping
@tf.function
def initial_state(self, batch_size):
if self._rnn is None:
return ()
return self._rnn.get_initial_state(batch_size=batch_size, dtype=tf.float32)
# Not clear why, but if "@tf.function" declarator is placed directly onto
# __call__, training fails with "uninitialized variable *baseline".
# when running on multiple learning tpu cores.
@tf.function
def get_action(self, input_, core_state):
return self.__call__(input_, core_state)
def update_observation_normalization_statistics(self, observations):
"""Updates the observation normalization statistics.
Args:
observations: a batch of observations with shape [time, batch_size,
obs_size].
"""
self.observation_normalizer.update_normalization_statistics(observations)
  def __call__(self, input_, core_state, unroll=False, is_training=False):
    """Applies the network.

    Args:
      input_: A pair (prev_actions: <int32>[batch_size], env_outputs: EnvOutput
        structure where each tensor has a [batch_size] front dimension). When
        unroll is True, an unroll (sequence of transitions) is expected, and
        those tensors are expected to have [time, batch_size] front dimensions.
      core_state: Opaque (batched) recurrent state structure corresponding to
        the beginning of the input sequence of transitions.
      unroll: Whether the input is an unroll (sequence of transitions) or just a
        single (batched) transition.
      is_training: Enables normalization statistics updates (when unroll is
        True).

    Returns:
      A pair:
        - agent_output: AgentOutput structure. Tensors have front dimensions
          [batch_size] or [time, batch_size] depending on the value of 'unroll'.
        - core_state: Opaque (batched) recurrent state structure.
    """
    # Previous actions are not used by this network; only env outputs are.
    _, env_outputs = input_
    # We first handle initializing and (potentially) updating normalization
    # statistics. We only update during the gradient update steps.
    # `is_training` is slightly misleading as it is also True during inference
    # steps in the training phase. We hence also require unroll=True which
    # indicates gradient updates.
    training_model_update = is_training and unroll
    # env_outputs[2] is the observation tensor (see _flat_apply_pre_lstm,
    # which unpacks observations as the third field of EnvOutput).
    data = env_outputs[2]
    if not self.observation_normalizer.initialized:
      if training_model_update:
        raise ValueError('It seems unlikely that stats should be updated in the'
                         ' same call where the stats are initialized.')
      # Lazily size the normalizer from the observation dimensionality.
      self.observation_normalizer.init_normalization_stats(data.shape[-1])
    if self._rnn is not None:
      if unroll:
        # Inputs have [time, batch_size] fronts: fold time into the batch
        # around the non-recurrent parts, and step the RNN over time.
        representations = utils.batch_apply(self._flat_apply_pre_lstm,
                                            (env_outputs,))
        representations, core_state = self._apply_rnn(
            representations, core_state, env_outputs.done)
        outputs = utils.batch_apply(self._flat_apply_post_lstm,
                                    (representations,))
      else:
        # Single transition: add a fake time dimension of 1 for the RNN and
        # squeeze it back out afterwards.
        representations = self._flat_apply_pre_lstm(env_outputs)
        representations, done = tf.nest.map_structure(
            lambda t: tf.expand_dims(t, 0),
            (representations, env_outputs.done))
        representations, core_state = self._apply_rnn(
            representations, core_state, done)
        representations = tf.nest.map_structure(
            lambda t: tf.squeeze(t, 0), representations)
        outputs = self._flat_apply_post_lstm(representations)
    else:
      # Simplify.
      if unroll:
        outputs = utils.batch_apply(self._flat_apply_no_lstm, (env_outputs,))
      else:
        outputs = self._flat_apply_no_lstm(env_outputs)
    return outputs, core_state
  def _apply_rnn(self, representations, core_state, done):
    """Apply the recurrent part of the network.

    Args:
      representations: The representations coming out of the non-recurrent
        part of the network, tensor of size [num_timesteps, batch_size, depth].
      core_state: The recurrent state, given as nested structure of
        sub-states. Each sub-states is of size [batch_size, substate_depth].
      done: Tensor of size [num_timesteps, batch_size] which indicates
        the end of a trajectory.

    Returns:
      A pair holding the representations coming out of the RNN (tensor of size
      [num_timesteps, batch_size, depth]) and the updated RNN state (same size
      as the input core_state).
    """
    batch_size = tf.shape(representations)[1]
    # Fresh state used to reset individual batch entries at episode ends.
    initial_core_state = self._rnn.get_initial_state(
        batch_size=batch_size, dtype=tf.float32)
    core_output_list = []
    # Statically unstack the time dimension and step the RNN cells manually.
    for input_, d in zip(tf.unstack(representations), tf.unstack(done)):
      # If the episode ended, the core state should be reset before the next.
      # tf.where picks the initial state (x) where `d` is True and the carried
      # state (y) otherwise; `d` is reshaped so it broadcasts over state dims.
      core_state = tf.nest.map_structure(
          lambda x, y, d=d: tf.where(
              tf.reshape(d, [d.shape[0]] + [1] * (x.shape.rank - 1)), x, y),
          initial_core_state,
          core_state)
      core_output, core_state = self._rnn(input_, core_state)
      core_output_list.append(core_output)
    # Re-stack per-step outputs into [num_timesteps, batch_size, depth].
    outputs = tf.stack(core_output_list)
    return outputs, core_state
  def _flat_apply_pre_lstm(self, env_outputs):
    """Normalizes observations and applies the shared (pre-RNN) torso."""
    # EnvOutput is a 5-tuple; observations are the third field.
    _, _, observations, _, _ = env_outputs
    # Input normalization.
    observations = self.observation_normalizer.normalize(observations)
    if self._input_clipping is not None:
      # Clip each normalized feature to [-input_clipping, input_clipping].
      observations = tf.clip_by_value(
          observations,
          -self._input_clipping,
          self._input_clipping)
    if self._correct_observations:
      # NOTE(review): the semantics of `correct` are defined by the
      # observation normalizer and are not visible here -- confirm.
      observations = self.observation_normalizer.correct(observations)
    # The actual MLPs with the different heads.
    representations = self._shared(observations)
    return representations
def _flat_apply_no_lstm(self, env_outputs):
"""Applies the modules."""
representations = self._flat_apply_pre_lstm(env_outputs)
return self._flat_apply_post_lstm(representations)
  def _flat_apply_post_lstm(self, representations):
    """Applies the value/policy heads and samples an action."""
    values = self._value(representations)
    logits = self._policy(representations)
    # The value head outputs [..., 1]; squeeze to one baseline per example.
    baselines = tf.squeeze(values, axis=-1)
    # Sample stochastically from the parametric action distribution.
    new_action = self._parametric_action_distribution(logits).sample(seed=None)
    return AgentOutput(new_action, logits, baselines)
@gin.configurable
def swish(input_activation):
  """Swish activation function: x * sigmoid(x)."""
  sigmoid_gate = tf.nn.sigmoid(input_activation)
  return tf.multiply(input_activation, sigmoid_gate)
def _add_layers(sequential, num_layers, num_units, kernel_init, activation,
                normalizer, residual_connections):
  """Adds several layers to a tf.keras.Sequential instance.

  The first layer is always added without a residual connection; later
  layers follow `residual_connections`.
  """
  for index in range(num_layers):
    use_residual = residual_connections if index else False
    sequential.add(
        _Layer(num_units, kernel_init, activation, normalizer(), use_residual))
class _Layer(tf.keras.layers.Layer):
  """Custom layer for our MLPs.

  Applies `normalizer` to the input, then a Dense layer, optionally adding
  the (un-normalized) input back as a residual connection.
  """
  def __init__(self, num_units, kernel_init, activation, normalizer,
               residual_connection):
    """Creates a _Layer.

    Args:
      num_units: Output width of the dense layer.
      kernel_init: Kernel initializer passed to tf.keras.layers.Dense.
      activation: Activation applied inside the dense layer.
      normalizer: Callable applied to the input before the dense layer.
      residual_connection: If True, the layer input is added to the output
        (input/output shapes must be broadcast-compatible).
    """
    super(_Layer, self).__init__()
    self.dense = tf.keras.layers.Dense(
        num_units, kernel_initializer=kernel_init, activation=activation)
    self.normalizer = normalizer
    self.residual_connection = residual_connection
  def call(self, tensor):
    # Pre-normalization: normalize first, then dense + activation.
    new_tensor = self.dense(self.normalizer(tensor))
    return tensor + new_tensor if self.residual_connection else new_tensor
class _ConcatTrainableTensor(tf.keras.layers.Layer):
  """Layer which concatenates a trainable tensor to its input.

  Used by the policy head when std_independent_of_input is set (see the
  constructor above) to append input-independent trainable parameters.
  """
  def __init__(self, init_value):
    """Creates a layer.

    Args:
      init_value: Rank-1 tensor with the initial value of the trainable
        vector that is concatenated to every input.
    """
    super(_ConcatTrainableTensor, self).__init__()
    assert init_value.ndim == 1
    self.init_value = init_value
  def build(self, shape):
    # Keras calls build() lazily on first use; the variable is created then.
    self.var = tf.Variable(self.init_value, trainable=True)
  def call(self, tensor):
    # Broadcast the trainable vector over all leading (batch) dimensions and
    # append it along the last axis.
    return tf.concat(values=[
        tensor,
        tf.broadcast_to(self.var, tensor.shape[:-1] + self.var.shape)
    ], axis=-1)
def _rescale_initializer(initializer, rescale):
if rescale is None:
return initializer
if isinstance(initializer, str):
initializer = tf.keras.initializers.get(initializer)
def rescaled_initializer(*args, **kwargs):
return rescale*initializer(*args, **kwargs)
return rescaled_initializer
|
google-research/seed_rl
|
agents/policy_gradient/modules/continuous_control_agent.py
|
Python
|
apache-2.0
| 16,457
|
[
"Gaussian"
] |
e15272a448d20e67f9d278cfe318424e2791ec6c77cb8327691e2505dbaf1024
|
from brian.stdunits import *
from brian.units import *
# Global scaling factor for network size and excitatory synaptic strength.
F = 1
# Number of sub-populations in the network.
N_SUBPOP = 2
# Interconnection rate and strength between the two sub-populations;
# both are 0 here, so the sub-populations are effectively disconnected.
INTERCO_RATE = 0
INTERCO_STRENGTH = 0
# Simulation parameter set, grouped by model component. All quantities carry
# Brian units (msecond, siemens, mvolt, ...).
PARAMETERS = {
    'Common':
        {'simu_dt'              : 0.05*msecond,
         'simu_length'          : 3000*msecond,
         'resample_dt'          : 0.5*msecond,
         'N_subpop'             : N_SUBPOP,
         'N_mitral'             : N_SUBPOP*100*F,
         # Cross-population connectivity, keyed by (source -> target) index.
         'inter_conn_rate'      : {0: {1: INTERCO_RATE},
                                   1: {0: INTERCO_RATE}},
         'inter_conn_strength'  : {0: {1: INTERCO_STRENGTH},
                                   1: {0: INTERCO_STRENGTH}},
         'homeostasy': True,
         'burnin': 1.  # period of the signal to discard for index computation
        },
    # Background excitatory input conductance (time constant, mean, noise).
    'Input':
        {'tau_Ein'   : 3*msecond,
         'g_Ein0'    : 2.*siemens*meter**-2,
         'sigma_Ein' : .035*siemens*meter**-2
        },
    'InputOscillation':
        {'f' : 2*Hz,
         'C' : 1  # Must be set to 1 for oscillation
        },
    # Mitral cell membrane parameters.
    'Mitral':
        {'C_m'       : 0.08*farad*meter**-2,
         'g_L'       : 0.87*siemens*meter**-2,
         'E_L'       : -64.5*mvolt,
         'V_r'       : -74*mvolt,
         'V_t'       : -62*mvolt,
         't_refract' : 0.2*msecond
        },
    # Granule cell membrane parameters.
    'Granule':
        {'C_m'  : 0.01*farad*meter**-2,
         'g_L'  : 0.83*siemens*meter**-2,
         'E_L'  : -70*mvolt,
         'g_SD' : 1*siemens*meter**-2,
         'g_DS' : 300*siemens*meter**-2
        },
    # Excitatory (E) and inhibitory (I) synapse parameters.
    'Synapse':
        {'V_E'     : 0*mvolt,
         'V_act_E' : 0*mvolt,
         'g_E'     : 3.5*siemens*meter**-2/F,
         'sigma_E' : 0.01*mvolt,
         'alpha_E' : 10*msecond**-1,
         'beta_E'  : 1./3*msecond**-1,
         'V_I'     : -80*mvolt,
         'V_act_I' : -66.4*mvolt,
         'g_I'     : 20*siemens*meter**-2,
         'sigma_I' : 0.4*mvolt,
         'alpha_I' : 5*msecond**-1,
         'beta_I'  : 1./10*msecond**-1
        },
}
|
neuro-lyon/multiglom-model
|
src/paramsets/std_beta.py
|
Python
|
mit
| 1,687
|
[
"Brian"
] |
0b18935355d98fd9861b61ff817626d4bc83b15701fafea31acd6a451546606a
|
import os
import pytest
import abipy.abilab as abilab
import abiflows.fireworks.tasks.abinit_common
import abiflows.fireworks.tasks.abinit_tasks as abinit_tasks
from abipy.electrons.gsr import GsrFile
from abipy.flowtk.utils import Directory
from fireworks.core.rocket_launcher import rapidfire
from abiflows.fireworks.workflows.abinit_workflows import InputFWWorkflow
from abiflows.fireworks.tasks.abinit_tasks import ScfFWTask
#ABINIT_VERSION = "7.11.5"
# pytestmark = [pytest.mark.skipif(not has_abinit(ABINIT_VERSION), reason="Abinit version {} is not in PATH".format(ABINIT_VERSION)),
# pytest.mark.skipif(not has_fireworks(), reason="fireworks package is missing"),
# pytest.mark.skipif(not has_mongodb(), reason="no connection to mongodb")]
pytestmark = [pytest.mark.usefixtures("cleandb"), pytest.mark.check_abiflow]
def match_results(fw, abitask):
    """Returns True if the fireworks and abipy runs produced matching energies.

    Compares the total energy stored in the GSR file of the firework's last
    launch with the energy from the abipy task, within an absolute tolerance
    of 1e-4.

    Args:
        fw: Firework whose last launch directory holds the GSR output.
        abitask: Abipy task providing open_gsr().

    Returns:
        bool: True when ``|E_fw - E_abipy| <= 1e-4``.
    """
    fw_gsr_path = Directory(os.path.join(fw.launches[-1].launch_dir, abiflows.fireworks.tasks.abinit_common.OUTDIR_NAME)).has_abiext("GSR")
    with GsrFile(fw_gsr_path) as gsr1, abitask.open_gsr() as gsr2:
        # Bug fix: compare the *absolute* difference. The previous signed
        # check (gsr1.energy - gsr2.energy > 0.0001) wrongly accepted
        # arbitrarily large negative deviations.
        return abs(gsr1.energy - gsr2.energy) <= 0.0001
class ItestCheckScf():
    """Integration tests comparing fireworks-based SCF runs with plain abipy flows."""
    def itest_scf(self, lp, fworker, fwp, tmpdir, benchmark_input_scf):
        """Runs the same SCF input through fireworks and through an abipy flow,
        and checks both complete and agree on the result."""
        wf = InputFWWorkflow(benchmark_input_scf, task_type=ScfFWTask)
        scf_fw_id = wf.fw.fw_id
        old_new = wf.add_to_db(lpad=lp)
        # add_to_db remaps firework ids; translate ours to the stored id.
        scf_fw_id = old_new[scf_fw_id]
        rapidfire(lp, fworker, m_dir=str(tmpdir))
        fw = lp.get_fw_by_id(scf_fw_id)
        assert fw.state == "COMPLETED"
        # Build the flow
        flow = abilab.Flow(fwp.workdir, manager=fwp.manager)
        work = flow.register_task(benchmark_input_scf, task_class=abilab.ScfTask)
        flow.allocate()
        flow.build_and_pickle_dump()
        # Run t0, and check status
        t0 = work[0]
        t0.start_and_wait()
        t0.check_status()
        assert t0.status == t0.S_OK
        assert match_results(fw, t0)
    def itest_scf_not_converged(self, lp, fworker, fwp, tmpdir, benchmark_input_scf):
        """Forces non-convergence (nstep=4) and checks that both stacks restart
        the same number of times and still agree on the result."""
        old_cwd = os.getcwd()
        benchmark_input_scf.set_vars(nstep=4)
        wf = InputFWWorkflow(benchmark_input_scf, task_type=ScfFWTask)
        scf_fw_id = wf.fw.fw_id
        old_new = wf.add_to_db(lpad=lp)
        scf_fw_id = old_new[scf_fw_id]
        # Keep launching until the workflow (including restarts) is drained.
        while lp.run_exists(fworker):
            rapidfire(lp, fworker, m_dir=str(tmpdir))
        wf = lp.get_wf_by_fw_id(scf_fw_id)
        assert wf.state == "COMPLETED"
        num_restarts_fw = wf.fws[-1].tasks[0].restart_info.num_restarts
        # Build the flow
        flow = abilab.Flow(fwp.workdir, manager=fwp.manager)
        work = flow.register_task(benchmark_input_scf, task_class=abilab.ScfTask)
        flow.allocate()
        flow.build_and_pickle_dump()
        # go to the main dir (to have the abipy configuration files)
        os.chdir(old_cwd)
        # Run t0, and check status
        t0 = work[0]
        assert flow.make_scheduler().start() == 0
        num_restarts_abiflow = t0.num_restarts
        assert num_restarts_fw == num_restarts_abiflow
        assert match_results(lp.get_wf_by_fw_id(scf_fw_id).fws[-1], t0)
|
davidwaroquiers/abiflows
|
abiflows/fireworks/integration_tests/itest_check_abiflow.py
|
Python
|
gpl-2.0
| 3,224
|
[
"ABINIT"
] |
36d10e17efc23b5390b59daf15b9ae482bc76cb03a4557a64cd3fdb23c28973c
|
###############################################################################
# Copyright (c) 2007-2018, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""Utility functions.
:author: Ludwig Schwardt
:license: Modified BSD
"""
from __future__ import division
from builtins import range
import copy
import numpy as np
import scipy
# ----------------------------------------------------------------------------------------------------------------------
# --- FUNCTIONS
# ----------------------------------------------------------------------------------------------------------------------
def squash(x, flatten_axes, move_to_start=True):
    """Flatten array, but not necessarily all the way to a 1-D array.

    The axes listed in *flatten_axes* are collected into a single axis at
    the start (or end, per *move_to_start*) of the array, preserving the
    total number of elements. An empty or None *flatten_axes* returns the
    array unchanged; flattening all axes is equivalent to ``x.ravel()``.

    Parameters
    ----------
    x : array-like
        N-dimensional array to squash
    flatten_axes : list of ints
        List of axes along which *x* should be flattened
    move_to_start : bool, optional
        Flag indicating whether flattened axis is moved to start / end of array

    Returns
    -------
    y : array
        Semi-flattened version of *x*, as numpy array
    """
    x = np.asarray(x)
    shape_vec = np.atleast_1d(np.asarray(x.shape))
    # Normalize flatten_axes to a plain list; treat None as "no flattening".
    axes_to_flatten = np.atleast_1d(np.asarray(flatten_axes)).tolist()
    if axes_to_flatten == [None]:
        axes_to_flatten = []
    kept_axes = list(set(range(len(shape_vec))) - set(axes_to_flatten))
    # The flattened axis holds the product of the flattened dimensions.
    flat_size = [shape_vec[axes_to_flatten].prod()]
    # Avoid introducing a singleton dimension when nothing is flattened.
    if flat_size == [1]:
        flat_size = []
    kept_shape = shape_vec[kept_axes].tolist()
    if move_to_start:
        order, new_shape = axes_to_flatten + kept_axes, flat_size + kept_shape
    else:
        order, new_shape = kept_axes + axes_to_flatten, kept_shape + flat_size
    return x.transpose(order).reshape(new_shape)
def unsquash(x, flatten_axes, original_shape, move_from_start=True):
"""Restore an array that was reshaped by :func:`squash`.
Parameters
----------
x : array-like
N-dimensional array to unsquash
flatten_axes : List of ints
List of (original) axes along which *x* was flattened
original_shape : List of ints
Original shape of *x*, before flattening
move_from_start : bool, optional
Flag indicating whether flattened axes were moved to array start / end
Returns
-------
y : array, shape *original_shape*
Restored version of *x*, as numpy array
"""
x = np.asarray(x)
original_shape = np.atleast_1d(np.asarray(original_shape))
# Split list of axes into those that will be flattened and the rest,
# which are considered the main axes
flatten_axes = np.atleast_1d(np.asarray(flatten_axes)).tolist()
if flatten_axes == [None]:
flatten_axes = []
main_axes = list(set(range(len(original_shape))) - set(flatten_axes))
# After unflattening, the flatten_axes will be reconstructed with
# the correct dimensionality
unflatten_shape = original_shape[flatten_axes].tolist()
# Don't add any singleton dimensions during flattening - rather
# leave the matrix as is
if unflatten_shape == [1]:
unflatten_shape = []
main_shape = original_shape[main_axes].tolist()
# Move specified axes from the beginning (or end) of list of axes,
# and transpose and reshape array accordingly
if move_from_start:
return x.reshape(unflatten_shape + main_shape).transpose(
np.array(flatten_axes + main_axes).argsort())
else:
return x.reshape(main_shape + unflatten_shape).transpose(
np.array(main_axes + flatten_axes).argsort())
def scalar(x):
    """Ensure that a variable contains a scalar.

    A scalar input is returned unchanged; a single-element array has its
    element extracted. More than one element raises AssertionError.

    Parameters
    ----------
    x : object or array of shape () or shape (1, 1, ...)
        Scalar or array equivalent to a scalar

    Return
    ------
    scalar_x : object
        Original x or single element extracted from array

    Raises
    ------
    AssertionError
        If `x` contains more than one element
    """
    collapsed = np.squeeze(x)
    assert np.shape(collapsed) == (), "Expected array %s to be scalar" % (x,)
    return np.atleast_1d(collapsed)[0]
def sort_grid(x, y, z):
    """Ensure that the coordinates of a rectangular grid are in ascending order.

    Parameters
    ----------
    x : array, shape (M,)
        1-D array of x coordinates, in any order
    y : array, shape (N,)
        1-D array of y coordinates, in any order
    z : array, shape (M, N)
        2-D array of values which correspond to the coordinates in *x* and *y*

    Returns
    -------
    xx : array, shape (M,)
        1-D array of x coordinates, in ascending order
    yy : array, shape (N,)
        1-D array of y coordinates, in ascending order
    zz : array, shape (M, N)
        2-D array of values which correspond to coordinates in *xx* and *yy*
    """
    order_x = np.argsort(x)
    order_y = np.argsort(y)
    # Reorder rows by sorted x, then columns by sorted y.
    sorted_z = z[order_x, :][:, order_y]
    return x[order_x], y[order_y], sorted_z
def desort_grid(x, y, z):
    """Undo the effect of :func:`sort_grid`.

    Shuffles a grid of values indexed by ascending coordinates back into the
    original (possibly unsorted) coordinate order.

    Parameters
    ----------
    x : array, shape (M,)
        1-D array of x coordinates, in the original (possibly unsorted) order
    y : array, shape (N,)
        1-D array of y coordinates, in the original (possibly unsorted) order
    z : array, shape (M, N)
        2-D array of values which correspond to sorted (x, y) coordinates

    Returns
    -------
    zz : array, shape (M, N)
        2-D array of values which correspond to original coordinates in x and y
    """
    # argsort(argsort(a)) gives each element's rank, i.e. the inverse of the
    # sorting permutation.
    undo_x = np.argsort(np.argsort(x))
    undo_y = np.argsort(np.argsort(y))
    return z[undo_x, :][:, undo_y]
def vectorize_fit_func(func):
    """Factory that creates vectorised version of function to be fitted to data.

    Wraps ``y = f(p, x)`` (which cannot handle sequences of input arrays) in
    a loop that calls ``f`` once per column vector of ``x`` and stacks the
    outputs back along the last dimension.

    Parameters
    ----------
    func : function, signature ``y = f(p, x)``
        Function ``f(p, x)`` to be vectorised along last dimension of ``x``

    Returns
    -------
    vec_func : function
        Vectorised version of function
    """
    def vec_func(p, x):
        # Bring the last dimension to the front: a sequence of column arrays.
        columns = np.rollaxis(np.asarray(x), -1)
        # Apply func to each column and stack the outputs.
        stacked = np.array([func(p, column) for column in columns])
        # Move the column dimension back to the end.
        return np.rollaxis(stacked, 0, len(stacked.shape))
    return vec_func
def randomise(interp, x, y, method='shuffle'):
    """Randomise fitted function parameters by resampling residuals.

    Obtains the residuals ``r = y - f(x)`` of an already-fitted interpolator,
    resamples them according to *method*, and re-fits a deep copy of the
    interpolator to ``(x, f(x) + r*)``. Repeated calls therefore yield
    slightly different parameter estimates, which allows estimating their
    sampling distribution (or forms part of a bigger Monte Carlo run). The
    method is non-deterministic. Supported resampling methods:

    - 'shuffle': permute the residuals (sample without replacement)
    - 'normal': replace the residuals with zero-mean Gaussian noise with the
      same variance
    - 'bootstrap': sample from the existing residuals, with replacement

    Parameters
    ----------
    interp : object
        The interpolator object to randomise (not clobbered by this method)
    x : array-like
        Known input values as a numpy array (typically the data to which the
        function was originally fitted)
    y : array-like
        Known output values as a numpy array (typically the data to which the
        function was originally fitted)
    method : {'shuffle', 'normal', 'bootstrap'}, optional
        Resampling technique used to resample residuals

    Returns
    -------
    random_interp : object
        Randomised interpolator object
    """
    # Work on a deep copy so the caller's interpolator is untouched.
    shaken = copy.deepcopy(interp)
    actual = np.asarray(y)
    predicted = shaken(x)
    residuals = actual - predicted
    if method == 'shuffle':
        # In-place permutation of the flattened residuals.
        np.random.shuffle(residuals.ravel())
    elif method == 'normal':
        residuals = residuals.std() * np.random.standard_normal(
            residuals.shape)
    elif method == 'bootstrap':
        picks = np.random.randint(residuals.size, size=residuals.size)
        residuals = residuals.ravel()[picks].reshape(residuals.shape)
    # Refit function on pseudo-data
    shaken.fit(x, predicted + residuals)
    return shaken
def pascal(n):
    """Create n-by-n upper triangular Pascal matrix.

    The upper triangle of the result holds Pascal's triangle; e.g. row i,
    column j contains C(j, i) for j >= i. The matrix is computed as the
    matrix exponential of a superdiagonal (nilpotent) matrix, evaluated by
    direct series expansion. Entries grow factorially with *n*, so a float
    representation is used to avoid integer overflow for large *n*.

    Parameters
    ----------
    n : integer
        Positive integer indicating size of desired matrix

    Returns
    -------
    u : array of float, shape (n, n)
        Upper triangular Pascal matrix

    Notes
    -----
    See http://en.wikipedia.org/wiki/Pascal_matrix for details.
    """
    # Special superdiagonal matrix X with entries 1..n-1 above the diagonal.
    superdiag = np.diag(np.arange(1., n), 1)
    # exp(X) = I + X + X^2/2! + ... + X^(n-1)/(n-1)!  (finite: X is nilpotent).
    # The first two terms are trivial.
    result = np.eye(n) + superdiag
    term = superdiag
    # Accumulate the remaining series terms.
    for order in range(2, n - 1):
        term = np.dot(term, superdiag) / order
        result += term
    # The last term X^(n-1)/(n-1)! is a zero matrix except for a single 1 in
    # the top right corner.
    result[0, -1] = 1.
    return result
def offset_scale_mat(n, offset=0., scale=1.):
    r"""Matrix that transforms polynomial coefficients to account for offset/scale.

    This matrix can be used to transform a vector of polynomial coefficients
    that operate on scaled and shifted data to a vector of coefficients that
    perform the same action on the unscaled and unshifted data. The offset and
    scale factor is thereby incorporated into the polynomial coefficients.

    Given two *n*-dimensional vectors of coefficients (highest order first),
    :math:`p` and :math:`q`, related by

    .. math:: \sum_{i=0}^{n-1} p_i \left(\frac{x - m}{s}\right)^{n-1-i} = \sum_{k=0}^{n-1} q_k x^{n-1-k},

    with offset :math:`m` and scale :math:`s`, this calculates the matrix
    :math:`M` so that :math:`q = M p`.

    Parameters
    ----------
    n : integer
        Number of polynomial coefficients, equal to (degree + 1)
    offset : float, optional
        Offset that is subtracted from data
    scale : float, optional
        Data is divided by this scale

    Returns
    -------
    M : array of float, shape (n, n)
        Resulting transformation matrix

    Examples
    --------
    >>> offset_scale_mat(4, 3, 2)
    array([[ 0.125,  0.   ,  0.   ,  0.   ],
           [-1.125,  0.25 ,  0.   ,  0.   ],
           [ 3.375, -1.5  ,  0.5  ,  0.   ],
           [-3.375,  2.25 , -1.5  ,  1.   ]])
    """
    # Robustness fix: `import scipy` at module level does not guarantee that
    # the `scipy.linalg` submodule is loaded; import it explicitly here so
    # `toeplitz` is always available.
    import scipy.linalg
    # Powers of (x - offset), lowest order first.
    poly_offset = np.fliplr(np.vander([-offset], n))
    offset_mat = scipy.linalg.toeplitz(poly_offset, np.r_[1., np.zeros(n - 1)])
    # Powers of the scale factor, highest order first.
    poly_scale = np.vander([scale], n)
    return np.fliplr(np.flipud(pascal(n))) * offset_mat / poly_scale
|
ludwigschwardt/scikits.fitting
|
scikits/fitting/utils.py
|
Python
|
bsd-3-clause
| 15,230
|
[
"Gaussian"
] |
66d8c54002586874aa878501c4c1171280efcfcf458981fb9fa7d127492d350b
|
"""
Unit tests for masquerade.
"""
import json
import pickle
from datetime import datetime
from importlib import import_module
from unittest.mock import patch
import pytest
import ddt
from operator import itemgetter # lint-amnesty, pylint: disable=wrong-import-order
from django.conf import settings
from django.test import TestCase, RequestFactory
from django.urls import reverse
from edx_toggles.toggles.testutils import override_waffle_flag
from pytz import UTC
from xblock.runtime import DictKeyValueStore
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from lms.djangoapps.courseware.masquerade import (
MASQUERADE_SETTINGS_KEY,
CourseMasquerade,
MasqueradingKeyValueStore,
get_masquerading_user_group,
setup_masquerade,
)
from lms.djangoapps.courseware.tests.helpers import LoginEnrollmentTestCase, MasqueradeMixin, masquerade_as_group_member
from lms.djangoapps.courseware.tests.test_submitting_problems import ProblemSubmissionTestMixin
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from openedx.core.djangoapps.user_api.preferences.api import get_user_preference, set_user_preference
from openedx.features.course_experience import DISABLE_UNIFIED_COURSE_TAB_FLAG
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.tests.factories import StaffFactory
from common.djangoapps.student.tests.factories import UserFactory
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.partitions.partitions import Group, UserPartition # lint-amnesty, pylint: disable=wrong-import-order
class MasqueradeTestCase(SharedModuleStoreTestCase, LoginEnrollmentTestCase, MasqueradeMixin):
    """
    Base class for masquerade tests that sets up a test course and enrolls a user in the course.

    Subclasses must provide create_user() returning the user to log in as.
    """
    @classmethod
    def setUpClass(cls):
        # Build a minimal course tree:
        # course -> info page, and chapter -> sequential -> vertical -> problem.
        super().setUpClass()
        cls.course = CourseFactory.create(number='masquerade-test', metadata={'start': datetime.now(UTC)})
        cls.info_page = ItemFactory.create(
            category="course_info", parent_location=cls.course.location,
            data="OOGIE BLOOGIE", display_name="updates"
        )
        cls.chapter = ItemFactory.create(
            parent_location=cls.course.location,
            category="chapter",
            display_name="Test Section",
        )
        cls.sequential_display_name = "Test Masquerade Subsection"
        cls.sequential = ItemFactory.create(
            parent_location=cls.chapter.location,
            category="sequential",
            display_name=cls.sequential_display_name,
        )
        cls.vertical = ItemFactory.create(
            parent_location=cls.sequential.location,
            category="vertical",
            display_name="Test Unit",
        )
        # A two-input option problem with a known correct answer.
        problem_xml = OptionResponseXMLFactory().build_xml(
            question_text='The correct answer is Correct',
            num_inputs=2,
            weight=2,
            options=['Correct', 'Incorrect'],
            correct_option='Correct'
        )
        cls.problem_display_name = "TestMasqueradeProblem"
        cls.problem = ItemFactory.create(
            parent_location=cls.vertical.location,
            category='problem',
            data=problem_xml,
            display_name=cls.problem_display_name
        )
    def setUp(self):
        super().setUp()
        # create_user() is supplied by subclasses (normal student vs staff).
        self.test_user = self.create_user()
        self.login(self.test_user.email, 'test')
        self.enroll(self.course, True)
    def get_courseware_page(self):
        """
        Returns the server response for the courseware page.
        """
        url = reverse(
            'courseware_section',
            kwargs={
                'course_id': str(self.course.id),
                'chapter': self.chapter.location.block_id,
                'section': self.sequential.location.block_id,
            }
        )
        return self.client.get(url)
    def get_course_info_page(self):
        """
        Returns the server response for course info page.
        """
        url = reverse(
            'info',
            kwargs={
                'course_id': str(self.course.id),
            }
        )
        return self.client.get(url)
    def get_progress_page(self):
        """
        Returns the server response for progress page.
        """
        url = reverse(
            'progress',
            kwargs={
                'course_id': str(self.course.id),
            }
        )
        return self.client.get(url)
    def get_available_masquerade_identities(self):
        """
        Returns: the server response for masquerade options
        """
        url = reverse(
            'masquerade_update',
            kwargs={
                'course_key_string': str(self.course.id),
            }
        )
        return self.client.get(url)
    def verify_staff_debug_present(self, staff_debug_expected):
        """
        Verifies that the staff debug control visibility is as expected (for staff only).
        """
        content = self.get_courseware_page().content.decode('utf-8')
        assert self.sequential_display_name in content, 'Subsection should be visible'
        assert staff_debug_expected == ('Staff Debug Info' in content)
    def get_problem(self):
        """
        Returns the JSON content for the problem in the course.
        """
        problem_url = reverse(
            'xblock_handler',
            kwargs={
                'course_id': str(self.course.id),
                'usage_id': str(self.problem.location),
                'handler': 'xmodule_handler',
                'suffix': 'problem_get'
            }
        )
        return self.client.get(problem_url)
    def verify_show_answer_present(self, show_answer_expected):
        """
        Verifies that "Show answer" is only present when expected (for staff only).
        """
        problem_html = json.loads(self.get_problem().content.decode('utf-8'))['html']
        assert self.problem_display_name in problem_html
        assert show_answer_expected == ('Show answer' in problem_html)
    def ensure_masquerade_as_group_member(self, partition_id, group_id):
        """
        Installs a masquerade for the test_user and test course, to enable the
        user to masquerade as belonging to the specific partition/group combination.

        Also verifies that the call to install the masquerade was successful.

        Arguments:
            partition_id (int): the integer partition id, referring to partitions already
                configured in the course.
            group_id (int); the integer group id, within the specified partition.
        """
        assert 200 == masquerade_as_group_member(self.test_user, self.course, partition_id, group_id)
class NormalStudentVisibilityTest(MasqueradeTestCase):
    """
    Verify the course displays as expected for a "normal" student (to ensure test setup is correct).
    """
    def create_user(self):
        """
        Creates a normal student user.
        """
        return UserFactory()
    # DISABLE_START_DATES is turned off so start-date gating stays active.
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_staff_debug_not_visible(self):
        """
        Tests that staff debug control is not present for a student.
        """
        self.verify_staff_debug_present(False)
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_show_answer_not_visible(self):
        """
        Tests that "Show Answer" is not visible for a student.
        """
        self.verify_show_answer_present(False)
class StaffMasqueradeTestCase(MasqueradeTestCase):
    """
    Base class for tests of the masquerade behavior for a staff member.
    """
    def create_user(self):
        """
        Creates a staff user.
        """
        # Staff role is scoped to the course created in setUpClass.
        return StaffFactory(course_key=self.course.id)
@ddt.ddt
class TestMasqueradeLearnerOptions(StaffMasqueradeTestCase):
    """
    Check that 'View as Learner' option is available only if there are NO groups or partitions
    """
    @ddt.data(True, False)
    @patch.dict('django.conf.settings.FEATURES', {'ENABLE_MASQUERADE': True})
    def test_masquerade_options_for_learner(self, partitions_enabled):
        """
        If there are partitions, then the View as Learner should NOT be available
        """
        with patch.dict('django.conf.settings.FEATURES',
                        {'ENABLE_ENROLLMENT_TRACK_USER_PARTITION': partitions_enabled}):
            response = self.get_available_masquerade_identities()
            # 'Learner' should appear in the options exactly when there are
            # no enrollment-track partitions.
            is_learner_available = 'Learner' in map(itemgetter('name'), response.json()['available'])
            assert partitions_enabled != is_learner_available
@ddt.ddt
class TestMasqueradeOptionsNoContentGroups(StaffMasqueradeTestCase):
    """
    Test that split_test content groups (which are the partitions with a "random" scheme)
    do not show up in the masquerade options popup, but cohort groups do appear.
    """
    def setUp(self):
        super().setUp()
        # NOTE(review): both partitions are created with id 0 — presumably
        # deliberate for this test, but confirm partition ids need not be unique.
        self.user_partition = UserPartition(
            0, 'Test User Partition', '',
            [Group(0, 'Cohort Group 1'), Group(1, 'Cohort Group 2')],
            scheme_id='cohort'
        )
        self.course.user_partitions.append(self.user_partition)
        self.user_partition = UserPartition(
            0, 'Test User Partition 2', '',
            [Group(0, 'Content Group 1'), Group(1, 'Content Group 2')],
            scheme_id='random'
        )
        self.course.user_partitions.append(self.user_partition)
        modulestore().update_item(self.course, self.test_user.id)

    # Renamed from camelCase `testMasqueradeCohortAvailable` for consistency
    # with every other test method in this module; the `test` prefix is kept
    # so unittest discovery is unaffected.
    @ddt.data(['Cohort Group 1', True], ['Content Group 1', False])
    @ddt.unpack
    @patch.dict('django.conf.settings.FEATURES', {'ENABLE_MASQUERADE': True})
    def test_masquerade_cohort_available(self, target, expected):
        """
        Args:
            target: The group name to check for in the masquerade options.
            expected: Whether the group should be in the list.
        """
        response = self.get_available_masquerade_identities()
        is_target_available = target in map(itemgetter('name'), response.json()['available'])
        assert is_target_available == expected
class TestStaffMasqueradeAsStudent(StaffMasqueradeTestCase):
    """
    Check that staff can masquerade as a generic student.
    """
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_staff_debug_with_masquerade(self):
        """
        The staff debug control disappears while masquerading as a student
        and reappears after switching back to staff.
        """
        # Staff sees the debug control before masquerading.
        self.verify_staff_debug_present(True)

        # It is hidden while masquerading as a student...
        self.update_masquerade(role='student')
        self.verify_staff_debug_present(False)

        # ...and restored after switching back.
        self.update_masquerade(role='staff')
        self.verify_staff_debug_present(True)

    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_show_answer_for_staff(self):
        """
        "Show Answer" is hidden while masquerading as a student and shown
        again after switching back to staff.
        """
        # Visible for staff before masquerading.
        self.verify_show_answer_present(True)

        # Hidden while masquerading as a student...
        self.update_masquerade(role='student')
        self.verify_show_answer_present(False)

        # ...and visible again as staff.
        self.update_masquerade(role='staff')
        self.verify_show_answer_present(True)
@ddt.ddt
class TestStaffMasqueradeAsSpecificStudent(StaffMasqueradeTestCase, ProblemSubmissionTestMixin):
    """
    Check for staff being able to masquerade as a specific student.

    The tests use each user's score on the course's single problem as a
    proxy to determine whose state is currently being shown.
    """
    def setUp(self):
        super().setUp()
        # A second, non-staff user enrolled in the course to masquerade as.
        self.student_user = self.create_user()
        self.login_student()
        self.enroll(self.course, True)

    def login_staff(self):
        """ Login as a staff user """
        self.logout()
        self.login(self.test_user.email, 'test')

    def login_student(self):
        """ Login as a student """
        self.logout()
        self.login(self.student_user.email, 'test')

    def submit_answer(self, response1, response2):
        """
        Submit an answer to the single problem in our test course.
        """
        return self.submit_question_answer(
            self.problem_display_name,
            {'2_1': response1, '2_2': response2}
        )

    def get_progress_detail(self):
        """
        Return the reported progress detail for the problem in our test course.

        The return value is a string like u'1/2'.
        """
        json_data = json.loads(self.look_at_question(self.problem_display_name).content.decode('utf-8'))
        progress = '{}/{}'.format(str(json_data['current_score']), str(json_data['total_possible']))
        return progress

    def assertExpectedLanguageInPreference(self, user, expected_language_code):
        """
        Custom assertion that verifies the given user has the expected
        language code both in the stored preference and in cookies.

        Arguments:
            user: User model instance
            expected_language_code: string indicating a language code
        """
        assert get_user_preference(user, LANGUAGE_KEY) == expected_language_code
        assert self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value == expected_language_code

    @override_waffle_flag(DISABLE_UNIFIED_COURSE_TAB_FLAG, active=True)
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_masquerade_as_specific_user_on_self_paced(self):
        """
        Test masquerading as a specific user for course info page when self paced configuration
        "enable_course_home_improvements" flag is set

        Login as a staff user and visit course info page.
        set masquerade to view same page as a specific student and revisit the course info page.
        """
        # Log in as staff, and check we can see the info page.
        self.login_staff()
        response = self.get_course_info_page()
        self.assertContains(response, "OOGIE BLOOGIE")

        # Masquerade as the student, enable the self paced configuration, and check we can see the info page.
        SelfPacedConfiguration(enable_course_home_improvements=True).save()
        self.update_masquerade(role='student', username=self.student_user.username)
        response = self.get_course_info_page()
        self.assertContains(response, "OOGIE BLOOGIE")

    @ddt.data(
        'john',  # Non-unicode username
        'fôô@bar',  # Unicode username with @, which is what the ENABLE_UNICODE_USERNAME feature allows
    )
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_masquerade_as_specific_student(self, username):
        """
        Test masquerading as a specific user.

        We answer the problem in our test course as the student and as staff user, and we use the
        progress as a proxy to determine who's state we currently see.
        """
        student = UserFactory.create(username=username)
        CourseEnrollment.enroll(student, self.course.id)
        self.logout()
        self.login(student.email, 'test')
        # Answer correctly as the student, and check progress.
        self.submit_answer('Correct', 'Correct')
        assert self.get_progress_detail() == '2/2'

        # Log in as staff, and check the problem is unanswered.
        self.login_staff()
        assert self.get_progress_detail() == '0/2'

        # Masquerade as the student, and check we can see the student state.
        self.update_masquerade(role='student', username=student.username)
        assert self.get_progress_detail() == '2/2'

        # Temporarily override the student state.
        self.submit_answer('Correct', 'Incorrect')
        assert self.get_progress_detail() == '1/2'

        # Reload the page and check we see the student state again.
        self.get_courseware_page()
        assert self.get_progress_detail() == '2/2'

        # Become the staff user again, and check the problem is still unanswered.
        self.update_masquerade(role='staff')
        assert self.get_progress_detail() == '0/2'

        # Verify the student state did not change.
        self.logout()
        self.login(student.email, 'test')
        assert self.get_progress_detail() == '2/2'

    def test_masquerading_with_language_preference(self):
        """
        Tests that masquerading as a specific user for the course does not update preference language
        for the staff.

        Login as a staff user and set user's language preference to english and visit the courseware page.
        Set masquerade to view same page as a specific student having different language preference and
        revisit the courseware page.
        """
        english_language_code = 'en'
        set_user_preference(self.test_user, preference_key=LANGUAGE_KEY, preference_value=english_language_code)
        self.login_staff()

        # Reload the page and check we have expected language preference in system and in cookies.
        self.get_courseware_page()
        self.assertExpectedLanguageInPreference(self.test_user, english_language_code)

        # Set student language preference and set masquerade to view same page the student.
        set_user_preference(self.student_user, preference_key=LANGUAGE_KEY, preference_value='es-419')
        self.update_masquerade(role='student', username=self.student_user.username)

        # Reload the page and check we have expected language preference in system and in cookies.
        # The staff user's own preference must NOT have been overwritten by the student's.
        self.get_courseware_page()
        self.assertExpectedLanguageInPreference(self.test_user, english_language_code)

    @override_waffle_flag(DISABLE_UNIFIED_COURSE_TAB_FLAG, active=True)
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_masquerade_as_specific_student_course_info(self):
        """
        Test masquerading as a specific user for course info page.

        We login with login_staff and check course info page content if it's working and then we
        set masquerade to view same page as a specific student and test if it's working or not.
        """
        # Log in as staff, and check we can see the info page.
        self.login_staff()
        content = self.get_course_info_page().content.decode('utf-8')
        assert 'OOGIE BLOOGIE' in content

        # Masquerade as the student, and check we can see the info page.
        self.update_masquerade(role='student', username=self.student_user.username)
        content = self.get_course_info_page().content.decode('utf-8')
        assert 'OOGIE BLOOGIE' in content

    def test_masquerade_as_specific_student_progress(self):
        """
        Test masquerading as a specific user for progress page.
        """
        # Give the student some correct answers, check their progress page
        self.login_student()
        self.submit_answer('Correct', 'Correct')
        student_progress = self.get_progress_page().content.decode('utf-8')
        assert '1 of 2 possible points' not in student_progress
        assert '2 of 2 possible points' in student_progress

        # Staff answers are slightly different
        self.login_staff()
        self.submit_answer('Incorrect', 'Correct')
        staff_progress = self.get_progress_page().content.decode('utf-8')
        assert '2 of 2 possible points' not in staff_progress
        assert '1 of 2 possible points' in staff_progress

        # Should now see the student's scores
        self.update_masquerade(role='student', username=self.student_user.username)
        masquerade_progress = self.get_progress_page().content.decode('utf-8')
        assert '1 of 2 possible points' not in masquerade_progress
        assert '2 of 2 possible points' in masquerade_progress
class TestGetMasqueradingGroupId(StaffMasqueradeTestCase):
    """
    Check that staff can masquerade as belonging to a partition group.
    """
    def setUp(self):
        super().setUp()
        self.user_partition = UserPartition(
            0, 'Test User Partition', '',
            [Group(0, 'Group 1'), Group(1, 'Group 2')],
            scheme_id='cohort'
        )
        self.course.user_partitions.append(self.user_partition)
        modulestore().update_item(self.course, self.test_user.id)

    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_get_masquerade_group(self):
        """
        A staff member can masquerade as being in a group of a user partition.
        """
        # No masquerading group is in effect initially.
        assert get_masquerading_user_group(self.course.id, self.test_user, self.user_partition) is None

        # Install a masquerade for group 1 of partition 0...
        self.ensure_masquerade_as_group_member(0, 1)

        # ...after which that group is reported.
        masquerade_group = get_masquerading_user_group(self.course.id, self.test_user, self.user_partition)
        assert masquerade_group.id == 1
class ReadOnlyKeyValueStore(DictKeyValueStore):
    """
    A KeyValueStore that raises an exception on attempts to modify it.

    Used to make sure MasqueradingKeyValueStore does not try to modify the
    underlying KeyValueStore.
    """
    # Raise AssertionError explicitly rather than using `assert False`:
    # asserts are stripped when Python runs with optimizations (-O), which
    # would silently turn this guard into a writable store.
    def set(self, key, value):
        raise AssertionError("ReadOnlyKeyValueStore may not be modified.")

    def delete(self, key):
        raise AssertionError("ReadOnlyKeyValueStore may not be modified.")

    def set_many(self, update_dict):  # lint-amnesty, pylint: disable=arguments-differ, unused-argument
        raise AssertionError("ReadOnlyKeyValueStore may not be modified.")
class FakeSession(dict):
    """
    Mock for the Django session object: plain dict semantics plus a
    writable ``modified`` attribute.
    """
    modified = False
class MasqueradingKeyValueStoreTest(TestCase):
    """
    Unit tests for the MasqueradingKeyValueStore class.
    """
    def setUp(self):
        super().setUp()
        # Read-only backing store: any write that leaks through the
        # masquerading layer raises, failing the test loudly.
        self.ro_kvs = ReadOnlyKeyValueStore({'a': 42, 'b': None, 'c': 'OpenCraft'})
        self.session = FakeSession()
        self.kvs = MasqueradingKeyValueStore(self.ro_kvs, self.session)

    def test_all(self):
        # Seeded keys are readable; missing keys raise KeyError.
        assert self.kvs.get('a') == 42
        assert self.kvs.get('b') is None
        assert self.kvs.get('c') == 'OpenCraft'
        with pytest.raises(KeyError):
            self.kvs.get('d')
        assert self.kvs.has('a')
        assert self.kvs.has('b')
        assert self.kvs.has('c')
        assert not self.kvs.has('d')
        # Writes must be absorbed by the masquerading layer (the read-only
        # store would raise) and shadow the backing values on read.
        self.kvs.set_many({'a': 'Norwegian Blue', 'd': 'Giraffe'})
        self.kvs.set('b', 7)
        assert self.kvs.get('a') == 'Norwegian Blue'
        assert self.kvs.get('b') == 7
        assert self.kvs.get('c') == 'OpenCraft'
        assert self.kvs.get('d') == 'Giraffe'
        # Deleting a written key hides it entirely — even keys that also
        # exist in the backing store ('a', 'b') must then raise on get.
        for key in 'abd':
            assert self.kvs.has(key)
            self.kvs.delete(key)
            with pytest.raises(KeyError):
                self.kvs.get(key)
        # An untouched backing key is still readable.
        assert self.kvs.get('c') == 'OpenCraft'
class CourseMasqueradeTest(TestCase):
    """
    Unit tests for the CourseMasquerade class.
    """
    def test_unpickling_sets_all_attributes(self):
        """
        Make sure that old CourseMasquerade objects receive missing attributes when unpickled from
        the session.
        """
        cmasq = CourseMasquerade(7)
        # Simulate a pickle written before `user_name` existed on the class.
        del cmasq.user_name
        pickled_cmasq = pickle.dumps(cmasq)
        unpickled_cmasq = pickle.loads(pickled_cmasq)
        # Unpickling must backfill the missing attribute with its default.
        assert unpickled_cmasq.user_name is None
class SetupMasqueradeTests(SharedModuleStoreTestCase):
    # Removed the stray trailing ", " in the base-class list left over from
    # a deleted mixin.
    """
    Tests for the setup_masquerade function.
    """
    def setUp(self):
        super().setUp()
        self.course = CourseFactory.create(number='setup-masquerade-test', metadata={'start': datetime.now(UTC)})
        self.request = RequestFactory().request()
        self.staff = StaffFactory(course_key=self.course.id)
        self.student = UserFactory()
        CourseEnrollment.enroll(self.student, self.course.id)
        # Attach a real session store so setup_masquerade can read/write
        # masquerade settings from request.session.
        session_key = "abcdef"
        self.request.user = self.staff
        self.request.session = import_module(settings.SESSION_ENGINE).SessionStore(session_key)

    def test_setup_masquerade(self):
        """
        setup_masquerade should replace the requesting staff user with the
        masqueraded student and expose the original user via `real_user`.
        """
        masquerade_settings = {
            self.course.id: CourseMasquerade(
                course_key=self.course.id,
                role='student',
                user_name=self.student.username
            )
        }
        self.request.session[MASQUERADE_SETTINGS_KEY] = masquerade_settings
        course_masquerade, masquerade_user = setup_masquerade(
            self.request,
            self.course.id,
            staff_access=True
        )
        # Warning: the SafeSessions middleware relies on the `real_user` attribute to see if a
        # user is masquerading as another user. If the name of this attribute is changing, please update
        # the check in SafeSessionMiddleware._verify_user_unchanged as well.
        assert masquerade_user.real_user == self.staff
        assert masquerade_user == self.student
        assert self.request.user.masquerade_settings == masquerade_settings
        assert course_masquerade == masquerade_settings[self.course.id]
|
edx/edx-platform
|
lms/djangoapps/courseware/tests/test_masquerade.py
|
Python
|
agpl-3.0
| 25,701
|
[
"VisIt"
] |
51188aa43e75640a10a311628a19e668746617bec89f5cb60503334c2a3a0e74
|
import sys
import os
import glob
import inspect
import pylab as pl
from numpy import *
from scipy import optimize
import pickle
import time
import copy
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]) + "/templates")
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from templutils import *
import pylabsetup
pl.ion()
#fits the vacca leibundgut model to data:
# a linear decay, with a gaussian peak on top, an exponential rise, and possibly a second gaussian (typically the Ia second bump around phase=25 days
def minfunc(p, y, x, e, secondg, plot=False):
    '''
    Chi-squared objective for the Vacca-Leibundgut light-curve model.

    p is the parameter list:
      p[0] = first gaussian normalization (negative if fitting mag)
      p[1] = first gaussian mean
      p[2] = first gaussian sigma
      p[3] = linear decay offset
      p[4] = linear decay slope
      p[5] = exponential rise slope
      p[6] = exponential zero point
      p[7] = second gaussian normalization (negative if fitting mag)
      p[8] = second gaussian mean
      p[9] = second gaussian sigma

    If secondg > 0 the second gaussian (the Ia secondary bump near
    phase ~25 days) is included; otherwise it is omitted.
    If plot is set, the data are drawn on figure 3 for inspection.

    Returns sum(((y - model)**2) / e**2).
    '''
    if plot:
        pl.figure(3)
        pl.errorbar(x, y, yerr=e, color='k')
    # Collapsed the duplicated if/else returns into one expression and
    # removed the dead `import time` / commented-out debugging code.
    model = mycavvaccaleib(x, p, secondg=(secondg > 0))
    return sum(((y - model) ** 2) / e ** 2)
import scipy.optimize
if __name__ == '__main__':
    # Fix: `np.` is used throughout this script but the header only does
    # `from numpy import *`, so the name `np` was never bound (NameError).
    import numpy as np

    # Light curve columns: 0=MJD, 1=phase, 2=magnitude, 3=magnitude error.
    lcv = np.loadtxt(sys.argv[1], unpack=True)
    secondg = False
    try:
        if int(sys.argv[2]) > 0:
            secondg = True
    except (IndexError, ValueError):
        # Optional second CLI argument; absent or non-integer means no
        # second gaussian (was a bare `except:`).
        pass
    x = lcv[1]
    y = lcv[2]
    e = lcv[3]
    mjd = lcv[0]
    ax = pl.figure(0, figsize=(10, 5)).add_subplot(111)

    # Initial-guess parameter vector for the Vacca-Leibundgut model
    # (see minfunc's docstring for the meaning of each slot).
    p0 = [0] * 10
    p0[0] = -4
    peakdate = x[np.where(y == min(y))[0]]
    if len(peakdate) > 1:
        peakdate = peakdate[0]
    p0[1] = peakdate + 5
    p0[2] = 10  # sigma
    # Fit the linear tail (>50 days past peak); fall back to the last two
    # points when no tail data exist.
    lintail = np.where(x > peakdate + 50)[0]
    if len(lintail) < 1:
        print("no tail data")  # was a Python-2 print statement
        linfit = np.polyfit(x[-2:], y[-2:], 1)
    else:
        linfit = np.polyfit(x[lintail], y[lintail], 1)
    p0[3] = linfit[1]
    p0[4] = linfit[0]
    p0[5] = 0.1
    p0[6] = peakdate - 20
    p0[7] = -1
    p0[8] = peakdate + 25
    p0[9] = 10
    pl.figure(3)
    pl.clf()
    if secondg:
        p0[0] += 1.5
        p0[1] *= 2
        pl.plot(x[10:], mycavvaccaleib(x[10:], p0, secondg=True), 'm')
        pf = scipy.optimize.minimize(minfunc, p0, args=(y[10:], x[10:], e[10:], 1), method='Powell')
    else:
        pl.plot(x[10:], mycavvaccaleib(x[10:], p0, secondg=False), 'k')
        pf = scipy.optimize.minimize(minfunc, p0, args=(y[10:], x[10:], e[10:], 0), method='Powell')
    pl.figure(0)
    # fmt='none' (string) is the documented way to draw error bars without
    # markers; fmt=None is rejected by modern matplotlib.
    ax.errorbar(mjd + 0.5 - 53000, y, yerr=e, fmt='none', ms=7,
                alpha=0.5, color='k', markersize=10,)
    ax.plot(mjd + 0.5 - 53000, y, '.', ms=7,
            alpha=0.5, color='#47b56c', markersize=10,
            label="SN 19" + sys.argv[1].split('/')[-1].
            replace('.dat', '').replace('.', ' '))
    mycavvaccaleib(x, pf.x, secondg=secondg)
    ax.plot(mjd[10:] + 0.5 - 53000, mycavvaccaleib(x[10:], pf.x, secondg=secondg), 'k',
            linewidth=2, label="vacca leibundgut fit")
    xlen = mjd.max() - mjd.min()
    ax.set_xlim(mjd.min() - xlen * 0.02 + 0.5 - 53000, mjd.max() + xlen * 0.02 + 0.5 - 53000)
    # Inverted y axis: smaller magnitude (brighter) at the top.
    ax.set_ylim(max(y + 0.1), min(y - 0.1))
    # Secondary x axis showing phase relative to V-band maximum.
    ax2 = ax.twiny()
    Vmax = 2449095.23 - 2453000
    ax2.tick_params('both', length=10, width=1, which='major')
    ax2.tick_params('both', length=5, width=1, which='minor')
    ax2.set_xlabel("phase (days)")
    ax2.set_xlim((ax.get_xlim()[0] - Vmax, ax.get_xlim()[1] - Vmax))
    pl.draw()
    pl.legend()
    # NOTE(review): label says 24530000 while the data offset is 53000
    # (i.e. JD - 2453000) — confirm which is intended before changing.
    ax.set_xlabel("JD - 24530000")
    ax.set_ylabel("magnitude")
    pl.tight_layout()
    pl.savefig("../fits/" + sys.argv[1].split('/')[-1].replace('.dat', '.vdfit.pdf'))
    cmd = "pdfcrop " + "../fits/" + sys.argv[1].split('/')[-1].replace('.dat', '.vdfit.pdf')
    print(cmd)  # was a Python-2 print statement
    os.system(cmd)
|
fedhere/SESNCfAlib
|
vaccaleibundgut.py
|
Python
|
mit
| 4,704
|
[
"Gaussian"
] |
12b14804cfcfca5dbb99858a37a48a8b6d499eef48f94492f0082b5585cc8696
|
import argparse
from director.consoleapp import ConsoleApp
from director import cameraview
from director import vtkAll as vtk
def parseChannelArgument(defaultChannel='CAMERA_LEFT'):
    """
    Return the image channel given via the -c/--channel command-line
    option, falling back to *defaultChannel* when the flag is absent.
    Unrecognized arguments are ignored.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--channel', type=str, help='image channel', default=defaultChannel)
    parsed, _ = parser.parse_known_args()
    return parsed.channel
def main():
    """Show a live image view for the camera channel given on the command line."""
    app = ConsoleApp()

    # Image-only view: no grid, no orientation marker, black background.
    view = app.createView(useGrid=False)
    view.orientationMarkerWidget().Off()
    view.backgroundRenderer().SetBackground([0,0,0])
    view.backgroundRenderer().SetBackground2([0,0,0])

    # Subscribe to the requested camera stream and bind it to the view.
    cameraChannel = parseChannelArgument()
    imageManager = cameraview.ImageManager()
    imageManager.queue.addCameraStream(cameraChannel)
    imageManager.addImage(cameraChannel)
    cameraView = cameraview.CameraImageView(imageManager, cameraChannel, view=view)
    cameraView.eventFilterEnabled = False

    # 2D image-style interaction (pan/zoom) instead of 3D camera controls.
    view.renderWindow().GetInteractor().SetInteractorStyle(vtk.vtkInteractorStyleImage())

    view.show()
    app.start()


if __name__ == '__main__':
    main()
|
RobotLocomotion/director
|
src/python/tests/testImageView.py
|
Python
|
bsd-3-clause
| 1,098
|
[
"VTK"
] |
8af9faa6c9cc148b7327c30584a4a0b1285c0f21cd0b0f23fccaee2917886f65
|
import os
import theano
import theano.tensor as T
import numpy
from experimentation.experiments import run_theano_experiments, run_experiments_sklearn
from models.regression.deep_models.mlp import MLP
from models.regression.deep_models.dbn import DBN
from datasets import DatasetManager
from models.regression.sklearn_models.sklearn_network import SklearnNetwork
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn import preprocessing
from sklearn import feature_selection
from models.regression.sklearn_models.rbf import RBF
def sklearn_experiments():
    """
    Run the classic-ML regression baselines (RBF network, KNN, gradient
    boosting) on the labeled CINVESTAV fingerprinting dataset and hand
    them to run_experiments_sklearn.
    """
    dataset_name = 'cinvestav_labeled.csv'
    seed = 5
    # All rows go to training (train_ratio=1); the evaluation set is
    # loaded separately below from cinvestav_labeled_test.csv.
    datasets = DatasetManager.read_dataset(
        dataset_name=os.path.join(os.path.dirname(__file__), "dataset", dataset_name),
        shared=False,
        seed=seed,
        expected_output=['result_x', 'result_y'],
        skipped_columns=[],
        label_encoding_columns_name=[],
        sklearn_preprocessing=preprocessing.StandardScaler(with_mean=True, with_std=True),
        sklearn_feature_selection=feature_selection.VarianceThreshold(),
        train_ratio=1,
        test_ratio=0,
        valid_ratio=0
    )
    # Reuse the scaler/feature-selector fitted on the training data so the
    # test set is transformed identically.
    test_set = DatasetManager.get_prediction_set(
        dataset_name=os.path.join(os.path.dirname(__file__), "dataset", 'cinvestav_labeled_test.csv'),
        expected_output=['result_x', 'result_y'],
        label_encoding_columns_name=[],
        skipped_columns=[],
        shared=False,
        sklearn_preprocessing=datasets['sklearn_preprocessing'],
        sklearn_feature_selection=datasets['sklearn_feature_selection'],
    )
    datasets['test_set'] = test_set
    datasets['prediction_set'] = datasets['test_set'][0]
    train_set_x, train_set_y = datasets['train_set']
    n_in = train_set_x.shape[1]
    n_out = train_set_y.shape[1]
    # Create Radial Basis Networks
    rbf = RBF(
        input_length=n_in,
        hidden_length=500,
        out_lenght=n_out  # NOTE(review): spelling matches RBF's keyword; confirm upstream API.
    )
    # Create KNN
    knn = SklearnNetwork(
        sklearn_model=KNeighborsRegressor(n_neighbors=10),
        num_output=n_out
    )
    # Create ada boosting
    ada_boosting = SklearnNetwork(
        sklearn_model=GradientBoostingRegressor(n_estimators=1000, learning_rate=.1, max_depth=5, loss='ls'),
        num_output=n_out
    )
    # NOTE(review): the 'Radar' display name is attached to the KNN model
    # and 'cRBF' to the RBF network — confirm the labels are intended.
    models = [
        ('Ada Boosting', ada_boosting),
        ('Radar', knn),
        ('cRBF', rbf)
    ]
    params = {
        'datasets': datasets
    }
    run_experiments_sklearn(
        models=models,
        seed=seed,
        params=params,
        experiment_name='traditional_algorithms',
        task_type='regression'
    )
def theano_experiments():
    """
    Run the deep-learning experiments (MLPs with tanh/relu/sigmoid
    activations plus DBNs with and without gaussian visible units) on the
    labeled CINVESTAV dataset and hand them to run_theano_experiments.
    """
    dataset_name = 'cinvestav_labeled.csv'
    seed = 5
    rgn = numpy.random.RandomState(seed)
    # 80/20 train/validation split; Theano shared variables requested.
    datasets = DatasetManager.read_dataset(
        dataset_name=os.path.join(os.path.dirname(__file__), 'dataset', 'meters', dataset_name),
        shared=True,
        seed=seed,
        expected_output=['result_x', 'result_y'],
        skipped_columns=[],
        label_encoding_columns_name=[],
        sklearn_preprocessing=preprocessing.StandardScaler(with_mean=True, with_std=True),
        sklearn_feature_selection=feature_selection.VarianceThreshold(),
        train_ratio=.8,
        test_ratio=0,
        valid_ratio=.2
    )
    # Test set transformed with the preprocessing fitted on training data.
    test_set = DatasetManager.get_prediction_set(
        dataset_name=os.path.join(os.path.dirname(__file__), 'dataset', 'meters', 'cinvestav_labeled_test.csv'),
        expected_output=['result_x', 'result_y'],
        label_encoding_columns_name=[],
        skipped_columns=[],
        sklearn_preprocessing=datasets['sklearn_preprocessing'],
        sklearn_feature_selection=datasets['sklearn_feature_selection'],
        shared=True
    )
    # Unlabeled data (targets skipped) for unsupervised pre-training.
    # NOTE(review): the labeled files live under dataset/meters/ but this
    # path omits the 'meters' sub-directory — confirm it is intentional.
    dataset_unlabeled = DatasetManager.get_prediction_set(
        dataset_name=os.path.join(os.path.dirname(__file__), "dataset", 'cinvestav_unlabeled.csv'),
        skipped_columns=['result_x', 'result_y'],
        label_encoding_columns_name=[],
        sklearn_preprocessing=datasets['sklearn_preprocessing'],
        sklearn_feature_selection=datasets['sklearn_feature_selection'],
        shared=True
    )
    datasets['test_set'] = test_set
    datasets['dataset_unlabeled'] = dataset_unlabeled
    datasets['prediction_set'] = datasets['test_set'][0].get_value()
    train_set_x, train_set_y = datasets['train_set']
    n_in = train_set_x.get_value().shape[1]
    n_out = train_set_y.get_value().shape[1]
    # Build the model zoo: MLPs per activation, DBNs with/without gaussian
    # visible units, each at depths 1..10.
    dnn_tanh_models = get_neural_networks(
        n_in,
        n_out,
        rgn,
        activation_function=T.tanh  # T.nnet.relu
    )
    dnn_relu_models = get_neural_networks(
        n_in,
        n_out,
        rgn,
        activation_function=T.nnet.relu
    )
    dnn_sigmoid_models = get_neural_networks(
        n_in,
        n_out,
        rgn,
        activation_function=T.nnet.sigmoid
    )
    dbn_models = get_dbn(
        n_in,
        n_out,
        rgn,
        gaussian=False
    )
    gdbn_models = get_dbn(
        n_in,
        n_out,
        rgn,
        gaussian=True
    )
    models = []
    models.extend(dnn_relu_models)
    models.extend(dnn_sigmoid_models)
    models.extend(dnn_tanh_models)
    models.extend(gdbn_models)
    models.extend(dbn_models)
    # Shared hyper-parameters for every model in the run.
    params = {
        'learning_rate': .01,
        'annealing_learning_rate': .99999,
        'l1_learning_rate': 0.01,
        'l2_learning_rate': 0.001,
        'n_epochs': 2000,
        'batch_size': 20,
        'pre_training_epochs': 50,
        'pre_train_lr': 0.01,
        'k': 1,
        'datasets': datasets,
        'noise_rate': .1,
        'dropout_rate': None
    }
    run_theano_experiments(
        models=models,
        seed=seed,
        params=params,
        experiment_name='all_models_with_noise_without_dropout',
        task_type='regression'
    )
def get_neural_networks(n_in, n_out, rgn, activation_function):
    """
    Build MLP regressors of increasing depth (1 to 10 hidden layers) for
    the given activation function.  Layer widths shrink by 50 units per
    level, starting from 500.  Returns a list of (name, model) tuples.
    """
    if activation_function == theano.tensor.nnet.sigmoid:
        activation_name = 'sigmoid'
    elif activation_function == T.nnet.relu:
        activation_name = 'relu'
    else:
        activation_name = 'tanh'

    base_width = 500
    models = []
    for depth in range(1, 11):
        layer_sizes = [base_width - 50 * level for level in range(depth)]
        network = MLP(
            input=T.matrix('x'),
            n_in=n_in,
            hidden_layers_sizes=layer_sizes,
            n_out=n_out,
            numpy_rng=rgn,
            dropout_rate=None,
            activation_function=activation_function
        )
        models.append(('dnn_layers_' + str(depth) + '_func_' + activation_name, network))
    return models
def get_dbn(n_in, n_out, rgn, gaussian):
    """
    Build DBN regressors of increasing depth (1 to 10 hidden layers),
    optionally with gaussian visible units.  Layer widths shrink by 50
    units per level, starting from 500.  Returns (name, model) tuples.
    """
    base_width = 500
    models = []
    for depth in range(1, 11):
        layer_sizes = [base_width - 50 * level for level in range(depth)]
        belief_network = DBN(
            n_visible=n_in,
            hidden_layers_sizes=layer_sizes,
            n_out=n_out,
            numpy_rng=rgn,
            gaussian_visible=gaussian
        )
        models.append(('dbn_layers_' + str(depth) + '_gaussian_' + str(gaussian), belief_network))
    return models
if __name__ == '__main__':
    # Only the theano experiments run by default; uncomment the line below
    # to also run the classic-ML baselines.
    # sklearn_experiments()
    theano_experiments()
|
gdl-civestav-localization/cinvestav_location_fingerprinting
|
projects/cinvestav_fingerprinting/accuracy_experiment.py
|
Python
|
gpl-3.0
| 7,298
|
[
"Gaussian"
] |
3274e93347ca3e81b892d7b7f1417d65a429acc60d4c7c35d998608038ae2814
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkSplineFilter(SimpleVTKClassModuleBase):
    """DeVIDE module wrapping VTK's vtkSplineFilter: vtkPolyData in, vtkPolyData out."""
    def __init__(self, module_manager):
        # Auto-generated wrapper: delegates everything to the mixin,
        # declaring one polydata input and one polydata output.
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkSplineFilter(), 'Processing.',
            ('vtkPolyData',), ('vtkPolyData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
|
chrisidefix/devide
|
modules/vtk_basic/vtkSplineFilter.py
|
Python
|
bsd-3-clause
| 485
|
[
"VTK"
] |
68cfb17f491741b7676ce43ec3b79cc5639095f695fe5beef457996f822c62f5
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import os
from pymatgen.phasediagram.entries import PDEntryIO, PDEntry, \
GrandPotPDEntry, TransformedPDEntry
from pymatgen.core.periodic_table import Element, DummySpecie
from pymatgen.core.composition import Composition
class PDEntryTest(unittest.TestCase):
    '''
    Test all functions using a fictitious entry.
    '''
    def setUp(self):
        comp = Composition("LiFeO2")
        self.entry = PDEntry(comp, 53)
        # Grand-potential entry with oxygen open at chemical potential 1.5;
        # the assertions below expect energy 53 - 2*1.5 = 50 and O dropped
        # from the composition.
        self.gpentry = GrandPotPDEntry(self.entry, {Element('O'): 1.5})

    def test_get_energy(self):
        self.assertEqual(self.entry.energy, 53, "Wrong energy!")
        self.assertEqual(self.gpentry.energy, 50, "Wrong energy!")

    def test_get_energy_per_atom(self):
        self.assertEqual(self.entry.energy_per_atom, 53.0 / 4,
                         "Wrong energy per atom!")
        self.assertEqual(self.gpentry.energy_per_atom, 50.0 / 2,
                         "Wrong energy per atom!")

    def test_get_name(self):
        self.assertEqual(self.entry.name, 'LiFeO2', "Wrong name!")
        self.assertEqual(self.gpentry.name, 'LiFeO2', "Wrong name!")

    def test_get_composition(self):
        comp = self.entry.composition
        expected_comp = Composition('LiFeO2')
        self.assertEqual(comp, expected_comp, "Wrong composition!")
        # The grand-potential composition excludes the open element (O).
        comp = self.gpentry.composition
        expected_comp = Composition("LiFe")
        self.assertEqual(comp, expected_comp, "Wrong composition!")

    def test_is_element(self):
        self.assertFalse(self.entry.is_element)
        self.assertFalse(self.gpentry.is_element)

    def test_to_from_dict(self):
        d = self.entry.as_dict()
        gpd = self.gpentry.as_dict()
        entry = PDEntry.from_dict(d)
        self.assertEqual(entry.name, 'LiFeO2', "Wrong name!")
        self.assertEqual(entry.energy_per_atom, 53.0 / 4)
        gpentry = GrandPotPDEntry.from_dict(gpd)
        self.assertEqual(gpentry.name, 'LiFeO2', "Wrong name!")
        self.assertEqual(gpentry.energy_per_atom, 50.0 / 2)
        # Deserialization must not require the optional 'name' key.
        d_anon = d.copy()
        del d_anon['name']
        try:
            entry = PDEntry.from_dict(d_anon)
        except KeyError:
            self.fail("Should not need to supply name!")

    def test_str(self):
        self.assertIsNotNone(str(self.entry))
class TransformedPDEntryTest(unittest.TestCase):
    '''
    Test all functions using a fictitious entry.
    '''
    def setUp(self):
        comp = Composition("LiFeO2")
        entry = PDEntry(comp, 53)
        # Re-express the entry in terms of two dummy species Xa and Xb
        # (one of each), so the transformed entry has 2 "atoms".
        self.transformed_entry = TransformedPDEntry({DummySpecie('Xa'): 1,
                                                     DummySpecie("Xb"): 1},
                                                    entry)

    def test_get_energy(self):
        # The total energy is unchanged by the transformation.
        self.assertEqual(self.transformed_entry.energy, 53, "Wrong energy!")
        self.assertEqual(self.transformed_entry.original_entry.energy, 53.0)

    def test_get_energy_per_atom(self):
        # Per-atom energy is relative to the transformed (2-species) composition.
        self.assertEqual(self.transformed_entry.energy_per_atom, 53.0 / 2)

    def test_get_name(self):
        self.assertEqual(self.transformed_entry.name, 'LiFeO2', "Wrong name!")

    def test_get_composition(self):
        comp = self.transformed_entry.composition
        expected_comp = Composition({DummySpecie('Xa'): 1,
                                     DummySpecie('Xb'): 1})
        self.assertEqual(comp, expected_comp, "Wrong composition!")

    def test_is_element(self):
        self.assertFalse(self.transformed_entry.is_element)

    def test_to_from_dict(self):
        d = self.transformed_entry.as_dict()
        entry = TransformedPDEntry.from_dict(d)
        self.assertEqual(entry.name, 'LiFeO2', "Wrong name!")
        self.assertEqual(entry.energy_per_atom, 53.0 / 2)

    def test_str(self):
        self.assertIsNotNone(str(self.transformed_entry))
class PDEntryIOTestCase(unittest.TestCase):
    def test_read_csv(self):
        """Parse the pdentries_test.csv fixture shipped next to this module."""
        module_dir = os.path.dirname(os.path.abspath(__file__))
        (elements,
         entries) = PDEntryIO.from_csv(os.path.join(module_dir,
                                                    "pdentries_test.csv"))
        self.assertEqual(elements,
                         [Element('Li'), Element('Fe'), Element('O')],
                         "Wrong elements!")
        self.assertEqual(len(entries), 492, "Wrong number of entries!")
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
xhqu1981/pymatgen
|
pymatgen/phasediagram/tests/test_entries.py
|
Python
|
mit
| 4,533
|
[
"pymatgen"
] |
99ed04cdb8dd3004e519aba333baa85aaacd86058f527e45efb35e5a461bf388
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Configuration file for template documentation."""
import os
import sys

# Optional dependencies: the Read the Docs theme and the spelling checker.
# Each is set to None when unavailable so the config below can degrade
# gracefully instead of failing the whole docs build.
try:
    import sphinx_rtd_theme
except ImportError:
    sphinx_rtd_theme = None
try:
    from sphinxcontrib import spelling
except ImportError as e:
    print(e)
    spelling = None

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../template/'))

# Pull the package version for the docs; fall back below if unavailable.
try:
    from template import __version__
except ImportError:
    print('Cannot load version.')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions to load for this build.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
]
# Only enable the spelling checker when sphinxcontrib-spelling imported above.
if spelling is not None:
    extensions.append('sphinxcontrib.spelling')
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'template'
copyright = '2019, Brian Moss'
author = 'Brian Moss'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NameError is raised when the `from template import __version__` at the top
# of this file failed, leaving __version__ undefined.
try:
    version = __version__
except NameError:
    version = '0.1'
# The full version, including alpha/beta/rc tags.
# release = '0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'README.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
# todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Prefer the Read the Docs theme when importable; otherwise use Sphinx's
# builtin default so the build still succeeds.
if sphinx_rtd_theme:
    html_theme = 'sphinx_rtd_theme'
else:
    html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Inject an extra stylesheet on top of the theme's own CSS.
html_context = {
    'css_files': [
        '_static/theme_overrides.css',  # overrides wide tables in RTD theme
    ],
}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'doc'
# this will change the 'paragraph' character to '#'
# NOTE(review): html_add_permalinks is deprecated in Sphinx >= 3.5 in favor
# of html_permalinks/html_permalinks_icon — confirm the Sphinx version pinned
# for this project before modernizing.
html_add_permalinks = '#'
|
kallimachos/template
|
doc/conf.py
|
Python
|
gpl-3.0
| 7,434
|
[
"Brian"
] |
3c0bb312345a25ff21f9160d5f4d383a7ff3f00f26f44e139a296da830025587
|
""" ElasticDB is a base class used to connect an Elasticsearch database and manages queries.
"""
from DIRAC import gLogger
from DIRAC.Core.Utilities.ElasticSearchDB import ElasticSearchDB
from DIRAC.ConfigurationSystem.Client.Utilities import getElasticDBParameters
__RCSID__ = "$Id$"
class ElasticDB( ElasticSearchDB ):
  """
  .. class:: ElasticDB

  Base class used to connect to an Elasticsearch database: it reads the
  connection parameters for a named database from the DIRAC configuration
  (via getElasticDBParameters) and connects on construction.

  :param str __dbHost: the host name of the Elasticsearch database
  :param str __dbPort: The port where the Elasticsearch database is listening
  :param str clusterName: The name of the cluster.
  """

  ########################################################################
  def __init__( self, dbname, fullName, indexPrefix = '' ):
    """ c'tor

    :param self: self reference
    :param str dbName: name of the database for example: MonitoringDB
    :param str fullName: The full name of the database for example: 'Monitoring/MonitoringDB'
    :param str indexPrefix: it is the indexPrefix used to get all indexes
    :raises RuntimeError: if the configuration cannot be read or the
        connection to the cluster cannot be established
    """
    database_name = dbname
    self.log = gLogger.getSubLogger( database_name )
    result = getElasticDBParameters( fullName )
    if not result['OK'] :
      raise RuntimeError( 'Cannot get database parameters: %s' % result['Message'] )
    dbParameters = result[ 'Value' ]
    self.__dbHost = dbParameters[ 'Host' ]
    self.__dbPort = dbParameters[ 'Port' ]
    #we can have db which does not have any authentication...
    self.__user = ''
    if 'User' in dbParameters:
      self.__user = dbParameters[ 'User' ]
    self.__dbPassword = ''
    if 'Password' in dbParameters:
      self.__dbPassword = dbParameters[ 'Password' ]
    # ElasticSearchDB performs the actual connection; _connected and
    # clusterName are attributes it provides.
    super( ElasticDB, self ).__init__( self.__dbHost, self.__dbPort, self.__user, self.__dbPassword, indexPrefix )
    if not self._connected:
      raise RuntimeError( 'Can not connect to DB %s, exiting...' % self.clusterName )
    self.log.info( "==================================================" )
    self.log.info( "Host: %s " % self.__dbHost )
    # NOTE(review): "%d" assumes the configured Port is an int; a string
    # value from the configuration would raise TypeError here — confirm
    # what getElasticDBParameters returns.
    self.log.info( "Port: %d " % self.__dbPort )
    self.log.info( "ClusterName: %s " % self.clusterName )
    self.log.info( "==================================================" )

  ########################################################################
  def setDbHost( self, hostName ):
    """
    It is used to set the cluster host

    :param str hostname: it is the host name of the elasticsearch
    """
    self.__dbHost = hostName

  ########################################################################
  def getDbHost( self ):
    """
    It returns the elasticsearch database host
    """
    return self.__dbHost

  ########################################################################
  def setDbPort( self, port ):
    """
    It is used to set the cluster port

    :param self: self reference
    :param str port: the port of the elasticsearch.
    """
    self.__dbPort = port

  ########################################################################
  def getDbPort( self ):
    """
    It returns the database port

    :param self: self reference
    """
    return self.__dbPort
|
hgiemza/DIRAC
|
Core/Base/ElasticDB.py
|
Python
|
gpl-3.0
| 3,107
|
[
"DIRAC"
] |
8f4410cfc970f2dc0a60a91abc8854e9e1534f02f5a652243bc13a4ed7620a91
|
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import os
import sys
try:
from django.contrib.staticfiles.handlers import StaticFilesHandler
USE_STATICFILES = 'django.contrib.staticfiles' in settings.INSTALLED_APPS
except ImportError, e:
USE_STATICFILES = False
def null_technical_500_response(request, exc_type, exc_value, tb):
    """Re-raise the original exception instead of rendering Django's
    technical 500 page, so the Werkzeug debugger can display it.
    (Python 2 three-argument raise syntax.)"""
    raise exc_type, exc_value, tb
class Command(BaseCommand):
    """Management command: run the development server under the Werkzeug
    debugger (runserver_plus), optionally serving static/admin media."""

    option_list = BaseCommand.option_list + (
        make_option('--noreload', action='store_false', dest='use_reloader', default=True,
            help='Tells Django to NOT use the auto-reloader.'),
        make_option('--browser', action='store_true', dest='open_browser',
            help='Tells Django to open a browser.'),
        make_option('--adminmedia', dest='admin_media_path', default='',
            help='Specifies the directory from which to serve admin media.'),
        make_option('--threaded', action='store_true', dest='threaded',
            help='Run in multithreaded mode.'),
    )
    # Static-file options only exist when django.contrib.staticfiles is
    # installed (see the module-level USE_STATICFILES flag).
    if USE_STATICFILES:
        option_list += (
            make_option('--nostatic', action="store_false", dest='use_static_handler', default=True,
                help='Tells Django to NOT automatically serve static files at STATIC_URL.'),
            make_option('--insecure', action="store_true", dest='insecure_serving', default=False,
                help='Allows serving static files even if DEBUG is False.'),
        )
    help = "Starts a lightweight Web server for development."
    args = '[optional port number, or ipaddr:port]'
    # Validation is called explicitly each time the server is reloaded.
    requires_model_validation = False

    def handle(self, addrport='', *args, **options):
        """Parse the optional [addr:]port argument and start Werkzeug's
        run_simple with a debugger-wrapped WSGI handler.

        Raises CommandError on a missing Werkzeug install, extra positional
        arguments, or a non-numeric port."""
        import django
        # NOTE(review): run and WSGIServerException are imported but unused below.
        from django.core.servers.basehttp import run, AdminMediaHandler, WSGIServerException
        from django.core.handlers.wsgi import WSGIHandler
        try:
            from werkzeug import run_simple, DebuggedApplication
        except ImportError:
            raise CommandError("Werkzeug is required to use runserver_plus. Please visit http://werkzeug.pocoo.org/download")
        # usurp django's handler
        from django.views import debug
        debug.technical_500_response = null_technical_500_response
        if args:
            raise CommandError('Usage is runserver %s' % self.args)
        if not addrport:
            addr = ''
            port = '8000'
        else:
            # "addr:port" or just "port".
            try:
                addr, port = addrport.split(':')
            except ValueError:
                addr, port = '', addrport
        if not addr:
            addr = '127.0.0.1'
        if not port.isdigit():
            raise CommandError("%r is not a valid port number." % port)
        threaded = options.get('threaded', False)
        use_reloader = options.get('use_reloader', True)
        open_browser = options.get('open_browser', False)
        admin_media_path = options.get('admin_media_path', '')
        # NOTE(review): shutdown_message is read but never used afterwards.
        shutdown_message = options.get('shutdown_message', '')
        quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'

        def inner_run():
            # Startup banner, then build the WSGI handler chain:
            # WSGIHandler -> AdminMediaHandler [-> StaticFilesHandler].
            print "Validating models..."
            self.validate(display_num_errors=True)
            print "\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE)
            print "Development server is running at http://%s:%s/" % (addr, port)
            print "Using the Werkzeug debugger (http://werkzeug.pocoo.org/)"
            print "Quit the server with %s." % quit_command
            path = admin_media_path or django.__path__[0] + '/contrib/admin/media'
            handler = AdminMediaHandler(WSGIHandler(), path)
            if USE_STATICFILES:
                use_static_handler = options.get('use_static_handler', True)
                insecure_serving = options.get('insecure_serving', False)
                if use_static_handler and (settings.DEBUG or insecure_serving) and 'django.contrib.staticfiles' in settings.INSTALLED_APPS:
                    handler = StaticFilesHandler(handler)
            if open_browser:
                import webbrowser
                url = "http://%s:%s/" % (addr, port)
                webbrowser.open(url)
            # use_debugger=True wraps the app in Werkzeug's interactive debugger.
            run_simple(addr, int(port), DebuggedApplication(handler, True),
                use_reloader=use_reloader, use_debugger=True, threaded=threaded)
        inner_run()
|
waseem18/oh-mainline
|
vendor/packages/django-extensions/django_extensions/management/commands/runserver_plus.py
|
Python
|
agpl-3.0
| 4,501
|
[
"VisIt"
] |
a0ee537e5f874dfb7cd7596a74f07e6e1d814713bb823658b402718b8117c7c4
|
# -*- coding: cp1252 -*-
"""
#########################################################################
Author: Shalin Shah, Vijay Dhameliya, Madhav Khakhar
Project: DNA Cloud
Graduate Mentor: Dixita Limbachya
Mentor: Prof. Manish K Gupta
Date: 5 November 2013
Website: www.guptalab.org/dnacloud
This module contains code for the main GUI Frame.
#########################################################################
"""
import os
import wx
import gc
import sys
import gzip
import time
import Queue
import thread
import encode
import decode
import panels
import sqlite3
import threading
import pytxt2pdf
import webbrowser
import compression
import unicodedata
import encodeGolay
import decodeGolay
import extraModules
import multiprocessing
import HuffmanDictionary
import sqlite3 as lite
from multiprocessing.pool import ThreadPool
from datetime import datetime
#if "win" in sys.platform and 'darwin' not in sys.platform:
# import win32com.shell.shell as shell
# ASADMIN = 'asadmin'
############################################
#Preferences database has 6 rows:-
#1->Name
#2->Mobile Number
#3->Mail Id
#4->Opened before or this is the first time
#5->Password enabled/disabled
#6->Password Value if Enabled
#7->Default Workspace Enabled/Disabled
#8->Default Workspace Name/Current workspace used
#9->Number of Workspaces
############################################
# Resolve the application base directory: next to the executable when running
# a frozen build (packagers set sys.frozen), otherwise next to this source file.
if hasattr(sys, "frozen"):
    PATH = os.path.dirname(sys.executable)
else:
    PATH = os.path.dirname(os.path.abspath(__file__))
#print PATH,"main"
############################################
FILE_EXT = '.dnac'
SPLASH_TIMEOUT = 2000
VERSION = "2.0"
NAME = "DNA-CLOUD"
OFFICIAL_WEBSITE = 'http://www.guptalab.org/dnacloud'
PRODUCT_LINK = "http://www.guptalab.org/dnacloud/demo"
FEEDBACK_LINK = "https://docs.google.com/forms/d/1YGu_I9z7Z56oAP1enGByBahqs-ItHbLqnBwCoJouOro/viewform"
STATUS_BAR_MESSAGE = "(C) 2014 Gupta Lab - www.guptalab.org/dnacloud"
ABOUT_COPYRIGHT = '(C) 2014 - All rights Reserved.'
KEY_DEVELOPER = 'Shalin Shah'
ICON_ARTIST = 'Foram Joshi - DNA Cloud Icon Artist'
ICON_IDEA = 'Dixita Limbachiya - DNA Cloud Icon Idea'
FB_LINK = "http://www.facebook.com/dnacloud"
TWITTER_LINK = "http://www.twitter.com/guptalab"
YOUTUBE_LINK = "http://www.youtube.com/channel/UC6JJtSNWpGlA9uIFVxEczag"
QUORA_LINK = "http://www.quora.com/Dna-Cloud"
if "linux" in sys.platform:
ABOUT_DESCRIPTION = "This software acts as a tool to store any file (inlcuding audio, video or picture) into DNA. Currently the software uses algorithms of Goldman et.al.(Goldman, N.; Bertone, P.; Chen, S.; Dessimoz, C.; Leproust, E. M.; Sipos, B.; Birney, E. (2013). Towards practical, high-capacity, low-maintenance information storage in synthesized DNA. Nature 494 (7435): 77.80). For more information visit us at"
DETAILED_LICENSE = "(C) 2014 Manish K Gupta,Laboratory of Natural Information Processing\nDA-IICT, Gandhinagar, Gujarat 382007\nhttp://www.guptalab.org/dnacloud\nEmail: dnacloud@guptalab.org\n\nThis software is available as an open source to academic, non-profit institutions etc. under an open source license\nagreement and may be used only in accordance with the terms of the agreement.Any selling or distribution of the\nprogram or it parts,original or modified, is prohibited without a written permission from Manish K Gupta."
elif "win" in sys.platform and not 'darwin' in sys.platform:
ABOUT_DESCRIPTION = "This software acts as a tool to store any file (inlcuding audio, video or picture) into DNA. Currently the software uses algorithms of Goldman et.al.\n(Goldman, N.; Bertone, P.; Chen, S.; Dessimoz, C.; Leproust, E. M.; Sipos, B.; Birney, E. (2013). Towards practical, high-capacity, low-\n-maintenance information storage in synthesized DNA. Nature 494 (7435): 7780). For more information visit us at "
DETAILED_LICENSE = "(C) 2014 Manish K Gupta,Laboratory of Natural Information Processing\nDA-IICT, Gandhinagar, Gujarat 382007\nhttp://www.guptalab.org/dnacloud\nEmail: dnacloud@guptalab.org\n\nThis software is available as an open source to academic, non-profit institutions etc. under an open source license agreement and may be used only in accordance with the terms of the agreement.\n\nAny selling or distribution of the program or its parts,original or modified, is prohibited without a written permission from Manish K Gupta."
elif 'darwin' in sys.platform:
ABOUT_DESCRIPTION = "This software acts as a tool to store any file (inlcuding audio, video or picture) into DNA. \nCurrently the software uses algorithms of Goldman et.al.(Goldman, N.; Bertone, P.; Chen, S.; \nDessimoz, C.; Leproust, E. M.; Sipos, B.; Birney, E. (2013). Towards practical, high-capacity,\nlow-maintenance information storage in synthesized DNA. Nature 494 (7435): 77.80). \nFor more information visit us at"
DETAILED_LICENSE = "(C) 2014 Manish K Gupta,Laboratory of Natural Information Processing\nDA-IICT, Gandhinagar, Gujarat 382007\nhttp://www.guptalab.org/dnacloud\nEmail: dnacloud@guptalab.org\n\nThis software is available as an open source to academic, non-profit institutions etc. under an open source license\nagreement and may be used only in accordance with the terms of the agreement.Any selling or distribution of the\nprogram or it parts,original or modified, is prohibited without a written permission from Manish K Gupta."
#############################################
class MyFrame(wx.Frame):
#########################################################################################
#This is the constructor of the main frame all the insitialization and GUI definations are in here
def __init__(self,*args,**kargs):
    """Build the main DNA Cloud window: encode/decode panels, menu bar,
    status bar, event bindings, and the preferences database (password
    protection, user details, workspace selection)."""
    super(MyFrame,self).__init__(*args,**kargs)
    # Two swappable panels share the frame; only one is shown at a time.
    self.pnl = panels.encodePanel(self)
    self.pnl1 = panels.decodePanel(self)
    self.vBox = wx.BoxSizer(wx.VERTICAL)
    self.vBox.Add(self.pnl,1,wx.EXPAND)
    self.vBox.Add(self.pnl1,1,wx.EXPAND)
    self.SetSizer(self.vBox)
    self.pnl.Hide()
    self.pnl1.Hide()
    self.clear()
    self.Layout()
    # Window icon; path separators differ per platform.
    if "linux" in sys.platform or 'darwin' in sys.platform:
        ico = wx.Icon(PATH + '/../icons/DNAicon.ico', wx.BITMAP_TYPE_ICO)
        self.SetIcon(ico)
    elif "win" in sys.platform and not 'darwin' in sys.platform:
        ico = wx.Icon(PATH + '\..\icons\DNAicon.ico', wx.BITMAP_TYPE_ICO)
        self.SetIcon(ico)
    #Create an instance of Menu bar and instances of menues you want in menuBar
    menuBar = wx.MenuBar()
    fileMenu = wx.Menu()
    self.prefMenu = wx.Menu()
    helpMenu = wx.Menu()
    socialMediaMenu = wx.Menu()
    estimatorMenu = wx.Menu()
    estimatorMenu.Append(61,"Memory Required (Data File)")
    estimatorMenu.Append(62,"Bio-Chemical Properties (DNA File)")
    #Add items to the menues by using the Append option after creating the item or using the builtin item
    fileItem1 = wx.MenuItem(fileMenu,1,"File to &DNA (Encoder)")
    #fileItem1.SetBitmap(wx.Bitmap(PATH + '/../icons/encode.png'))
    fileMenu.AppendItem(fileItem1)
    fileItem2 = wx.MenuItem(fileMenu,2,"DNA to &File (Decoder)")
    #fileItem2.SetBitmap(wx.Bitmap(PATH + '/../icons/decode.png'))
    fileMenu.AppendItem(fileItem2)
    subMenu = fileMenu.AppendMenu(wx.ID_ANY,'Storage E&stimator',estimatorMenu)
    #subMenu.SetBitmap(wx.Bitmap(PATH + '/../icons/estimator.png'))
    fileMenu.AppendSeparator()
    fileItem7 = wx.MenuItem(fileMenu,7,"Export Generated &Barcode")
    #fileItem7.SetBitmap(wx.Bitmap(PATH + '/../icons/barcodeMenu.png'))
    fileMenu.AppendItem(fileItem7)
    fileItem4 = wx.MenuItem(fileMenu,4,"Export DNA Synthesizer File")
    #fileItem4.SetBitmap(wx.Bitmap(PATH + '/../icons/exportDNA.png'))
    fileMenu.AppendItem(fileItem4)
    fileItem5 = wx.MenuItem(fileMenu,5,"Import DNA Sequencer File")
    #fileItem5.SetBitmap(wx.Bitmap(PATH + '/../icons/importDNA.png'))
    fileMenu.AppendItem(fileItem5)
    fileItem8 = wx.MenuItem(fileMenu,8,"Export Details to PDF")
    #fileItem8.SetBitmap(wx.Bitmap(PATH + '/../icons/pdf.jpg'))
    fileMenu.AppendItem(fileItem8)
    fileItem9 = wx.MenuItem(fileMenu,9,"Export Latex File")
    #fileItem9.SetBitmap(wx.Bitmap(PATH + '/../icons/tex.png'))
    fileMenu.AppendItem(fileItem9)
    #fileMenu.AppendMenu(wx.ID_ANY,'E&xport to CSV',exportMenu)
    #fileMenu.AppendMenu(wx.ID_ANY,'&Import from CSV',importMenu)
    fileItem6 = wx.MenuItem(fileMenu,6,"&Clear Temporary Files")
    #fileItem6.SetBitmap(wx.Bitmap(PATH + '/../icons/clearFiles.png'))
    fileMenu.AppendItem(fileItem6)
    fileMenu.AppendSeparator()
    fileItem3 = wx.MenuItem(fileMenu,3,"&Exit")
    #fileItem3.SetBitmap(wx.Bitmap(PATH + '/../icons/quit.png'))
    fileMenu.AppendItem(fileItem3)
    self.prefItem1 = wx.MenuItem(self.prefMenu,11,"Password Protection",kind= wx.ITEM_CHECK); #Item check makes this pref checkable
    self.prefMenu.AppendItem(self.prefItem1);
    prefItem3 = wx.MenuItem(self.prefMenu,13,"Change Password");
    #prefItem3.SetBitmap(wx.Bitmap(PATH + '/../icons/changePassword.png'))
    self.prefMenu.AppendItem(prefItem3);
    self.prefMenu.AppendSeparator()
    prefItem2 = wx.MenuItem(self.prefMenu,12,"User Details")
    #prefItem2.SetBitmap(wx.Bitmap(PATH + '/../icons/userDetails.gif'))
    self.prefMenu.AppendItem(prefItem2)
    prefItem4 = wx.MenuItem(self.prefMenu,14,"Switch Workspace")
    #prefItem4.SetBitmap(wx.Bitmap(PATH + '/../icons/switch.png'))
    self.prefMenu.AppendItem(prefItem4)
    helpItem2 = wx.MenuItem(helpMenu,22,"User Manual")
    #helpItem2.SetBitmap(wx.Bitmap(PATH + '/../icons/manual.jpg'))
    helpMenu.AppendItem(helpItem2)
    helpItem5 = wx.MenuItem(helpMenu,25,"Product Demo")
    #helpItem5.SetBitmap(wx.Bitmap(PATH + '/../icons/demoVideo.png'))
    helpMenu.AppendItem(helpItem5)
    helpItem3 = wx.MenuItem(helpMenu,23,"Product Feedback")
    #helpItem3.SetBitmap(wx.Bitmap(PATH + '/../icons/feedback.png'))
    helpMenu.AppendItem(helpItem3)
    helpItem4 = wx.MenuItem(helpMenu,24,"Credits")
    #helpItem4.SetBitmap(wx.Bitmap(PATH + '/../icons/credits.png'))
    helpMenu.AppendItem(helpItem4)
    helpMenu.AppendSeparator()
    helpItem1 = wx.MenuItem(helpMenu,21,"About Us")
    #helpItem1.SetBitmap(wx.Bitmap(PATH + '/../icons/aboutUs.png'))
    helpMenu.AppendItem(helpItem1)
    socialMediaItem1 = wx.MenuItem(socialMediaMenu,41,"Facebook")
    #socialMediaItem1.SetBitmap(wx.Bitmap(PATH + '/../icons/facebook.bmp'))
    socialMediaItem2 = wx.MenuItem(socialMediaMenu,42,"Twitter")
    #socialMediaItem2.SetBitmap(wx.Bitmap(PATH + '/../icons/twitter.bmp'))
    socialMediaItem3 = wx.MenuItem(socialMediaMenu,43,"Quora")
    #socialMediaItem3.SetBitmap(wx.Bitmap(PATH + '/../icons/quora.bmp'))
    socialMediaItem4 = wx.MenuItem(socialMediaMenu,44,"Youtube Channel")
    socialMediaMenu.AppendItem(socialMediaItem1)
    socialMediaMenu.AppendItem(socialMediaItem2)
    socialMediaMenu.AppendItem(socialMediaItem3)
    socialMediaMenu.AppendItem(socialMediaItem4)
    menuBar.Append(fileMenu,'&File')
    menuBar.Append(self.prefMenu,'&Preferences')
    menuBar.Append(helpMenu,"&Help")
    menuBar.Append(socialMediaMenu,"F&ollow Us")
    self.SetMenuBar(menuBar)
    #Create a status Bar which can be used to indicate the progress
    statusBar = self.CreateStatusBar();
    statusBar.SetStatusText(STATUS_BAR_MESSAGE);
    #Register methods when menu items are clicked ie bind the method with a menuItem
    self.Bind(wx.EVT_MENU,self.OnQuit,id = 3)
    self.Bind(wx.EVT_MENU,self.exportBarcode,id = 7)
    self.Bind(wx.EVT_MENU,self.newMenuItemEncode,id = 1);
    self.Bind(wx.EVT_CLOSE,self.OnQuit)
    self.Bind(wx.EVT_MENU,self.newMenuItemDecode,id = 2);
    self.Bind(wx.EVT_MENU,self.aboutUs,id = 21)
    self.Bind(wx.EVT_MENU,self.userManuel,id = 22)
    self.Bind(wx.EVT_MENU,self.exportList,id = 4)
    self.Bind(wx.EVT_MENU,self.importList,id = 5)
    self.Bind(wx.EVT_MENU,self.onClear,id = 6)
    self.Bind(wx.EVT_MENU,self.settings,id = 12)
    self.Bind(wx.EVT_MENU,self.credits,id = 24)
    self.Bind(wx.EVT_MENU,self.enablePassword,id = 11)
    self.Bind(wx.EVT_MENU,self.changePassword,id = 13)
    #self.Bind(wx.EVT_MENU,self.exportString,id = 41)
    #self.Bind(wx.EVT_MENU,self.exportList,id = 42)
    #self.Bind(wx.EVT_MENU,self.importString,id = 51)
    #self.Bind(wx.EVT_MENU,self.importList,id = 52)
    self.Bind(wx.EVT_MENU,self.productFeedback,id = 23)
    self.Bind(wx.EVT_MENU,self.memEstimator,id = 61)
    self.Bind(wx.EVT_MENU,self.estimator,id = 62)
    self.Bind(wx.EVT_MENU,self.productDemo,id = 25)
    self.Bind(wx.EVT_MENU,self.exportPdf,id = 8)
    self.Bind(wx.EVT_MENU,self.exportLatex,id = 9)
    self.Bind(wx.EVT_MENU,self.followFB,id = 41)
    self.Bind(wx.EVT_MENU,self.followTwitter,id = 42)
    self.Bind(wx.EVT_MENU,self.followQuora,id = 43)
    self.Bind(wx.EVT_MENU,self.followYoutube,id = 44)
    self.Bind(wx.EVT_MENU,self.switchWork,id = 14)
    super(MyFrame,self).SetSize((1000,1000))
    super(MyFrame,self).SetTitle(NAME)
    super(MyFrame,self).Show()
    p = wx.Point(200,200)
    super(MyFrame,self).Move(p)
    # Load (or lazily create) the preferences database; see the row layout
    # documented at the top of this module.
    self.prefs = False
    if "win" in sys.platform and not 'darwin'in sys.platform:
        con = sqlite3.connect(PATH + '\..\database\prefs.db')
        #print "windows"
    elif "linux" in sys.platform or 'darwin' in sys.platform:
        con = sqlite3.connect(PATH + '/../database/prefs.db')
        #print "unix"
    try:
        cur = con.cursor()
        # Row 4: "opened before" flag; row 7: default-workspace flag.
        string = (cur.execute('SELECT * FROM prefs WHERE id = 4').fetchone())[1]
        self.hasDefaultWorkspace = (cur.execute('SELECT * FROM prefs WHERE id = 7').fetchone())[1]
        if "linux" in sys.platform:
            string = unicodedata.normalize('NFKD', string).encode('ascii','ignore')
            self.hasDefaultWorkspace = unicodedata.normalize('NFKD', self.hasDefaultWorkspace).encode('ascii','ignore')
        if string == "false":
            self.prefs = True
        # Row 5: password-protection flag; row 6 holds the stored password.
        string = (cur.execute('SELECT * FROM prefs WHERE id = 5').fetchone())[1]
        if string == 'true':
            password = (cur.execute('SELECT * FROM prefs WHERE id = 6').fetchone())[1]
            if "linux" in sys.platform:
                password = unicodedata.normalize('NFKD', password).encode('ascii','ignore')
            result = wx.PasswordEntryDialog(None,'Please Enter Your Password','Password','',wx.OK | wx.CANCEL)
            passwordMatch = False
            # Re-prompt until the password matches; Cancel exits the app.
            while passwordMatch != True:
                match = result.ShowModal()
                if match == wx.ID_OK:
                    if password == result.GetValue():
                        passwordMatch = True
                    else:
                        wx.MessageDialog(self,'Your Password is Incorrect please Enter Again', 'Information!',wx.OK |wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
                else:
                    result.Destroy()
                    sys.exit()
        #self.qrText = ""
        #for i in cur.execute('SELECT * FROM prefs where id < 4'):
        #    if "win" in sys.platform:
        #        self.qrText = self.qrText + i[1] + "\n"
        #    if "linux" in sys.platform:
        #        self.qrText = self.qrText + unicodedata.normalize('NFKD', i[1]).encode('ascii','ignore') + "\n"
        self.isPasswordProtected = cur.execute('SELECT * FROM prefs where id = 5').fetchone()[1]
        if "linux" in sys.platform:
            self.isPasswordProtected = unicodedata.normalize('NFKD', cur.execute('SELECT * FROM prefs where id = 5').fetchone()[1]).encode('ascii','ignore')
        #for i in cur.execute('SELECT * FROM prefs'):
        #    print i
        con.close()
    except sqlite3.OperationalError:
        # First run (or corrupt DB): rebuild the prefs table with defaults.
        #print "New prefs DB"
        cur.execute('DROP TABLE IF EXISTS prefs')
        cur.execute('CREATE TABLE prefs(id INT,details TEXT)')
        cur.execute('INSERT INTO prefs VALUES(1,"Your full Name")')
        cur.execute('INSERT INTO prefs VALUES(2,"Cell Phone No")')
        cur.execute('INSERT INTO prefs VALUES(3,"Your mail Id")')
        cur.execute('INSERT INTO prefs VALUES(4,"false")')
        cur.execute('INSERT INTO prefs VALUES(5,"false")')
        cur.execute('INSERT INTO prefs VALUES(6,"password")')
        cur.execute('INSERT INTO prefs VALUES(7,"False")')
        cur.execute('INSERT INTO prefs VALUES(8,"None")')
        cur.execute('INSERT INTO prefs VALUES(9,"0")')
        con.commit()
        self.prefs = True
        #self.qrText = ""
        #for i in cur.execute('SELECT * FROM prefs where id < 4'):
        #    if "win" in sys.platform:
        #        self.qrText = self.qrText + i[1] + "\n"
        #    if "linux" in sys.platform:
        #        self.qrText = self.qrText + unicodedata.normalize('NFKD', i[1]).encode('ascii','ignore') + "\n"
        self.isPasswordProtected = cur.execute('SELECT * FROM prefs where id = 5').fetchone()[1]
        if "linux" in sys.platform:
            self.isPasswordProtected = unicodedata.normalize('NFKD', cur.execute('SELECT * FROM prefs where id = 5').fetchone()[1]).encode('ascii','ignore')
        con.close()
        self.hasDefaultWorkspace = "False"
    #First of all asked whether to encode or deocode so display a dialog to ask him what he wants to do
    #self.ask = panels.chooseDialog(None,101,"Welcome to DNA-CLOUD!")
    #self.ask.encodeBut.Bind(wx.EVT_BUTTON,self.encode)
    #self.ask.decodeBut.Bind(wx.EVT_BUTTON,self.decode)
    #self.ask.ShowModal()
    # Prompt for a workspace / user details on first run, then sync the
    # password-protection checkbox with the stored preference.
    if self.hasDefaultWorkspace == "False":
        panels.workspaceLauncher(None,101,"Workspace Launcher!").ShowModal()
    if self.prefs:
        panels.Preferences(None,0,"Your Details").ShowModal()
    if self.isPasswordProtected == 'true':
        self.prefMenu.Check(self.prefItem1.GetId(), True)
    else:
        self.prefMenu.Check(self.prefItem1.GetId(), False)
    #self.onUseQrcode(self.qrText)
##################################################################
#The password modules
def changePassword(self,e):
    """Menu handler: open the set-password dialog, then re-sync the
    "Password Protection" check item with the stored preference (row 5)."""
    if "win" in sys.platform and not 'darwin' in sys.platform:
        con = sqlite3.connect(PATH + '\..\database\prefs.db')
    elif "linux" in sys.platform or 'darwin' in sys.platform:
        con = sqlite3.connect(PATH + '/../database/prefs.db')
    with con:
        cur = con.cursor()
        # The dialog itself persists the new password into the prefs DB.
        password = panels.setPasswordDialog(None,101,"Password").ShowModal()
        if "win" in sys.platform:
            isEnabled = cur.execute('SELECT * FROM prefs where id = 5').fetchone()[1]
        elif "linux" in sys.platform:
            isEnabled = unicodedata.normalize('NFKD', cur.execute('SELECT * FROM prefs where id = 5').fetchone()[1]).encode('ascii','ignore')
        if isEnabled == 'true':
            self.prefMenu.Check(self.prefItem1.GetId(), True)
        elif isEnabled == 'false':
            self.prefMenu.Check(self.prefItem1.GetId(), False)
def enablePassword(self,e):
    """Menu handler for the "Password Protection" check item: when checked,
    ask for a new password; when unchecked, clear the flag (row 5), then
    re-sync the check item with the stored preference."""
    if "win" in sys.platform and not 'darwin' in sys.platform:
        con = sqlite3.connect(PATH + '\..\database\prefs.db')
    elif "linux" in sys.platform or 'darwin' in sys.platform:
        con = sqlite3.connect(PATH + '/../database/prefs.db')
    with con:
        cur = con.cursor()
        if self.prefItem1.IsChecked():
            password = panels.setPasswordDialog(None,101,"Password").ShowModal()
        else:
            cur.execute('UPDATE prefs SET details = ? WHERE id = ?',("false",5))
        #con = sqlite3.connect("/home/../database/prefs.db")
        if "win" in sys.platform:
            isEnabled = cur.execute('SELECT * FROM prefs where id = 5').fetchone()[1]
        elif "linux" in sys.platform:
            isEnabled = unicodedata.normalize('NFKD', cur.execute('SELECT * FROM prefs where id = 5').fetchone()[1]).encode('ascii','ignore')
        if isEnabled == 'true':
            self.prefMenu.Check(self.prefItem1.GetId(), True)
        elif isEnabled == 'false':
            self.prefMenu.Check(self.prefItem1.GetId(), False)
    ####################################################################
    # Main Encode entry point: switches the UI to the encode view.
    def encode(self,e):
        """Show the encode panel and hide the decode panel, rebinding buttons."""
        self.pnl1.Hide()
        self.pnl.Show()
        self.clear()
        self.Layout()
        self.bindEncodeItems()
        # the initial "what do you want to do?" chooser panel is no longer needed
        self.ask.Destroy()
    # Handler for the "choose file" button on both the encode and decode panels.
    def onChoose(self,e):
        """Ask the user for an input file and fill the size-estimate fields.

        Sets self.path to an ascii byte-string path (or None on cancel), then
        populates whichever panel (encode or decode) is currently shown.
        """
        #clear fields
        self.clear()
        fileSelector = wx.FileDialog(self, message="Choose a file",defaultFile="",style=wx.OPEN | wx.MULTIPLE | wx.CHANGE_DIR )
        if fileSelector.ShowModal() == wx.ID_OK:
            paths = fileSelector.GetPaths()
            # only the first selected path is used; normalized to ascii bytes
            self.path = unicodedata.normalize('NFKD', paths[0]).encode('ascii','ignore')
        else:
            self.path = None
        fileSelector.Destroy()
        del fileSelector
        # isinstance(..., str) doubles as the "user did not cancel" check
        if self.pnl.IsShown() and isinstance(self.path, str):
            self.pnl.txt.WriteText(self.path)
            self.pnl.txt5.WriteText(str(os.path.getsize(self.path)))
            # 117 appears to be the bytes-per-DNA-chunk constant; 5.5 the
            # nucleotides-per-byte expansion factor — TODO confirm vs encoder
            self.pnl.txt4.WriteText("117")
            self.pnl.txt2.WriteText(str(int(5.5 * os.path.getsize(self.path))))
            self.pnl.txt3.WriteText(str(int(5.5 * os.path.getsize(self.path))/25 - 3))
        elif self.pnl1.IsShown() and isinstance(self.path, str):
            self.clear()
            self.pnl1.txt.WriteText(self.path)
            self.pnl1.txt2.WriteText(str(int((os.path.getsize(self.path)/117 + 3)*25)))
            self.pnl1.txt3.WriteText(str(int(os.path.getsize(self.path)/117)))
        return
    ############################################################################
    # Save / Cancel button handlers for the encode panel
    def save(self,e):
        """Compress (optionally) and encode the selected file into a DNA file.

        Output goes either to the configured workspace (prefs row id 8) or to a
        user-chosen location. Compression and encoding each run in a background
        worker (a Thread on OS X, a Process elsewhere) while a pulsing progress
        dialog is shown; 'terminated' tracks user cancellation.
        """
        con = sqlite3.connect(PATH + '/../database/prefs.db')
        try:
            cur = con.cursor()
            workspacePath = (cur.execute('SELECT * FROM prefs where id = 8').fetchone())[1]
            if "linux" in sys.platform:
                workspacePath = unicodedata.normalize('NFKD', workspacePath).encode('ascii','ignore')
        except:
            # NOTE(review): bare except deliberately falls back to 'None'
            # (no workspace configured); it also hides real DB errors
            workspacePath = 'None'
        if not self.path :
            wx.MessageDialog(self,'Please Select a file from you file system before Converting', 'Note!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
            return
        #variable terminated keeps track if dna file is created or not
        #Let's set path to save output file
        if workspacePath == "None":
            locationSelector = wx.FileDialog(self,"Please select location to save your encoded file",style = wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
            if locationSelector.ShowModal() == wx.ID_OK:
                paths = locationSelector.GetPath()
                if "win" in sys.platform:
                    self.savePath = paths
                elif "linux" in sys.platform:
                    self.savePath = unicodedata.normalize('NFKD', paths).encode('ascii','ignore')
                terminated = False
            else:
                terminated = True
            locationSelector.Destroy()
            del locationSelector
        else:
            xtime = datetime.now().timetuple()
            self.savePath = workspacePath + "/encoded"
            terminated = False
        # if the save dialog was cancelled on a fresh session there is no path
        if not hasattr( self, 'savePath' ):
            wx.MessageDialog( self,'Output file path is not given', 'Error!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
            return
        inputFilename = os.path.basename( self.path )
        inputFilenameNoExtension = os.path.splitext( inputFilename )[0]
        self.savePath += "_" + inputFilenameNoExtension
        # compressedFilePath returns a falsy value when compression is disabled
        self.compressFilePath = compression.compressedFilePath( self.path, workspacePath, self.pnl.compOptionsComboBox.GetCurrentSelection() )
        self.encodingScheme = self.pnl.algoOptionsComboBox.GetCurrentSelection()
        # compression worker is started only if compression is possible
        if self.compressFilePath:
            if 'darwin' in sys.platform:
                # NOTE(review): Thread has no terminate(); the cancel path below
                # relies on this and only shows a "cannot be stopped" message
                compressionThread = threading.Thread(name = "compression", target = compression.compress, args = ( self.path, self.compressFilePath, self.pnl.compOptionsComboBox.GetCurrentSelection() ))
            else:
                compressionThread = multiprocessing.Process(target = compression.compress , args = ( self.path, self.compressFilePath, self.pnl.compOptionsComboBox.GetCurrentSelection() ) , name = "Compression Process")
            compressionThread.daemon = True
            compressionThread.start()
            progressDialog = wx.ProgressDialog('Please wait...', 'Compressing the File....This may take several minutes....\n\t....so sit back and relax....',parent = self,style = wx.PD_APP_MODAL | wx.PD_CAN_ABORT | wx.PD_ELAPSED_TIME)
            progressDialog.SetSize((450,180))
            if 'darwin' in sys.platform:
                while compressionThread.isAlive():
                    time.sleep(0.1)
                    if not progressDialog.UpdatePulse("Compressing the File....This may take several minutes...\n\tso sit back and relax.....")[0]:
                        compressionThread.terminate()
                        terminated = True
                        wx.MessageDialog(self,'Cannot be stopped.Sorry', 'Information!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
                        break
            else:
                # poll until the compression Process exits (or user aborts)
                while len(multiprocessing.active_children()) != 0:
                    time.sleep(0.1)
                    if not progressDialog.UpdatePulse("Compressing the File....This may take several minutes...\n\tso sit back and relax.....")[0]:
                        compressionThread.terminate()
                        terminated = True
                        self.clear()
                        break
            progressDialog.Destroy()
            compressionThread.join()
            self.readPath = self.compressFilePath
        else:
            self.readPath = self.path
        # encoding starts here; scheme 0 = Golay-based encoder
        if 'darwin' in sys.platform:
            if self.encodingScheme == 0:
                encodingThread = threading.Thread(name = "encode", target = encodeGolay.encode, args = ( self.readPath , self.savePath, ))
            else:
                encodingThread = threading.Thread(name = "encode", target = encode.encode, args = ( self.readPath, self.savePath, ))
        else:
            if self.encodingScheme == 0:
                encodingThread = multiprocessing.Process(target = encodeGolay.encode , args = ( self.readPath , self.savePath, ) , name = "Encode Process")
            else:
                encodingThread = multiprocessing.Process(target = encode.encode , args = ( self.readPath, self.savePath, ) , name = "Encode Process")
        if not terminated:
            encodingThread.start()
            progressDialog = wx.ProgressDialog('Please wait...', 'Encoding the File....This may take several minutes....\n\t....so sit back and relax....',parent = self,style = wx.PD_APP_MODAL | wx.PD_CAN_ABORT | wx.PD_ELAPSED_TIME)
            progressDialog.SetSize((450,180))
            if 'darwin' in sys.platform:
                while encodingThread.isAlive():
                    time.sleep(0.1)
                    if not progressDialog.UpdatePulse("Encoding the File....This may take several minutes...\n\tso sit back and relax.....")[0]:
                        encodingThread.terminate()
                        terminated = True
                        wx.MessageDialog(self,'Cannot be stopped.Sorry', 'Information!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
                        break
                if not encodingThread.isAlive():
                    encodingThread.join()
            else:
                while len(multiprocessing.active_children()) != 0:
                    time.sleep(0.1)
                    if not progressDialog.UpdatePulse("Encoding the File....This may take several minutes...\n\tso sit back and relax.....")[0]:
                        encodingThread.terminate()
                        terminated = True
                        self.clear()
                        break
                encodingThread.join()
                encodingThread.terminate()
            progressDialog.Destroy()
        if not terminated:
            wx.MessageDialog(self,'File has been created', 'Information!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
def discard(self,e):
if not self.pnl.txt.IsEmpty():
self.clear()
else:
wx.MessageDialog(self,'Please Select a file from you file system before Reseting', 'Note!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
def clear(self):
self.pnl.compOptionsComboBox.SetStringSelection( "No Compression" )
if self.pnl.IsShown():
self.pnl.txt.Clear()
self.pnl.txt2.Clear()
self.pnl.txt3.Clear()
self.pnl.txt4.Clear()
self.pnl.txt5.Clear()
elif self.pnl1.IsShown():
self.pnl1.txt.Clear()
self.pnl1.txt2.Clear()
self.pnl1.txt3.Clear()
#self.pnl1.txt4.Clear()
#self.pnl1.txt5.Clear()
    ##################################################################
    # Main Decode entry point: switches the UI to the decode view.
    def decode(self,e):
        """Show the decode panel and hide the encode panel, rebinding buttons."""
        self.pnl.Hide()
        self.pnl1.Show()
        self.bindDecodeItems()
        self.clear()
        self.Layout()
        # the initial "what do you want to do?" chooser panel is no longer needed
        self.ask.Destroy()
    def decodeBut1(self,e):
        """Decode a DNA string typed directly into the decode panel.

        Pipeline: DNA bases -> base-3 string -> s1 stream -> ascii codes ->
        bytes, written to <root>/decodedFiles/decode. A determinate progress
        dialog is advanced by hand-picked percentages between stages.
        """
        try:
            progressMax = 100
            dialog = wx.ProgressDialog("Note!", "Your file is being prepared from DNA Chunks, Please Wait...", progressMax,parent = self, style = wx.PD_APP_MODAL | wx.PD_CAN_ABORT | wx.PD_ELAPSED_TIME)
            keepGoing = True
            count = 0
            if not self.pnl1.txt21.IsEmpty():
                base3String = extraModules.DNABaseToBase3(self.pnl1.txt21.GetString(0,self.pnl1.txt21.GetLastPosition()))
                count = count + 12
                keepGoing = dialog.Update(count)
            else:
                dialog.Destroy()
                wx.MessageDialog(self,'Error Please write a dna string', 'Note!',wx.OK | wx.ICON_ERROR | wx.STAY_ON_TOP).ShowModal()
                return
            # -1 signals a malformed stream per s4ToS1S2S3's error convention
            s1 = extraModules.s4ToS1S2S3(base3String)
            count = count + 25
            keepGoing = dialog.Update(count)
            if s1 == -1:
                dialog.Destroy()
                wx.MessageDialog(self,'Error imporper string', 'Note!',wx.OK | wx.ICON_ERROR | wx.STAY_ON_TOP).ShowModal()
                return
            asciiList = HuffmanDictionary.base3ToAscii(s1)
            count = count + 13
            keepGoing = dialog.Update(count)
            if asciiList == None:
                dialog.Destroy()
                wx.MessageDialog(self,'Error imporper string', 'Note!',wx.OK | wx.ICON_ERROR | wx.STAY_ON_TOP).ShowModal()
                return
            string = extraModules.asciiToString(asciiList)
            count = count + 25
            keepGoing = dialog.Update(count)
            # file() is the Python 2 builtin (this is a py2 wx-classic codebase)
            if "win" in sys.platform and not 'darwin' in sys.platform:
                decodedFile = file(PATH + "\..\decodedFiles\decode","wb")
            elif "linux" in sys.platform or 'darwin' in sys.platform:
                decodedFile = file(PATH + "/../decodedFiles/decode","wb")
            decodedFile.write(string)
            decodedFile.close()
            count = count + 25
            keepGoing = dialog.Update(count)
            dialog.Destroy()
            wx.MessageDialog(self,'File created in the decoded Files folder', 'Note!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
        except MemoryError:
            # very long DNA strings can exhaust memory during the conversions
            wx.MessageDialog(self,'MemoryError Please free up ypur memory or use swap memory or increase RAM', 'Note!',wx.OK | wx.ICON_ERROR | wx.STAY_ON_TOP).ShowModal()
#This method is called whenever we have a DNA String to be decoded
def decodeBut2(self,e):
con = sqlite3.connect(PATH + '/../database/prefs.db')
try:
cur = con.cursor()
string = (cur.execute('SELECT * FROM prefs where id = 8').fetchone())[1]
if "linux" in sys.platform:
string = unicodedata.normalize('NFKD', string).encode('ascii','ignore')
except:
string = 'None'
if (not self.pnl1.txt.IsEmpty()) and (FILE_EXT in self.pnl1.txt.GetString(0,self.pnl1.txt.GetLastPosition())) and string == 'None':
locationSelector = wx.FileDialog(self,"Please select location to save your decoded file",style = wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if locationSelector.ShowModal() == wx.ID_OK:
paths = locationSelector.GetPath()
if "win" in sys.platform:
self.savePath = paths
elif "linux" in sys.platform:
self.savePath = unicodedata.normalize('NFKD', paths).encode('ascii','ignore')
terminated = False
else:
terminated = True
locationSelector.Destroy()
del locationSelector
self.decodingScheme = self.pnl1.algoDecodeOptionsComboBox.GetCurrentSelection()
if 'darwin' in sys.platform:
if self.decodingScheme == 0:
decodingThread = threading.Thread(name = "Decode", target = decodeGolay.decode, args = ( self.path , self.savePath, ))
else:
decodingThread = threading.Thread(name = "Decode", target = decode.decode, args = ( self.path , self.savePath, ))
else:
if self.decodingScheme == 0:
decodingThread = multiprocessing.Process(target = decodeGolay.decode , args = ( self.path , self.savePath, ) , name = "Decode Process")
else:
decodingThread = multiprocessing.Process(target = decode.decode , args = ( self.path , self.savePath, ) , name = "Decode Process")
if not terminated:
decodingThread.start()
temp = wx.ProgressDialog('Please wait...', 'Decoding the File....This may take several minutes....\n\t....so sit back and relax....',parent = self,style = wx.PD_APP_MODAL | wx.PD_CAN_ABORT | wx.PD_ELAPSED_TIME)
temp.SetSize((450,180))
if 'darwin' in sys.platform:
while decodingThread.isAlive():
time.sleep(0.1)
if not temp.UpdatePulse("Decoding the File....This may take several minutes...\n\tso sit back and relax.....")[0]:
wx.MessageDialog(self,'Cannot be stopped.Sorry', 'Information!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
temp.Destroy()
if not decodingThread.isAlive():
decodingThread.join()
else:
while len(multiprocessing.active_children()) != 0:
time.sleep(0.1)
if not temp.UpdatePulse("Decoding the File....This may take several minutes...\n\tso sit back and relax.....")[0]:
decodingThread.terminate()
terminated = True
self.clear()
break
temp.Destroy()
decodingThread.join()
decodingThread.terminate()
if not terminated:
wx.MessageDialog(self,'File has been created', 'Information!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
elif (not self.pnl1.txt.IsEmpty()) and (FILE_EXT in self.pnl1.txt.GetString(0,self.pnl1.txt.GetLastPosition())) and string != 'None':
terminated = False
xtime = datetime.now().timetuple()
self.savePath = string + "/decoded_"
self.decodingScheme = self.pnl1.algoDecodeOptionsComboBox.GetCurrentSelection()
if 'darwin' in sys.platform:
if self.decodingScheme == 0:
decodingThread = threading.Thread(name = "Decode", target = decodeGolay.decode, args = ( self.path , self.savePath, ))
else:
decodingThread = threading.Thread(name = "Decode", target = decode.decode, args = (self.path,self.savePath,))
else:
if self.decodingScheme == 0:
decodingThread = multiprocessing.Process(target = decodeGolay.decode , args = ( self.path , self.savePath, ) , name = "Decode Process")
else:
decodingThread = multiprocessing.Process(target = decode.decode , args = ( self.path , self.savePath, ) , name = "Decode Process")
if not terminated:
decodingThread.start()
temp = wx.ProgressDialog('Please wait...', 'Decoding the File....This may take several minutes....\n\t....so sit back and relax....',parent = self,style = wx.PD_APP_MODAL | wx.PD_CAN_ABORT | wx.PD_ELAPSED_TIME)
temp.SetSize((450,180))
if 'darwin' in sys.platform:
while decodingThread.isAlive():
time.sleep(0.1)
if not temp.UpdatePulse("Decoding the File....This may take several minutes...\n\tso sit back and relax.....")[0]:
#decodingThread.terminate()
#terminated = True
wx.MessageDialog(self,'Cannot be stopped.Sorry', 'Information!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
#break
temp.Destroy()
if not decodingThread.isAlive():
decodingThread.join()
else:
while len(multiprocessing.active_children()) != 0:
time.sleep(0.1)
if not temp.UpdatePulse("Decoding the File....This may take several minutes...\n\tso sit back and relax.....")[0]:
decodingThread.terminate()
terminated = True
self.clear()
break
temp.Destroy()
decodingThread.join()
decodingThread.terminate()
if not terminated:
wx.MessageDialog(self,'File has been created', 'Information!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
else:
wx.MessageDialog(self,'Please Select a .dnac file', 'Note!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
    def discard1(self,e):
        """Clear the DNA-string text box on the decode panel."""
        self.pnl1.txt21.Clear()
def onClear(self,e):
size = 0
con = sqlite3.connect(PATH + '/../database/prefs.db')
with con:
cur = con.cursor()
WORKSPACE_PATH = cur.execute('SELECT * FROM prefs WHERE id = 8').fetchone()[1]
if "linux" in sys.platform:
WORKSPACE_PATH = unicodedata.normalize('NFKD', WORKSPACE_PATH).encode('ascii','ignore')
if not os.path.isdir(WORKSPACE_PATH + '/.temp'):
os.mkdir(WORKSPACE_PATH + '/.temp')
if "win" in sys.platform and not 'darwin' in sys.platform:
os.chdir(WORKSPACE_PATH + '\.temp')
try:
size += os.path.getsize("dnaString.txt")
os.system("del dnaString.txt")
EXIST_DNASTRING = True
except OSError:
EXIST_DNASTRING = False
try:
size += os.path.getsize("barcode")
os.system("del barcode")
EXIST_BARCODE = True
except OSError:
EXIST_BARCODE = False
try:
size += os.path.getsize("details.txt")
os.system("del details.txt")
EXIST_DETAILS = True
except OSError:
EXIST_DETAILS = False
elif "linux" in sys.platform or 'darwin' in sys.platform:
os.chdir(WORKSPACE_PATH + '/.temp')
try:
size += os.path.getsize("dnaString.txt")
os.system("rm dnaString.txt")
EXIST_DNASTRING = True
except OSError:
EXIST_DNASTRING = False
try:
size += os.path.getsize("barcode")
os.system("rm barcode")
EXIST_BARCODE = True
except OSError:
EXIST_BARCODE = False
try:
size += os.path.getsize("details.txt")
os.system("rm details.txt")
EXIST_DETAILS = True
except OSError:
EXIST_DETAILS = False
os.chdir(PATH)
wx.MessageDialog(self,'Temporary Files have been removed\nSpace Freed : '+ str(size/1000000) + " MB" , 'Information!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
    #######################################################################
    # Menu-bar handlers: open community links in the default browser,
    # plus the workspace switcher dialog.
    def followFB(self,e):
        """Open the project's Facebook page."""
        webbrowser.open(FB_LINK)
    def followTwitter(self,e):
        """Open the project's Twitter page."""
        webbrowser.open(TWITTER_LINK)
    def followQuora(self,e):
        """Open the project's Quora page."""
        webbrowser.open(QUORA_LINK)
    def followYoutube(self,e):
        """Open the project's YouTube channel."""
        webbrowser.open(YOUTUBE_LINK)
    def switchWork(self,e):
        """Show the modal workspace-switcher dialog."""
        panels.workspaceLauncher(None,102,"Switch Workspace!").ShowModal()
def credits(self,e):
if "win" in sys.platform and not 'darwin' in sys.platform:
os.chdir(PATH + '\..\help')
os.system("start Credits.pdf")
os.chdir(PATH)
elif "linux" in sys.platform:
os.chdir(PATH + '/../help')
os.system("xdg-open Credits.pdf")
os.chdir(PATH)
elif 'darwin' in sys.platform:
os.chdir(PATH + '/../help')
os.system('open Credits.pdf')
os.chdir(PATH)
    def productDemo(self,e):
        """Open the product demo page in the default browser."""
        webbrowser.open(PRODUCT_LINK)
    def productFeedback(self,e):
        """Open the feedback form in the default browser."""
        webbrowser.open(FEEDBACK_LINK)
def userManuel(self,e):
if "win" in sys.platform and not 'darwin' in sys.platform:
os.chdir(PATH + '\..\help')
os.system("start UserManual.pdf")
os.chdir(PATH)
elif "linux" in sys.platform:
os.chdir(PATH + '/../help')
os.system("xdg-open UserManual.pdf")
os.chdir(PATH)
elif 'darwin' in sys.platform:
os.chdir(PATH + '/../help')
os.system("open UserManual.pdf")
os.chdir(PATH)
def exportPdf(self,e):
fileSelector = wx.FileDialog(self, message="Choose a .dnac file",defaultFile="",style=wx.OPEN | wx.MULTIPLE | wx.CHANGE_DIR )
if fileSelector.ShowModal() == wx.ID_OK:
paths = fileSelector.GetPaths()
if "win" in sys.platform and not 'darwin' in sys.platform:
filePath = paths[0]
elif "linux" in sys.platform or 'darwin' in sys.platform:
filePath = unicodedata.normalize('NFKD', paths[0]).encode('ascii','ignore')
terminated = False
if FILE_EXT in filePath:
locationSelector = wx.FileDialog(self,"Please select location to save your PDF file",style = wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if locationSelector.ShowModal() == wx.ID_OK:
paths = locationSelector.GetPath()
if "win" in sys.platform and not 'darwin' in sys.platform:
savePath = paths
elif "linux" in sys.platform or 'darwin' in sys.platform:
savePath = unicodedata.normalize('NFKD', paths).encode('ascii','ignore')
terminated = False
else:
terminated = True
locationSelector.Destroy()
del locationSelector
if 'darwin' in sys.platform:
#print filePath, savePath
exportToPdf = threading.Thread(name = "Export Thread", target = extraModules.exportToPdf, args = (filePath, savePath,))
else:
exportToPdf = multiprocessing.Process(target = extraModules.exportToPdf , name = "PDF Exporter" , args = (filePath,savePath))
if not terminated:
exportToPdf.start()
temp = wx.ProgressDialog('Exporting to pdf....This may take a while....', 'Please wait...' ,parent = self,style = wx.PD_APP_MODAL | wx.PD_CAN_ABORT | wx.PD_ELAPSED_TIME)
temp.SetSize((450,180))
if 'darwin' in sys.platform:
while exportToPdf.isAlive():
time.sleep(0.1)
if not temp.UpdatePulse("Exporting the File....This may take several minutes...\n\tso sit back and relax.....")[0]:
#p.terminate()
#terminated = True
wx.MessageDialog(self,'Cannot be stopped.Sorry', 'Information!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
#break
temp.Destroy()
if not exportToPdf.isAlive():
exportToPdf.join()
else:
while len(multiprocessing.active_children()) != 0:
time.sleep(0.1)
if not temp.UpdatePulse("Exporting the File....This may take several minutes...\n\tso sit back and relax.....")[0]:
exportToPdf.terminate()
terminated = True
self.clear()
break
temp.Destroy()
exportToPdf.join()
exportToPdf.terminate()
if not terminated:
wx.MessageDialog(self,'File has been created', 'Information!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
else:
wx.MessageDialog(self,'Please select a .dnac file', 'Information!',wx.OK |wx.ICON_ERROR | wx.STAY_ON_TOP).ShowModal()
fileSelector.Destroy()
del fileSelector
def exportLatex(self,e):
fileSelector = wx.FileDialog(self, message="Choose a .dnac file",defaultFile="",style=wx.OPEN | wx.MULTIPLE | wx.CHANGE_DIR )
if fileSelector.ShowModal() == wx.ID_OK:
paths = fileSelector.GetPaths()
if "win" in sys.platform and not 'darwin' in sys.platform:
filePath = paths[0]
elif "linux" in sys.platform or 'darwin' in sys.platform:
filePath = unicodedata.normalize('NFKD', paths[0]).encode('ascii','ignore')
terminated = False
if FILE_EXT in filePath:
locationSelector = wx.FileDialog(self,"Please select location to save your Latex file",style = wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if locationSelector.ShowModal() == wx.ID_OK:
paths = locationSelector.GetPath()
if "win" in sys.platform and not 'darwin' in sys.platform:
savePath = paths
elif "linux" in sys.platform or 'darwin' in sys.platform:
savePath = unicodedata.normalize('NFKD', paths).encode('ascii','ignore')
terminated = False
else:
terminated = True
locationSelector.Destroy()
del locationSelector
if 'darwin' in sys.platform:
exportToLatex = threading.Thread(name = "Export Thread", target = extraModules.exportToLatex, args = (filePath, savePath,))
else:
exportToLatex = multiprocessing.Process(target = extraModules.exportToLatex , name = "Latex Exporter" , args = (filePath,savePath))
if not terminated:
exportToLatex.start()
temp = wx.ProgressDialog('Exporting to latex file....This may take a while....', 'Please wait...' ,parent = self,style = wx.PD_APP_MODAL | wx.PD_CAN_ABORT | wx.PD_ELAPSED_TIME)
temp.SetSize((450,180))
if 'darwin' in sys.platform:
while exportToLatex.isAlive():
time.sleep(0.1)
if not temp.UpdatePulse("Exporting the File....This may take several minutes...\n\tso sit back and relax.....")[0]:
#p.terminate()
#terminated = True
wx.MessageDialog(self,'Cannot be stopped.Sorry', 'Information!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
#break
temp.Destroy()
if not exportToLatex.isAlive():
exportToLatex.join()
else:
while len(multiprocessing.active_children()) != 0:
time.sleep(0.1)
if not temp.UpdatePulse("Exporting the File....This may take several minutes...\n\tso sit back and relax.....")[0]:
exportToLatex.terminate()
terminated = True
self.clear()
break
temp.Destroy()
exportToLatex.join()
exportToLatex.terminate()
else:
wx.MessageDialog(self,'Please select a .dnac file', 'Information!',wx.OK |wx.ICON_ERROR | wx.STAY_ON_TOP).ShowModal()
fileSelector.Destroy()
del fileSelector
    def exportList(self,e):
        """Placeholder: exporting DNA chunk lists is not implemented yet."""
        wx.MessageDialog(self,'This feature is yet to be added please bear with us', 'Sorry',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
    def importList(self,e):
        """Placeholder: importing DNA chunk lists is not implemented yet."""
        wx.MessageDialog(self,'This feature is yet to be added please bear with us', 'Sorry',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
    # Confirmation dialog shown before quitting the application.
    def OnQuit(self,item):
        """Ask for confirmation and exit the whole process on 'Yes'."""
        # NOTE(review): wx.YES | wx.NO works but wx.YES_NO is the canonical
        # style flag; also the dialog is only Destroy()ed on the 'No' path
        quitMessageBox = wx.MessageDialog(self,"Are you sure you want to Quit??","Quitting the Application",wx.YES | wx.NO | wx.ICON_EXCLAMATION)
        result = quitMessageBox.ShowModal();
        if result == wx.ID_YES:
            super(MyFrame,self).Destroy()
            sys.exit()
        elif result == wx.ID_NO:
            quitMessageBox.Destroy()
    def newMenuItemEncode(self,e):
        """Menu handler: switch the main window to the encode view.

        Handles three states: decode panel visible, no panel visible (fresh
        start), or encode panel already visible (just shows a notice).
        """
        if self.pnl1.IsShown():
            self.pnl1.Hide()
            self.pnl.Show()
            self.clear()
            self.Layout()
            self.pnl.Refresh()
            self.bindEncodeItems()
            gc.collect()
        elif not (self.pnl1.IsShown() or self.pnl.IsShown()):
            self.pnl.Show()
            self.clear()
            self.Layout()
            self.pnl.Refresh()
            self.bindEncodeItems()
            gc.collect()
        else:
            wx.MessageDialog(self,"You are already on the Encode Page!","Note!",wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
def newMenuItemDecode(self,e):
if self.pnl.IsShown():
self.pnl.Hide()
self.pnl1.Show()
self.Layout()
self.bindDecodeItems()
self.pnl1.Refresh()
gc.collect()
elif not (self.pnl1.IsShown() or self.pnl.IsShown()):
self.pnl1.Show()
self.clear()
self.Layout()
self.pnl.Refresh()
self.bindEncodeItems()
gc.collect()
else:
wx.MessageDialog(self,"You are already on the Decode Page!","Note!",wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
    def aboutUs(self,e):
        """Show the standard wx About box populated from the module constants."""
        info = wx.AboutDialogInfo()
        if "win" in sys.platform and not 'darwin' in sys.platform:
            info.SetIcon(wx.Icon(PATH + '\..\icons\DNAicon.png', wx.BITMAP_TYPE_PNG))
        elif "linux" in sys.platform or 'darwin' in sys.platform:
            info.SetIcon(wx.Icon(PATH + '/../icons/DNAicon.png', wx.BITMAP_TYPE_PNG))
        info.SetName(NAME)
        info.SetVersion(VERSION)
        info.SetDescription(ABOUT_DESCRIPTION)
        info.SetCopyright(ABOUT_COPYRIGHT)
        info.SetWebSite(OFFICIAL_WEBSITE)
        info.SetLicence(DETAILED_LICENSE)
        info.AddDeveloper(KEY_DEVELOPER)
        info.AddArtist(ICON_ARTIST)
        info.AddArtist(ICON_IDEA)
        wx.AboutBox(info)
    def settings(self,e):
        """Open the modal preferences dialog."""
        p = panels.Preferences(None,0,"Details").ShowModal()
    def memEstimator(self,e):
        """Open the modal memory-requirement estimator dialog."""
        gc.collect()
        panels.memEstimator(None,103,"Approximate the Values").ShowModal()
    def estimator(self,e):
        """Open the modal encoding-size estimator dialog."""
        gc.collect()
        panels.estimator(None,103,"Approximate the Values").ShowModal()
    def exportBarcode(self,e):
        """Copy the last generated barcode PNG from the workspace to a
        user-chosen location.

        If the workspace has no 'barcode' folder yet, it is created and the
        user is told to generate a barcode first.
        """
        con = sqlite3.connect(PATH + '/../database/prefs.db')
        with con:
            cur = con.cursor()
            # row id 8 holds the workspace path
            WORKSPACE_PATH = cur.execute('SELECT * FROM prefs WHERE id = 8').fetchone()[1]
            if "linux" in sys.platform:
                WORKSPACE_PATH = unicodedata.normalize('NFKD', WORKSPACE_PATH).encode('ascii','ignore')
            if not os.path.isdir(WORKSPACE_PATH + '/barcode'):
                # NOTE(review): creating the folder here only prepares it for a
                # future barcode; there is still nothing to export, so we bail
                os.mkdir(WORKSPACE_PATH + '/barcode')
                wx.MessageDialog(self,'Software cannot find barcode please go to prefrences and generate a barcode!', 'Information!',wx.OK |wx.ICON_ERROR | wx.STAY_ON_TOP).ShowModal()
                return
            fileSelector = wx.FileDialog(self, message="Choose a location to save barcode",style = wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
            if fileSelector.ShowModal() == wx.ID_OK:
                paths = fileSelector.GetPaths()
                # NOTE(review): the source open(...) handle is never closed
                # explicitly (py2 relies on refcounting here)
                if "win" in sys.platform and not 'darwin' in sys.platform:
                    barcodeFile = file(paths[0] + ".png","wb")
                    barcodeFile.write(open(WORKSPACE_PATH + '\\barcode\\barcode.png',"rb").read())
                elif "linux" in sys.platform or 'darwin'in sys.platform:
                    paths = unicodedata.normalize('NFKD', paths[0]).encode('ascii','ignore')
                    barcodeFile = file(paths + ".png","wb")
                    barcodeFile.write(open(WORKSPACE_PATH + '/barcode/barcode.png',"rb").read())
                barcodeFile.close()
                wx.MessageDialog(self,'Last generated barcode Saved to specified location', 'Information!',wx.OK |wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
            fileSelector.Destroy()
            del fileSelector
    #######################################################################
    # Button (re-)binding helpers, called whenever the visible panel changes
    def bindEncodeItems(self):
        """Wire the encode panel's buttons to their handlers."""
        self.pnl.butChoose.Bind(wx.EVT_BUTTON,self.onChoose)
        self.pnl.saveBut.Bind(wx.EVT_BUTTON,self.save)
        self.pnl.discardBut.Bind(wx.EVT_BUTTON,self.discard)
    def bindDecodeItems(self):
        """Wire the decode panel's buttons to their handlers."""
        self.pnl1.butChoose.Bind(wx.EVT_BUTTON,self.onChoose)
        self.pnl1.decodeBut.Bind(wx.EVT_BUTTON,self.decodeBut1)
        self.pnl1.resetBut.Bind(wx.EVT_BUTTON,self.discard1)
        self.pnl1.decodeBut1.Bind(wx.EVT_BUTTON,self.decodeBut2)
#Splash Screen Class this is used to make the DNA Splash Screen
class MySplashScreen(wx.SplashScreen):
    """Startup splash screen; constructs the main frame when it goes away."""
    def OnSplashScreenExit(self,e):
        """Hide the splash and create the main application window."""
        self.Hide();
        # the frame registers itself with the wx app; the local name is unused
        frame = MyFrame(None)
    def __init__(self,parent=None):
        """Show the splash bitmap; on Windows skip the timeout and open the
        main frame immediately, elsewhere wait for the close event."""
        if "linux" in sys.platform or "darwin" in sys.platform:
            bmp = wx.Bitmap(PATH + '/../icons/DNA.png', wx.BITMAP_TYPE_PNG)
        elif "win" in sys.platform and not 'darwin' in sys.platform:
            bmp = wx.Bitmap(PATH + '\..\icons\DNA.png', wx.BITMAP_TYPE_PNG)
        wx.SplashScreen.__init__(self,bmp,wx.SPLASH_CENTER_ON_SCREEN | wx.SPLASH_TIMEOUT,SPLASH_TIMEOUT,parent)
        if "win" in sys.platform:
            # argument is ignored by the handler; any placeholder works
            self.OnSplashScreenExit("sad")
        else:
            self.Bind(wx.EVT_CLOSE,self.OnSplashScreenExit)
###############################################################
# Application entry point
if __name__ == "__main__":
    app = wx.App()
    # freeze_support is required for multiprocessing in frozen (exe) builds
    if not "darwin" in sys.platform:
        multiprocessing.freeze_support()
    Splash = MySplashScreen()
    app.MainLoop()
    if not 'darwin' in sys.platform:
        sys.exit(0)
|
guptalab/dnacloud
|
source/MainFrame.py
|
Python
|
mit
| 57,098
|
[
"VisIt"
] |
385a6b6f9958a2a936a80fb270fa5addcca36e4040853c6aa175a94a2bf79bbc
|
'''
Created on Nov. 07, 2020
A module to read ERA5 data; this includes converting GRIB files to NetCDF-4,
as well as functions to load the converted and aggregated data.
@author: Andre R. Erler, GPL v3
'''
# external imports
import os.path as osp
import pandas as pd
import numpy as np
import netCDF4 as nc # netCDF4-python module
import xarray as xr
from collections import namedtuple
# internal imports
from datasets.common import getRootFolder
from geodata.gdal import GridDefinition
from datasets.misc import loadXRDataset, getFolderFileName
from geospatial.netcdf_tools import autoChunk
## Meta-vardata
dataset_name = 'ERA5'
root_folder = getRootFolder(dataset_name=dataset_name, fallback_name='NRCan') # get dataset root folder based on environment variables
# regular lat/lon grid definition for ERA5
projdict = dict(proj='longlat',lon_0=0,lat_0=0,x_0=0,y_0=0) # wraps at dateline
proj4_string = '+proj=longlat +ellps=WGS84 +datum=WGS84 +lon_0=0 +lat_0=0 +x_0=0 +y_0=0 +name={} +no_defs'.format(dataset_name)
# ERA5-Land (global, 0.1 deg)
ERA5Land_geotransform = (-180, 0.1, 0, -90, 0, 0.1)
ERA5Land_size = (3600,1800) # (x,y) map size of grid
ERA5Land_grid = GridDefinition(name=dataset_name, projection=None, geotransform=ERA5Land_geotransform, size=ERA5Land_size)
# southern Ontario (0.1 deg subdomain)
SON10_geotransform = (-85, 0.1, 0, 41, 0, 0.1)
SON10_size = (111,61) # (x,y) map size of grid
# BUG FIX: SON10_grid was previously constructed with the ERA5-Land
# geotransform/size (copy-paste); it must use the SON10 parameters above
SON10_grid = GridDefinition(name=dataset_name, projection=None, geotransform=SON10_geotransform, size=SON10_size)
varatts_list = dict()
# attributes of variables in ERA5-Land
varatts_list['ERA5L'] = dict(# forcing/flux variables
    tp = dict(name='precip', units='kg/m^2/s',scalefactor=1000./86400., long_name='Total Precipitation'), # units of meters water equiv. / day
    pev = dict(name='pet_era5', units='kg/m^2/s',scalefactor=-1000./86400., long_name='Potential Evapotranspiration'), # units of meters water equiv. / day; negative values
    # state variables
    sd = dict(name='snow', units='kg/m^2', scalefactor=1.e3, long_name='Snow Water Equivalent'), # units of meters water equivalent
    # axes (don't have their own file)
    time_stamp = dict(name='time_stamp', units='', long_name='Time Stamp'), # readable time stamp (string)
    time = dict(name='time', units='days', long_name='Days'), # time coordinate
    lon = dict(name='lon', units='deg', long_name='Longitude'), # geographic longitude
    lat = dict(name='lat', units='deg', long_name='Latitude'), # geographic latitude
    # derived variables
    dswe = dict(name='dswe',units='kg/m^2/s', long_name='SWE Changes'),
    liqwatflx = dict(name='liqwatflx', units='kg/m^2/s', long_name='Liquid Water Flux'),
    )
# list of variables to load (translated names per subset)
default_varlists = {name:[atts['name'] for atts in varatts.values()] for name,varatts in varatts_list.items()}
# list of sub-datasets/subsets with titles
DSNT = namedtuple(typename='Dataset', field_names=['name','interval','resolution','title',])
dataset_attributes = dict(ERA5L = DSNT(name='ERA5L',interval='1h', resolution=0.1, title='ERA5-Land',), # downscaled land reanalysis
                          ERA5S = DSNT(name='ERA5S',interval='1h', resolution=0.3, title='ERA5-Sfc',), # regular surface; not verified
                          ERA5A = DSNT(name='ERA5A',interval='6h', resolution=0.3, title='ERA5-Atm',),) # regular 3D; not verified
# settings for NetCDF-4 files
avgfolder = root_folder + dataset_name.lower()+'avg/'
avgfile = 'era5{0:s}_clim{1:s}.nc' # the filename needs to be extended: biascorrection, grid and period
tsfile = 'era5_{0:s}{1:s}{2:s}_monthly.nc' # extend with biascorrection, variable and grid type
daily_folder = root_folder + dataset_name.lower()+'_daily/'
netcdf_filename = 'era5_{:s}_daily.nc' # extend with variable name
netcdf_dtype = np.dtype('<f4') # little-endian 32-bit float
# BUG FIX: chunk sizes must be integers; true division yielded floats under
# Python 3 (e.g. 1800/32 = 56.25), which netCDF4 rejects
netcdf_settings = dict(chunksizes=(8,ERA5Land_size[0]//16,ERA5Land_size[1]//32))
## functions to load NetCDF datasets (using xarray)
def loadERA5_Daily(varname=None, varlist=None, dataset=None, subset=None, grid=None, resolution=None, shape=None, station=None,
                   resampling=None, varatts=None, varmap=None, lgeoref=True, geoargs=None, lfliplat=False, aggregation='daily',
                   mode='daily', chunks=True, multi_chunks=None, lxarray=True, lgeospatial=True, **kwargs):
    ''' function to load daily ERA5 data from NetCDF-4 files using xarray and add some projection information

        'dataset' and 'subset' are synonyms for the ERA5 sub-dataset (e.g. 'ERA5L'); if both
        are given, they must agree. Only the xarray/geospatial code path is implemented
        (lxarray=True and lgeospatial=True); anything else raises NotImplementedError.
        Returns the xarray.Dataset produced by loadXRDataset, with name/title attributes
        updated to reflect the sub-dataset.
    '''
    if not ( lxarray and lgeospatial ):
        raise NotImplementedError("Only loading via geospatial.xarray_tools is currently implemented.")
    # resolve the 'dataset'/'subset' synonyms into 'subset'
    if dataset and subset:
        if dataset != subset:
            raise ValueError((dataset,subset))
    elif dataset and not subset:
        subset = dataset
    if resolution is None:
        # pick a default resolution based on the grid name prefix
        if grid and grid[:3] in ('son','snw',): resolution = 'SON60'
        else: resolution = 'NA10' # default
    if varatts is None:
        if grid is None and station is None and shape is None: varatts = varatts_list[subset] # original files
    # NOTE(review): this uses 'dataset', which may still be None when only 'subset' was
    # passed by the caller — confirm whether 'subset' was intended here
    default_varlist = default_varlists.get(dataset, None)
    # delegate the actual file access to the generic xarray-based loader
    xds = loadXRDataset(varname=varname, varlist=varlist, dataset='ERA5', subset=subset, grid=grid, resolution=resolution, shape=shape,
                        station=station, default_varlist=default_varlist, resampling=resampling, varatts=varatts, varmap=varmap, mode=mode,
                        aggregation=aggregation, lgeoref=lgeoref, geoargs=geoargs, chunks=chunks, multi_chunks=multi_chunks, **kwargs)
    # flip latitude dimension (reverse coordinate order), if requested
    if lfliplat and 'latitude' in xds.coords:
        xds = xds.reindex(latitude=xds.latitude[::-1])
    # update name and title with sub-dataset
    # NOTE(review): the slice offset len(subset)-1 presumably strips a subset prefix from
    # the existing title — confirm against the titles produced by loadXRDataset
    xds.attrs['name'] = subset
    xds.attrs['title'] = dataset_attributes[subset].title + xds.attrs['title'][len(subset)-1:]
    return xds
## Dataset API
# (the first two lines are bare name references that merely document which
#  module-level names form part of the standard dataset API)
dataset_name # dataset name
root_folder # root folder of the dataset
orig_file_pattern = netcdf_filename # filename pattern: variable name (daily)
ts_file_pattern = tsfile # filename pattern: variable name and grid
clim_file_pattern = avgfile # filename pattern: grid and period
data_folder = avgfolder # folder for user data
grid_def = {'':ERA5Land_grid} # no special name, since there is only one...
LTM_grids = [] # grids that have long-term mean data
TS_grids = ['',] # grids that have time-series data
grid_res = {res:0.25 for res in TS_grids} # nominal resolution for each time-series grid
default_grid = ERA5Land_grid
# functions to access specific datasets
loadLongTermMean = None # climatology provided by publisher
loadDailyTimeSeries = loadERA5_Daily # daily time-series data
# monthly time-series data for batch processing
def loadTimeSeries(lxarray=False, **kwargs): raise NotImplementedError(lxarray=lxarray, **kwargs)
# placeholders for dataset-API entry points that are not available for ERA5
loadClimatology = None # pre-processed, standardized climatology
loadStationClimatology = None # climatologies without associated grid (e.g. stations)
loadStationTimeSeries = None # time-series without associated grid (e.g. stations)
loadShapeClimatology = None # climatologies without associated grid (e.g. provinces or basins)
loadShapeTimeSeries = None # time-series without associated grid (e.g. provinces or basins)
## abuse for testing
if __name__ == '__main__':
    # ad-hoc test/processing driver: select one or more 'modes' below and run the module
    import time, gc, os

    #print('xarray version: '+xr.__version__+'\n')
    xr.set_options(keep_attrs=True)

    # import dask
    # from dask.distributed import Client, LocalCluster
    # # force multiprocessing (4 cores)
    # cluster = LocalCluster(n_workers=2, memory_limit='1GB')
    # cluster = LocalCluster(n_workers=4, memory_limit='6GB')
    # cluster = LocalCluster(n_workers=1)
    # client = Client(cluster)

    # select test mode(s) to execute (uncomment as needed)
    modes = []
    # modes += ['load_Point_Climatology']
    # modes += ['load_Point_Timeseries']
    modes += ['derived_variables' ]
    # modes += ['load_Daily' ]
    # modes += ['monthly_mean' ]
    # modes += ['load_TimeSeries' ]
    # modes += ['monthly_normal' ]
    # modes += ['load_Climatology' ]

    # common settings
    grid = None; resampling = None
    dataset = 'ERA5L'
    # resolution = 'SON10'
    resolution = 'NA10'
    # resolution = 'AU10'

    # variable list
    # varlist = ['snow']
    varlist = ['snow','dswe','precip','pet_era5','liqwatflx']

    # period = (2010,2019)
    # period = (1997,2018)
    # period = (1980,2018)

    # loop over modes
    for mode in modes:

        if mode == 'load_Climatology':
            pass
            # lxarray = False
            # ds = loadERA5(varlist=varlist, period=period, grid=grid,
            #               lxarray=lxarray) # load regular GeoPy dataset
            # print(ds)
            # print('')
            # varname = list(ds.variables.keys())[0]
            # var = ds[varname]
            # print(var)
            #
            # if lxarray:
            #     print(('Size in Memory: {:6.1f} MB'.format(var.nbytes/1024./1024.)))

        elif mode == 'load_Point_Climatology':
            pass
            # # load point climatology
            # print('')
            # if pntset in ('shpavg','glbshp'): dataset = loadERA5_Shp(shape=pntset, period=(2009,2018))
            # elif pntset in ('oncat'): dataset = loadERA5_Shp(shape=pntset, grid=grid, period=(2011,2019))
            # else: raise NotImplementedError(pntset)
            # print(dataset)
            # print('')
            # print((dataset.time))
            # print((dataset.time.coord))

        elif mode == 'load_Point_Timeseries':
            pass
            # # load point climatology
            # print('')
            # if pntset in ('oncat'): dataset = loadERA5_ShpTS(shape=pntset, grid=grid, )
            # else: raise NotImplementedError(pntset)
            # print(dataset)
            # print('')
            # print((dataset.time))
            # print((dataset.time.coord))

        elif mode == 'monthly_normal':
            pass

        elif mode == 'load_TimeSeries':
            pass
            # lxarray = False
            # varname = varlist[0]
            # xds = loadERA5_TS(varlist=varlist,
            #                   grid=grid, lxarray=lxarray, geoargs=geoargs) # 32 time chunks may be possible
            # print(xds)
            # print('')
            # xv = xds[varname]
            # print(xv)
            # if lxarray:
            #     print(('Size in Memory: {:6.1f} MB'.format(xv.nbytes/1024./1024.)))

        elif mode == 'monthly_mean':
            pass

        elif mode == 'load_Daily':
            # simple smoke test: load daily data and print a slice
            varlist = ['snow','dswe']
            xds = loadERA5_Daily(varlist=varlist, resolution=resolution, dataset=None, subset='ERA5L', grid=grid,
                                 chunks=True, lgeoref=True)
            print(xds)
            # print('')
            xv = xds.data_vars['snow']
            # # xv = list(xds.data_vars.values())[0]
            xv = xv.loc['2011-06-01':'2012-06-01',:,:]
            # # xv = xv.loc['2011-01-01',:,:]
            print(xv)
            print(xv.mean())
            print(('Size in Memory: {:6.1f} MB'.format(xv.nbytes/1024./1024.)))

        elif mode == 'derived_variables':
            # compute derived variables (e.g. liqwatflx, dswe) and write/append NetCDF files
            start = time.time()

            lexec = True
            lappend_master = False
            ts_name = 'time_stamp'
            dataset = 'ERA5L'
            load_chunks = True
            # load variables
            # derived_varlist = ['dswe',]; load_list = ['snow']
            derived_varlist = ['liqwatflx',]; load_list = ['dswe', 'precip']
            varatts = varatts_list[dataset]
            xds = loadERA5_Daily(varlist=load_list, subset=dataset, resolution=resolution, grid=grid,
                                 chunks=load_chunks, lfliplat=False)
            # N.B.: need to avoid loading derived variables, because they may not have been extended yet (time length)
            print(xds)

            # optional slicing (time slicing completed below)
            start_date = None; end_date = None # auto-detect available data
            # start_date = '2011-01-01'; end_date = '2011-01-08'

            # slice and load time coordinate
            xds = xds.loc[{'time':slice(start_date,end_date),}]
            if ts_name in xds:
                tsvar = xds[ts_name].load()
            else:
                # no stored time stamps: generate readable stamps from the time coordinate
                tax = xds.coords['time']
                ts_data = [pd.to_datetime(dt).strftime('%Y-%m-%d_%H:%M:%S') for dt in tax.data]
                tsvar = xr.DataArray(data=ts_data, coords=(tax,), name='time_stamp', attrs=varatts['time_stamp'])

            # loop over variables
            for varname in derived_varlist:

                # target dataset
                lskip = False
                folder,filename = getFolderFileName(varname=varname, dataset='ERA5', subset=dataset, resolution=resolution, grid=grid,
                                                    resampling=resampling, mode='daily', lcreateFolder=True)
                nc_filepath = '{}/{}'.format(folder,filename)
                if lappend_master and osp.exists(nc_filepath):
                    # open existing NetCDF file in append mode
                    ncds = nc.Dataset(nc_filepath, mode='a')
                    ncvar3 = ncds[varname]
                    ncts = ncds[ts_name]
                    nctc = ncds['time'] # time coordinate
                    # update start date for after present data
                    start_date = pd.to_datetime(ncts[-1]) + pd.to_timedelta(1,unit='D')
                    if end_date is None: end_date = tsvar.data[-1]
                    end_date = pd.to_datetime(end_date)
                    if start_date > end_date:
                        print(("\nNothing to do - timeseries complete:\n {} > {}".format(start_date,end_date)))
                        ncds.close()
                        lskip = True
                    else:
                        lappend = True
                        # update slicing (should not do anything if sliced before)
                        print(("\n Appending data from {} to {}.\n".format(start_date.strftime("%Y-%m-%d"),end_date.strftime("%Y-%m-%d"))))
                        xds = xds.loc[{'time':slice(start_date,end_date),}]
                        tsvar = tsvar.loc[{'time':slice(start_date,end_date),}]
                else:
                    lappend = False

                if not lskip:

                    print('\n')
                    default_varatts = varatts[varname] # need to ensure netCDF compatibility

                    ## define actual computation
                    if varname == 'liqwatflx':
                        ref_var = xds['precip']; note = "masked/missing values have been replaced by zero"
                        xvar = ref_var.fillna(0) - xds['dswe'].fillna(0) # fill missing values with zero
                        # N.B.: missing values are NaN in xarray; we need to fill with 0, or masked/missing values
                        #      in snowmelt will mask/invalidate valid values in precip
                    elif varname == 'dswe':
                        ref_var = xds['snow']; note = "Rate of Daily SWE Changes"
                        assert ref_var.attrs['units'] == 'kg/m^2', ref_var.attrs['units']
                        #xvar = ref_var.differentiate('time', datetime_unit='s')
                        xvar = ref_var.diff('time', n=1) / 86400 # per second
                        # shift time axis
                        time_axis = xvar.coords['time'].data - np.timedelta64(1,'D')
                        xvar = xvar.assign_coords(time=time_axis).broadcast_like(ref_var)

                    # define/copy metadata
                    xvar.attrs = ref_var.attrs.copy()
                    xvar = xvar.rename(varname)
                    for att in ('name','units','long_name',): # don't copy scale factors etc...
                        if att in default_varatts: xvar.attrs[att] = default_varatts[att]
                    assert xvar.attrs['name'] == xvar.name, xvar.attrs
                    for att in list(xvar.attrs.keys()):
                        if att.startswith('old_') or att in ('original_name','standard_name'):
                            del xvar.attrs[att] # does not apply anymore
                    xvar.attrs['note'] = note
                    # set chunking for operation
                    chunks = ref_var.encoding['chunksizes'] if load_chunks is True else load_chunks.copy()
                    if chunks:
                        if isinstance(chunks,dict):
                            chunks = tuple(chunks[dim] for dim in xvar.dims)
                        xvar = xvar.chunk(chunks=chunks)
                        print('Chunks:',xvar.chunks)

                    # # visualize task graph
                    # viz_file = daily_folder+'dask_sum.svg'
                    # xvar3.data.visualize(filename=viz_file)
                    # print(viz_file)

                    ## now save data, according to destination/append mode
                    if lappend:
                        # append results to an existing file
                        print('\n')
                        # define chunking
                        offset = ncts.shape[0]; t_max = offset + tsvar.shape[0]
                        tc,yc,xc = xvar.chunks # starting points of all blocks...
                        # NOTE(review): np.int was removed in NumPy 1.24 — replace with
                        # the builtin int (or np.int64) when upgrading NumPy
                        tc = np.concatenate([[0],np.cumsum(tc[:-1], dtype=np.int)])
                        yc = np.concatenate([[0],np.cumsum(yc[:-1], dtype=np.int)])
                        xc = np.concatenate([[0],np.cumsum(xc[:-1], dtype=np.int)])
                        # xvar3 = xvar3.chunk(chunks=(tc,xvar3.shape[1],xvar3.shape[2]))
                        # function to save each block individually (not sure if this works in parallel)
                        dummy = np.zeros((1,1,1), dtype=np.int8)
                        def save_chunk(block, block_id=None):
                            # write one dask block into the matching slab of the NC variable
                            ts = offset + tc[block_id[0]]; te = ts + block.shape[0]
                            ys = yc[block_id[1]]; ye = ys + block.shape[1]
                            xs = xc[block_id[2]]; xe = xs + block.shape[2]
                            #print(((ts,te),(ys,ye),(xs,xe)))
                            #print(block.shape)
                            ncvar3[ts:te,ys:ye,xs:xe] = block
                            return dummy
                        # append to NC variable
                        xvar.data.map_blocks(save_chunk, chunks=dummy.shape, dtype=dummy.dtype).compute() # drop_axis=(0,1,2),
                        # update time stamps and time axis
                        nctc[offset:t_max] = np.arange(offset,t_max)
                        for i in range(tsvar.shape[0]): ncts[i+offset] = tsvar.data[i]
                        ncds.sync()
                        print('\n')
                        print(ncds)
                        ncds.close()
                        del xvar, ncds
                    else:
                        # save results in new file
                        nds = xr.Dataset({ts_name:tsvar, varname:xvar,}, attrs=xds.attrs.copy())
                        nds.coords['time'].attrs.pop('units',None) # needs to be free for use by xarray
                        print('\n')
                        print(nds)
                        print(nc_filepath)
                        # write to NetCDF
                        tmp_filepath = nc_filepath + '.tmp' # use temporary file during creation
                        var_enc = dict(chunksizes=chunks, zlib=True, complevel=1, _FillValue=np.NaN, dtype=netcdf_dtype)
                        task = nds.to_netcdf(tmp_filepath, mode='w', format='NETCDF4', unlimited_dims=['time'], engine='netcdf4',
                                             encoding={varname:var_enc,}, compute=False)
                        if lexec:
                            task.compute()
                        else:
                            print(var_enc)
                            print(task)
                            task.visualize(filename=folder+'netcdf.svg') # This file is never produced
                        del nds, xvar
                        # replace original file
                        if os.path.exists(nc_filepath): os.remove(nc_filepath)
                        os.rename(tmp_filepath, nc_filepath)

                    # clean up
                    gc.collect()

            # print timing
            end = time.time()
            print(('\n Required time: {:.0f} seconds\n'.format(end-start)))
|
aerler/GeoPy
|
src/datasets/ERA5.py
|
Python
|
gpl-3.0
| 20,429
|
[
"NetCDF"
] |
1b3ce7cadde96b79178bdefa5baa83bbfc6edb5a90cf808de20bd0c814509cb0
|
# -*- coding: utf-8 -*-
# YAFF is yet another force-field code.
# Copyright (C) 2011 Toon Verstraelen <Toon.Verstraelen@UGent.be>,
# Louis Vanduyfhuys <Louis.Vanduyfhuys@UGent.be>, Center for Molecular Modeling
# (CMM), Ghent University, Ghent, Belgium; all rights reserved unless otherwise
# stated.
#
# This file is part of YAFF.
#
# YAFF is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# YAFF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from __future__ import division
import numpy as np
import h5py as h5
import pkg_resources
from yaff import *
from yaff.conversion.gaussian import _scan_g09_forces, _scan_g09_time, \
_scan_g09_pos_vel, _scan_to_line
def test_scan_forces():
    """Check that atomic numbers and forces are read correctly from a G09 MD log."""
    log_path = pkg_resources.resource_filename(__name__, '../../data/test/gaussian_sioh4_md.log')
    with open(log_path) as log_file:
        numbers, frc = _scan_g09_forces(log_file)
    # spot-check the atomic-number sequence and its length
    assert numbers[0] == 14
    assert numbers[1] == 8
    assert numbers[-1] == 1
    assert len(numbers) == 9
    # spot-check individual force components and the overall shape
    assert frc[0, 0] == 0.000014646
    assert frc[1, -1] == 0.005043566
    assert frc[-1, 1] == 0.002557226
    assert frc.shape == (9, 3)
def test_scan_time():
    """Check that consecutive MD summary blocks are scanned from a G09 log."""
    log_path = pkg_resources.resource_filename(__name__, '../../data/test/gaussian_sioh4_md.log')
    with open(log_path) as log_file:
        # first summary block
        time, step, ekin, epot, etot = _scan_g09_time(log_file)
        assert time == 0.0
        assert step == 2
        assert ekin == 0.0306188
        assert epot == -592.9048374
        assert etot == -592.8742186
        # second summary block, one MD step later
        time, step, ekin, epot, etot = _scan_g09_time(log_file)
        assert time == 1.125278*femtosecond
        assert step == 3
        assert ekin == 0.0244215
        assert epot == -592.8986401
        assert etot == -592.8742186
def test_scan_pos_vel():
    """Check that positions and velocities are scanned correctly from a G09 log."""
    vel_unit = np.sqrt(amu)/second
    log_path = pkg_resources.resource_filename(__name__, '../../data/test/gaussian_sioh4_md.log')
    with open(log_path) as log_file:
        # skip first one, has different format
        _scan_to_line(log_file, " Cartesian coordinates: (bohr)")
        pos, vel = _scan_g09_pos_vel(log_file)
    # positions
    assert pos.shape == (9, 3)
    assert pos[0, 0] == -1.287811626725E-02
    assert pos[-1, -1] == 2.710579145562E+00
    # velocities (converted with the same unit factor the conversion code uses)
    assert vel.shape == (9, 3)
    assert vel[1, 0] == 5.750552889614E+13*vel_unit
    assert vel[-2, 2] == 1.741570818851E+13*vel_unit
def test_to_hdf():
    """Convert a G09 MD log to HDF5 twice and verify the trajectory is appended.

    Uses an in-memory HDF5 file (core driver, no backing store). The second
    conversion pass must append two more rows to every trajectory dataset.
    """
    vel_unit = np.sqrt(amu)/second
    fn_xyz = pkg_resources.resource_filename(__name__, '../../data/test/gaussian_sioh4_md.xyz')
    fn_log = pkg_resources.resource_filename(__name__, '../../data/test/gaussian_sioh4_md.log')
    with h5.File('yaff.conversion.test.test_gaussian.test_to_hdf5.h5', driver='core', backing_store=False) as f:
        system = System.from_file(fn_xyz)
        system.to_hdf5(f)
        # Actual trajectory conversion, twice; pass i starts at row offset 2*i
        for i in range(2):
            offset = 2*i
            g09log_to_hdf5(f, fn_log)
            assert 'trajectory' in f
            assert get_last_trajectory_row(f['trajectory']) == 2+offset
            assert 'pos' in f['trajectory']
            assert f['trajectory/pos'].shape == (2+offset, 9, 3)
            assert f['trajectory/pos'][offset,0,0] == -1.287811626725E-02
            assert f['trajectory/pos'][-1,-1,-1] == 2.710239686065E+00
            assert 'vel' in f['trajectory']
            assert f['trajectory/vel'].shape == (2+offset, 9, 3)
            assert f['trajectory/vel'][offset,0,0] == -6.493457131863E+13*vel_unit
            assert f['trajectory/vel'][-1,-1,-1] == 4.186482857132E+12*vel_unit
            assert 'frc' in f['trajectory']
            assert f['trajectory/frc'].shape == (2+offset, 9, 3)
            assert f['trajectory/frc'][offset,0,0] == 0.002725302
            assert f['trajectory/frc'][-1,-1,-1] == 0.008263482
            assert 'time' in f['trajectory']
            assert f['trajectory/time'].shape == (2+offset, 1)
            assert f['trajectory/time'][offset] == 0.0
            assert f['trajectory/time'][-1] == 1.125278*femtosecond
            assert 'step' in f['trajectory']
            assert f['trajectory/step'].shape == (2+offset, 1)
            assert f['trajectory/step'][offset] == 2
            assert f['trajectory/step'][-1] == 3
            assert 'epot' in f['trajectory']
            assert f['trajectory/epot'].shape == (2+offset, 1)
            assert f['trajectory/epot'][offset] == -592.9048374
            assert f['trajectory/epot'][-1] == -592.8986401
            assert 'ekin' in f['trajectory']
            assert f['trajectory/ekin'].shape == (2+offset, 1)
            assert f['trajectory/ekin'][offset] == 0.0306188
            assert f['trajectory/ekin'][-1] == 0.0244215
            assert 'etot' in f['trajectory']
            assert f['trajectory/etot'].shape == (2+offset, 1)
            assert f['trajectory/etot'][offset] == -592.8742186
            assert f['trajectory/etot'][-1] == -592.8742186
        # N.B.: the original also called f.close() here, which is redundant —
        # the 'with' statement already closes the file on exit
|
molmod/yaff
|
yaff/conversion/test/test_gaussian.py
|
Python
|
gpl-3.0
| 5,408
|
[
"Gaussian"
] |
d31fe1992090ab5461b3d69a7a45d958b109984d9f6f434c34a1267f3777e22e
|
from django import template
import markdown
from let_me_app import models
from django.utils.translation import gettext
from django.utils import timezone
register = template.Library()
@register.filter
def markdownify(text):
    """Template filter: render *text* (markdown) to HTML, escaping raw HTML input."""
    # safe_mode governs how the function handles raw HTML
    # NOTE(review): safe_mode was deprecated in Python-Markdown 2.5 and removed in 3.x —
    # confirm the pinned markdown version before upgrading, since dropping it silently
    # would stop escaping user-supplied HTML
    return markdown.markdown(text, safe_mode='escape')
@register.filter
def is_visit(obj):
    """Template filter: True if *obj* is a models.Visit instance."""
    return isinstance(obj, models.Visit)
@register.filter
def is_application(obj):
    """Template filter: True if *obj* is a models.Application instance."""
    return isinstance(obj, models.Application)
@register.filter
def is_proposal(obj):
    """Template filter: True if *obj* is a models.Proposal instance."""
    return isinstance(obj, models.Proposal)
@register.filter
def model_name(obj):
    """Template filter: the translated class name of *obj* (via gettext)."""
    return gettext(obj.__class__.__name__)
@register.filter
def is_outdated(obj):
    """Template filter: True if *obj* (Proposal/Application/Visit) can no longer
    be acted upon — either its event is not pending / has already started, or
    the object itself has left its active state. Unknown types count as outdated."""
    # anything attached to a non-pending or already-started event is outdated
    if (obj.event.status != models.EventStatuses.PENDING
            or obj.event.start_at < timezone.now()):
        return True
    # otherwise, the object is current only while it is in its 'active' state;
    # checks run in the same order as the original isinstance chain
    active_state_by_type = (
        (models.Proposal, models.ProposalStatuses.ACTIVE),
        (models.Application, models.ApplicationStatuses.ACTIVE),
        (models.Visit, models.VisitStatuses.PENDING),
    )
    for model_cls, active_status in active_state_by_type:
        if isinstance(obj, model_cls):
            return obj.status != active_status
    return True
|
oleg-chubin/let_me_play
|
let_me_app/templatetags/let_me_app.py
|
Python
|
apache-2.0
| 1,165
|
[
"VisIt"
] |
22dfb174cdb861cbc5d4ddf32c15364e1d92b7288ecc41857417e17781080f75
|
#!/usr/bin/env python
# =============================================================================================
# MODULE DOCSTRING
# =============================================================================================
"""
Test YAML functions.
"""
# =============================================================================================
# GLOBAL IMPORTS
# =============================================================================================
import time
import shutil
import textwrap
import unittest
import tempfile
import itertools
from nose.tools import assert_raises
from nose.plugins.attrib import attr
from mdtraj.formats.mol2 import mol2_to_dataframes
from yank.yamlbuild import *
# ==============================================================================
# Subroutines for testing
# ==============================================================================
standard_protocol = """
absolute-binding:
complex:
alchemical_path:
lambda_electrostatics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
lambda_sterics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
solvent:
alchemical_path:
lambda_electrostatics: [1.0, 0.8, 0.6, 0.3, 0.0]
lambda_sterics: [1.0, 0.8, 0.6, 0.3, 0.0]"""
def indent(str):
    """Put 4 extra spaces in front of every line after the first.

    N.B.: the first line is left unchanged, since the result is typically
    appended after existing text. The parameter name shadows the built-in
    ``str``; it is kept for interface compatibility with existing callers.
    """
    # join with newline + 4 spaces, matching the documented behavior
    return '\n    '.join(str.split('\n'))
def examples_paths():
    """Return the absolute path to the Yank examples relevant to tests.

    Returns a dict mapping short labels to absolute file paths; for pre-built
    systems the value is a two-element list [topology, coordinates].
    """
    data_dir = utils.get_data_filename(os.path.join('tests', 'data'))
    p_xylene_dir = os.path.join(data_dir, 'p-xylene-implicit')
    p_xylene_gro_dir = os.path.join(data_dir, 'p-xylene-gromacs-example')
    ben_tol_dir = os.path.join(data_dir, 'benzene-toluene-explicit')
    abl_imatinib_dir = os.path.join(data_dir, 'abl-imatinib-explicit')
    tol_dir = os.path.join(data_dir, 'toluene-explicit')
    paths = dict()
    paths['lysozyme'] = os.path.join(p_xylene_dir, '181L-pdbfixer.pdb')
    paths['p-xylene'] = os.path.join(p_xylene_dir, 'p-xylene.mol2')
    paths['benzene'] = os.path.join(ben_tol_dir, 'benzene.tripos.mol2')
    paths['toluene'] = os.path.join(ben_tol_dir, 'toluene.tripos.mol2')
    paths['abl'] = os.path.join(abl_imatinib_dir, '2HYY-pdbfixer.pdb')
    paths['imatinib'] = os.path.join(abl_imatinib_dir, 'STI02.mol2')
    paths['bentol-complex'] = [os.path.join(ben_tol_dir, 'complex.prmtop'),
                               os.path.join(ben_tol_dir, 'complex.inpcrd')]
    paths['bentol-solvent'] = [os.path.join(ben_tol_dir, 'solvent.prmtop'),
                               os.path.join(ben_tol_dir, 'solvent.inpcrd')]
    paths['pxylene-complex'] = [os.path.join(p_xylene_gro_dir, 'complex.top'),
                                os.path.join(p_xylene_gro_dir, 'complex.gro')]
    paths['pxylene-solvent'] = [os.path.join(p_xylene_gro_dir, 'solvent.top'),
                                os.path.join(p_xylene_gro_dir, 'solvent.gro')]
    paths['pxylene-gro-include'] = os.path.join(p_xylene_gro_dir, 'top')
    paths['toluene-solvent'] = [os.path.join(tol_dir, 'solvent.pdb'),
                                os.path.join(tol_dir, 'solvent.xml')]
    paths['toluene-vacuum'] = [os.path.join(tol_dir, 'vacuum.pdb'),
                               os.path.join(tol_dir, 'vacuum.xml')]
    return paths
def yank_load(script):
    """Shortcut to load a string YAML script with YankLoader.

    The script is dedented first, so it can be written as an indented
    triple-quoted literal inside test functions.
    """
    # yaml.load with a custom Loader is deliberate (YankLoader adds YANK-specific
    # tags); only use this helper on trusted, test-local input
    return yaml.load(textwrap.dedent(script), Loader=YankLoader)
def get_template_script(output_dir='.'):
    """Return a YAML template script as a dict.

    The template references the example data files from examples_paths() and
    covers molecules, solvents, systems, protocols and experiments sections.
    """
    paths = examples_paths()
    template_script = """
    ---
    options:
        output_dir: {output_dir}
        number_of_iterations: 1
        temperature: 300*kelvin
        pressure: 1*atmosphere
    molecules:
        benzene:
            filepath: {benzene_path}
            antechamber: {{charge_method: bcc}}
        benzene-epik0:
            filepath: {benzene_path}
            epik:
                select: 0
            antechamber: {{charge_method: bcc}}
        benzene-epikcustom:
            filepath: {benzene_path}
            epik:
                select: 0
                ph: 7.0
                tautomerize: yes
            antechamber: {{charge_method: bcc}}
        p-xylene:
            filepath: {pxylene_path}
            antechamber: {{charge_method: bcc}}
        p-xylene-name:
            name: p-xylene
            openeye: {{quacpac: am1-bcc}}
            antechamber: {{charge_method: null}}
        toluene:
            filepath: {toluene_path}
            antechamber: {{charge_method: bcc}}
        toluene-smiles:
            smiles: Cc1ccccc1
            antechamber: {{charge_method: bcc}}
        toluene-name:
            name: toluene
            antechamber: {{charge_method: bcc}}
        Abl:
            filepath: {abl_path}
        T4Lysozyme:
            filepath: {lysozyme_path}
    solvents:
        vacuum:
            nonbonded_method: NoCutoff
        GBSA-OBC2:
            nonbonded_method: NoCutoff
            implicit_solvent: OBC2
        PME:
            nonbonded_method: PME
            nonbonded_cutoff: 1*nanometer
            clearance: 10*angstroms
            positive_ion: Na+
            negative_ion: Cl-
    systems:
        explicit-system:
            receptor: benzene
            ligand: toluene
            solvent: PME
            leap:
                parameters: [oldff/leaprc.ff14SB, leaprc.gaff, frcmod.ionsjc_tip3p]
        implicit-system:
            receptor: T4Lysozyme
            ligand: p-xylene
            solvent: GBSA-OBC2
            leap:
                parameters: [oldff/leaprc.ff14SB, leaprc.gaff]
    protocols:
        absolute-binding:
            complex:
                alchemical_path:
                    lambda_electrostatics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
                    lambda_sterics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
            solvent:
                alchemical_path:
                    lambda_electrostatics: [1.0, 0.8, 0.6, 0.3, 0.0]
                    lambda_sterics: [1.0, 0.8, 0.6, 0.3, 0.0]
    experiments:
        system: explicit-system
        protocol: absolute-binding
    """.format(output_dir=output_dir, benzene_path=paths['benzene'],
               pxylene_path=paths['p-xylene'], toluene_path=paths['toluene'],
               abl_path=paths['abl'], lysozyme_path=paths['lysozyme'])
    return yank_load(template_script)
# ==============================================================================
# YamlBuild utility functions
# ==============================================================================
def test_compute_min_dist():
    """Test computation of minimum distance between two molecules"""
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; it was a plain
    # alias for the builtin float, which is used here instead
    mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mol2_pos = np.array([[3, 3, 3], [3, 4, 5]], float)
    mol3_pos = np.array([[2, 2, 2], [2, 4, 5]], float)
    # closest pair overall is (1,1,1)-(2,2,2), at distance sqrt(3)
    assert compute_min_dist(mol1_pos, mol2_pos, mol3_pos) == np.sqrt(3)
def test_compute_dist_bound():
    """Test compute_dist_bound() function."""
    reference = np.array([[-1, -1, -1], [1, 1, 1]])
    near_mol = np.array([[2, 2, 2], [2, 4, 5]])  # determine min dist
    far_mol = np.array([[3, 3, 3], [3, 4, 5]])   # determine max dist
    min_dist, max_dist = compute_dist_bound(reference, near_mol, far_mol)
    # bounds must coincide with the explicitly computed pair distances
    assert min_dist == np.linalg.norm(reference[1] - near_mol[0])
    assert max_dist == np.linalg.norm(reference[1] - far_mol[1])
def test_remove_overlap():
    """Test function remove_overlap()."""
    # np.float was removed in NumPy 1.24; use the equivalent builtin float
    mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mol2_pos = np.array([[1, 1, 1], [3, 4, 5]], float)
    mol3_pos = np.array([[2, 2, 2], [2, 4, 5]], float)
    # mol1 initially overlaps mol2 at (1,1,1); remove_overlap must separate them
    assert compute_min_dist(mol1_pos, mol2_pos, mol3_pos) < 0.1
    mol1_pos = remove_overlap(mol1_pos, mol2_pos, mol3_pos, min_distance=0.1, sigma=2.0)
    assert compute_min_dist(mol1_pos, mol2_pos, mol3_pos) >= 0.1
def test_pull_close():
    """Test function pull_close()."""
    # np.float was removed in NumPy 1.24; use the equivalent builtin float
    mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mol2_pos = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mol3_pos = np.array([[10, 10, 10], [13, 14, 15]], float)
    translation2 = pull_close(mol1_pos, mol2_pos, 1.5, 5)
    translation3 = pull_close(mol1_pos, mol3_pos, 1.5, 5)
    assert isinstance(translation2, np.ndarray)
    # after translation, both molecules must lie within the requested distance band
    assert 1.5 <= compute_min_dist(mol1_pos, mol2_pos + translation2) <= 5
    assert 1.5 <= compute_min_dist(mol1_pos, mol3_pos + translation3) <= 5
def test_pack_transformation():
    """Test function pack_transformation()."""
    BOX_SIZE = 5
    CLASH_DIST = 1
    # np.float was removed in NumPy 1.24; use the equivalent builtin float
    mol1 = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mols = [np.copy(mol1),          # distance = 0
            mol1 + 2 * BOX_SIZE]    # distance > box
    mols_affine = [np.append(mol, np.ones((2, 1)), axis=1) for mol in mols]
    transformations = [pack_transformation(mol1, mol2, CLASH_DIST, BOX_SIZE) for mol2 in mols]
    for mol, transf in zip(mols_affine, transformations):
        assert isinstance(transf, np.ndarray)
        mol2 = mol.dot(transf.T)[:, :3]  # transform and "de-affine"
        min_dist, max_dist = compute_dist_bound(mol1, mol2)
        assert CLASH_DIST <= min_dist and max_dist <= BOX_SIZE
# ==============================================================================
# YAML parsing and validation
# ==============================================================================
def test_yaml_parsing():
    """Check that YAML file is parsed correctly."""

    # Parser handles no options
    yaml_content = """
    ---
    test: 2
    """
    yaml_builder = YamlBuilder(textwrap.dedent(yaml_content))
    assert len(yaml_builder.options) == len(yaml_builder.DEFAULT_OPTIONS)
    assert len(yaml_builder.yank_options) == 0

    # Correct parsing: exercise every supported option type at least once
    yaml_content = """
    ---
    metadata:
        title: Test YANK YAML YAY!

    options:
        verbose: true
        resume_setup: true
        resume_simulation: true
        output_dir: /path/to/output/
        setup_dir: /path/to/output/setup/
        experiments_dir: /path/to/output/experiments/
        platform: CPU
        precision: single
        temperature: 300*kelvin
        pressure: null
        constraints: AllBonds
        hydrogen_mass: 2*amus
        randomize_ligand: yes
        randomize_ligand_sigma_multiplier: 2.0
        randomize_ligand_close_cutoff: 1.5 * angstrom
        mc_displacement_sigma: 10.0 * angstroms
        anisotropic_dispersion_correction: no
        collision_rate: 5.0 / picosecond
        constraint_tolerance: 1.0e-6
        timestep: 2.0 * femtosecond
        nsteps_per_iteration: 2500
        number_of_iterations: 1000.999
        equilibration_timestep: 1.0 * femtosecond
        number_of_equilibration_iterations: 100
        minimize: False
        minimize_tolerance: 1.0 * kilojoules_per_mole / nanometers
        minimize_max_iterations: 0
        replica_mixing_scheme: swap-all
        online_analysis: no
        online_analysis_min_iterations: 20
        show_energies: True
        show_mixing_statistics: yes
        annihilate_sterics: no
        annihilate_electrostatics: true
    """

    yaml_builder = YamlBuilder(textwrap.dedent(yaml_content))
    assert len(yaml_builder.options) == 35
    assert len(yaml_builder.yank_options) == 23

    # Check correct types (YAML strings converted to quantities, enums, ints, ...)
    assert yaml_builder.options['pressure'] is None
    assert yaml_builder.options['constraints'] == openmm.app.AllBonds
    assert yaml_builder.yank_options['replica_mixing_scheme'] == 'swap-all'
    assert yaml_builder.yank_options['timestep'] == 2.0 * unit.femtoseconds
    assert yaml_builder.yank_options['constraint_tolerance'] == 1.0e-6
    assert yaml_builder.yank_options['nsteps_per_iteration'] == 2500
    assert type(yaml_builder.yank_options['nsteps_per_iteration']) is int
    # 1000.999 above must be truncated/coerced to an integer iteration count
    assert yaml_builder.yank_options['number_of_iterations'] == 1000
    assert type(yaml_builder.yank_options['number_of_iterations']) is int
    assert yaml_builder.yank_options['minimize'] is False
    assert yaml_builder.yank_options['show_mixing_statistics'] is True
def test_validation_wrong_options():
    """YAML validation raises exception with wrong options."""
    bad_options = [
        {'unknown_options': 3},
        {'minimize': 100},
    ]
    # generator test: one assert_raises check per invalid option dict
    for bad_option in bad_options:
        yield assert_raises, YamlParseError, YamlBuilder._validate_options, bad_option
def test_validation_correct_molecules():
    """Correct molecules YAML validation."""
    paths = examples_paths()
    # every entry below is a valid molecule specification and must NOT raise
    molecules = [
        {'name': 'toluene', 'leap': {'parameters': 'leaprc.gaff'}},
        {'name': 'toluene', 'leap': {'parameters': ['leaprc.gaff', 'toluene.frcmod']}},
        {'name': 'p-xylene', 'antechamber': {'charge_method': 'bcc'}},
        {'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'},
         'antechamber': {'charge_method': None}},
        {'name': 'p-xylene', 'antechamber': {'charge_method': 'bcc'},
         'epik': {'ph': 7.6, 'ph_tolerance': 0.7, 'tautomerize': False, 'select': 0}},
        {'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'},
         'antechamber': {'charge_method': None}, 'epik': {'select': 1}},

        {'filepath': paths['abl']},
        {'filepath': paths['abl'], 'leap': {'parameters': 'leaprc.ff99SBildn'}},
        {'filepath': paths['abl'], 'leap': {'parameters': 'leaprc.ff99SBildn'}, 'select': 1},
        {'filepath': paths['abl'], 'select': 'all'},
        {'filepath': paths['toluene'], 'leap': {'parameters': 'leaprc.gaff'}},
        {'filepath': paths['benzene'], 'epik': {'select': 1, 'tautomerize': False}}
    ]
    for molecule in molecules:
        yield YamlBuilder._validate_molecules, {'mol': molecule}
def test_validation_wrong_molecules():
    """YAML validation raises exception with wrong molecules."""
    paths = examples_paths()
    paths['wrongformat'] = utils.get_data_filename(os.path.join('tests', 'data', 'README.md'))
    # every entry below is an INVALID molecule specification and must raise
    molecules = [
        {'antechamber': {'charge_method': 'bcc'}},
        {'filepath': paths['wrongformat']},
        {'name': 'p-xylene', 'antechamber': {'charge_method': 'bcc'}, 'unknown': 4},
        {'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'}},
        {'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'invalid'},
         'antechamber': {'charge_method': None}},
        {'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'},
         'antechamber': {'charge_method': 'bcc'}},
        {'filepath': 'nonexistentfile.pdb', 'leap': {'parameters': 'leaprc.ff14SB'}},
        {'filepath': paths['toluene'], 'smiles': 'Cc1ccccc1'},
        {'filepath': paths['toluene'], 'strip_protons': True},
        {'filepath': paths['abl'], 'leap': {'parameters': 'oldff/leaprc.ff14SB'}, 'epik': {'select': 0}},
        {'name': 'toluene', 'epik': 0},
        {'name': 'toluene', 'epik': {'tautomerize': 6}},
        {'name': 'toluene', 'epik': {'extract_range': 1}},
        {'name': 'toluene', 'smiles': 'Cc1ccccc1'},
        {'name': 3},
        {'smiles': 'Cc1ccccc1', 'select': 1},
        {'name': 'Cc1ccccc1', 'select': 1},
        {'filepath': paths['abl'], 'leap': {'parameters': 'oldff/leaprc.ff14SB'}, 'select': 'notanoption'},
    ]
    for molecule in molecules:
        yield assert_raises, YamlParseError, YamlBuilder._validate_molecules, {'mol': molecule}
def test_validation_correct_solvents():
    """Correct solvents YAML validation."""
    # every entry below is a valid solvent specification and must NOT raise
    solvents = [
        {'nonbonded_method': 'NoCutoff', 'nonbonded_cutoff': '3*nanometers'},
        {'nonbonded_method': 'PME', 'clearance': '3*angstroms'},
        {'nonbonded_method': 'PME'},
        {'nonbonded_method': 'NoCutoff', 'implicit_solvent': 'OBC2'},
        {'nonbonded_method': 'CutoffPeriodic', 'nonbonded_cutoff': '9*angstroms',
         'clearance': '9*angstroms', 'positive_ion': 'Na+', 'negative_ion': 'Cl-'},
        {'implicit_solvent': 'OBC2', 'implicit_solvent_salt_conc': '1.0*(nano*mole)/liter'},
        {'nonbonded_method': 'PME', 'clearance': '3*angstroms', 'ewald_error_tolerance': 0.001},
    ]
    for solvent in solvents:
        yield YamlBuilder._validate_solvents, {'solv': solvent}
def test_validation_wrong_solvents():
    """YAML validation raises exception with wrong solvents."""
    # Every description below should be rejected by the validator.  The
    # first entry is deliberately a *set* containing one string rather than
    # a mapping.
    invalid_descriptions = (
        {'nonbonded_cutoff: 3*nanometers'},
        {'nonbonded_method': 'PME', 'clearance': '3*angstroms', 'implicit_solvent': 'OBC2'},
        {'nonbonded_method': 'NoCutoff', 'blabla': '3*nanometers'},
        {'nonbonded_method': 'NoCutoff', 'implicit_solvent': 'OBX2'},
        {'implicit_solvent': 'OBC2', 'implicit_solvent_salt_conc': '1.0*angstrom'},
    )
    for description in invalid_descriptions:
        yield assert_raises, YamlParseError, YamlBuilder._validate_solvents, {'solv': description}
def test_validation_correct_systems():
    """Correct systems YAML validation."""
    # Minimal script defining the molecules and solvents that the system
    # descriptions below reference by name.
    data_paths = examples_paths()
    yaml_builder = YamlBuilder()
    basic_script = """
    ---
    molecules:
        rec: {{filepath: {}, leap: {{parameters: leaprc.ff14SB}}}}
        lig: {{name: lig, leap: {{parameters: leaprc.gaff}}}}
    solvents:
        solv: {{nonbonded_method: NoCutoff}}
        solv2: {{nonbonded_method: NoCutoff, implicit_solvent: OBC2}}
        solv3: {{nonbonded_method: PME, clearance: 10*angstroms}}
        solv4: {{nonbonded_method: PME}}
    """.format(data_paths['lysozyme'])
    basic_script = yaml.load(textwrap.dedent(basic_script))
    # Valid system descriptions: receptor+ligand setups, pre-built
    # amber/gromacs phase paths, and solvation (solute) setups.
    systems = [
        {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv'},
        {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv', 'pack': True},
        {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv3',
         'leap': {'parameters': ['leaprc.gaff', 'leaprc.ff14SB']}},
        {'phase1_path': data_paths['bentol-complex'],
         'phase2_path': data_paths['bentol-solvent'],
         'ligand_dsl': 'resname BEN', 'solvent': 'solv'},
        {'phase1_path': data_paths['bentol-complex'],
         'phase2_path': data_paths['bentol-solvent'],
         'ligand_dsl': 'resname BEN', 'solvent': 'solv4'},
        {'phase1_path': data_paths['bentol-complex'],
         'phase2_path': data_paths['bentol-solvent'],
         'ligand_dsl': 'resname BEN', 'solvent1': 'solv3',
         'solvent2': 'solv2'},
        {'phase1_path': data_paths['pxylene-complex'],
         'phase2_path': data_paths['pxylene-solvent'],
         'ligand_dsl': 'resname p-xylene', 'solvent': 'solv',
         'gromacs_include_dir': data_paths['pxylene-gro-include']},
        {'phase1_path': data_paths['pxylene-complex'],
         'phase2_path': data_paths['pxylene-solvent'],
         'ligand_dsl': 'resname p-xylene', 'solvent': 'solv'},
        {'phase1_path': data_paths['toluene-solvent'],
         'phase2_path': data_paths['toluene-vacuum'],
         'ligand_dsl': 'resname TOL'},
        {'phase1_path': data_paths['toluene-solvent'],
         'phase2_path': data_paths['toluene-vacuum'],
         'ligand_dsl': 'resname TOL', 'solvent_dsl': 'not resname TOL'},
        {'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv'},
        {'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv',
         'leap': {'parameters': 'leaprc.gaff'}}
    ]
    # Full parse (not just _validate_systems) so cross-references to
    # molecules/solvents are resolved as in a real run.
    for system in systems:
        modified_script = basic_script.copy()
        modified_script['systems'] = {'sys': system}
        yield yaml_builder.parse, modified_script
def test_validation_wrong_systems():
    """YAML validation raises exception with wrong systems specification."""
    # NOTE: docstring fixed — it previously said "wrong experiments", a
    # copy-paste from test_validation_wrong_experiments.
    # Minimal script defining the molecules and solvents that the system
    # descriptions below reference by name.
    data_paths = examples_paths()
    yaml_builder = YamlBuilder()
    basic_script = """
    ---
    molecules:
        rec: {{filepath: {}, leap: {{parameters: oldff/leaprc.ff14SB}}}}
        lig: {{name: lig, leap: {{parameters: leaprc.gaff}}}}
    solvents:
        solv: {{nonbonded_method: NoCutoff}}
        solv2: {{nonbonded_method: NoCutoff, implicit_solvent: OBC2}}
        solv3: {{nonbonded_method: PME, clearance: 10*angstroms}}
        solv4: {{nonbonded_method: PME}}
    """.format(data_paths['lysozyme'])
    basic_script = yaml.load(textwrap.dedent(basic_script))
    # Each description is broken in one way: missing/unknown/ill-typed
    # keywords, nonexistent files, or mutually exclusive options.
    systems = [
        {'receptor': 'rec', 'ligand': 'lig'},
        {'receptor': 'rec', 'ligand': 1, 'solvent': 'solv'},
        {'receptor': 'rec', 'ligand': 'lig', 'solvent': ['solv', 'solv']},
        {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'unknown'},
        {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv4',
         'leap': {'parameters': ['leaprc.gaff', 'leaprc.ff14SB']}},
        {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv3',
         'parameters': 'leaprc.ff14SB'},
        {'phase1_path': data_paths['bentol-complex'][0],
         'phase2_path': data_paths['bentol-solvent'],
         'ligand_dsl': 'resname BEN', 'solvent': 'solv'},
        {'phase1_path': ['nonexistingpath.prmtop', 'nonexistingpath.inpcrd'],
         'phase2_path': data_paths['bentol-solvent'],
         'ligand_dsl': 'resname BEN', 'solvent': 'solv'},
        {'phase1_path': data_paths['bentol-complex'],
         'phase2_path': data_paths['bentol-solvent'],
         'ligand_dsl': 3.4, 'solvent': 'solv'},
        {'phase1_path': data_paths['bentol-complex'],
         'phase2_path': data_paths['bentol-solvent'],
         'ligand_dsl': 'resname BEN', 'solvent1': 'unknown',
         'solvent2': 'solv2'},
        {'phase1_path': data_paths['bentol-complex'],
         'phase2_path': data_paths['pxylene-solvent'],
         'ligand_dsl': 'resname p-xylene', 'solvent': 'solv',
         'gromacs_include_dir': data_paths['pxylene-gro-include']},
        {'phase1_path': data_paths['toluene-solvent'],
         'phase2_path': data_paths['toluene-vacuum'],
         'ligand_dsl': 'resname TOL', 'solvent': 'cantbespecified'},
        {'receptor': 'rec', 'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv'},
        {'ligand': 'lig', 'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv'},
        {'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv', 'leap': 'leaprc.gaff'}
    ]
    for system in systems:
        modified_script = basic_script.copy()
        modified_script['systems'] = {'sys': system}
        yield assert_raises, YamlParseError, yaml_builder.parse, modified_script
def test_order_phases():
    """YankLoader preserves protocol phase order."""
    yaml_content_template = """
    ---
    absolute-binding:
        {}:
            alchemical_path:
                lambda_electrostatics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
                lambda_sterics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
        {}:
            alchemical_path:
                lambda_electrostatics: [1.0, 0.8, 0.6, 0.3, 0.0]
                lambda_sterics: [1.0, 0.8, 0.6, 0.3, 0.0]
        {}:
            alchemical_path:
                lambda_electrostatics: [1.0, 0.8, 0.6, 0.3, 0.0]
                lambda_sterics: [1.0, 0.8, 0.6, 0.3, 0.0]"""
    # Find order of phases for which normal parsing is not ordered or the test is useless
    # i.e. stop at the first permutation that a plain yaml.load does NOT
    # happen to return in document order, so the !Ordered tag below is the
    # thing that makes the final assertion pass.
    for ordered_phases in itertools.permutations(['athirdphase', 'complex', 'solvent']):
        yaml_content = yaml_content_template.format(*ordered_phases)
        parsed = yaml.load(textwrap.dedent(yaml_content))
        if tuple(parsed['absolute-binding'].keys()) != ordered_phases:
            break
    # Insert !Ordered tag
    yaml_content = yaml_content.replace('binding:', 'binding: !Ordered')
    # yank_load honors !Ordered and must return the keys in document order.
    parsed = yank_load(yaml_content)
    assert tuple(parsed['absolute-binding'].keys()) == ordered_phases
def test_validation_correct_protocols():
    """Correct protocols YAML validation."""
    basic_protocol = yank_load(standard_protocol)
    # Alchemical paths: minimal and extended (torsions/angles) lambda sets.
    protocols = [
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_sterics': [1.0, 0.8, 0.6, 0.3, 0.0]},
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_sterics': [1.0, 0.8, 0.6, 0.3, 0.0],
         'lambda_torsions': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_angles': [1.0, 0.8, 0.6, 0.3, 0.0]}
    ]
    for protocol in protocols:
        modified_protocol = copy.deepcopy(basic_protocol)
        modified_protocol['absolute-binding']['complex']['alchemical_path'] = protocol
        yield YamlBuilder._validate_protocols, modified_protocol
    # Phases: standard names, prefixed names, solvation pairs, and
    # explicitly ordered mappings.
    alchemical_path = copy.deepcopy(basic_protocol['absolute-binding']['complex'])
    protocols = [
        {'complex': alchemical_path, 'solvent': alchemical_path},
        {'my-complex': alchemical_path, 'my-solvent': alchemical_path},
        {'solvent1': alchemical_path, 'solvent2': alchemical_path},
        {'solvent1variant': alchemical_path, 'solvent2variant': alchemical_path},
        collections.OrderedDict([('a', alchemical_path), ('z', alchemical_path)]),
        collections.OrderedDict([('z', alchemical_path), ('a', alchemical_path)])
    ]
    for protocol in protocols:
        modified_protocol = copy.deepcopy(basic_protocol)
        modified_protocol['absolute-binding'] = protocol
        yield YamlBuilder._validate_protocols, modified_protocol
        sorted_protocol = YamlBuilder._validate_protocols(modified_protocol)['absolute-binding']
        if isinstance(protocol, collections.OrderedDict):
            # BUG FIX: dict key views compare as *sets* in Python 3, so the
            # original `sorted_protocol.keys() == protocol.keys()` could not
            # detect a change in key order. Compare as lists to actually
            # assert that a user-supplied OrderedDict order is preserved.
            assert list(sorted_protocol.keys()) == list(protocol.keys())
        else:
            # Plain dicts get sorted into a canonical OrderedDict whose
            # first phase is the complex/solvent1 one.
            assert isinstance(sorted_protocol, collections.OrderedDict)
            first_phase = next(iter(sorted_protocol.keys()))  # py2/3 compatible
            assert 'complex' in first_phase or 'solvent1' in first_phase
def test_validation_wrong_protocols():
    """YAML validation raises exception with wrong alchemical protocols."""
    basic_protocol = yank_load(standard_protocol)
    # Alchemical paths
    # Each path is malformed in one way: missing lambda_sterics, non-numeric
    # value, values presumably outside the allowed range, scalar instead of
    # a list, or a non-string key.
    protocols = [
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0]},
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_sterics': [1.0, 0.8, 0.6, 0.3, 'wrong!']},
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_sterics': [1.0, 0.8, 0.6, 0.3, 11000.0]},
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_sterics': [1.0, 0.8, 0.6, 0.3, -0.5]},
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_sterics': 0.0},
        {'lambda_electrostatics': [1.0, 0.8, 0.6, 0.3, 0.0], 'lambda_sterics': [1.0, 0.8, 0.6, 0.3, 0.0], 3: 2}
    ]
    for protocol in protocols:
        modified_protocol = copy.deepcopy(basic_protocol)
        modified_protocol['absolute-binding']['complex']['alchemical_path'] = protocol
        yield assert_raises, YamlParseError, YamlBuilder._validate_protocols, modified_protocol
    # Phases
    # Invalid phase mappings: wrong count, non-string keys, ambiguous or
    # duplicated names, and name sets the validator cannot pair up.
    alchemical_path = copy.deepcopy(basic_protocol['absolute-binding']['complex'])
    protocols = [
        {'complex': alchemical_path},
        {2: alchemical_path, 'solvent': alchemical_path},
        {'complex': alchemical_path, 'solvent': alchemical_path, 'thirdphase': alchemical_path},
        {'my-complex-solvent': alchemical_path, 'my-solvent': alchemical_path},
        {'my-complex': alchemical_path, 'my-complex-solvent': alchemical_path},
        {'my-complex': alchemical_path, 'my-complex': alchemical_path},
        {'complex': alchemical_path, 'solvent1': alchemical_path, 'solvent2': alchemical_path},
        {'my-phase1': alchemical_path, 'my-phase2': alchemical_path},
        collections.OrderedDict([('my-phase1', alchemical_path), ('my-phase2', alchemical_path),
                                 ('my-phase3', alchemical_path)])
    ]
    for protocol in protocols:
        modified_protocol = copy.deepcopy(basic_protocol)
        modified_protocol['absolute-binding'] = protocol
        yield assert_raises, YamlParseError, YamlBuilder._validate_protocols, modified_protocol
def test_validation_correct_experiments():
    """Correct experiments YAML validation."""
    # NOTE: docstring fixed — it previously said "raises exception with
    # wrong experiments", a copy-paste from test_validation_wrong_experiments;
    # this test checks that VALID experiment descriptions parse cleanly.
    yaml_builder = YamlBuilder()
    basic_script = """
    ---
    molecules:
        rec: {{filepath: {}, leap: {{parameters: oldff/leaprc.ff14SB}}}}
        lig: {{name: lig, leap: {{parameters: leaprc.gaff}}}}
    solvents:
        solv: {{nonbonded_method: NoCutoff}}
    systems:
        sys: {{receptor: rec, ligand: lig, solvent: solv}}
    protocols:{}
    """.format(examples_paths()['lysozyme'], standard_protocol)
    basic_script = yank_load(basic_script)
    # Valid experiment descriptions, with and without a restraint section.
    experiments = [
        {'system': 'sys', 'protocol': 'absolute-binding'},
        {'system': 'sys', 'protocol': 'absolute-binding', 'restraint': {'type': 'Harmonic'}},
        {'system': 'sys', 'protocol': 'absolute-binding', 'restraint': {'type': None}}
    ]
    for experiment in experiments:
        modified_script = basic_script.copy()
        modified_script['experiments'] = experiment
        yield yaml_builder.parse, modified_script
def test_validation_wrong_experiments():
    """YAML validation raises exception with wrong experiments specification."""
    yaml_builder = YamlBuilder()
    basic_script = """
    ---
    molecules:
        rec: {{filepath: {}, leap: {{parameters: oldff/leaprc.ff14SB}}}}
        lig: {{name: lig, leap: {{parameters: leaprc.gaff}}}}
    solvents:
        solv: {{nonbonded_method: NoCutoff}}
    systems:
        sys: {{receptor: rec, ligand: lig, solvent: solv}}
    protocols:{}
    """.format(examples_paths()['lysozyme'], standard_protocol)
    basic_script = yank_load(basic_script)
    # Each description below is broken in one way: unknown system, unknown
    # protocol, or a missing mandatory keyword.
    bad_experiments = (
        {'system': 'unknownsys', 'protocol': 'absolute-binding'},
        {'system': 'sys', 'protocol': 'unknownprotocol'},
        {'system': 'sys'},
        {'protocol': 'absolute-binding'},
    )
    for bad_experiment in bad_experiments:
        script = basic_script.copy()
        script['experiments'] = bad_experiment
        yield assert_raises, YamlParseError, yaml_builder.parse, script
# ==============================================================================
# Molecules pipeline
# ==============================================================================
def test_yaml_mol2_antechamber():
    """Test antechamber setup of molecule files."""
    with omt.utils.temporary_directory() as tmp_dir:
        yaml_content = get_template_script(tmp_dir)
        yaml_builder = YamlBuilder(yaml_content)
        yaml_builder._db._setup_molecules('benzene')
        output_dir = yaml_builder._db.get_molecule_dir('benzene')
        gaff_path = os.path.join(output_dir, 'benzene.gaff.mol2')
        frcmod_path = os.path.join(output_dir, 'benzene.frcmod')
        # Check that output files have been created BEFORE stat-ing them, so
        # a failed setup surfaces as a clear assertion error rather than a
        # FileNotFoundError from os.stat (the original stat'd first).
        assert os.path.exists(gaff_path)
        assert os.path.exists(frcmod_path)
        assert os.path.getsize(gaff_path) > 0
        assert os.path.getsize(frcmod_path) > 0
        # Get last modified time
        last_touched_gaff = os.stat(gaff_path).st_mtime
        last_touched_frcmod = os.stat(frcmod_path).st_mtime
        # Check that setup_molecules do not recreate molecule files
        time.sleep(0.5)  # st_mtime doesn't have much precision
        yaml_builder._db._setup_molecules('benzene')
        assert last_touched_gaff == os.stat(gaff_path).st_mtime
        assert last_touched_frcmod == os.stat(frcmod_path).st_mtime
@unittest.skipIf(not utils.is_openeye_installed(), 'This test requires OpenEye installed.')
def test_setup_name_smiles_openeye_charges():
    """Setup molecule from name and SMILES with openeye charges and gaff."""
    with omt.utils.temporary_directory() as tmp_dir:
        molecules_ids = ['toluene-smiles', 'p-xylene-name']
        yaml_content = get_template_script(tmp_dir)
        yaml_builder = YamlBuilder(yaml_content)
        yaml_builder._db._setup_molecules(*molecules_ids)
        for mol in molecules_ids:
            output_dir = yaml_builder._db.get_molecule_dir(mol)
            output_basepath = os.path.join(output_dir, mol)
            # Check that all the files have been created
            assert os.path.exists(output_basepath + '.mol2')
            assert os.path.exists(output_basepath + '.gaff.mol2')
            assert os.path.exists(output_basepath + '.frcmod')
            assert os.path.getsize(output_basepath + '.mol2') > 0
            assert os.path.getsize(output_basepath + '.gaff.mol2') > 0
            assert os.path.getsize(output_basepath + '.frcmod') > 0
            # Compare per-atom charges before (input mol2) and after
            # parametrization (gaff mol2).
            atoms_frame, _ = mol2_to_dataframes(output_basepath + '.mol2')
            input_charges = atoms_frame['charge']
            atoms_frame, _ = mol2_to_dataframes(output_basepath + '.gaff.mol2')
            output_charges = atoms_frame['charge']
            # With openeye:am1bcc charges, the final charges should be unaltered
            if mol == 'p-xylene-name':
                assert input_charges.equals(output_charges)
            else:  # With antechamber, sqm should alter the charges a little
                assert not input_charges.equals(output_charges)
        # Check that molecules are resumed correctly
        yaml_builder = YamlBuilder(yaml_content)
        yaml_builder._db._setup_molecules(*molecules_ids)
@unittest.skipIf(not utils.is_openeye_installed(), 'This test requires OpenEye installed.')
def test_clashing_atoms():
    """Check that clashing atoms are resolved."""
    benzene_path = examples_paths()['benzene']
    toluene_path = examples_paths()['toluene']
    with omt.utils.temporary_directory() as tmp_dir:
        yaml_content = get_template_script(tmp_dir)
        system_id = 'explicit-system'
        system_description = yaml_content['systems'][system_id]
        system_description['pack'] = True
        system_description['solvent'] = utils.CombinatorialLeaf(['vacuum', 'PME'])
        # Sanity check: at the beginning molecules clash
        toluene_pos = utils.get_oe_mol_positions(utils.read_oe_molecule(toluene_path))
        benzene_pos = utils.get_oe_mol_positions(utils.read_oe_molecule(benzene_path))
        assert compute_min_dist(toluene_pos, benzene_pos) < SetupDatabase.CLASH_THRESHOLD
        yaml_builder = YamlBuilder(yaml_content)
        # BUG FIX: the loop variable used to shadow system_id, which made the
        # box-size check below (`system_id == system_id + '_PME'`) trivially
        # False and therefore dead code. Use a distinct name instead.
        for expanded_system_id in [system_id + '_vacuum', system_id + '_PME']:
            system_dir = os.path.dirname(
                yaml_builder._db.get_system(expanded_system_id)[0].position_path)
            # Get positions of molecules in the final system
            prmtop = openmm.app.AmberPrmtopFile(os.path.join(system_dir, 'complex.prmtop'))
            inpcrd = openmm.app.AmberInpcrdFile(os.path.join(system_dir, 'complex.inpcrd'))
            positions = inpcrd.getPositions(asNumpy=True).value_in_unit(unit.angstrom)
            atom_indices = pipeline.find_components(prmtop.createSystem(), prmtop.topology, 'resname TOL')
            benzene_pos2 = positions.take(atom_indices['receptor'], axis=0)
            toluene_pos2 = positions.take(atom_indices['ligand'], axis=0)
            # Test that clashes are resolved in the system
            min_dist, max_dist = compute_dist_bound(toluene_pos2, benzene_pos2)
            assert min_dist >= SetupDatabase.CLASH_THRESHOLD
            # For solvent we check that molecule is within the box
            # NOTE(review): assumes the 'clearance' entry is comparable to a
            # distance in angstroms at this point — confirm against
            # get_template_script's parsing of quantities.
            if expanded_system_id == system_id + '_PME':
                assert max_dist <= yaml_content['solvents']['PME']['clearance']
@unittest.skipIf(not omt.schrodinger.is_schrodinger_suite_installed(),
                 "This test requires Schrodinger's suite")
def test_epik_enumeration():
    """Test epik protonation state enumeration."""
    with omt.utils.temporary_directory() as tmp_dir:
        builder = YamlBuilder(get_template_script(tmp_dir))
        molecule_ids = ['benzene-epik0', 'benzene-epikcustom']
        builder._db._setup_molecules(*molecule_ids)
        # Epik must have produced a non-empty mol2 and sdf file per molecule.
        for molecule_id in molecule_ids:
            molecule_dir = builder._db.get_molecule_dir(molecule_id)
            basename = os.path.join(molecule_dir, molecule_id + '-epik.')
            for extension in ('mol2', 'sdf'):
                assert os.path.exists(basename + extension)
                assert os.path.getsize(basename + extension) > 0
def test_strip_protons():
    """Test that protons are stripped correctly for tleap."""
    def _has_hydrogen_atoms(pdb_path):
        """Return True if any ATOM record has an H in the atom-name columns."""
        # BUG FIX: the record name occupies PDB columns 1-6 ('ATOM' padded
        # with two spaces); the original compared a 6-character slice against
        # a 5-character literal, which can never match.
        with open(pdb_path, 'r') as f:
            for line in f:
                if line[:6] == 'ATOM  ' and (line[12] == 'H' or line[13] == 'H'):
                    return True
        return False

    mol_id = 'Abl'
    abl_path = examples_paths()['abl']
    with omt.utils.temporary_directory() as tmp_dir:
        # Safety check: protein must have protons
        assert _has_hydrogen_atoms(abl_path)
        yaml_content = get_template_script(tmp_dir)
        yaml_builder = YamlBuilder(yaml_content)
        output_dir = yaml_builder._db.get_molecule_dir(mol_id)
        output_path = os.path.join(output_dir, 'Abl.pdb')
        # We haven't set the strip_protons options, so this shouldn't do anything
        yaml_builder._db._setup_molecules(mol_id)
        assert not os.path.exists(output_path)
        # Now we set the strip_protons options and repeat
        yaml_builder._db.molecules[mol_id]['strip_protons'] = True
        yaml_builder._db._setup_molecules(mol_id)
        assert os.path.exists(output_path)
        assert os.path.getsize(output_path) > 0
        # The new pdb does not have hydrogen atoms
        assert not _has_hydrogen_atoms(output_path)
# ==============================================================================
# Combinatorial expansion
# ==============================================================================
class TestMultiMoleculeFiles():
    """Tests for input files that contain multiple molecules or models.

    Covers combinatorial expansion and model selection for a 2-frame PDB,
    a 2-entry SMILES (csv) file, and 2-conformer sdf/mol2 files, all
    generated in a class-level temporary directory.
    """
    @classmethod
    def setup_class(cls):
        """Create a 2-frame PDB file in pdb_path. The second frame has same positions
        of the first one but with inversed z-coordinate."""
        # Creating a temporary directory and generating paths for output files
        cls.tmp_dir = tempfile.mkdtemp()
        cls.pdb_path = os.path.join(cls.tmp_dir, 'multipdb.pdb')
        cls.smiles_path = os.path.join(cls.tmp_dir, 'multismiles.smiles')
        cls.sdf_path = os.path.join(cls.tmp_dir, 'multisdf.sdf')
        cls.mol2_path = os.path.join(cls.tmp_dir, 'multimol2.mol2')
        # Rotation matrix to invert z-coordinate, i.e. flip molecule w.r.t. x-y plane
        rot = np.array([[1, 0, 0], [0, 1, 0], [0, 0, -1]])
        # Create 2-frame PDB file. First frame is lysozyme, second is lysozyme with inverted z
        lysozyme_path = examples_paths()['lysozyme']
        lysozyme = PDBFile(lysozyme_path)
        # Rotate positions to invert z for the second frame
        symmetric_pos = lysozyme.getPositions(asNumpy=True).value_in_unit(unit.angstrom)
        symmetric_pos = symmetric_pos.dot(rot) * unit.angstrom
        with open(cls.pdb_path, 'w') as f:
            PDBFile.writeHeader(lysozyme.topology, file=f)
            PDBFile.writeModel(lysozyme.topology, lysozyme.positions, file=f, modelIndex=0)
            PDBFile.writeModel(lysozyme.topology, symmetric_pos, file=f, modelIndex=1)
        # Create 2-molecule SMILES file
        with open(cls.smiles_path, 'w') as f:
            f.write('benzene,c1ccccc1\n')
            f.write('toluene,Cc1ccccc1\n')
        # Create 2-molecule sdf and mol2 with OpenEye
        if utils.is_openeye_installed():
            from openeye import oechem
            oe_benzene = utils.read_oe_molecule(examples_paths()['benzene'])
            oe_benzene_pos = utils.get_oe_mol_positions(oe_benzene).dot(rot)
            oe_benzene.NewConf(oechem.OEFloatArray(oe_benzene_pos.flatten()))
            # Save 2-conformer benzene in sdf and mol2 format
            utils.write_oe_molecule(oe_benzene, cls.sdf_path)
            utils.write_oe_molecule(oe_benzene, cls.mol2_path, mol2_resname='MOL')
    @classmethod
    def teardown_class(cls):
        # Remove the temporary directory and all fixture files created above.
        shutil.rmtree(cls.tmp_dir)
    @unittest.skipIf(not utils.is_openeye_installed(), 'This test requires OpenEye installed.')
    def test_expand_molecules(self):
        """Check that combinatorial molecules are handled correctly."""
        # Script using !Combinatorial leaves and 'select: all' on multi-model
        # files; _expand_molecules must expand both into concrete molecules.
        yaml_content = """
        ---
        molecules:
            rec:
                filepath: !Combinatorial [{}, {}]
                leap: {{parameters: oldff/leaprc.ff14SB}}
            lig:
                name: !Combinatorial [iupac1, iupac2]
                leap: {{parameters: leaprc.gaff}}
                epik:
                    select: !Combinatorial [0, 2]
            multi:
                filepath: {}
                leap: {{parameters: oldff/leaprc.ff14SB}}
                select: all
            smiles:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: all
            sdf:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: all
            mol2:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: all
        solvents:
            solv1:
                nonbonded_method: NoCutoff
            solv2:
                nonbonded_method: PME
                nonbonded_cutoff: 1*nanometer
                clearance: 10*angstroms
        protocols:{}
        systems:
            sys:
                receptor: !Combinatorial [rec, multi]
                ligand: lig
                solvent: !Combinatorial [solv1, solv2]
        experiments:
            system: sys
            protocol: absolute-binding
        """.format(self.sdf_path, self.mol2_path, self.pdb_path,
                   self.smiles_path, self.sdf_path, self.mol2_path,
                   indent(indent(standard_protocol)))
        yaml_content = textwrap.dedent(yaml_content)
        # Expected result: one concrete molecule per combination, and the
        # systems section rewritten to reference the expanded names.
        expected_content = """
        ---
        molecules:
            rec_multisdf:
                filepath: {}
                leap: {{parameters: oldff/leaprc.ff14SB}}
            rec_multimol2:
                filepath: {}
                leap: {{parameters: oldff/leaprc.ff14SB}}
            lig_0_iupac1:
                name: iupac1
                leap: {{parameters: leaprc.gaff}}
                epik: {{select: 0}}
            lig_2_iupac1:
                name: iupac1
                leap: {{parameters: leaprc.gaff}}
                epik: {{select: 2}}
            lig_0_iupac2:
                name: iupac2
                leap: {{parameters: leaprc.gaff}}
                epik: {{select: 0}}
            lig_2_iupac2:
                name: iupac2
                leap: {{parameters: leaprc.gaff}}
                epik: {{select: 2}}
            multi_0:
                filepath: {}
                leap: {{parameters: oldff/leaprc.ff14SB}}
                select: 0
            multi_1:
                filepath: {}
                leap: {{parameters: oldff/leaprc.ff14SB}}
                select: 1
            smiles_0:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: 0
            smiles_1:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: 1
            sdf_0:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: 0
            sdf_1:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: 1
            mol2_0:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: 0
            mol2_1:
                filepath: {}
                leap: {{parameters: leaprc.gaff}}
                select: 1
        solvents:
            solv1:
                nonbonded_method: NoCutoff
            solv2:
                nonbonded_method: PME
                nonbonded_cutoff: 1*nanometer
                clearance: 10*angstroms
        protocols:{}
        systems:
            sys:
                receptor: !Combinatorial [rec_multimol2, rec_multisdf, multi_0, multi_1]
                ligand: !Combinatorial [lig_0_iupac1, lig_0_iupac2, lig_2_iupac1, lig_2_iupac2]
                solvent: !Combinatorial [solv1, solv2]
        experiments:
            system: sys
            protocol: absolute-binding
        """.format(self.sdf_path, self.mol2_path, self.pdb_path, self.pdb_path,
                   self.smiles_path, self.smiles_path, self.sdf_path, self.sdf_path,
                   self.mol2_path, self.mol2_path, indent(standard_protocol))
        expected_content = textwrap.dedent(expected_content)
        raw = yank_load(yaml_content)
        expanded = YamlBuilder(yaml_content)._expand_molecules(raw)
        expected = yank_load(expected_content)
        assert expanded == expected, 'Expected:\n{}\n\nExpanded:\n{}'.format(
            expected['systems'], expanded['systems'])
    def test_select_pdb_conformation(self):
        """Check that frame selection in multi-model PDB files works."""
        with omt.utils.temporary_directory() as tmp_dir:
            yaml_content = """
            ---
            options:
                output_dir: {}
                setup_dir: .
            molecules:
                selected:
                    filepath: {}
                    leap: {{parameters: oldff/leaprc.ff14SB}}
                    select: 1
            """.format(tmp_dir, self.pdb_path)
            yaml_content = textwrap.dedent(yaml_content)
            yaml_builder = YamlBuilder(yaml_content)
            # The molecule now is neither set up nor processed
            is_setup, is_processed = yaml_builder._db.is_molecule_setup('selected')
            assert is_setup is False
            assert is_processed is False
            # The setup of the molecule must isolate the frame in a single-frame PDB
            yaml_builder._db._setup_molecules('selected')
            selected_pdb_path = os.path.join(tmp_dir, SetupDatabase.MOLECULES_DIR,
                                             'selected', 'selected.pdb')
            assert os.path.exists(os.path.join(selected_pdb_path))
            assert os.path.getsize(os.path.join(selected_pdb_path)) > 0
            # The positions must be the ones of the second frame
            selected_pdb = PDBFile(selected_pdb_path)
            selected_pos = selected_pdb.getPositions(asNumpy=True)
            second_pos = PDBFile(self.pdb_path).getPositions(asNumpy=True, frame=1)
            assert selected_pdb.getNumFrames() == 1
            assert (selected_pos == second_pos).all()
            # The description of the molecule is now updated
            assert os.path.normpath(yaml_builder._db.molecules['selected']['filepath']) == selected_pdb_path
            # The molecule now both set up and processed
            is_setup, is_processed = yaml_builder._db.is_molecule_setup('selected')
            assert is_setup is True
            assert is_processed is True
            # A new instance of YamlBuilder is able to resume with correct molecule
            yaml_builder = YamlBuilder(yaml_content)
            is_setup, is_processed = yaml_builder._db.is_molecule_setup('selected')
            assert is_setup is True
            assert is_processed is True
    @unittest.skipIf(not utils.is_openeye_installed(), 'This test requires OpenEye installed.')
    def test_setup_smiles(self):
        """Check that setup molecule from SMILES files works."""
        from openeye.oechem import OEMolToSmiles
        with omt.utils.temporary_directory() as tmp_dir:
            # Without 'select' the first entry is used; with 'select: 1' the
            # second entry is used.
            yaml_content = """
            ---
            options:
                output_dir: {}
                setup_dir: .
            molecules:
                take-first:
                    filepath: {}
                    antechamber: {{charge_method: bcc}}
                    leap: {{parameters: leaprc.gaff}}
                select-second:
                    filepath: {}
                    antechamber: {{charge_method: bcc}}
                    leap: {{parameters: leaprc.gaff}}
                    select: 1
            """.format(tmp_dir, self.smiles_path, self.smiles_path)
            yaml_content = textwrap.dedent(yaml_content)
            yaml_builder = YamlBuilder(yaml_content)
            for i, mol_id in enumerate(['take-first', 'select-second']):
                # The molecule now is neither set up nor processed
                is_setup, is_processed = yaml_builder._db.is_molecule_setup(mol_id)
                assert is_setup is False
                assert is_processed is False
                # The single SMILES has been converted to mol2 file
                yaml_builder._db._setup_molecules(mol_id)
                mol2_path = os.path.join(tmp_dir, SetupDatabase.MOLECULES_DIR, mol_id, mol_id + '.mol2')
                assert os.path.exists(os.path.join(mol2_path))
                assert os.path.getsize(os.path.join(mol2_path)) > 0
                # The mol2 represents the right molecule
                csv_smiles_str = (open(self.smiles_path, 'r').readlines()[i]).strip().split(',')[1]
                mol2_smiles_str = OEMolToSmiles(utils.read_oe_molecule(mol2_path))
                assert mol2_smiles_str == csv_smiles_str
                # The molecule now both set up and processed
                is_setup, is_processed = yaml_builder._db.is_molecule_setup(mol_id)
                assert is_setup is True
                assert is_processed is True
                # A new instance of YamlBuilder is able to resume with correct molecule
                yaml_builder = YamlBuilder(yaml_content)
                is_setup, is_processed = yaml_builder._db.is_molecule_setup(mol_id)
                assert is_setup is True
                assert is_processed is True
    @unittest.skipIf(not utils.is_openeye_installed(), 'This test requires OpenEye installed.')
    def test_select_sdf_mol2(self):
        """Check that selection in sdf and mol2 files works."""
        with omt.utils.temporary_directory() as tmp_dir:
            yaml_content = """
            ---
            options:
                output_dir: {}
                setup_dir: .
            molecules:
                sdf_0:
                    filepath: {}
                    antechamber: {{charge_method: bcc}}
                    leap: {{parameters: leaprc.gaff}}
                    select: 0
                sdf_1:
                    filepath: {}
                    antechamber: {{charge_method: bcc}}
                    leap: {{parameters: leaprc.gaff}}
                    select: 1
                mol2_0:
                    filepath: {}
                    antechamber: {{charge_method: bcc}}
                    leap: {{parameters: leaprc.gaff}}
                    select: 0
                mol2_1:
                    filepath: {}
                    antechamber: {{charge_method: bcc}}
                    leap: {{parameters: leaprc.gaff}}
                    select: 1
            """.format(tmp_dir, self.sdf_path, self.sdf_path, self.mol2_path, self.mol2_path)
            yaml_content = textwrap.dedent(yaml_content)
            yaml_builder = YamlBuilder(yaml_content)
            for extension in ['sdf', 'mol2']:
                multi_path = getattr(self, extension + '_path')
                for model_idx in [0, 1]:
                    mol_id = extension + '_' + str(model_idx)
                    # The molecule now is neither set up nor processed
                    is_setup, is_processed = yaml_builder._db.is_molecule_setup(mol_id)
                    assert is_setup is False
                    assert is_processed is False
                    yaml_builder._db._setup_molecules(mol_id)
                    # The setup of the molecule must isolate the frame in a single-frame PDB
                    single_mol_path = os.path.join(tmp_dir, SetupDatabase.MOLECULES_DIR,
                                                   mol_id, mol_id + '.' + extension)
                    assert os.path.exists(os.path.join(single_mol_path))
                    assert os.path.getsize(os.path.join(single_mol_path)) > 0
                    # sdf files must be converted to mol2 to be fed to antechamber
                    if extension == 'sdf':
                        single_mol_path = os.path.join(tmp_dir, SetupDatabase.MOLECULES_DIR,
                                                       mol_id, mol_id + '.mol2')
                        assert os.path.exists(os.path.join(single_mol_path))
                        assert os.path.getsize(os.path.join(single_mol_path)) > 0
                    # Check antechamber parametrization
                    single_mol_path = os.path.join(tmp_dir, SetupDatabase.MOLECULES_DIR,
                                                   mol_id, mol_id + '.gaff.mol2')
                    assert os.path.exists(os.path.join(single_mol_path))
                    assert os.path.getsize(os.path.join(single_mol_path)) > 0
                    # The positions must be approximately correct (antechamber move the molecule)
                    selected_oe_mol = utils.read_oe_molecule(single_mol_path)
                    selected_pos = utils.get_oe_mol_positions(selected_oe_mol)
                    second_oe_mol = utils.read_oe_molecule(multi_path, conformer_idx=model_idx)
                    second_pos = utils.get_oe_mol_positions(second_oe_mol)
                    assert selected_oe_mol.NumConfs() == 1
                    assert np.allclose(selected_pos, second_pos, atol=1e-1)
                    # The molecule now both set up and processed
                    is_setup, is_processed = yaml_builder._db.is_molecule_setup(mol_id)
                    assert is_setup is True
                    assert is_processed is True
                    # A new instance of YamlBuilder is able to resume with correct molecule
                    yaml_builder = YamlBuilder(yaml_content)
                    is_setup, is_processed = yaml_builder._db.is_molecule_setup(mol_id)
                    assert is_setup is True
                    assert is_processed is True
def test_system_expansion():
    """Combinatorial systems are correctly expanded."""
    # We need 2 combinatorial systems
    template_script = get_template_script()
    template_system = template_script['systems']['implicit-system']
    del template_system['leap']
    template_script['systems'] = {'system1': template_system.copy(),
                                  'system2': template_system.copy()}
    # system1 varies by receptor, system2 by ligand; the experiments section
    # itself also combines both systems.
    template_script['systems']['system1']['receptor'] = utils.CombinatorialLeaf(['Abl', 'T4Lysozyme'])
    template_script['systems']['system2']['ligand'] = utils.CombinatorialLeaf(['p-xylene', 'toluene'])
    template_script['experiments']['system'] = utils.CombinatorialLeaf(['system1', 'system2'])
    # Expected expanded script
    expected_script = yank_load("""
    systems:
        system1_Abl: {receptor: Abl, ligand: p-xylene, solvent: GBSA-OBC2}
        system1_T4Lysozyme: {receptor: T4Lysozyme, ligand: p-xylene, solvent: GBSA-OBC2}
        system2_pxylene: {receptor: T4Lysozyme, ligand: p-xylene, solvent: GBSA-OBC2}
        system2_toluene: {receptor: T4Lysozyme, ligand: toluene, solvent: GBSA-OBC2}
    experiments:
        system: !Combinatorial ['system1_Abl', 'system1_T4Lysozyme', 'system2_pxylene', 'system2_toluene']
        protocol: absolute-binding
    """)
    expanded_script = template_script.copy()
    expanded_script['systems'] = expected_script['systems']
    expanded_script['experiments'] = expected_script['experiments']
    assert YamlBuilder(template_script)._expand_systems(template_script) == expanded_script
def test_exp_sequence():
    """Test all experiments in a sequence are parsed."""
    # 'experiments' points at two top-level experiment sections; both must
    # be picked up by the builder.
    yaml_content = """
    ---
    molecules:
        rec:
            filepath: {}
            leap: {{parameters: oldff/leaprc.ff14SB}}
        lig:
            name: lig
            leap: {{parameters: leaprc.gaff}}
    solvents:
        solv1:
            nonbonded_method: NoCutoff
        solv2:
            nonbonded_method: PME
            nonbonded_cutoff: 1*nanometer
            clearance: 10*angstroms
    protocols:{}
    systems:
        system1:
            receptor: rec
            ligand: lig
            solvent: !Combinatorial [solv1, solv2]
        system2:
            receptor: rec
            ligand: lig
            solvent: solv1
    experiment1:
        system: system1
        protocol: absolute-binding
    experiment2:
        system: system2
        protocol: absolute-binding
    experiments: [experiment1, experiment2]
    """.format(examples_paths()['lysozyme'], standard_protocol)
    yaml_builder = YamlBuilder(textwrap.dedent(yaml_content))
    assert len(yaml_builder._experiments) == 2
# ==============================================================================
# Systems pipeline
# ==============================================================================
def test_setup_implicit_system_leap():
"""Create prmtop and inpcrd for implicit solvent protein-ligand system."""
with omt.utils.temporary_directory() as tmp_dir:
yaml_content = get_template_script(tmp_dir)
yaml_builder = YamlBuilder(yaml_content)
output_dir = os.path.dirname(
yaml_builder._db.get_system('implicit-system')[0].position_path)
last_modified_path = os.path.join(output_dir, 'complex.prmtop')
last_modified = os.stat(last_modified_path).st_mtime
# Test that output files exist and there is no water
for phase in ['complex', 'solvent']:
found_resnames = set()
pdb_path = os.path.join(output_dir, phase + '.pdb')
prmtop_path = os.path.join(output_dir, phase + '.prmtop')
inpcrd_path = os.path.join(output_dir, phase + '.inpcrd')
with open(pdb_path, 'r') as f:
for line in f:
if len(line) > 10 and line[:5] != 'CRYST':
found_resnames.add(line[17:20])
assert os.path.exists(prmtop_path)
assert os.path.exists(inpcrd_path)
assert os.path.getsize(prmtop_path) > 0
assert os.path.getsize(inpcrd_path) > 0
assert 'MOL' in found_resnames
assert 'WAT' not in found_resnames
# Test that another call do not regenerate the system
time.sleep(0.5) # st_mtime doesn't have much precision
yaml_builder._db.get_system('implicit-system')
assert last_modified == os.stat(last_modified_path).st_mtime
def test_setup_explicit_system_leap():
"""Create prmtop and inpcrd protein-ligand system in explicit solvent."""
with omt.utils.temporary_directory() as tmp_dir:
yaml_content = get_template_script(tmp_dir)
yaml_builder = YamlBuilder(yaml_content)
output_dir = os.path.dirname(
yaml_builder._db.get_system('explicit-system')[0].position_path)
# Test that output file exists and that there is water
expected_resnames = {'complex': set(['BEN', 'TOL', 'WAT']),
'solvent': set(['TOL', 'WAT'])}
for phase in expected_resnames:
found_resnames = set()
pdb_path = os.path.join(output_dir, phase + '.pdb')
prmtop_path = os.path.join(output_dir, phase + '.prmtop')
inpcrd_path = os.path.join(output_dir, phase + '.inpcrd')
with open(pdb_path, 'r') as f:
for line in f:
if len(line) > 10 and line[:5] != 'CRYST':
found_resnames .add(line[17:20])
assert os.path.exists(prmtop_path)
assert os.path.exists(inpcrd_path)
assert os.path.getsize(prmtop_path) > 0
assert os.path.getsize(inpcrd_path) > 0
assert found_resnames == expected_resnames[phase]
def test_neutralize_system():
"""Test whether the system charge is neutralized correctly."""
with omt.utils.temporary_directory() as tmp_dir:
yaml_content = get_template_script(tmp_dir)
yaml_content['systems']['explicit-system']['receptor'] = 'T4Lysozyme'
yaml_content['systems']['explicit-system']['ligand'] = 'p-xylene'
yaml_builder = YamlBuilder(yaml_content)
output_dir = os.path.dirname(
yaml_builder._db.get_system('explicit-system')[0].position_path)
# Test that output file exists and that there are ions
found_resnames = set()
with open(os.path.join(output_dir, 'complex.pdb'), 'r') as f:
for line in f:
if len(line) > 10 and line[:5] != 'CRYST':
found_resnames.add(line[17:20])
assert set(['MOL', 'WAT', 'Cl-']) <= found_resnames
# Check that parameter files exist
prmtop_path = os.path.join(output_dir, 'complex.prmtop')
inpcrd_path = os.path.join(output_dir, 'complex.inpcrd')
assert os.path.exists(prmtop_path)
assert os.path.exists(inpcrd_path)
@unittest.skipIf(not utils.is_openeye_installed(), "This test requires OpenEye toolkit")
def test_charged_ligand():
"""Check that there are alchemical counterions for charged ligands."""
imatinib_path = examples_paths()['imatinib']
with omt.utils.temporary_directory() as tmp_dir:
receptors = {'Asp': -1, 'Abl': -8} # receptor name -> net charge
updates = yank_load("""
molecules:
Asp:
name: "(3S)-3-amino-4-hydroxy-4-oxo-butanoate"
openeye: {{quacpac: am1-bcc}}
antechamber: {{charge_method: null}}
imatinib:
filepath: {}
openeye: {{quacpac: am1-bcc}}
antechamber: {{charge_method: null}}
explicit-system:
receptor: !Combinatorial {}
ligand: imatinib
""".format(imatinib_path, list(receptors.keys())))
yaml_content = get_template_script(tmp_dir)
yaml_content['molecules'].update(updates['molecules'])
yaml_content['systems']['explicit-system'].update(updates['explicit-system'])
yaml_builder = YamlBuilder(yaml_content)
for receptor in receptors:
system_files_paths = yaml_builder._db.get_system('explicit-system_' + receptor)
for i, phase_name in enumerate(['complex', 'solvent']):
inpcrd_file_path = system_files_paths[i].position_path
prmtop_file_path = system_files_paths[i].parameters_path
phase = pipeline.prepare_phase(inpcrd_file_path, prmtop_file_path, 'resname MOL',
{'nonbondedMethod': openmm.app.PME})
# Safety check: receptor must be negatively charged as expected
if phase_name == 'complex':
receptor_net_charge = pipeline.compute_net_charge(phase.reference_system,
phase.atom_indices['receptor'])
assert receptor_net_charge == receptors[receptor]
# 'ligand_counterions' component contain one cation
assert len(phase.atom_indices['ligand_counterions']) == 1
ion_idx = phase.atom_indices['ligand_counterions'][0]
ion_atom = next(itertools.islice(phase.reference_topology.atoms(), ion_idx, None))
assert '-' in ion_atom.residue.name
# In complex, there should be both ions even if the system is globally
# neutral (e.g. asp lys system), because of the alchemical ion
found_resnames = set()
output_dir = os.path.dirname(system_files_paths[0].position_path)
with open(os.path.join(output_dir, phase_name + '.pdb'), 'r') as f:
for line in f:
if len(line) > 10 and line[:5] != 'CRYST':
found_resnames.add(line[17:20])
if phase_name == 'complex':
assert set(['Na+', 'Cl-']) <= found_resnames
else:
assert set(['Cl-']) <= found_resnames
def test_setup_explicit_solvation_system():
"""Create prmtop and inpcrd files for solvation free energy in explicit solvent."""
with omt.utils.temporary_directory() as tmp_dir:
yaml_script = get_template_script(tmp_dir)
yaml_script['systems'] = {
'system1':
{'solute': 'toluene', 'solvent1': 'PME', 'solvent2': 'vacuum',
'leap': {'parameters': ['leaprc.gaff', 'oldff/leaprc.ff14SB']}}}
del yaml_script['experiments']
yaml_builder = YamlBuilder(yaml_script)
output_dir = os.path.dirname(
yaml_builder._db.get_system('system1')[0].position_path)
# Test that output file exists and that it has correct components
expected_resnames = {'solvent1': set(['TOL', 'WAT']), 'solvent2': set(['TOL'])}
for phase in expected_resnames:
found_resnames = set()
pdb_path = os.path.join(output_dir, phase + '.pdb')
prmtop_path = os.path.join(output_dir, phase + '.prmtop')
inpcrd_path = os.path.join(output_dir, phase + '.inpcrd')
with open(pdb_path, 'r') as f:
for line in f:
if len(line) > 10 and line[:5] != 'CRYST':
found_resnames.add(line[17:20])
assert os.path.exists(prmtop_path)
assert os.path.exists(inpcrd_path)
assert os.path.getsize(prmtop_path) > 0
assert os.path.getsize(inpcrd_path) > 0
assert found_resnames == expected_resnames[phase]
def test_setup_multiple_parameters_system():
"""Set up system with molecule that needs many parameter files."""
with omt.utils.temporary_directory() as tmp_dir:
yaml_script = get_template_script(tmp_dir)
# Force antechamber parametrization of benzene to output frcmod file
yaml_builder = YamlBuilder(yaml_script)
yaml_builder._db._setup_molecules('benzene')
benzene_dir = yaml_builder._db.get_molecule_dir('benzene')
frcmod_path = os.path.join(benzene_dir, 'benzene.frcmod')
benzene_path = os.path.join(benzene_dir, 'benzene.gaff.mol2')
# Redefine benzene to use leaprc.gaff and benzene.frcmod
# and set up system for hydration free energy calculation
yaml_script['molecules'] = {
'benzene-frcmod': {'filepath': benzene_path,
'leap': {'parameters': ['leaprc.gaff', frcmod_path]}}}
yaml_script['systems'] = {
'system':
{'solute': 'benzene-frcmod', 'solvent1': 'PME', 'solvent2': 'vacuum',
'leap': {'parameters': 'oldff/leaprc.ff14SB'}}
}
del yaml_script['experiments']
yaml_builder = YamlBuilder(yaml_script)
system_files_path = yaml_builder._db.get_system('system')
# Check that output exist:
for phase in system_files_path:
assert os.path.exists(phase.parameters_path)
assert os.path.exists(phase.position_path)
assert os.path.getsize(phase.parameters_path) > 0
assert os.path.getsize(phase.position_path) > 0
# ==============================================================================
# Platform configuration tests
# ==============================================================================
def test_select_fastest_platform():
"""Test that YamlBuilder select the fastest platform available when unspecified."""
available_platforms = [openmm.Platform.getPlatform(i).getName()
for i in range(openmm.Platform.getNumPlatforms())]
if 'CUDA' in available_platforms:
fastest_platform = 'CUDA'
elif 'OpenCL' in available_platforms:
fastest_platform = 'OpenCL'
else:
fastest_platform = 'CPU'
platform = YamlBuilder._determine_fastest_platform()
assert platform.getName() == fastest_platform
def test_platform_precision_configuration():
"""Test that the precision for platform is configured correctly."""
available_platforms = [openmm.Platform.getPlatform(i).getName()
for i in range(openmm.Platform.getNumPlatforms())]
for platform_name in available_platforms:
yaml_builder = YamlBuilder(yaml_source='options: {}')
# Reference and CPU platform support only one precision model
if platform_name == 'Reference':
assert_raises(RuntimeError, yaml_builder._configure_platform, platform_name, 'mixed')
continue
elif platform_name == 'CPU':
assert_raises(RuntimeError, yaml_builder._configure_platform, platform_name, 'double')
continue
# Check that precision is set as expected
for precision in ['mixed', 'double', 'single']:
if platform_name == 'CUDA':
platform = yaml_builder._configure_platform(platform_name=platform_name,
platform_precision=precision)
assert platform.getPropertyDefaultValue('CudaPrecision') == precision
elif platform_name == 'OpenCL':
if YamlBuilder._opencl_device_support_precision(precision):
platform = yaml_builder._configure_platform(platform_name=platform_name,
platform_precision=precision)
assert platform.getPropertyDefaultValue('OpenCLPrecision') == precision
else:
assert_raises(RuntimeError, yaml_builder._configure_platform, platform_name, precision)
def test_default_platform_precision():
"""Test that the precision for platform is set to mixed by default."""
available_platforms = [openmm.Platform.getPlatform(i).getName()
for i in range(openmm.Platform.getNumPlatforms())]
# Determine whether this device OpenCL platform supports double precision
if 'OpenCL' in available_platforms:
opencl_support_double = YamlBuilder._opencl_device_support_precision('double')
for platform_name in available_platforms:
# Reference and CPU platform support only one precision model so we don't
# explicitly test them. We still call _configure_platform to be sure that
# precision 'auto' works
yaml_builder = YamlBuilder(yaml_source='options: {}')
platform = yaml_builder._configure_platform(platform_name=platform_name,
platform_precision='auto')
if platform_name == 'CUDA':
assert platform.getPropertyDefaultValue('CudaPrecision') == 'mixed'
elif platform_name == 'OpenCL':
if opencl_support_double:
assert platform.getPropertyDefaultValue('OpenCLPrecision') == 'mixed'
else:
assert platform.getPropertyDefaultValue('OpenCLPrecision') == 'single'
# ==============================================================================
# Experiment execution
# ==============================================================================
def test_yaml_creation():
"""Test the content of generated single experiment YAML files."""
ligand_path = examples_paths()['p-xylene']
toluene_path = examples_paths()['toluene']
with omt.utils.temporary_directory() as tmp_dir:
molecules = """
T4lysozyme:
filepath: {}
leap: {{parameters: oldff/leaprc.ff14SB}}""".format(examples_paths()['lysozyme'])
solvent = """
vacuum:
nonbonded_method: NoCutoff"""
protocol = indent(standard_protocol)
system = """
system:
ligand: p-xylene
receptor: T4lysozyme
solvent: vacuum"""
experiment = """
protocol: absolute-binding
system: system"""
yaml_content = """
---
options:
output_dir: {}
molecules:{}
p-xylene:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
benzene:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
solvents:{}
GBSA-OBC2:
nonbonded_method: NoCutoff
implicit_solvent: OBC2
systems:{}
protocols:{}
experiments:{}
""".format(os.path.relpath(tmp_dir), molecules,
os.path.relpath(ligand_path), toluene_path,
solvent, system, protocol, experiment)
# We need to check whether the relative paths to the output directory and
# for p-xylene are handled correctly while absolute paths (T4lysozyme) are
# left untouched
expected_yaml_content = textwrap.dedent("""
---
version: '{}'
options:
experiments_dir: .
output_dir: .
molecules:{}
p-xylene:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
solvents:{}
systems:{}
protocols:{}
experiments:{}
""".format(HIGHEST_VERSION, molecules, os.path.relpath(ligand_path, tmp_dir),
solvent, system, protocol, experiment))
expected_yaml_content = expected_yaml_content[1:] # remove first '\n'
yaml_builder = YamlBuilder(textwrap.dedent(yaml_content))
# during setup we can modify molecule's fields, so we need
# to check that it doesn't affect the YAML file exported
experiment_dict = yaml.load(experiment)
yaml_builder._db.get_system(experiment_dict['system'])
generated_yaml_path = os.path.join(tmp_dir, 'experiment.yaml')
yaml_builder._generate_yaml(experiment_dict, generated_yaml_path)
with open(generated_yaml_path, 'r') as f:
assert yaml.load(f) == yank_load(expected_yaml_content)
def test_yaml_extension():
"""Test that extending a yaml content with additional data produces the correct fusion"""
ligand_path = examples_paths()['p-xylene']
toluene_path = examples_paths()['toluene']
with omt.utils.temporary_directory() as tmp_dir:
molecules = """
T4lysozyme:
filepath: {}
leap: {{parameters: oldff/leaprc.ff14SB}}""".format(examples_paths()['lysozyme'])
solvent = """
vacuum:
nonbonded_method: NoCutoff"""
protocol = indent(standard_protocol)
system = """
system:
ligand: p-xylene
receptor: T4lysozyme
solvent: vacuum"""
experiment = """
protocol: absolute-binding
system: system"""
num_iterations = 5
replacement_solvent = "HTC"
yaml_content = """
---
options:
output_dir: {}
molecules:{}
p-xylene:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
benzene:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
solvents:{}
GBSA-OBC2:
nonbonded_method: NoCutoff
implicit_solvent: OBC2
systems:{}
protocols:{}
experiments:{}
""".format(os.path.relpath(tmp_dir), molecules,
os.path.relpath(ligand_path), toluene_path,
solvent, system, protocol, experiment)
yaml_extension = """
options:
number_of_iterations: {}
solvents:
GBSA-OBC2:
implicit_solvent: HCT
""".format(num_iterations, replacement_solvent)
# We need to check whether the relative paths to the output directory and
# for p-xylene are handled correctly while absolute paths (T4lysozyme) are
# left untouched
expected_yaml_content = textwrap.dedent("""
---
version: '{}'
options:
experiments_dir: .
output_dir: .
number_of_iterations: {}
molecules:{}
p-xylene:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
solvents:{}
systems:{}
protocols:{}
experiments:{}
""".format(HIGHEST_VERSION, num_iterations, molecules, os.path.relpath(ligand_path, tmp_dir),
solvent, system, protocol, experiment))
expected_yaml_content = expected_yaml_content[1:] # remove first '\n'
yaml_builder = YamlBuilder(textwrap.dedent(yaml_content))
yaml_builder.update_yaml(yaml_extension)
# during setup we can modify molecule's fields, so we need
# to check that it doesn't affect the YAML file exported
experiment_dict = yaml.load(experiment)
yaml_builder._db.get_system(experiment_dict['system'])
generated_yaml_path = os.path.join(tmp_dir, 'experiment.yaml')
yaml_builder._generate_yaml(experiment_dict, generated_yaml_path)
with open(generated_yaml_path, 'r') as f:
assert yaml.load(f) == yank_load(expected_yaml_content)
def test_get_alchemical_path():
"""Check that conversion to list of AlchemicalStates is correct."""
yaml_content = """
---
protocols:{}
""".format(standard_protocol)
yaml_builder = YamlBuilder(textwrap.dedent(yaml_content))
alchemical_paths = yaml_builder._get_alchemical_paths('absolute-binding')
assert len(alchemical_paths) == 2
assert 'complex' in alchemical_paths
assert 'solvent' in alchemical_paths
complex_path = alchemical_paths['complex']
assert isinstance(complex_path[0], AlchemicalState)
assert complex_path[3]['lambda_electrostatics'] == 0.6
assert complex_path[4]['lambda_sterics'] == 0.4
assert complex_path[5]['lambda_restraints'] == 1.0
assert len(complex_path) == 7
@attr('slow') # Skip on Travis-CI
def test_run_experiment_from_amber_files():
"""Test experiment run from prmtop/inpcrd files."""
complex_path = examples_paths()['bentol-complex']
solvent_path = examples_paths()['bentol-solvent']
with omt.utils.temporary_directory() as tmp_dir:
yaml_script = get_template_script(tmp_dir)
yaml_script['options']['anisotropic_dispersion_correction'] = False
del yaml_script['molecules'] # we shouldn't need any molecule
del yaml_script['solvents']['PME']['clearance'] # we shouldn't need this
yaml_script['systems'] = {'explicit-system':
{'phase1_path': complex_path, 'phase2_path': solvent_path,
'ligand_dsl': 'resname TOL', 'solvent': 'PME'}}
yaml_builder = YamlBuilder(yaml_script)
yaml_builder._check_resume() # check_resume should not raise exceptions
yaml_builder.build_experiments()
# The experiments folders are correctly named and positioned
output_dir = yaml_builder._get_experiment_dir(yaml_builder.options, '')
assert os.path.isdir(output_dir)
assert os.path.isfile(os.path.join(output_dir, 'complex.nc'))
assert os.path.isfile(os.path.join(output_dir, 'solvent.nc'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.yaml'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.log'))
# Analysis script is correct
analysis_script_path = os.path.join(output_dir, 'analysis.yaml')
with open(analysis_script_path, 'r') as f:
assert yaml.load(f) == [['complex', 1], ['solvent', -1]]
@attr('slow') # Skip on Travis-CI
def test_run_experiment_from_gromacs_files():
"""Test experiment run from top/gro files."""
complex_path = examples_paths()['pxylene-complex']
solvent_path = examples_paths()['pxylene-solvent']
include_path = examples_paths()['pxylene-gro-include']
with omt.utils.temporary_directory() as tmp_dir:
yaml_script = get_template_script(tmp_dir)
yaml_script['options']['anisotropic_dispersion_correction'] = False
del yaml_script['molecules'] # we shouldn't need any molecule
yaml_script['systems'] = {'explicit-system':
{'phase1_path': complex_path, 'phase2_path': solvent_path,
'ligand_dsl': 'resname "p-xylene"', 'solvent': 'PME',
'gromacs_include_dir': include_path}}
yaml_script['experiments']['system'] = 'explicit-system'
yaml_builder = YamlBuilder(yaml_script)
yaml_builder._check_resume() # check_resume should not raise exceptions
yaml_builder.build_experiments()
# The experiments folders are correctly named and positioned
output_dir = yaml_builder._get_experiment_dir(yaml_builder.options, '')
assert os.path.isdir(output_dir)
assert os.path.isfile(os.path.join(output_dir, 'complex.nc'))
assert os.path.isfile(os.path.join(output_dir, 'solvent.nc'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.yaml'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.log'))
# Analysis script is correct
analysis_script_path = os.path.join(output_dir, 'analysis.yaml')
with open(analysis_script_path, 'r') as f:
assert yaml.load(f) == [['complex', 1], ['solvent', -1]]
@attr('slow') # Skip on Travis-CI
def test_run_experiment_from_xml_files():
"""Test hydration experiment run from pdb/xml files."""
solvent_path = examples_paths()['toluene-solvent']
vacuum_path = examples_paths()['toluene-vacuum']
with omt.utils.temporary_directory() as tmp_dir:
yaml_script = get_template_script(tmp_dir)
del yaml_script['molecules'] # we shouldn't need any molecule
yaml_script['systems'] = {'explicit-system':
{'phase1_path': solvent_path, 'phase2_path': vacuum_path,
'ligand_dsl': 'resname TOL', 'solvent_dsl': 'not resname TOL'}}
yaml_builder = YamlBuilder(yaml_script)
yaml_builder._check_resume() # check_resume should not raise exceptions
yaml_builder.build_experiments()
# The experiments folders are correctly named and positioned
output_dir = yaml_builder._get_experiment_dir(yaml_builder.options, '')
assert os.path.isdir(output_dir)
assert os.path.isfile(os.path.join(output_dir, 'complex.nc'))
assert os.path.isfile(os.path.join(output_dir, 'solvent.nc'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.yaml'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.log'))
# Analysis script is correct
analysis_script_path = os.path.join(output_dir, 'analysis.yaml')
with open(analysis_script_path, 'r') as f:
assert yaml.load(f) == [['complex', 1], ['solvent', -1]]
@attr('slow') # Skip on Travis-CI
def test_run_experiment():
"""Test experiment run and resuming."""
with omt.utils.temporary_directory() as tmp_dir:
yaml_content = """
---
options:
resume_setup: no
resume_simulation: no
number_of_iterations: 1
output_dir: overwritten
annihilate_sterics: yes
molecules:
T4lysozyme:
filepath: {}
leap: {{parameters: oldff/leaprc.ff14SB}}
select: 0
p-xylene:
filepath: {}
antechamber: {{charge_method: bcc}}
leap: {{parameters: leaprc.gaff}}
solvents:
vacuum:
nonbonded_method: NoCutoff
GBSA-OBC2:
nonbonded_method: NoCutoff
implicit_solvent: OBC2
protocols:{}
systems:
system:
receptor: T4lysozyme
ligand: p-xylene
solvent: !Combinatorial [vacuum, GBSA-OBC2]
experiments:
system: system
options:
output_dir: {}
setup_dir: ''
experiments_dir: ''
protocol: absolute-binding
restraint:
type: FlatBottom
""".format(examples_paths()['lysozyme'], examples_paths()['p-xylene'],
indent(standard_protocol), tmp_dir)
yaml_builder = YamlBuilder(textwrap.dedent(yaml_content))
# Now check_setup_resume should not raise exceptions
yaml_builder._check_resume()
# We setup a molecule and with resume_setup: now we can't do the experiment
err_msg = ''
yaml_builder._db._setup_molecules('p-xylene')
try:
yaml_builder.build_experiments()
except YamlParseError as e:
err_msg = str(e)
assert 'molecule' in err_msg
# Same thing with a system
err_msg = ''
system_dir = os.path.dirname(
yaml_builder._db.get_system('system_GBSAOBC2')[0].position_path)
try:
yaml_builder.build_experiments()
except YamlParseError as e:
err_msg = str(e)
assert 'system' in err_msg
# Now we set resume_setup to True and things work
yaml_builder.options['resume_setup'] = True
ligand_dir = yaml_builder._db.get_molecule_dir('p-xylene')
frcmod_file = os.path.join(ligand_dir, 'p-xylene.frcmod')
prmtop_file = os.path.join(system_dir, 'complex.prmtop')
molecule_last_touched = os.stat(frcmod_file).st_mtime
system_last_touched = os.stat(prmtop_file).st_mtime
yaml_builder.build_experiments()
# Neither the system nor the molecule has been processed again
assert molecule_last_touched == os.stat(frcmod_file).st_mtime
assert system_last_touched == os.stat(prmtop_file).st_mtime
# The experiments folders are correctly named and positioned
for exp_name in ['systemvacuum', 'systemGBSAOBC2']:
# The output directory must be the one in the experiment section
output_dir = os.path.join(tmp_dir, exp_name)
assert os.path.isdir(output_dir)
assert os.path.isfile(os.path.join(output_dir, 'complex.nc'))
assert os.path.isfile(os.path.join(output_dir, 'solvent.nc'))
assert os.path.isfile(os.path.join(output_dir, exp_name + '.yaml'))
assert os.path.isfile(os.path.join(output_dir, exp_name + '.log'))
# Analysis script is correct
analysis_script_path = os.path.join(output_dir, 'analysis.yaml')
with open(analysis_script_path, 'r') as f:
assert yaml.load(f) == [['complex', 1], ['solvent', -1]]
# Now we can't run the experiment again with resume_simulation: no
try:
yaml_builder.build_experiments()
except YamlParseError as e:
err_msg = str(e)
assert 'experiment' in err_msg
# We set resume_simulation: yes and now things work
yaml_builder.options['resume_simulation'] = True
yaml_builder.build_experiments()
def test_run_solvation_experiment():
"""Test solvation free energy experiment run."""
with omt.utils.temporary_directory() as tmp_dir:
yaml_script = get_template_script(tmp_dir)
yaml_script['solvents']['PME']['clearance'] = '14*angstroms'
yaml_script['systems'] = {
'system1':
{'solute': 'toluene', 'solvent1': 'PME', 'solvent2': 'vacuum',
'leap': {'parameters': ['leaprc.gaff', 'oldff/leaprc.ff14SB']}}}
protocol = yaml_script['protocols']['absolute-binding']['solvent']
yaml_script['protocols'] = {
'hydration-protocol': {
'solvent1': protocol,
'solvent2': protocol
}
}
yaml_script['experiments'] = {
'system': 'system1',
'protocol': 'hydration-protocol'
}
yaml_builder = YamlBuilder(yaml_script)
yaml_builder._check_resume() # check_resume should not raise exceptions
yaml_builder.build_experiments()
# The experiments folders are correctly named and positioned
output_dir = yaml_builder._get_experiment_dir(yaml_builder.options, '')
assert os.path.isdir(output_dir)
assert os.path.isfile(os.path.join(output_dir, 'solvent1.nc'))
assert os.path.isfile(os.path.join(output_dir, 'solvent2.nc'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.yaml'))
assert os.path.isfile(os.path.join(output_dir, 'experiments.log'))
# Analysis script is correct
analysis_script_path = os.path.join(output_dir, 'analysis.yaml')
with open(analysis_script_path, 'r') as f:
assert yaml.load(f) == [['solvent1', 1], ['solvent2', -1]]
# Check that solvated phase has a barostat.
from netCDF4 import Dataset
ncfile = Dataset(os.path.join(output_dir, 'solvent1.nc'), 'r')
ncgrp_stateinfo = ncfile.groups['thermodynamic_states']
system = openmm.System()
system.__setstate__(str(ncgrp_stateinfo.variables['base_system'][0]))
has_barostat = False
for force in system.getForces():
if force.__class__.__name__ == 'MonteCarloBarostat':
has_barostat = True
if not has_barostat:
raise Exception('Explicit solvent phase of hydration free energy calculation does not have a barostat.')
if __name__ == '__main__':
test_run_solvation_experiment()
|
pgrinaway/yank
|
Yank/tests/test_yaml.py
|
Python
|
lgpl-3.0
| 90,008
|
[
"Gromacs",
"MDTraj",
"OpenMM"
] |
fc9da92d5d97e536a788d9e54dbd9d295aa4e8ee6a2017b13a6f0a5d6280bffc
|
# This file is generated by /galaxy/home/mgehrin/test/tmp/numexpr-2.4.3/setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
|
bxlab/HiFive_Paper
|
Scripts/HiCLib/numexpr-2.4.3/build/src.linux-x86_64-2.7/numexpr/__config__.py
|
Python
|
bsd-3-clause
| 697
|
[
"Galaxy"
] |
812d5e639263ad5230126f541a7847d2ac450cd43da41d47136797fcdce728be
|
import pytest
from .addons import using
from .utils import *
import math
import numpy as np
import qcelemental as qcel
import psi4
from psi4.driver import qcdb
pytestmark = pytest.mark.quick
def hide_test_xtpl_fn_fn_error():
psi4.geometry('He')
with pytest.raises(psi4.UpgradeHelper) as e:
psi4.energy('cbs', scf_basis='cc-pvdz', scf_scheme=psi4.driver_cbs.xtpl_highest_1)
assert 'Replace extrapolation function with function name' in str(e.value)
def hide_test_xtpl_cbs_fn_error():
psi4.geometry('He')
with pytest.raises(psi4.UpgradeHelper) as e:
psi4.energy(psi4.cbs, scf_basis='cc-pvdz')
#psi4.energy(psi4.driver.driver_cbs.complete_basis_set, scf_basis='cc-pvdz')
assert 'Replace cbs or complete_basis_set function with cbs string' in str(e.value)
@pytest.mark.parametrize("inp,out", [
((2, 'C2V'), 2),
(('A2', 'c2v'), 2),
(('2', 'C2V'), 2),
])
def test_parse_cotton_irreps(inp, out):
idx = psi4.driver.driver_util.parse_cotton_irreps(*inp)
assert idx == out
@pytest.mark.parametrize("inp", [
((5, 'cs')),
(('5', 'cs')),
((0, 'cs')),
(('a2', 'cs')),
])
def test_parse_cotton_irreps_error(inp):
with pytest.raises(psi4.ValidationError) as e:
psi4.driver.driver_util.parse_cotton_irreps(*inp)
assert 'not valid for point group' in str(e.value)
# <<< TODO Deprecated! Delete in Psi4 v1.5 >>>
@using("networkx")
def test_deprecated_qcdb_align_b787():
soco10 = """
O 1.0 0.0 0.0
C 0.0 0.0 0.0
O -1.0 0.0 0.0
units ang
"""
sooc12 = """
O 1.2 4.0 0.0
O -1.2 4.0 0.0
C 0.0 4.0 0.0
units ang
"""
ref_rmsd = math.sqrt(2. * 0.2 * 0.2 / 3.) # RMSD always in Angstroms
oco10 = qcel.molparse.from_string(soco10)
oco12 = qcel.molparse.from_string(sooc12)
oco10_geom_au = oco10['qm']['geom'].reshape((-1, 3)) / qcel.constants.bohr2angstroms
oco12_geom_au = oco12['qm']['geom'].reshape((-1, 3)) / qcel.constants.bohr2angstroms
with pytest.warns(FutureWarning) as err:
rmsd, mill = qcdb.align.B787(
oco10_geom_au, oco12_geom_au, np.array(['O', 'C', 'O']), np.array(['O', 'O', 'C']), verbose=4, do_plot=False)
assert compare_values(ref_rmsd, rmsd, 6, 'known rmsd B787')
def test_deprecated_qcdb_align_scramble():
with pytest.warns(FutureWarning) as err:
mill = qcdb.align.compute_scramble(4, do_resort=False, do_shift=False, do_rotate=False, deflection=1.0, do_mirror=False)
assert compare_arrays([0,1,2,3], mill.atommap, 4, 'atommap')
# <<< TODO Deprecated! Delete when the error messages are removed. >>>
def test_deprecated_dcft_calls():
psi4.geometry('He')
err_substr = "All instances of 'dcft' should be replaced with 'dct'."
driver_calls = [psi4.energy, psi4.optimize, psi4.gradient, psi4.hessian, psi4.frequencies]
for call in driver_calls:
with pytest.raises(psi4.UpgradeHelper) as e:
call('dcft', basis='cc-pvdz')
assert err_substr in str(e.value)
# The errors trapped below are C-side, so they're nameless, Py-side.
with pytest.raises(Exception) as e:
psi4.set_module_options('dcft', {'e_convergence': 9})
assert err_substr in str(e.value)
with pytest.raises(Exception) as e:
psi4.set_module_options('dct', {'dcft_functional': 'odc-06'})
assert err_substr in str(e.value)
def test_deprecated_component_dipole():
#with pytest.warns(FutureWarning) as e:
psi4.set_variable("current dipole x", 5)
with pytest.warns(FutureWarning) as e:
ans = psi4.variable("current dipole x")
assert ans == 5
|
ashutoshvt/psi4
|
tests/pytests/test_misc.py
|
Python
|
lgpl-3.0
| 3,652
|
[
"Psi4"
] |
41b003cba25b983fad005d9d457a9d1b9f6e6877414f525c4b0b48a05717d412
|
import ast
import logging
from sqlalchemy import and_, not_, or_
from sqlalchemy.sql.operators import ColumnOperators as operator
COMPARE_OPERATORS = {
ast.Eq: operator.__eq__,
ast.NotEq: operator.__ne__,
ast.Lt: operator.__lt__,
ast.LtE: operator.__le__,
ast.Gt: operator.__gt__,
ast.GtE: operator.__ge__,
ast.Is: operator.is_,
ast.IsNot: operator.isnot,
ast.In: operator.in_,
ast.NotIn: operator.notin_,
}
OPERATORS = {
ast.Add: operator.__add__,
ast.Sub: operator.__sub__,
ast.Mult: operator.__mul__,
ast.Div: operator.__div__,
}
BOOLEAN_OPERATORS = {ast.And: and_, ast.Or: or_}
UNARY_OPERATORS = {
ast.Not: not_,
# ast.UAdd: operator.pos,
# ast.USub: operator.neg
}
BUILTINS = {"None": None}
class InvalidQueryError(Exception):
pass
def query_parse(clause, **symbols):
"""
Parses query clause.
Query Clause Identifier Rules:
Prefix + Identifier
Prefix Rules:
Is optional
Must start with a-z OR A-Z
Can contain a-z OR A-Z OR 0-9 OR _
Must end with .
Identifier Rules:
Is mandatory
Must start with a-z OR A-Z
Can contain a-z OR A-Z OR 0-9 OR _
Supported Comparators (case-insensitive)
- ==
- !=
- <
- <=
- >
- >=
- in
Supported Functions
- like
- ilike
Supported Operands (case-insensitive)
- and
- or
- not
Null Handling
<identifier> is None OR <identifier> == None
Examples:
- age > 1
- user.age >= 10 and user.age < 100
- ((user.age < 18 and movie.ratings in ("PG", "PG13") or user.age > 18)
:param clause: The clause to be parsed.
:type clause: str
:raises InvalidQueryError: Raised if the `clause` is not valid.
:returns: sqlalchemy boolean clause list.
:rtype: tuple
.. example::
>>> str( query_parse("w.wf_id == 1") )
'workflow.wf_id = :wf_id_1'
>>> str( query_parse("w.wf_id > 1 and w.wf_id < 5") )
'workflow.wf_id > :wf_id_1 AND workflow.wf_id < :wf_id_2'
"""
try:
n = ast.parse(clause.strip(",") + ",", mode="eval").body
except SyntaxError as e:
raise InvalidQueryError("Invalid query: %s" % e)
if not isinstance(n, ast.Tuple):
raise InvalidQueryError("Invalid condition: must evaluate to a boolean value")
return _QueryEvaluator(**symbols).visit(n)
class _QueryEvaluator(ast.NodeVisitor):
    """AST visitor that translates a parsed query clause into SQLAlchemy
    expressions, resolving identifier prefixes through ``symbols``."""

    def __init__(self, **symbols):
        super().__init__()
        self._symbols = symbols
        self._log = logging.getLogger(__name__)

    # Compare
    def visit_Compare(self, n):
        # Compare(expr left, cmpop* ops, expr* comparators)
        left = self.visit(n.left)
        result = True
        for i, v in zip(n.ops, n.comparators):
            op = COMPARE_OPERATORS[i.__class__]
            comparator = self.visit(v)
            # Chained comparisons (a < b < c) are combined pairwise.
            result = result and op(left, comparator)
            left = comparator
        return result

    # BoolOp
    def visit_BoolOp(self, n):
        # boolop = And | Or
        op = BOOLEAN_OPERATORS[n.op.__class__]
        return op(*(self.visit(i) for i in n.values))

    # BinOp
    def visit_BinOp(self, n):
        left = self.visit(n.left)
        right = self.visit(n.right)
        op = OPERATORS[n.op.__class__]
        return op(left, right)

    # UnaryOp
    def visit_UnaryOp(self, n):
        # UnaryOp(unaryop op, expr operand)
        self._log.info("UnaryOp <%s> <%s>", n.op.__class__, n.operand)
        op = UNARY_OPERATORS[n.op.__class__]
        operand = self.visit(n.operand)
        return op(operand)

    # Identifiers
    def visit_Attribute(self, n):
        self._log.info("Attribute <%s> <%s>", n.value, n.attr)
        value = self.visit(n.value)
        if isinstance(value, dict):
            return value[n.attr]
        return getattr(value, n.attr)

    def visit_Subscript(self, n):
        self._log.info("Subscript <%s> <%s>", n.value, n.slice)
        value = self.visit(n.value)
        slice = self.visit(n.slice)
        return value[slice]

    def visit_Index(self, n):
        return self.visit(n.value)

    def visit_keyword(self, n):
        # BUG FIX: evaluate the keyword's value instead of returning the raw
        # AST node, so that e.g. like(x, pattern="%a%") receives the string
        # literal rather than an ast.Str node.
        return {n.arg: self.visit(n.value)}

    def visit_Name(self, n):
        self._log.info("Name <%s>", n.id)
        name = self._symbols.get(n.id, BUILTINS.get(n.id, None))
        if n.id not in self._symbols and n.id not in BUILTINS:
            raise NameError(
                "Invalid name <%s> at Line <%d> Col "
                "<%d>" % (n.id, n.lineno, n.col_offset)
            )
        return name

    # Literals
    def visit_Num(self, n):
        return n.n

    def visit_Str(self, n):
        return n.s

    def visit_NameConstant(self, n):
        self._log.info("NameConstant <%s>", n.value)
        return n.value

    def visit_List(self, n):
        # Renamed the loop variable: the original shadowed the node `n`.
        return [self.visit(e) for e in n.elts]

    def visit_Tuple(self, n):
        return tuple(self.visit(e) for e in n.elts)

    def visit_Set(self, n):
        return {self.visit(e) for e in n.elts}

    def visit_Call(self, n):
        args = []
        try:
            func = self.visit(n.func)
        except AttributeError:
            raise InvalidQueryError(
                "Invalid name <%s> at Line <%d> Col <%d>"
                % (n.func.attr, n.lineno, n.col_offset)
            )
        # Args
        args.extend([self.visit(a) for a in n.args])
        # Keyword Args
        kwargs = {}
        for k in n.keywords:
            kwargs.update(self.visit(k))
        return func(*args, **kwargs)

    # Catch All
    def generic_visit(self, n):
        raise InvalidQueryError(
            "Invalid query at Line <%d> Col <%d>" % (n.lineno, n.col_offset)
        )
def _main():
    """Parse the clause given on the command line against the Workflow table."""
    import sys

    logging.basicConfig(level=logging.DEBUG)
    logging.debug("Expression <%s>", sys.argv[1])

    from Pegasus.db.schema import Workflow

    # BUG FIX: query_parse(clause, **symbols) accepts symbols only as
    # keyword arguments; the old positional call raised a TypeError.  The
    # docstring examples use the "w." prefix, so expose the table as "w".
    result = query_parse(sys.argv[1], w=Workflow)
    logging.debug("Evaluation Result <%s>", result[0])


if __name__ == "__main__":
    _main()
|
pegasus-isi/pegasus
|
packages/pegasus-python/src/Pegasus/service/_query.py
|
Python
|
apache-2.0
| 6,134
|
[
"VisIt"
] |
c246d4ec9cae31685cc27b609e6e9a9c459b86c36ec44199ee8db0d2d85b77e8
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
## @file ostap/fitting/transform_pdf.py
# PDF for transformed variables
# @author Vanya BELYAEV Ivan.Belyaeve@itep.ru
# @date 2020-05-11
# =============================================================================
"""PDF for transformed variables
"""
# =============================================================================
__version__ = "$Revision:"
__author__ = "Vanya BELYAEV Ivan.Belyaev@itep.ru"
__date__ = "2020-05-11"
__all__ = (
##
'TrPDF' , ## 1D-transformed PDF
##
)
# =============================================================================
import ROOT
import ostap.fitting.roofit
from ostap.fitting.roofuncs import var_mul
from ostap.fitting.funbasic import FUNC
from ostap.fitting.basic import PDF
from ostap.fitting.fit2d import PDF2
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger ( 'ostap.fitting.transform_pdf' )
else : logger = getLogger ( __name__ )
# =============================================================================
## @class TrPDF
# PDF of the transformed variable.
# - e.g. gaussian in log10 scale:
# @code
#
# x = ROOT.RooRealVar ( 'x' ,'' ,1,100000 ) ## original/old variable
# lx = ROOT.RooRealVar ( 'lx' ,'log10(x)',0,5 ) ## new_variable
# LX = Fun1D( lx , lx )
# NX = 10 ** LX ## old variable as a function of new variable
#
# ## PDF as function of old variable:
# g1 = Gauss_pdf ( 'G1' , xvar = x , mean = 10000 , sigma = 10000 )
#
# ## PDF as function of new variable
# g2 = TrPDF ( pdf = g1, new_var = NX )
# @endcode
#
# Optionally the absolute value of the jacobian can be specified:
# @code
# J = math.log(10) * ( 10**LX)
# ## PDF as function of new variable
# g2 = TrPDF ( pdf = g1, new_var = NX , jacob = J )
# @endcode
class TrPDF(PDF) :
    """ PDF of the transformed variable.
    - e.g. gaussian in log10 scale:
    >>> x   = ROOT.RooRealVar ( 'x'  ,''        ,1,100000 )  ## original/old variable
    >>> lx  = ROOT.RooRealVar ( 'lx' ,'log10(x)',0,5      )  ## new variable
    >>> LX  = Fun1D( lx , lx )
    >>> NX  = 10 ** LX    ## old variable as function of new variable
    - PDF as function of old variable:
    >>> g1 = Gauss_pdf ( 'G1' , xvar = x , mean = 10000 , sigma = 10000 )
    - PDF as function of new variable
    >>> g2 = TrPDF ( pdf = g1, new_var = NX )
    Optionally the absolute value of the jacobian can be specified:
    >>> J  = math.log(10) * ( 10**LX)
    >>> g2 = TrPDF ( pdf = g1, new_var = NX , jacob = J )
    """
    def __init__ ( self          ,
                   pdf           ,   ## template PDF of "old" variable
                   new_var       ,   ## old variable as function of a new variable
                   jacob  = None ,   ## absolute value of the Jacobian: |d(old)/d(new)|
                   name   = ''   ) : ## proposed name

        assert pdf     and isinstance ( pdf     , PDF  ) , 'Invalid PDF type %s'     % type ( pdf     )
        assert new_var and isinstance ( new_var , FUNC ) , 'Invalid new_var type %s' % type ( new_var )

        xvar = new_var.xvar
        name = name if name else "Transform_%s" % pdf.name

        PDF.__init__ ( self , name , xvar = xvar )

        ## default Jacobian: |dF/dx| of the old variable as function of the new one
        if not jacob : jacob = abs ( new_var.dFdX () )
        assert isinstance ( jacob , FUNC ) , 'Invalid Jacobian %s' % type ( jacob )

        ## BUG FIX: idiomatic membership test and corrected grammar of the
        ## warning message (was: "Jacobian has does not depend on xvar!")
        if xvar not in jacob :
            self.warning ( 'Jacobian does not depend on xvar!')

        self.__jacob   = jacob
        self.__new_var = new_var
        self.__ori_pdf = pdf
        ## clone the template PDF, substituting old-var = f(new-var)
        self.__clo_pdf = pdf.clone ( xvar = new_var.fun )

        ## new PDF as a function: jacobian * cloned pdf
        self.__new_fun = var_mul ( jacob.fun , self.__clo_pdf.pdf )
        from ostap.fitting.basic import make_pdf
        self.__new_pdf = make_pdf ( self.__new_fun , [ xvar ] , self.name + '_' )

        ## finally the new PDF:
        self.pdf    = self.__new_pdf.pdf
        ## configuration for cloning/pickling
        self.config = {
            'name'    : self.name     ,
            'pdf'     : self.orig_pdf ,
            'new_var' : self.new_var  ,
            'jacob'   : self.jacob    ,
        }
        self.checked_keys.add ( 'pdf'     )
        self.checked_keys.add ( 'new_var' )
        self.checked_keys.add ( 'jacob'   )

    @property
    def orig_pdf ( self ) :
        """``orig_pdf'': original, not-transformed PDF"""
        return self.__ori_pdf

    @property
    def new_pdf ( self ) :
        """``new_pdf'': transformed PDF"""
        return self.__new_pdf

    @property
    def new_var ( self ) :
        """``new_var'' : new/old variable """
        return self.__new_var

    @property
    def jacob( self ) :
        """``jacob'' : absolute value of the Jacobian |d(old)/d(new)| """
        return self.__jacob
# =============================================================================
if '__main__' == __name__ :
    # When executed as a script, print the module's documentation summary.
    from ostap.utils.docme import docme
    docme ( __name__ , logger = logger )
# =============================================================================
##                                                                      The END
# =============================================================================
|
OstapHEP/ostap
|
ostap/fitting/transform_pdf.py
|
Python
|
bsd-3-clause
| 5,716
|
[
"Gaussian"
] |
08d1e63ca94b1239ea66593f0cc5d13fc691f621dadd06604a4738927317bd6f
|
#!/usr/bin/env python
"""
"""
from __future__ import division, absolute_import
import logging
import numpy as np
import scipy as sp
import scipy.interpolate as spinterp
import time
import datetime as dt
import matplotlib.pyplot as plt
# Mayavi is optional: without it 3-D plotting is disabled (mlab is None) and
# plot3Dslice() returns early.  ValueError/RuntimeError can be raised by
# mayavi's backend initialisation (e.g. no display), not just ImportError.
try:
    from mayavi import mlab
except ImportError:
    mlab = None
except (ValueError,RuntimeError) as e:
    mlab = None
    print('Mayavi not imported due to {}'.format(e))
# Prefer the 'viridis' colormap when this matplotlib version provides it,
# otherwise fall back to the older default 'jet'.
try:
    plt.get_cmap('viridis')
    defmap3d = 'viridis'
except ValueError:
    defmap3d = 'jet'
#%%
def plot3Dslice(geodata, surfs, vbounds, titlestr='', time=0, gkey=None, cmap=defmap3d,
                ax=None, fig=None, method='linear', fill_value=np.nan, view=None, units='',
                colorbar=False, outimage=False):
    """
    This function create 3-D slice image given either a surface or list of
    coordinates to slice through.

    Inputs:
        geodata - A geodata object that will be plotted in 3D. Must be in
            cartesian coordinates.
        surfs - A three element list. Each element can either be a list of
            slice coordinates along that axis, or the three elements can be
            x/y/z arrays describing a general surface.
        vbounds - A list of bounds for the geodata object's parameters,
            e.g. vbounds=[500, 2000].
        titlestr - A string that holds for the overall image.
        time - Index into the time dimension of the data.
        gkey - Name of the parameter to plot; defaults to the first one.
        cmap, ax, fig, method, fill_value, view, units, colorbar, outimage -
            display options.

    Returns a list of mayavi surfaces, or a screenshot array if outimage
    is True.
    """
    if mlab is None:
        print('mayavi was not successfully imported')
        return

    assert geodata.coordnames.lower() == 'cartesian'

    datalocs = geodata.dataloc

    xvec = sp.unique(datalocs[:, 0])
    yvec = sp.unique(datalocs[:, 1])
    zvec = sp.unique(datalocs[:, 2])

    assert len(xvec)*len(yvec)*len(zvec) == datalocs.shape[0]

    # determine if the ordering is fortran or c style ordering
    diffcoord = sp.diff(datalocs, axis=0)

    if diffcoord[0, 1] != 0.0:
        ar_ord = 'f'
    elif diffcoord[0, 2] != 0.0:
        ar_ord = 'c'
    elif diffcoord[0, 0] != 0.0:
        if len(np.where(diffcoord[:, 1])[0]) == 0:
            ar_ord = 'f'
        elif len(np.where(diffcoord[:, 2])[0]) == 0:
            ar_ord = 'c'

    matshape = (len(yvec), len(xvec), len(zvec))
    # reshape the arrays into a matricies for plotting
    x, y, z = [sp.reshape(datalocs[:, idim], matshape, order=ar_ord) for idim in range(3)]

    if gkey is None:
        gkey = geodata.datanames()[0]
    porig = geodata.data[gkey][:, time]

    mlab.figure(fig)
    # determine if list of slices or surfaces are given
    islists = isinstance(surfs[0], list)
    # BUG FIX: initialize onedim so the test below cannot raise NameError
    # when surfs[0] is neither a list nor an ndarray.
    onedim = False
    if isinstance(surfs[0], np.ndarray):
        onedim = surfs[0].ndim == 1

    # get slices for each dimension out
    surflist = []
    if islists or onedim:
        p = np.reshape(porig, matshape, order=ar_ord)
        xslices = surfs[0]
        for isur in xslices:
            indx = sp.argmin(sp.absolute(isur-xvec))
            xtmp = x[:, indx]
            ytmp = y[:, indx]
            ztmp = z[:, indx]
            ptmp = p[:, indx]
            pmask = sp.zeros_like(ptmp).astype(bool)
            pmask[sp.isnan(ptmp)] = True
            surflist.append(mlab.mesh(xtmp, ytmp, ztmp, scalars=ptmp, vmin=vbounds[0],
                                      vmax=vbounds[1], colormap=cmap, mask=pmask))
            surflist[-1].module_manager.scalar_lut_manager.lut.nan_color = 0, 0, 0, 0

        yslices = surfs[1]
        for isur in yslices:
            indx = sp.argmin(sp.absolute(isur-yvec))
            xtmp = x[indx]
            ytmp = y[indx]
            ztmp = z[indx]
            ptmp = p[indx]
            pmask = sp.zeros_like(ptmp).astype(bool)
            pmask[sp.isnan(ptmp)] = True
            surflist.append(mlab.mesh(xtmp, ytmp, ztmp, scalars=ptmp, vmin=vbounds[0],
                                      vmax=vbounds[1], colormap=cmap, mask=pmask))
            surflist[-1].module_manager.scalar_lut_manager.lut.nan_color = 0, 0, 0, 0

        zslices = surfs[2]
        for isur in zslices:
            indx = sp.argmin(sp.absolute(isur-zvec))
            xtmp = x[:, :, indx]
            ytmp = y[:, :, indx]
            ztmp = z[:, :, indx]
            ptmp = p[:, :, indx]
            pmask = sp.zeros_like(ptmp).astype(bool)
            pmask[sp.isnan(ptmp)] = True
            surflist.append(mlab.mesh(xtmp, ytmp, ztmp, scalars=ptmp, vmin=vbounds[0],
                                      vmax=vbounds[1], colormap=cmap, mask=pmask))
            surflist[-1].module_manager.scalar_lut_manager.lut.nan_color = 0, 0, 0, 0
    else:
        # For a general surface: interpolate the data onto the surface grid.
        xtmp, ytmp, ztmp = surfs[:]
        gooddata = ~np.isnan(porig)
        curparam = porig[gooddata]
        curlocs = datalocs[gooddata]
        new_coords = np.column_stack((xtmp.flatten(), ytmp.flatten(), ztmp.flatten()))
        ptmp = spinterp.griddata(curlocs, curparam, new_coords, method, fill_value)
        pmask = sp.zeros_like(ptmp).astype(bool)
        pmask[sp.isnan(ptmp)] = True
        surflist.append(mlab.mesh(xtmp, ytmp, ztmp, scalars=ptmp, vmin=vbounds[0],
                                  vmax=vbounds[1], colormap=cmap, mask=pmask))
        surflist[-1].module_manager.scalar_lut_manager.lut.nan_color = 0, 0, 0, 0
    mlab.title(titlestr, color=(0, 0, 0))
    #mlab.outline(color=(0,0,0))
    mlab.axes(color=(0, 0, 0), x_axis_visibility=True, xlabel='x in km', y_axis_visibility=True,
              ylabel='y in km', z_axis_visibility=True, zlabel='z in km')

    mlab.orientation_axes(xlabel='x in km', ylabel='y in km', zlabel='z in km')

    if view is not None:
        # order of elevation is changed between matplotlib and mayavi
        mlab.view(view[0], view[1])
    if colorbar:
        # BUG FIX: the original assigned `titlestr` in the units=='' branch
        # but passed `titlstr` to mlab.colorbar, raising NameError whenever
        # a colorbar was requested without units.
        if units == '':
            titlstr = gkey
        else:
            titlstr = gkey +' in ' +units
        mlab.colorbar(surflist[-1], title=titlstr, orientation='vertical')
    if outimage:
        arr = mlab.screenshot(fig, antialiased=True)
        mlab.close(fig)
        return arr
    else:
        return surflist
|
jswoboda/GeoDataPython
|
GeoData/plottingmayavi.py
|
Python
|
mit
| 6,180
|
[
"Mayavi"
] |
1ecec12e1eebe2d6ebd1457613dcdb86564c2108acbe94275a88cadb68740368
|
""" Collection of utilities for locating certs, proxy, CAs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import os
import DIRAC
from DIRAC import gConfig
g_SecurityConfPath = "/DIRAC/Security"
def getProxyLocation():
    """Get the path of the currently active grid proxy file."""
    # Environment variables take precedence, in this order.
    for envVar in ("GRID_PROXY_FILE", "X509_USER_PROXY"):
        candidate = os.environ.get(envVar)
        if candidate:
            proxyPath = os.path.realpath(candidate)
            if os.path.isfile(proxyPath):
                return proxyPath
    # Fall back to the conventional /tmp/x509up_u<uid> location.
    defaultProxy = "/tmp/x509up_u%d" % os.getuid()
    if os.path.isfile(defaultProxy):
        return defaultProxy
    # No gridproxy found
    return False
# Retrieve CA's location
def getCAsLocation():
    """Retrieve the CA's files location.

    Lookup order: CS ``Grid-Security`` option, CS ``CALocation`` option,
    ``X509_CERT_DIR`` environment variable, then the conventional
    ``grid-security/certificates`` directories.  Returns the directory
    path, or ``False`` when nothing is found.
    """
    # Grid-Security
    retVal = gConfig.getOption("%s/Grid-Security" % g_SecurityConfPath)
    if retVal["OK"]:
        casPath = "%s/certificates" % retVal["Value"]
        if os.path.isdir(casPath):
            return casPath
    # CAPath
    retVal = gConfig.getOption("%s/CALocation" % g_SecurityConfPath)
    if retVal["OK"]:
        casPath = retVal["Value"]
        if os.path.isdir(casPath):
            return casPath
    # Look up the X509_CERT_DIR environment variable
    # NOTE(review): unlike the other branches, this one is returned without
    # an isdir() check -- confirm whether that asymmetry is intentional.
    if "X509_CERT_DIR" in os.environ:
        casPath = os.environ["X509_CERT_DIR"]
        return casPath
    # rootPath./etc/grid-security/certificates
    casPath = "%s/etc/grid-security/certificates" % DIRAC.rootPath
    if os.path.isdir(casPath):
        return casPath
    # /etc/grid-security/certificates
    casPath = "/etc/grid-security/certificates"
    if os.path.isdir(casPath):
        return casPath
    # No CA's location found
    return False
# Retrieve CA's location
def getCAsDefaultLocation():
    """Retrieve the CAs location inside the DIRAC etc directory."""
    # rootPath./etc/grid-security/certificates
    casPath = "%s/etc/grid-security/certificates" % DIRAC.rootPath
    return casPath
# TODO: Static depending on files specified on CS
# Retrieve certificate
def getHostCertificateAndKeyLocation(specificLocation=None):
    """Retrieve the host certificate files location.
    Lookup order:

    * ``specificLocation`` (probably broken, don't use it)
    * Environment variables (``DIRAC_X509_HOST_CERT`` and ``DIRAC_X509_HOST_KEY``)
    * CS (``/DIRAC/Security/CertFile`` and ``/DIRAC/Security/CertKey``)
    * Alternative exotic options, with ``prefix`` in ``server``, ``host``, ``dirac``, ``service``:

      * in `<DIRAC rootpath>/etc/grid-security/` for ``<prefix>cert.pem`` and ``<prefix>key.pem``
      * in the path defined in the CS in ``/DIRAC/Security/Grid-Security``

    :param specificLocation: CS path to look for a the path to cert and key, which then should be the same.
                            Probably does not work, don't use it

    :returns: tuple ``(<cert location>, <key location>)`` or ``False``
    """
    # Accumulates the resolved "cert" and "key" paths.
    fileDict = {}

    # First, check the environment variables
    for fileType, envVar in (("cert", "DIRAC_X509_HOST_CERT"), ("key", "DIRAC_X509_HOST_KEY")):
        if envVar in os.environ and os.path.exists(os.environ[envVar]):
            fileDict[fileType] = os.environ[envVar]

    for fileType in ("cert", "key"):

        # Check if we already have the info (from the environment above)
        if fileType in fileDict:
            continue

        # Direct file in config (note: taken as-is, without an existence check)
        retVal = gConfig.getOption("%s/%sFile" % (g_SecurityConfPath, fileType.capitalize()))
        if retVal["OK"]:
            fileDict[fileType] = retVal["Value"]
            continue

        # Otherwise probe the conventional <prefix>{cert,key}.pem names in the
        # CS Grid-Security path first, then <rootPath>/etc/grid-security/.
        fileFound = False
        for filePrefix in ("server", "host", "dirac", "service"):
            # Possible grid-security's
            paths = []
            retVal = gConfig.getOption("%s/Grid-Security" % g_SecurityConfPath)
            if retVal["OK"]:
                paths.append(retVal["Value"])
            paths.append("%s/etc/grid-security/" % DIRAC.rootPath)
            for path in paths:
                filePath = os.path.realpath("%s/%s%s.pem" % (path, filePrefix, fileType))
                if os.path.isfile(filePath):
                    fileDict[fileType] = filePath
                    fileFound = True
                    break
            if fileFound:
                break
    if "cert" not in fileDict or "key" not in fileDict:
        return False

    # we can specify a location outside /opt/dirac/etc/grid-security directory
    # NOTE(review): the same CS value overrides both cert and key here, so
    # they would point at the same file -- looks suspicious, as documented.
    if specificLocation:
        fileDict["cert"] = gConfig.getValue(specificLocation, fileDict["cert"])
        fileDict["key"] = gConfig.getValue(specificLocation, fileDict["key"])

    return (fileDict["cert"], fileDict["key"])
def getCertificateAndKeyLocation():
    """Get the locations of the user X509 certificate and key pem files.

    Lookup order for each file:

    1. ``X509_USER_CERT`` / ``X509_USER_KEY`` environment variables
    2. ``$HOME/.globus/usercert.pem`` / ``$HOME/.globus/userkey.pem``

    :returns: tuple ``(<cert location>, <key location>)`` or ``False``
    """
    # BUG FIX: HOME may be unset (e.g. system services, cron); the original
    # os.environ["HOME"] raised KeyError instead of returning False.
    home = os.environ.get("HOME")

    certfile = ""
    if "X509_USER_CERT" in os.environ:
        if os.path.exists(os.environ["X509_USER_CERT"]):
            certfile = os.environ["X509_USER_CERT"]
    if not certfile and home:
        if os.path.exists(home + "/.globus/usercert.pem"):
            certfile = home + "/.globus/usercert.pem"
    if not certfile:
        return False

    keyfile = ""
    if "X509_USER_KEY" in os.environ:
        if os.path.exists(os.environ["X509_USER_KEY"]):
            keyfile = os.environ["X509_USER_KEY"]
    if not keyfile and home:
        if os.path.exists(home + "/.globus/userkey.pem"):
            keyfile = home + "/.globus/userkey.pem"
    if not keyfile:
        return False

    return (certfile, keyfile)
def getDefaultProxyLocation():
    """Get the location of a possible new grid proxy file."""
    # An explicitly configured location wins, in this order of precedence.
    for envVar in ("GRID_PROXY_FILE", "X509_USER_PROXY"):
        if envVar in os.environ:
            return os.path.realpath(os.environ[envVar])
    # Otherwise use the conventional /tmp/x509up_u<uid> path.
    return "/tmp/x509up_u%d" % os.getuid()
|
ic-hep/DIRAC
|
src/DIRAC/Core/Security/Locations.py
|
Python
|
gpl-3.0
| 6,026
|
[
"DIRAC"
] |
0a3a502e238b26a2b8b6ddaba30a4cfba111a311a8633767914a27f3aa03efb6
|
#!/usr/bin/env python
import numpy as np
import sys, subprocess
import os
"""
This is a python program which creates a box of acn for use in lammps simulations.
"""
# Command line: build_acn.py <numacns> <numco2s> <type>
#   type 0 -> write packmol inputs only, 1 -> convert packed system to a
#   LAMMPS data file, 3 -> do both, running packmol in between (see the
#   dispatch at the bottom of the script).
if len(sys.argv) != 4:
    print("Usage python build_acn.py numacns numco2s type")
    exit()
numacns=int(sys.argv[1])
numco2s=int(sys.argv[2])
type=int(sys.argv[3])  # NOTE: shadows the builtin `type` for the rest of the script
def gen_packmol(nacn, nco2):
    """Write the packmol input (acncx.pmol) plus the acn.xyz and co2.xyz
    molecule templates for a box of nacn acetonitrile and nco2 CO2."""
    total = nacn + nco2
    # Box edge from the target number density (0.034 molecules/A^3),
    # shrunk by 2 A to leave a tolerance margin at the walls.
    L = (total / 0.034) ** (1 / 3.) - 2

    pmol_lines = [
        "tolerance 2.0\n",
        "filetype xyz\n",
        "output system.xyz\n",
        "\n",
        "structure acn.xyz\n",
        "  number %s\n" % nacn,
        "  inside box 2. 2. 2. %s %s %s\n" % (L, L, L),
        "end structure\n",
        "\n",
        "structure co2.xyz\n",
        "  number %s\n" % nco2,
        "  inside box 2. 2. 2. %s %s %s\n" % (L, L, L),
        "end structure\n",
    ]
    with open("acncx.pmol", 'w') as pm:
        pm.writelines(pmol_lines)

    # Single united-atom acetonitrile template (CH3-C-N along x).
    with open("acn.xyz", 'w') as acn:
        acn.writelines([
            "3\n",
            "acn\n",
            "ch3 0.000 0.000 0.000\n",
            "c 1.540 0.000 0.000\n",
            "n 2.697 0.000 0.000\n",
        ])

    # Linear CO2 template (O-C-O along x).
    with open("co2.xyz", 'w') as co2:
        co2.writelines([
            "3\n",
            "co2\n",
            "C 0.000 0.000 0.000\n",
            "O 1.160 0.000 0.000\n",
            "O -1.160 0.000 0.000\n",
        ])

    print("Please run the following command: packmol < acncx.pmol")
def tolammps(nacn,nco2):
    """Convert the packmol output (system.xyz) into a LAMMPS data file
    (data.acncx) with atoms, bonds and angles for nacn acetonitrile
    molecules followed by nco2 CO2 molecules.

    Layout assumption: system.xyz lists all acetonitrile molecules first
    (atoms ch3, c, n) then all CO2 molecules (C, O, O), 3 atoms each.
    """
    n=nacn+nco2
    datafile = "data.acncx"
    dat = open(datafile, 'w')
    # Coordinates from packmol output; skip the 2-line xyz header.
    x,y,z = np.genfromtxt('system.xyz', usecols=(1,2,3), unpack=True, skip_header=2)
    # Partial charges and LAMMPS atom types, indexed 0-2 for acn (ch3,c,n)
    # and 3-5 for co2 (C,O,O).
    q=[-0.269, 0.129, -0.398,0.700,-0.350,-0.350]
    type=[1,2,3,4,5,5]
    # Box edge matching the density used in gen_packmol (without the 2 A margin).
    L = (n/0.034)**(1/3.)
    dat.write("LAMMPS Description\n")
    dat.write("\n")
    dat.write("%s atoms\n" % (n*3))
    dat.write("%s bonds\n" % (n*2))
    dat.write("%s angles\n" % (n*1))
    dat.write("0 dihedrals\n")
    dat.write("0 impropers\n")
    dat.write("\n")
    dat.write("5 atom types\n")
    dat.write("3 bond types\n")
    dat.write("2 angle types\n")
    dat.write("\n")
    dat.write("0.0 %s xlo xhi\n" % L)
    dat.write("0.0 %s ylo yhi\n" % L)
    dat.write("0.0 %s zlo zhi\n" % L)
    dat.write("\n")
    dat.write("Masses\n")
    dat.write("\n")
    dat.write("1 15.035\n")
    dat.write("2 12.011\n")
    dat.write("3 14.007\n")
    dat.write("4 12.011\n")
    dat.write("5 15.999\n")
    dat.write("\n")
    dat.write("Atoms\n")
    dat.write("\n")
    # Atom records: 1-based atom index (aindex) and molecule index (mindex);
    # acetonitrile atoms come first, CO2 atoms are offset by nacn*3.
    for i in range(nacn):
        for j in range(0,3):
            aindex=i*3+1+j
            mindex=i+1
            dat.write("%s %s %s %s %s %s %s\n" % (aindex, mindex, type[j], q[j], x[aindex-1], y[aindex-1], z[aindex-1]))
    for i in range(nco2):
        for k in range(3,6):
            j=k-3
            aindex=nacn*3+i*3+1+j
            mindex=nacn+i+1
            dat.write("%s %s %s %s %s %s %s\n" % (aindex, mindex, type[k], q[k], x[aindex-1], y[aindex-1], z[aindex-1]))
    dat.write("\n")
    dat.write("Bonds\n")
    dat.write("\n")
    # Bonds: acn has ch3-c (type 1) and c-n (type 2); co2 has two C-O bonds
    # (type 3).  Two bonds per molecule.
    for i in range(nacn):
        b1index=i*2+1
        b2index=i*2+2
        btype = [1,2]
        ch3index =i*3+1
        cindex=i*3+2
        nindex=i*3+3
        dat.write("%s %s %s %s\n" % (b1index,btype[0],cindex,ch3index))
        dat.write("%s %s %s %s\n" % (b2index,btype[1],cindex,nindex))
    for i in range(nco2):
        b1index=nacn*2+i*2+1
        b2index=nacn*2+i*2+2
        btype=3
        cindex=nacn*3+i*3+1
        o1index=nacn*3+i*3+2
        o2index=nacn*3+i*3+3
        dat.write("%s %s %s %s\n" % (b1index,btype,cindex,o1index))
        dat.write("%s %s %s %s\n" % (b2index,btype,cindex,o2index))
    dat.write("\n")
    dat.write("Angles\n")
    dat.write("\n")
    # Angles: one per molecule -- ch3-c-n (type 1) for acn, O-C-O (type 2)
    # for co2.
    for i in range(nacn):
        aindex=i+1
        atype=1
        CH3index =i*3+1
        Cindex=i*3+2
        Nindex=i*3+3
        dat.write("%s %s %s %s %s\n" % (aindex, atype, CH3index, Cindex, Nindex))
    for i in range(nco2):
        aindex=nacn+i+1
        atype=2
        cindex=nacn*3+i*3+1
        o1index=nacn*3+i*3+2
        o2index=nacn*3+i*3+3
        dat.write("%s %s %s %s %s\n" % (aindex, atype, o1index, cindex, o2index))
    dat.close()
# Dispatch on the requested mode (see the usage note near the top).
if type == 0:
    gen_packmol(numacns,numco2s)
elif type == 1:
    tolammps(numacns,numco2s)
elif type == 3:
    gen_packmol(numacns,numco2s)
    # NOTE(review): hard-coded cluster path and shell=True; acceptable for a
    # personal workflow script but not portable or injection-safe.
    subprocess.call(["/panfs/pfs.local/work/laird/e924p726/thompsonwork/Programs/Executables/packmol < acncx.pmol"],shell=True)
    tolammps(numacns,numco2s)
else:
    print("Error: Incorrect chosen type")
|
piskuliche/Python-Code-Collection
|
system_builds/acn/cxl/build_acn_cxl.py
|
Python
|
gpl-3.0
| 4,654
|
[
"LAMMPS"
] |
6e084a72e570d3fef660dccfe15bf4825936ed5b22380d374342afd4c37937ad
|
# -*- coding: utf-8 -*-
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
import warnings
from collections.abc import Iterable
from functools import wraps
import ctypes
import numpy as np
from scipy._lib.doccer import (extend_notes_in_docstring,
replace_notes_in_docstring)
from scipy._lib._ccallback import LowLevelCallable
from scipy import optimize
from scipy import integrate
import scipy.special as sc
import scipy.special._ufuncs as scu
from scipy._lib._util import _lazyselect, _lazywhere
from . import _stats
from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
tukeylambda_kurtosis as _tlkurt)
from ._distn_infrastructure import (
get_distribution_names, _kurtosis, _ncx2_cdf, _ncx2_log_pdf, _ncx2_pdf,
rv_continuous, _skew, _get_fixed_fit_value, _check_shape)
from ._ksstats import kolmogn, kolmognp, kolmogni
from ._constants import (_XMIN, _EULER, _ZETA3,
_SQRT_2_OVER_PI, _LOG_SQRT_2_OVER_PI)
import scipy.stats._boost as _boost
def _remove_optimizer_parameters(kwds):
"""
Remove the optimizer-related keyword arguments 'loc', 'scale' and
'optimizer' from `kwds`. Then check that `kwds` is empty, and
raise `TypeError("Unknown arguments: %s." % kwds)` if it is not.
This function is used in the fit method of distributions that override
the default method and do not use the default optimization code.
`kwds` is modified in-place.
"""
kwds.pop('loc', None)
kwds.pop('scale', None)
kwds.pop('optimizer', None)
kwds.pop('method', None)
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
def _call_super_mom(fun):
# if fit method is overridden only for MLE and doesn't specify what to do
# if method == 'mm', this decorator calls generic implementation
@wraps(fun)
def wrapper(self, *args, **kwds):
method = kwds.get('method', 'mle').lower()
if method == 'mm':
return super(type(self), self).fit(*args, **kwds)
else:
return fun(self, *args, **kwds)
return wrapper
class ksone_gen(rv_continuous):
    r"""Kolmogorov-Smirnov one-sided test statistic distribution.

    This is the distribution of the one-sided Kolmogorov-Smirnov (KS)
    statistics :math:`D_n^+` and :math:`D_n^-`
    for a finite sample size ``n`` (the shape parameter).

    %(before_notes)s

    See Also
    --------
    kstwobign, kstwo, kstest

    Notes
    -----
    :math:`D_n^+` and :math:`D_n^-` are given by

    .. math::

        D_n^+ &= \text{sup}_x (F_n(x) - F(x)),\\
        D_n^- &= \text{sup}_x (F(x) - F_n(x)),\\

    where :math:`F` is a continuous CDF and :math:`F_n` is an empirical CDF.
    `ksone` describes the distribution under the null hypothesis of the KS test
    that the empirical CDF corresponds to :math:`n` i.i.d. random variates
    with CDF :math:`F`.

    %(after_notes)s

    References
    ----------
    .. [1] Birnbaum, Z. W. and Tingey, F.H. "One-sided confidence contours
       for probability distribution functions", The Annals of Mathematical
       Statistics, 22(4), pp 592-596 (1951).

    %(example)s

    """
    # All methods delegate to the Smirnov ufuncs in scipy.special; the
    # underscore-prefixed ones (scu._smirnov*) are the private derivative /
    # complemented / inverse-complemented variants.
    def _pdf(self, x, n):
        return -scu._smirnovp(n, x)

    def _cdf(self, x, n):
        return scu._smirnovc(n, x)

    def _sf(self, x, n):
        return sc.smirnov(n, x)

    def _ppf(self, q, n):
        return scu._smirnovci(n, q)

    def _isf(self, q, n):
        return sc.smirnovi(n, q)


ksone = ksone_gen(a=0.0, b=1.0, name='ksone')
class kstwo_gen(rv_continuous):
    r"""Kolmogorov-Smirnov two-sided test statistic distribution.

    This is the distribution of the two-sided Kolmogorov-Smirnov (KS)
    statistic :math:`D_n` for a finite sample size ``n``
    (the shape parameter).

    %(before_notes)s

    See Also
    --------
    kstwobign, ksone, kstest

    Notes
    -----
    :math:`D_n` is given by

    .. math::

        D_n = \text{sup}_x |F_n(x) - F(x)|

    where :math:`F` is a (continuous) CDF and :math:`F_n` is an empirical CDF.
    `kstwo` describes the distribution under the null hypothesis of the KS test
    that the empirical CDF corresponds to :math:`n` i.i.d. random variates
    with CDF :math:`F`.

    %(after_notes)s

    References
    ----------
    .. [1] Simard, R., L'Ecuyer, P. "Computing the Two-Sided
       Kolmogorov-Smirnov Distribution",  Journal of Statistical Software,
       Vol 39, 11, 1-18 (2011).

    %(example)s

    """
    def _get_support(self, n):
        # The statistic is bounded below by 1/(2n); n may be scalar or
        # array-like, hence the asanyarray branch.
        return (0.5/(n if not isinstance(n, Iterable) else np.asanyarray(n)),
                1.0)

    # Delegations to the Kolmogorov helpers in scipy.stats._ksstats.
    def _pdf(self, x, n):
        return kolmognp(n, x)

    def _cdf(self, x, n):
        return kolmogn(n, x)

    def _sf(self, x, n):
        return kolmogn(n, x, cdf=False)

    def _ppf(self, q, n):
        return kolmogni(n, q, cdf=True)

    def _isf(self, q, n):
        return kolmogni(n, q, cdf=False)


# Use the pdf, (not the ppf) to compute moments
kstwo = kstwo_gen(momtype=0, a=0.0, b=1.0, name='kstwo')
class kstwobign_gen(rv_continuous):
    r"""Limiting distribution of scaled Kolmogorov-Smirnov two-sided test statistic.

    This is the asymptotic distribution of the two-sided Kolmogorov-Smirnov
    statistic :math:`\sqrt{n} D_n` that measures the maximum absolute
    distance of the theoretical (continuous) CDF from the empirical CDF.
    (see `kstest`).

    %(before_notes)s

    See Also
    --------
    ksone, kstwo, kstest

    Notes
    -----
    :math:`\sqrt{n} D_n` is given by

    .. math::

        D_n = \text{sup}_x |F_n(x) - F(x)|

    where :math:`F` is a continuous CDF and :math:`F_n` is an empirical CDF.
    `kstwobign`  describes the asymptotic distribution (i.e. the limit of
    :math:`\sqrt{n} D_n`) under the null hypothesis of the KS test that the
    empirical CDF corresponds to i.i.d. random variates with CDF :math:`F`.

    %(after_notes)s

    References
    ----------
    .. [1] Feller, W. "On the Kolmogorov-Smirnov Limit Theorems for Empirical
       Distributions",  Ann. Math. Statist. Vol 19, 177-189 (1948).

    %(example)s

    """
    # Delegations to the Kolmogorov ufuncs in scipy.special (the private
    # scu._kolmog* variants cover the pdf, cdf and inverse-cdf).
    def _pdf(self, x):
        return -scu._kolmogp(x)

    def _cdf(self, x):
        return scu._kolmogc(x)

    def _sf(self, x):
        return sc.kolmogorov(x)

    def _ppf(self, q):
        return scu._kolmogci(q)

    def _isf(self, q):
        return sc.kolmogi(q)


kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = np.sqrt(2*np.pi)
_norm_pdf_logC = np.log(_norm_pdf_C)
def _norm_pdf(x):
return np.exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return sc.ndtr(x)
def _norm_logcdf(x):
return sc.log_ndtr(x)
def _norm_ppf(q):
return sc.ndtri(q)
def _norm_sf(x):
return _norm_cdf(-x)
def _norm_logsf(x):
return _norm_logcdf(-x)
def _norm_isf(q):
return -_norm_ppf(q)
class norm_gen(rv_continuous):
    r"""A normal continuous random variable.

    The location (``loc``) keyword specifies the mean.
    The scale (``scale``) keyword specifies the standard deviation.

    %(before_notes)s

    Notes
    -----
    The probability density function for `norm` is:

    .. math::

        f(x) = \frac{\exp(-x^2/2)}{\sqrt{2\pi}}

    for a real number :math:`x`.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, size=None, random_state=None):
        return random_state.standard_normal(size)

    # The distribution methods delegate to the module-level _norm_* helpers
    # so other distributions can reuse them.
    def _pdf(self, x):
        # norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
        return _norm_pdf(x)

    def _logpdf(self, x):
        return _norm_logpdf(x)

    def _cdf(self, x):
        return _norm_cdf(x)

    def _logcdf(self, x):
        return _norm_logcdf(x)

    def _sf(self, x):
        return _norm_sf(x)

    def _logsf(self, x):
        return _norm_logsf(x)

    def _ppf(self, q):
        return _norm_ppf(q)

    def _isf(self, q):
        return _norm_isf(q)

    def _stats(self):
        # mean, variance, skew, kurtosis of the standard normal
        return 0.0, 1.0, 0.0, 0.0

    def _entropy(self):
        return 0.5*(np.log(2*np.pi)+1)

    @_call_super_mom
    @replace_notes_in_docstring(rv_continuous, notes="""\
        For the normal distribution, method of moments and maximum likelihood
        estimation give identical fits, and explicit formulas for the estimates
        are available.
        This function uses these explicit formulas for the maximum likelihood
        estimation of the normal distribution parameters, so the
        `optimizer` and `method` arguments are ignored.\n\n""")
    def fit(self, data, **kwds):
        floc = kwds.pop('floc', None)
        fscale = kwds.pop('fscale', None)

        _remove_optimizer_parameters(kwds)

        if floc is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            # Without this check, this function would just return the
            # parameters that were given.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")

        data = np.asarray(data)

        if not np.isfinite(data).all():
            raise RuntimeError("The data contains non-finite values.")

        # Closed-form MLE: sample mean and (biased) sample standard deviation.
        if floc is None:
            loc = data.mean()
        else:
            loc = floc

        if fscale is None:
            scale = np.sqrt(((data - loc)**2).mean())
        else:
            scale = fscale

        return loc, scale

    def _munp(self, n):
        """
        @returns Moments of standard normal distribution for integer n >= 0

        See eq. 16 of https://arxiv.org/abs/1209.4340v2
        """
        # Even moments are (n-1)!!; odd moments vanish by symmetry.
        if n % 2 == 0:
            return sc.factorial2(n - 1)
        else:
            return 0.


norm = norm_gen(name='norm')
class alpha_gen(rv_continuous):
    r"""An alpha continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `alpha` ([1]_, [2]_) is:

    .. math::

        f(x, a) = \frac{1}{x^2 \Phi(a) \sqrt{2\pi}} *
                  \exp(-\frac{1}{2} (a-1/x)^2)

    where :math:`\Phi` is the normal CDF, :math:`x > 0`, and :math:`a > 0`.

    `alpha` takes ``a`` as a shape parameter.

    %(after_notes)s

    References
    ----------
    .. [1] Johnson, Kotz, and Balakrishnan, "Continuous Univariate
           Distributions, Volume 1", Second Edition, John Wiley and Sons,
           p. 173 (1994).
    .. [2] Anthony A. Salvia, "Reliability applications of the Alpha
           Distribution", IEEE Transactions on Reliability, Vol. R-34,
           No. 3, pp. 251-252 (1985).

    %(example)s

    """
    # The support is the open interval (0, inf).
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x, a):
        # alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2)
        return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x)

    def _logpdf(self, x, a):
        return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a))

    def _cdf(self, x, a):
        return _norm_cdf(a-1.0/x) / _norm_cdf(a)

    def _ppf(self, q, a):
        return 1.0/np.asarray(a-sc.ndtri(q*_norm_cdf(a)))

    def _stats(self, a):
        # Mean and variance are infinite; skew and kurtosis undefined.
        return [np.inf]*2 + [np.nan]*2


alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
    r"""An anglit continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `anglit` is:

    .. math::

        f(x) = \sin(2x + \pi/2) = \cos(2x)

    for :math:`-\pi/4 \le x \le \pi/4`.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # pdf(x) = sin(2x + pi/2), which simplifies to cos(2x).
        return np.cos(2*x)

    def _cdf(self, x):
        # Antiderivative of cos(2x), shifted so that F(-pi/4) = 0.
        return np.sin(x+np.pi/4)**2.0

    def _ppf(self, q):
        # Direct inversion of the CDF above.
        return np.arcsin(np.sqrt(q))-np.pi/4

    def _stats(self):
        # Closed-form mean, variance, skewness and excess kurtosis.
        variance = np.pi*np.pi/16-0.5
        excess_kurtosis = -2*(np.pi**4 - 96)/(np.pi*np.pi-8)**2
        return 0.0, variance, 0.0, excess_kurtosis

    def _entropy(self):
        return 1-np.log(2)


anglit = anglit_gen(a=-np.pi/4, b=np.pi/4, name='anglit')
class arcsine_gen(rv_continuous):
    r"""An arcsine continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `arcsine` is:

    .. math::

        f(x) = \frac{1}{\pi \sqrt{x (1-x)}}

    for :math:`0 < x < 1`.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
        # The density diverges at both endpoints; silence the resulting
        # divide-by-zero warning and let inf propagate.
        with np.errstate(divide='ignore'):
            return 1.0/np.pi/np.sqrt(x*(1-x))

    def _cdf(self, x):
        return 2.0/np.pi*np.arcsin(np.sqrt(x))

    def _ppf(self, q):
        return np.sin(np.pi/2.0*q)**2.0

    def _stats(self):
        # Closed forms: mean 1/2, variance 1/8, zero skewness,
        # excess kurtosis -3/2.
        return 0.5, 1.0/8, 0, -3.0/2.0

    def _entropy(self):
        # Precomputed value of log(pi/4).
        return -0.24156447527049044468


arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
    # Raised (e.g. by beta_gen.fit) when both floc and fscale are fixed
    # and some data values lie outside the open interval
    # (floc, floc + fscale).
    def __init__(self, distr, lower, upper):
        msg = ("Invalid values in `data`. Maximum likelihood "
               "estimation with {distr!r} requires that {lower!r} < "
               "(x - loc)/scale < {upper!r} for each x in `data`.".format(
                   distr=distr, lower=lower, upper=upper))
        self.args = (msg,)
class FitSolverError(RuntimeError):
    # Raised (e.g. by beta_gen.fit) when optimize.fsolve reports
    # non-convergence (ier != 1).
    def __init__(self, mesg):
        flattened = mesg.replace('\n', '')
        self.args = ("Solver for the MLE equations failed to converge: "
                     + flattened,)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
psiab = sc.psi(a + b)
func = s1 - n * (-psiab + sc.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = sc.psi(a + b)
func = [s1 - n * (-psiab + sc.psi(a)),
s2 - n * (-psiab + sc.psi(b))]
return func
class beta_gen(rv_continuous):
    r"""A beta continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `beta` is:

    .. math::

        f(x, a, b) = \frac{\Gamma(a+b) x^{a-1} (1-x)^{b-1}}
                          {\Gamma(a) \Gamma(b)}

    for :math:`0 <= x <= 1`, :math:`a > 0`, :math:`b > 0`, where
    :math:`\Gamma` is the gamma function (`scipy.special.gamma`).

    `beta` takes :math:`a` and :math:`b` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, a, b, size=None, random_state=None):
        # Sample directly with the generator's native beta sampler.
        return random_state.beta(a, b, size)

    def _pdf(self, x, a, b):
        #                     gamma(a+b) * x**(a-1) * (1-x)**(b-1)
        # beta.pdf(x, a, b) = ------------------------------------
        #                              gamma(a)*gamma(b)
        # Delegated to the Boost implementation.
        return _boost._beta_pdf(x, a, b)

    def _logpdf(self, x, a, b):
        # xlog1py/xlogy handle the x == 0 and x == 1 boundaries cleanly.
        lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
        lPx -= sc.betaln(a, b)
        return lPx

    def _cdf(self, x, a, b):
        return _boost._beta_cdf(x, a, b)

    def _sf(self, x, a, b):
        return _boost._beta_sf(x, a, b)

    def _isf(self, x, a, b):
        with warnings.catch_warnings():
            # See gh-14901
            message = "overflow encountered in _beta_isf"
            warnings.filterwarnings('ignore', message=message)
            return _boost._beta_isf(x, a, b)

    def _ppf(self, q, a, b):
        with warnings.catch_warnings():
            # Same overflow suppression as in _isf; see gh-14901.
            message = "overflow encountered in _beta_ppf"
            warnings.filterwarnings('ignore', message=message)
            return _boost._beta_ppf(q, a, b)

    def _stats(self, a, b):
        return(
            _boost._beta_mean(a, b),
            _boost._beta_variance(a, b),
            _boost._beta_skewness(a, b),
            _boost._beta_kurtosis_excess(a, b))

    def _fitstart(self, data):
        # Method-of-moments starting guess: solve for the (a, b) that
        # reproduce the sample skewness and kurtosis.
        g1 = _skew(data)
        g2 = _kurtosis(data)

        def func(x):
            a, b = x
            sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b)
            ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
            ku /= a*b*(a+b+2)*(a+b+3)
            ku *= 6
            return [sk-g1, ku-g2]
        a, b = optimize.fsolve(func, (1.0, 1.0))
        return super()._fitstart(data, args=(a, b))

    @_call_super_mom
    @extend_notes_in_docstring(rv_continuous, notes="""\
        In the special case where `method="MLE"` and
        both `floc` and `fscale` are given, a
        `ValueError` is raised if any value `x` in `data` does not satisfy
        `floc < x < floc + fscale`.\n\n""")
    def fit(self, data, *args, **kwds):
        # Override rv_continuous.fit, so we can more efficiently handle the
        # case where floc and fscale are given.

        floc = kwds.get('floc', None)
        fscale = kwds.get('fscale', None)

        if floc is None or fscale is None:
            # do general fit
            return super().fit(data, *args, **kwds)

        # We already got these from kwds, so just pop them.
        kwds.pop('floc', None)
        kwds.pop('fscale', None)

        f0 = _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
        f1 = _get_fixed_fit_value(kwds, ['f1', 'fb', 'fix_b'])

        _remove_optimizer_parameters(kwds)

        if f0 is not None and f1 is not None:
            # This check is for consistency with `rv_continuous.fit`.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")

        # Special case: loc and scale are constrained, so we are fitting
        # just the shape parameters.  This can be done much more efficiently
        # than the method used in `rv_continuous.fit`.  (See the subsection
        # "Two unknown parameters" in the section "Maximum likelihood" of
        # the Wikipedia article on the Beta distribution for the formulas.)

        if not np.isfinite(data).all():
            raise RuntimeError("The data contains non-finite values.")

        # Normalize the data to the interval [0, 1].
        data = (np.ravel(data) - floc) / fscale
        if np.any(data <= 0) or np.any(data >= 1):
            raise FitDataError("beta", lower=floc, upper=floc + fscale)

        xbar = data.mean()

        if f0 is not None or f1 is not None:
            # One of the shape parameters is fixed.

            if f0 is not None:
                # The shape parameter a is fixed, so swap the parameters
                # and flip the data.  We always solve for `a`.  The result
                # will be swapped back before returning.
                b = f0
                data = 1 - data
                xbar = 1 - xbar
            else:
                b = f1

            # Initial guess for a.  Use the formula for the mean of the beta
            # distribution, E[x] = a / (a + b), to generate a reasonable
            # starting point based on the mean of the data and the given
            # value of b.
            a = b * xbar / (1 - xbar)

            # Compute the MLE for `a` by solving _beta_mle_a.
            theta, info, ier, mesg = optimize.fsolve(
                _beta_mle_a, a,
                args=(b, len(data), np.log(data).sum()),
                full_output=True
            )
            if ier != 1:
                raise FitSolverError(mesg=mesg)
            a = theta[0]

            if f0 is not None:
                # The shape parameter a was fixed, so swap back the
                # parameters.
                a, b = b, a

        else:
            # Neither of the shape parameters is fixed.

            # s1 and s2 are used in the extra arguments passed to _beta_mle_ab
            # by optimize.fsolve.
            s1 = np.log(data).sum()
            s2 = sc.log1p(-data).sum()

            # Use the "method of moments" to estimate the initial
            # guess for a and b.
            fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
            a = xbar * fac
            b = (1 - xbar) * fac

            # Compute the MLE for a and b by solving _beta_mle_ab.
            theta, info, ier, mesg = optimize.fsolve(
                _beta_mle_ab, [a, b],
                args=(len(data), s1, s2),
                full_output=True
            )
            if ier != 1:
                raise FitSolverError(mesg=mesg)
            a, b = theta

        return a, b, floc, fscale


beta = beta_gen(a=0.0, b=1.0, name='beta')
class betaprime_gen(rv_continuous):
    r"""A beta prime continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `betaprime` is:

    .. math::

        f(x, a, b) = \frac{x^{a-1} (1+x)^{-a-b}}{\beta(a, b)}

    for :math:`x >= 0`, :math:`a > 0`, :math:`b > 0`, where
    :math:`\beta(a, b)` is the beta function (see `scipy.special.beta`).

    `betaprime` takes ``a`` and ``b`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    # Density evaluation is restricted to the open interval (0, inf).
    _support_mask = rv_continuous._open_support_mask

    def _rvs(self, a, b, size=None, random_state=None):
        # If U1 ~ Gamma(a) and U2 ~ Gamma(b), then U1/U2 ~ betaprime(a, b).
        u1 = gamma.rvs(a, size=size, random_state=random_state)
        u2 = gamma.rvs(b, size=size, random_state=random_state)
        return u1 / u2

    def _pdf(self, x, a, b):
        # betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)
        return np.exp(self._logpdf(x, a, b))

    def _logpdf(self, x, a, b):
        # xlogy/xlog1py keep the boundary x == 0 well defined.
        return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b)

    def _cdf(self, x, a, b):
        # Reduce to the regularized incomplete beta via y = x/(1+x).
        return sc.betainc(a, b, x/(1.+x))

    def _munp(self, n, a, b):
        # The n-th raw moment is finite only when b > n; otherwise inf.
        if n == 1.0:
            return np.where(b > 1,
                            a/(b-1.0),
                            np.inf)
        elif n == 2.0:
            return np.where(b > 2,
                            a*(a+1.0)/((b-2.0)*(b-1.0)),
                            np.inf)
        elif n == 3.0:
            return np.where(b > 3,
                            a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
                            np.inf)
        elif n == 4.0:
            return np.where(b > 4,
                            (a*(a + 1.0)*(a + 2.0)*(a + 3.0) /
                             ((b - 4.0)*(b - 3.0)*(b - 2.0)*(b - 1.0))),
                            np.inf)
        else:
            raise NotImplementedError


betaprime = betaprime_gen(a=0.0, name='betaprime')
class bradford_gen(rv_continuous):
    r"""A Bradford continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `bradford` is:

    .. math::

        f(x, c) = \frac{c}{\log(1+c) (1+cx)}

    for :math:`0 <= x <= 1` and :math:`c > 0`.

    `bradford` takes ``c`` as a shape parameter for :math:`c`.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x, c):
        # bradford.pdf(x, c) = c / (k * (1+c*x)), where k = log(1 + c)
        return c / (c*x + 1.0) / sc.log1p(c)

    def _cdf(self, x, c):
        return sc.log1p(c*x) / sc.log1p(c)

    def _ppf(self, q, c):
        # expm1/log1p keep the inversion accurate for small q and c.
        return sc.expm1(q * sc.log1p(c)) / c

    def _stats(self, c, moments='mv'):
        # Throughout, k = log(1 + c).  Skewness/kurtosis are computed only
        # when requested via the `moments` string.
        k = np.log(1.0+c)
        mu = (c-k)/(c*k)
        mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
        g1 = None
        g2 = None
        if 's' in moments:
            g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
            g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
        if 'k' in moments:
            g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) +
                  6*c*k*k*(3*k-14) + 12*k**3)
            g2 /= 3*c*(c*(k-2)+2*k)**2
        return mu, mu2, g1, g2

    def _entropy(self, c):
        k = np.log(1+c)
        return k/2.0 - np.log(c/k)


bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
    r"""A Burr (Type III) continuous random variable.

    %(before_notes)s

    See Also
    --------
    fisk : a special case of either `burr` or `burr12` with ``d=1``
    burr12 : Burr Type XII distribution
    mielke : Mielke Beta-Kappa / Dagum distribution

    Notes
    -----
    The probability density function for `burr` is:

    .. math::

        f(x, c, d) = c d x^{-c - 1} / (1 + x^{-c})^{d + 1}

    for :math:`x >= 0` and :math:`c, d > 0`.

    `burr` takes :math:`c` and :math:`d` as shape parameters.

    This is the PDF corresponding to the third CDF given in Burr's list;
    specifically, it is equation (11) in Burr's paper [1]_. The distribution
    is also commonly referred to as the Dagum distribution [2]_. If the
    parameter :math:`c < 1` then the mean of the distribution does not
    exist and if :math:`c < 2` the variance does not exist [2]_.

    The PDF is finite at the left endpoint :math:`x = 0` if :math:`c * d >= 1`.

    %(after_notes)s

    References
    ----------
    .. [1] Burr, I. W. "Cumulative frequency functions", Annals of
       Mathematical Statistics, 13(2), pp 215-232 (1942).
    .. [2] https://en.wikipedia.org/wiki/Dagum_distribution
    .. [3] Kleiber, Christian. "A guide to the Dagum distributions."
       Modeling Income Distributions and Lorenz Curves pp 97-117 (2008).

    %(example)s

    """
    # Do not set _support_mask to rv_continuous._open_support_mask
    # Whether the left-hand endpoint is suitable for pdf evaluation is dependent
    # on the values of c and d: if c*d >= 1, the pdf is finite, otherwise infinite.

    def _pdf(self, x, c, d):
        # burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
        #
        # At x == 0 the generic form would evaluate 0**(negative); use an
        # algebraically equivalent expression that is well defined there.
        output = _lazywhere(x == 0, [x, c, d],
                            lambda x_, c_, d_: c_ * d_ * (x_**(c_*d_-1)) / (1 + x_**c_),
                            f2 = lambda x_, c_, d_: (c_ * d_ * (x_ ** (-c_ - 1.0)) /
                                                     ((1 + x_ ** (-c_)) ** (d_ + 1.0))))
        if output.ndim == 0:
            # Preserve scalar-in, scalar-out behavior.
            return output[()]
        return output

    def _logpdf(self, x, c, d):
        # Same branch structure as _pdf, in log space via xlogy/xlog1py.
        output = _lazywhere(
            x == 0, [x, c, d],
            lambda x_, c_, d_: (np.log(c_) + np.log(d_) + sc.xlogy(c_*d_ - 1, x_)
                                - (d_+1) * sc.log1p(x_**(c_))),
            f2 = lambda x_, c_, d_: (np.log(c_) + np.log(d_)
                                     + sc.xlogy(-c_ - 1, x_)
                                     - sc.xlog1py(d_+1, x_**(-c_))))
        if output.ndim == 0:
            return output[()]
        return output

    def _cdf(self, x, c, d):
        return (1 + x**(-c))**(-d)

    def _logcdf(self, x, c, d):
        return sc.log1p(x**(-c)) * (-d)

    def _sf(self, x, c, d):
        return np.exp(self._logsf(x, c, d))

    def _logsf(self, x, c, d):
        return np.log1p(- (1 + x**(-c))**(-d))

    def _ppf(self, q, c, d):
        return (q**(-1.0/d) - 1)**(-1.0/c)

    def _stats(self, c, d):
        # Raw moments e1..e4 via the beta function; each standardized
        # moment exists only for sufficiently large c (see class notes).
        nc = np.arange(1, 5).reshape(4,1) / c
        #ek is the kth raw moment, e1 is the mean e2-e1**2 variance etc.
        e1, e2, e3, e4 = sc.beta(d + nc, 1. - nc) * d
        mu = np.where(c > 1.0, e1, np.nan)
        mu2_if_c = e2 - mu**2
        mu2 = np.where(c > 2.0, mu2_if_c, np.nan)
        g1 = _lazywhere(
            c > 3.0,
            (c, e1, e2, e3, mu2_if_c),
            lambda c, e1, e2, e3, mu2_if_c: (e3 - 3*e2*e1 + 2*e1**3) / np.sqrt((mu2_if_c)**3),
            fillvalue=np.nan)
        g2 = _lazywhere(
            c > 4.0,
            (c, e1, e2, e3, e4, mu2_if_c),
            lambda c, e1, e2, e3, e4, mu2_if_c: (
                ((e4 - 4*e3*e1 + 6*e2*e1**2 - 3*e1**4) / mu2_if_c**2) - 3),
            fillvalue=np.nan)
        if np.ndim(c) == 0:
            return mu.item(), mu2.item(), g1.item(), g2.item()
        return mu, mu2, g1, g2

    def _munp(self, n, c, d):
        def __munp(n, c, d):
            nc = 1. * n / c
            return d * sc.beta(1.0 - nc, d + nc)
        n, c, d = np.asarray(n), np.asarray(c), np.asarray(d)
        # The moment exists only for c > n; NaN parameters also fall
        # through to the NaN fill value.
        return _lazywhere((c > n) & (n == n) & (d == d), (c, d, n),
                          lambda c, d, n: __munp(n, c, d),
                          np.nan)


burr = burr_gen(a=0.0, name='burr')
class burr12_gen(rv_continuous):
    r"""A Burr (Type XII) continuous random variable.

    %(before_notes)s

    See Also
    --------
    fisk : a special case of either `burr` or `burr12` with ``d=1``
    burr : Burr Type III distribution

    Notes
    -----
    The probability density function for `burr` is:

    .. math::

        f(x, c, d) = c d x^{c-1} / (1 + x^c)^{d + 1}

    for :math:`x >= 0` and :math:`c, d > 0`.

    `burr12` takes ``c`` and ``d`` as shape parameters for :math:`c`
    and :math:`d`.

    This is the PDF corresponding to the twelfth CDF given in Burr's list;
    specifically, it is equation (20) in Burr's paper [1]_.

    %(after_notes)s

    The Burr type 12 distribution is also sometimes referred to as
    the Singh-Maddala distribution from NIST [2]_.

    References
    ----------
    .. [1] Burr, I. W. "Cumulative frequency functions", Annals of
       Mathematical Statistics, 13(2), pp 215-232 (1942).
    .. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/b12pdf.htm
    .. [3] "Burr distribution",
       https://en.wikipedia.org/wiki/Burr_distribution

    %(example)s

    """
    def _pdf(self, x, c, d):
        # burr12.pdf(x, c, d) = c * d * x**(c-1) * (1+x**(c))**(-d-1)
        return np.exp(self._logpdf(x, c, d))

    def _logpdf(self, x, c, d):
        # xlogy/xlog1py keep the x == 0 boundary well defined.
        return np.log(c) + np.log(d) + sc.xlogy(c - 1, x) + sc.xlog1py(-d-1, x**c)

    def _cdf(self, x, c, d):
        # CDF from the log survival function: 1 - exp(logsf).
        return -sc.expm1(self._logsf(x, c, d))

    def _logcdf(self, x, c, d):
        return sc.log1p(-(1 + x**c)**(-d))

    def _sf(self, x, c, d):
        return np.exp(self._logsf(x, c, d))

    def _logsf(self, x, c, d):
        return sc.xlog1py(-d, x**c)

    def _ppf(self, q, c, d):
        # The following is an implementation of
        #   ((1 - q)**(-1.0/d) - 1)**(1.0/c)
        # that does a better job handling small values of q.
        return sc.expm1(-1/d * sc.log1p(-q))**(1/c)

    def _munp(self, n, c, d):
        nc = 1. * n / c
        return d * sc.beta(1.0 + nc, d - nc)


burr12 = burr12_gen(a=0.0, name='burr12')
class fisk_gen(burr_gen):
    r"""A Fisk continuous random variable.

    The Fisk distribution is also known as the log-logistic distribution.

    %(before_notes)s

    See Also
    --------
    burr

    Notes
    -----
    The probability density function for `fisk` is:

    .. math::

        f(x, c) = c x^{-c-1} (1 + x^{-c})^{-2}

    for :math:`x >= 0` and :math:`c > 0`.

    `fisk` takes ``c`` as a shape parameter for :math:`c`.

    `fisk` is a special case of `burr` or `burr12` with ``d=1``.

    %(after_notes)s

    %(example)s

    """
    # Every method simply delegates to `burr` with the second shape
    # parameter pinned at d = 1.

    def _pdf(self, x, c):
        # fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
        return burr._pdf(x, c, 1.0)

    def _cdf(self, x, c):
        return burr._cdf(x, c, 1.0)

    def _sf(self, x, c):
        return burr._sf(x, c, 1.0)

    def _logpdf(self, x, c):
        # fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
        return burr._logpdf(x, c, 1.0)

    def _logcdf(self, x, c):
        return burr._logcdf(x, c, 1.0)

    def _logsf(self, x, c):
        return burr._logsf(x, c, 1.0)

    def _ppf(self, x, c):
        return burr._ppf(x, c, 1.0)

    def _munp(self, n, c):
        return burr._munp(n, c, 1.0)

    def _stats(self, c):
        return burr._stats(c, 1.0)

    def _entropy(self, c):
        # Closed form specific to d = 1.
        return 2 - np.log(c)


fisk = fisk_gen(a=0.0, name='fisk')
class cauchy_gen(rv_continuous):
    r"""A Cauchy continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `cauchy` is

    .. math::

        f(x) = \frac{1}{\pi (1 + x^2)}

    for a real number :math:`x`.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # cauchy.pdf(x) = 1 / (pi * (1 + x**2))
        return 1.0/np.pi/(1.0+x*x)

    def _cdf(self, x):
        return 0.5 + 1.0/np.pi*np.arctan(x)

    def _sf(self, x):
        # Survival function: mirror image of the CDF.
        return 0.5 - 1.0/np.pi*np.arctan(x)

    def _ppf(self, q):
        return np.tan(np.pi*q-np.pi/2.0)

    def _isf(self, q):
        return np.tan(np.pi/2.0-np.pi*q)

    def _stats(self):
        # No moments of the Cauchy distribution are defined.
        return (np.nan,) * 4

    def _fitstart(self, data, args=None):
        # Moments do not exist, so seed the ML fit from sample quartiles:
        # the median estimates loc, half the IQR estimates scale.
        q1, med, q3 = np.percentile(data, [25, 50, 75])
        return med, (q3 - q1)/2


cauchy = cauchy_gen(name='cauchy')
class chi_gen(rv_continuous):
    r"""A chi continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `chi` is:

    .. math::

        f(x, k) = \frac{1}{2^{k/2-1} \Gamma \left( k/2 \right)}
                   x^{k-1} \exp \left( -x^2/2 \right)

    for :math:`x >= 0` and :math:`k > 0` (degrees of freedom, denoted ``df``
    in the implementation). :math:`\Gamma` is the gamma function
    (`scipy.special.gamma`).

    Special cases of `chi` are:

        - ``chi(1, loc, scale)`` is equivalent to `halfnorm`
        - ``chi(2, 0, scale)`` is equivalent to `rayleigh`
        - ``chi(3, 0, scale)`` is equivalent to `maxwell`

    `chi` takes ``df`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """

    def _rvs(self, df, size=None, random_state=None):
        # A chi variate is the square root of a chi-squared variate.
        return np.sqrt(chi2.rvs(df, size=size, random_state=random_state))

    def _pdf(self, x, df):
        #                   x**(df-1) * exp(-x**2/2)
        # chi.pdf(x, df) = -------------------------
        #                   2**(df/2-1) * gamma(df/2)
        return np.exp(self._logpdf(x, df))

    def _logpdf(self, x, df):
        # l is the log of the normalization constant.
        l = np.log(2) - .5*np.log(2)*df - sc.gammaln(.5*df)
        return l + sc.xlogy(df - 1., x) - .5*x**2

    def _cdf(self, x, df):
        # CDF/SF via the regularized incomplete gamma functions.
        return sc.gammainc(.5*df, .5*x**2)

    def _sf(self, x, df):
        return sc.gammaincc(.5*df, .5*x**2)

    def _ppf(self, q, df):
        return np.sqrt(2*sc.gammaincinv(.5*df, q))

    def _isf(self, q, df):
        return np.sqrt(2*sc.gammainccinv(.5*df, q))

    def _stats(self, df):
        # Closed-form moments expressed through gamma-function ratios.
        mu = np.sqrt(2)*np.exp(sc.gammaln(df/2.0+0.5)-sc.gammaln(df/2.0))
        mu2 = df - mu*mu
        g1 = (2*mu**3.0 + mu*(1-2*df))/np.asarray(np.power(mu2, 1.5))
        g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
        g2 /= np.asarray(mu2**2.0)
        return mu, mu2, g1, g2


chi = chi_gen(a=0.0, name='chi')
class chi2_gen(rv_continuous):
    r"""A chi-squared continuous random variable.

    For the noncentral chi-square distribution, see `ncx2`.

    %(before_notes)s

    See Also
    --------
    ncx2

    Notes
    -----
    The probability density function for `chi2` is:

    .. math::

        f(x, k) = \frac{1}{2^{k/2} \Gamma \left( k/2 \right)}
                   x^{k/2-1} \exp \left( -x/2 \right)

    for :math:`x > 0`  and :math:`k > 0` (degrees of freedom, denoted ``df``
    in the implementation).

    `chi2` takes ``df`` as a shape parameter.

    The chi-squared distribution is a special case of the gamma
    distribution, with gamma parameters ``a = df/2``, ``loc = 0`` and
    ``scale = 2``.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, df, size=None, random_state=None):
        # Sample with the generator's native chi-square sampler.
        return random_state.chisquare(df, size)

    def _pdf(self, x, df):
        # chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
        return np.exp(self._logpdf(x, df))

    def _logpdf(self, x, df):
        return sc.xlogy(df/2.-1, x) - x/2. - sc.gammaln(df/2.) - (np.log(2)*df)/2.

    def _cdf(self, x, df):
        # chdtr/chdtrc/chdtri are the dedicated chi-square special
        # functions.
        return sc.chdtr(df, x)

    def _sf(self, x, df):
        return sc.chdtrc(df, x)

    def _isf(self, p, df):
        return sc.chdtri(df, p)

    def _ppf(self, p, df):
        return 2*sc.gammaincinv(df/2, p)

    def _stats(self, df):
        # mean df, variance 2*df, skew sqrt(8/df), excess kurtosis 12/df.
        mu = df
        mu2 = 2*df
        g1 = 2*np.sqrt(2.0/df)
        g2 = 12.0/df
        return mu, mu2, g1, g2


chi2 = chi2_gen(a=0.0, name='chi2')
class cosine_gen(rv_continuous):
    r"""A cosine continuous random variable.

    %(before_notes)s

    Notes
    -----
    The cosine distribution is an approximation to the normal distribution.
    The probability density function for `cosine` is:

    .. math::

        f(x) = \frac{1}{2\pi} (1+\cos(x))

    for :math:`-\pi \le x \le \pi`.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
        return 1.0/2/np.pi*(1+np.cos(x))

    def _logpdf(self, x):
        # log1p(cos(x)) is accurate near the endpoints; where cos(x) == -1
        # the density is 0, so the log pdf is -inf.
        c = np.cos(x)
        return _lazywhere(c != -1, (c,),
                          lambda c: np.log1p(c) - np.log(2*np.pi),
                          fillvalue=-np.inf)

    def _cdf(self, x):
        # CDF/PPF delegate to dedicated implementations in
        # scipy.special (scu).
        return scu._cosine_cdf(x)

    def _sf(self, x):
        # By symmetry of the density about 0, sf(x) = cdf(-x).
        return scu._cosine_cdf(-x)

    def _ppf(self, p):
        return scu._cosine_invcdf(p)

    def _isf(self, p):
        # By symmetry, isf(p) = -ppf(p).
        return -scu._cosine_invcdf(p)

    def _stats(self):
        return 0.0, np.pi*np.pi/3.0-2.0, 0.0, -6.0*(np.pi**4-90)/(5.0*(np.pi*np.pi-6)**2)

    def _entropy(self):
        return np.log(4*np.pi)-1.0


cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine')
class dgamma_gen(rv_continuous):
    r"""A double gamma continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `dgamma` is:

    .. math::

        f(x, a) = \frac{1}{2\Gamma(a)} |x|^{a-1} \exp(-|x|)

    for a real number :math:`x` and :math:`a > 0`. :math:`\Gamma` is the
    gamma function (`scipy.special.gamma`).

    `dgamma` takes ``a`` as a shape parameter for :math:`a`.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, a, size=None, random_state=None):
        # Sample |X| from a gamma distribution and attach a random sign.
        u = random_state.uniform(size=size)
        gm = gamma.rvs(a, size=size, random_state=random_state)
        return gm * np.where(u >= 0.5, 1, -1)

    def _pdf(self, x, a):
        # dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
        ax = abs(x)
        return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax)

    def _logpdf(self, x, a):
        ax = abs(x)
        return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a)

    def _cdf(self, x, a):
        # The density is symmetric about 0, so fold the incomplete gamma
        # around 1/2.
        fac = 0.5*sc.gammainc(a, abs(x))
        return np.where(x > 0, 0.5 + fac, 0.5 - fac)

    def _sf(self, x, a):
        fac = 0.5*sc.gammainc(a, abs(x))
        return np.where(x > 0, 0.5-fac, 0.5+fac)

    def _ppf(self, q, a):
        # Invert the folded CDF; sign is determined by which side of 1/2
        # the probability falls on.
        fac = sc.gammainccinv(a, 1-abs(2*q-1))
        return np.where(q > 0.5, fac, -fac)

    def _stats(self, a):
        # Odd moments vanish by symmetry.
        mu2 = a*(a+1.0)
        return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0


dgamma = dgamma_gen(name='dgamma')
class dweibull_gen(rv_continuous):
    r"""A double Weibull continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `dweibull` is given by

    .. math::

        f(x, c) = c / 2 |x|^{c-1} \exp(-|x|^c)

    for a real number :math:`x` and :math:`c > 0`.

    `dweibull` takes ``c`` as a shape parameter for :math:`c`.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, c, size=None, random_state=None):
        # Sample |X| from weibull_min and attach a random sign.
        u = random_state.uniform(size=size)
        w = weibull_min.rvs(c, size=size, random_state=random_state)
        return w * (np.where(u >= 0.5, 1, -1))

    def _pdf(self, x, c):
        # dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
        ax = abs(x)
        Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c)
        return Px

    def _logpdf(self, x, c):
        ax = abs(x)
        return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c

    def _cdf(self, x, c):
        # Fold the one-sided Weibull tail symmetrically about 0.
        Cx1 = 0.5 * np.exp(-abs(x)**c)
        return np.where(x > 0, 1 - Cx1, Cx1)

    def _ppf(self, q, c):
        # Invert the folded CDF; the sign follows which half q is in.
        fac = 2. * np.where(q <= 0.5, q, 1. - q)
        fac = np.power(-np.log(fac), 1.0 / c)
        return np.where(q > 0.5, fac, -fac)

    def _munp(self, n, c):
        # Odd raw moments are zero by symmetry; even ones match the
        # Weibull moments.
        return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c)

    # since we know that all odd moments are zeros, return them at once.
    # returning Nones from _stats makes the public stats call _munp
    # so overall we're saving one or two gamma function evaluations here.
    def _stats(self, c):
        return 0, None, 0, None


dweibull = dweibull_gen(name='dweibull')
class expon_gen(rv_continuous):
    r"""An exponential continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `expon` is:

    .. math::

        f(x) = \exp(-x)

    for :math:`x \ge 0`.

    %(after_notes)s

    A common parameterization for `expon` is in terms of the rate parameter
    ``lambda``, such that ``pdf = lambda * exp(-lambda * x)``. This
    parameterization corresponds to using ``scale = 1 / lambda``.

    The exponential distribution is a special case of the gamma
    distributions, with gamma shape parameter ``a = 1``.

    %(example)s

    """
    def _rvs(self, size=None, random_state=None):
        return random_state.standard_exponential(size)

    def _pdf(self, x):
        # expon.pdf(x) = exp(-x)
        return np.exp(-x)

    def _logpdf(self, x):
        return -x

    def _cdf(self, x):
        # expm1 keeps 1 - exp(-x) accurate for small x.
        return -sc.expm1(-x)

    def _ppf(self, q):
        return -sc.log1p(-q)

    def _sf(self, x):
        return np.exp(-x)

    def _logsf(self, x):
        return -x

    def _isf(self, q):
        return -np.log(q)

    def _stats(self):
        return 1.0, 1.0, 2.0, 6.0

    def _entropy(self):
        return 1.0

    @_call_super_mom
    @replace_notes_in_docstring(rv_continuous, notes="""\
        When `method='MLE'`,
        this function uses explicit formulas for the maximum likelihood
        estimation of the exponential distribution parameters, so the
        `optimizer`, `loc` and `scale` keyword arguments are
        ignored.\n\n""")
    def fit(self, data, *args, **kwds):
        if len(args) > 0:
            raise TypeError("Too many arguments.")

        floc = kwds.pop('floc', None)
        fscale = kwds.pop('fscale', None)

        _remove_optimizer_parameters(kwds)

        if floc is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")

        data = np.asarray(data)

        if not np.isfinite(data).all():
            raise RuntimeError("The data contains non-finite values.")

        data_min = data.min()

        if floc is None:
            # ML estimate of the location is the minimum of the data.
            loc = data_min
        else:
            loc = floc
            if data_min < loc:
                # There are values that are less than the specified loc.
                raise FitDataError("expon", lower=floc, upper=np.inf)

        if fscale is None:
            # ML estimate of the scale is the shifted mean.
            scale = data.mean() - loc
        else:
            scale = fscale

        # We expect the return values to be floating point, so ensure it
        # by explicitly converting to float.
        return float(loc), float(scale)


expon = expon_gen(a=0.0, name='expon')
class exponnorm_gen(rv_continuous):
    r"""An exponentially modified Normal continuous random variable.

    Also known as the exponentially modified Gaussian distribution [1]_.

    %(before_notes)s

    Notes
    -----
    The probability density function for `exponnorm` is:

    .. math::

        f(x, K) = \frac{1}{2K} \exp\left(\frac{1}{2 K^2} - x / K \right)
                  \text{erfc}\left(-\frac{x - 1/K}{\sqrt{2}}\right)

    where :math:`x` is a real number and :math:`K > 0`.

    It can be thought of as the sum of a standard normal random variable
    and an independent exponentially distributed random variable with rate
    ``1/K``.

    %(after_notes)s

    An alternative parameterization of this distribution (for example, in
    the Wikpedia article [1]_) involves three parameters, :math:`\mu`,
    :math:`\lambda` and :math:`\sigma`.

    In the present parameterization this corresponds to having ``loc`` and
    ``scale`` equal to :math:`\mu` and :math:`\sigma`, respectively, and
    shape parameter :math:`K = 1/(\sigma\lambda)`.

    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] Exponentially modified Gaussian distribution, Wikipedia,
           https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution

    %(example)s

    """
    def _rvs(self, K, size=None, random_state=None):
        # Sum of an exponential (rate 1/K) and an independent standard
        # normal variate.
        expval = random_state.standard_exponential(size) * K
        gval = random_state.standard_normal(size)
        return expval + gval

    def _pdf(self, x, K):
        return np.exp(self._logpdf(x, K))

    def _logpdf(self, x, K):
        invK = 1.0 / K
        exparg = invK * (0.5 * invK - x)
        # exp(exparg) * Phi(x - 1/K), assembled in log space to avoid
        # overflow in the exponential factor.
        return exparg + _norm_logcdf(x - invK) - np.log(K)

    def _cdf(self, x, K):
        invK = 1.0 / K
        expval = invK * (0.5 * invK - x)
        # The product exp(expval) * Phi(x - 1/K) is computed via its log
        # for numerical stability.
        logprod = expval + _norm_logcdf(x - invK)
        return _norm_cdf(x) - np.exp(logprod)

    def _sf(self, x, K):
        invK = 1.0 / K
        expval = invK * (0.5 * invK - x)
        logprod = expval + _norm_logcdf(x - invK)
        return _norm_cdf(-x) + np.exp(logprod)

    def _stats(self, K):
        K2 = K * K
        opK2 = 1.0 + K2
        skw = 2 * K**3 * opK2**(-1.5)
        krt = 6.0 * K2 * K2 * opK2**(-2)
        return K, opK2, skw, krt


exponnorm = exponnorm_gen(name='exponnorm')
class exponweib_gen(rv_continuous):
    r"""An exponentiated Weibull continuous random variable.

    %(before_notes)s

    See Also
    --------
    weibull_min, numpy.random.Generator.weibull

    Notes
    -----
    The probability density function for `exponweib` is:

    .. math::

        f(x, a, c) = a c [1-\exp(-x^c)]^{a-1} \exp(-x^c) x^{c-1}

    and its cumulative distribution function is:

    .. math::

        F(x, a, c) = [1-\exp(-x^c)]^a

    for :math:`x > 0`, :math:`a > 0`, :math:`c > 0`.

    `exponweib` takes :math:`a` and :math:`c` as shape parameters:

    * :math:`a` is the exponentiation parameter,
      with the special case :math:`a=1` corresponding to the
      (non-exponentiated) Weibull distribution `weibull_min`.
    * :math:`c` is the shape parameter of the non-exponentiated Weibull law.

    %(after_notes)s

    References
    ----------
    https://en.wikipedia.org/wiki/Exponentiated_Weibull_distribution

    %(example)s

    """
    def _pdf(self, x, a, c):
        # exponweib.pdf(x, a, c) =
        #     a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
        return np.exp(self._logpdf(x, a, c))

    def _logpdf(self, x, a, c):
        # expm1 computes 1 - exp(-x**c) accurately for small x**c.
        negxc = -x**c
        exm1c = -sc.expm1(negxc)
        logp = (np.log(a) + np.log(c) + sc.xlogy(a - 1.0, exm1c) +
                negxc + sc.xlogy(c - 1.0, x))
        return logp

    def _cdf(self, x, a, c):
        exm1c = -sc.expm1(-x**c)
        return exm1c**a

    def _ppf(self, q, a, c):
        # Direct inversion of F(x) = (1 - exp(-x**c))**a.
        return (-sc.log1p(-q**(1.0/a)))**np.asarray(1.0/c)


exponweib = exponweib_gen(a=0.0, name='exponweib')
class exponpow_gen(rv_continuous):
    r"""An exponential power continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `exponpow` is:

    .. math::

        f(x, b) = b x^{b-1} \exp(1 + x^b - \exp(x^b))

    for :math:`x \ge 0`, :math:`b > 0`.  Note that this is a different
    distribution from the exponential power distribution that is also known
    under the names "generalized normal" or "generalized Gaussian".

    `exponpow` takes ``b`` as a shape parameter for :math:`b`.

    %(after_notes)s

    References
    ----------
    http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf

    %(example)s

    """
    def _pdf(self, x, b):
        # exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))
        return np.exp(self._logpdf(x, b))

    def _logpdf(self, x, b):
        xb = x**b
        f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb)
        return f

    def _cdf(self, x, b):
        # expm1/log1p variants keep the tails numerically accurate.
        return -sc.expm1(-sc.expm1(x**b))

    def _sf(self, x, b):
        return np.exp(-sc.expm1(x**b))

    def _isf(self, x, b):
        return (sc.log1p(-np.log(x)))**(1./b)

    def _ppf(self, q, b):
        return pow(sc.log1p(-sc.log1p(-q)), 1.0/b)


exponpow = exponpow_gen(a=0.0, name='exponpow')
class fatiguelife_gen(rv_continuous):
    r"""A fatigue-life (Birnbaum-Saunders) continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `fatiguelife` is:

    .. math::

        f(x, c) = \frac{x+1}{2c\sqrt{2\pi x^3}} \exp(-\frac{(x-1)^2}{2x c^2})

    for :math:`x >= 0` and :math:`c > 0`.

    `fatiguelife` takes ``c`` as a shape parameter for :math:`c`.

    %(after_notes)s

    References
    ----------
    .. [1] "Birnbaum-Saunders distribution",
           https://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution

    %(example)s

    """
    # The density involves 1/sqrt(x**3), so x = 0 is excluded.
    _support_mask = rv_continuous._open_support_mask

    def _rvs(self, c, size=None, random_state=None):
        # Transform a standard normal z via the Birnbaum-Saunders relation:
        # with x = c*z/2, the variate is (x + sqrt(1 + x**2))**2,
        # expanded below as 1 + 2*x**2 + 2*x*sqrt(1 + x**2).
        z = random_state.standard_normal(size)
        x = 0.5*c*z
        x2 = x*x
        t = 1.0 + 2*x2 + 2*x*np.sqrt(1 + x2)
        return t

    def _pdf(self, x, c):
        # fatiguelife.pdf(x, c) =
        #     (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
        return np.exp(self._logpdf(x, c))

    def _logpdf(self, x, c):
        return (np.log(x+1) - (x-1)**2 / (2.0*x*c**2) - np.log(2*c) -
                0.5*(np.log(2*np.pi) + 3*np.log(x)))

    def _cdf(self, x, c):
        # CDF via the normal CDF of (sqrt(x) - 1/sqrt(x)) / c.
        return _norm_cdf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))

    def _ppf(self, q, c):
        tmp = c*sc.ndtri(q)
        return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2

    def _sf(self, x, c):
        return _norm_sf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))

    def _isf(self, q, c):
        tmp = -c*sc.ndtri(q)
        return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2

    def _stats(self, c):
        # NB: the formula for kurtosis in wikipedia seems to have an error:
        # it's 40, not 41. At least it disagrees with the one from Wolfram
        # Alpha.  And the latter one, below, passes the tests, while the wiki
        # one doesn't So far I didn't have the guts to actually check the
        # coefficients from the expressions for the raw moments.
        c2 = c*c
        mu = c2 / 2.0 + 1.0
        den = 5.0 * c2 + 4.0
        mu2 = c2*den / 4.0
        g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
        g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
        return mu, mu2, g1, g2


fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
class foldcauchy_gen(rv_continuous):
    r"""A folded Cauchy continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `foldcauchy` is:
    .. math::
        f(x, c) = \frac{1}{\pi (1+(x-c)^2)} + \frac{1}{\pi (1+(x+c)^2)}
    for :math:`x \ge 0`.
    `foldcauchy` takes ``c`` as a shape parameter for :math:`c`.
    %(example)s
    """
    def _rvs(self, c, size=None, random_state=None):
        # |Y| with Y Cauchy-distributed around c is folded Cauchy with
        # shape c.
        return abs(cauchy.rvs(loc=c, size=size,
                              random_state=random_state))
    def _pdf(self, x, c):
        # Sum of the two Cauchy densities centered at +c and -c,
        # folded onto the non-negative half line.
        return 1.0/np.pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
    def _cdf(self, x, c):
        # Corresponding sum of arctangent antiderivatives.
        return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c))
    def _stats(self, c):
        # All moments of the (folded) Cauchy distribution diverge.
        return np.inf, np.inf, np.nan, np.nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
    r"""An F continuous random variable.
    For the noncentral F distribution, see `ncf`.
    %(before_notes)s
    See Also
    --------
    ncf
    Notes
    -----
    The probability density function for `f` is:
    .. math::
        f(x, df_1, df_2) = \frac{df_2^{df_2/2} df_1^{df_1/2} x^{df_1 / 2-1}}
                                {(df_2+df_1 x)^{(df_1+df_2)/2}
                                 B(df_1/2, df_2/2)}
    for :math:`x > 0`.
    `f` takes ``dfn`` and ``dfd`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, dfn, dfd, size=None, random_state=None):
        # Delegate directly to the generator's native F sampler.
        return random_state.f(dfn, dfd, size)
    def _pdf(self, x, dfn, dfd):
        #                      df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
        # F.pdf(x, df1, df2) = --------------------------------------------
        #                      (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
        # Evaluated through the log-density for numerical stability.
        return np.exp(self._logpdf(x, dfn, dfd))
    def _logpdf(self, x, dfn, dfd):
        # Force float arithmetic in case integer dof are passed in.
        n = 1.0 * dfn
        m = 1.0 * dfd
        # xlogy handles the x == 0 boundary; betaln avoids overflow in B().
        lPx = (m/2 * np.log(m) + n/2 * np.log(n) + sc.xlogy(n/2 - 1, x)
               - (((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)))
        return lPx
    def _cdf(self, x, dfn, dfd):
        return sc.fdtr(dfn, dfd, x)
    def _sf(self, x, dfn, dfd):
        return sc.fdtrc(dfn, dfd, x)
    def _ppf(self, q, dfn, dfd):
        return sc.fdtri(dfn, dfd, q)
    def _stats(self, dfn, dfd):
        # Each moment exists only for large enough dfd; the _lazywhere
        # guards below return inf/nan where the moment diverges.
        v1, v2 = 1. * dfn, 1. * dfd
        v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.
        # Mean requires dfd > 2.
        mu = _lazywhere(
            v2 > 2, (v2, v2_2),
            lambda v2, v2_2: v2 / v2_2,
            np.inf)
        # Variance requires dfd > 4.
        mu2 = _lazywhere(
            v2 > 4, (v1, v2, v2_2, v2_4),
            lambda v1, v2, v2_2, v2_4:
            2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
            np.inf)
        # Skewness requires dfd > 6.
        g1 = _lazywhere(
            v2 > 6, (v1, v2_2, v2_4, v2_6),
            lambda v1, v2_2, v2_4, v2_6:
            (2 * v1 + v2_2) / v2_6 * np.sqrt(v2_4 / (v1 * (v1 + v2_2))),
            np.nan)
        g1 *= np.sqrt(8.)
        # Excess kurtosis requires dfd > 8.
        g2 = _lazywhere(
            v2 > 8, (g1, v2_6, v2_8),
            lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
            np.nan)
        g2 *= 3. / 2.
        return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f')
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: regress docs have scale parameter correct, but first parameter
## he gives is a shape parameter A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
    r"""A folded normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `foldnorm` is:
    .. math::
        f(x, c) = \sqrt{2/\pi} cosh(c x) \exp(-\frac{x^2+c^2}{2})
    for :math:`c \ge 0`.
    `foldnorm` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, c):
        # Non-negative shape only; c == 0 is the half-normal case.
        return c >= 0
    def _rvs(self, c, size=None, random_state=None):
        # |Z + c| with Z standard normal is folded normal with shape c.
        return abs(random_state.standard_normal(size) + c)
    def _pdf(self, x, c):
        # foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
        # Implemented as the sum of the two normal densities folded at 0,
        # which is algebraically the same expression.
        return _norm_pdf(x + c) + _norm_pdf(x-c)
    def _cdf(self, x, c):
        return _norm_cdf(x-c) + _norm_cdf(x+c) - 1.0
    def _stats(self, c):
        # Moment formulas from:
        # Regina C. Elandt, Technometrics 3, 551 (1961)
        # https://www.jstor.org/stable/1266561
        #
        c2 = c*c
        expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)
        mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
        mu2 = c2 + 1 - mu*mu
        g1 = 2. * (mu*mu*mu - c2*mu - expfac)
        g1 /= np.power(mu2, 1.5)
        g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
        g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
        # Convert the fourth central moment ratio to excess kurtosis.
        g2 = g2 / mu2**2.0 - 3.
        return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
class weibull_min_gen(rv_continuous):
    r"""Weibull minimum continuous random variable.
    The Weibull Minimum Extreme Value distribution, from extreme value theory
    (Fisher-Gnedenko theorem), is also often simply called the Weibull
    distribution. It arises as the limiting distribution of the rescaled
    minimum of iid random variables.
    %(before_notes)s
    See Also
    --------
    weibull_max, numpy.random.Generator.weibull, exponweib
    Notes
    -----
    The probability density function for `weibull_min` is:
    .. math::
        f(x, c) = c x^{c-1} \exp(-x^c)
    for :math:`x > 0`, :math:`c > 0`.
    `weibull_min` takes ``c`` as a shape parameter for :math:`c`.
    (named :math:`k` in Wikipedia article and :math:`a` in
    ``numpy.random.weibull``). Special shape values are :math:`c=1` and
    :math:`c=2` where Weibull distribution reduces to the `expon` and
    `rayleigh` distributions respectively.
    %(after_notes)s
    References
    ----------
    https://en.wikipedia.org/wiki/Weibull_distribution
    https://en.wikipedia.org/wiki/Fisher-Tippett-Gnedenko_theorem
    %(example)s
    """
    def _pdf(self, x, c):
        # weibull_min.pdf(x, c) = c * x**(c-1) * exp(-x**c)
        return c * np.power(x, c - 1) * np.exp(-np.power(x, c))
    def _logpdf(self, x, c):
        # xlogy handles the x == 0 boundary gracefully.
        return np.log(c) + sc.xlogy(c - 1, x) - np.power(x, c)
    def _cdf(self, x, c):
        # 1 - exp(-x**c), computed with expm1 for accuracy near zero.
        return -sc.expm1(-np.power(x, c))
    def _sf(self, x, c):
        return np.exp(self._logsf(x, c))
    def _logsf(self, x, c):
        return -np.power(x, c)
    def _ppf(self, q, c):
        # Invert the cdf: x = (-log(1 - q))**(1/c).
        return np.power(-sc.log1p(-q), 1.0/c)
    def _munp(self, n, c):
        # Raw moments: E[X**n] = Gamma(1 + n/c).
        return sc.gamma(1.0 + n*1.0/c)
    def _entropy(self, c):
        return -_EULER / c - np.log(c) + _EULER + 1
weibull_min = weibull_min_gen(a=0.0, name='weibull_min')
class weibull_max_gen(rv_continuous):
    r"""Weibull maximum continuous random variable.
    The Weibull Maximum Extreme Value distribution, from extreme value theory
    (Fisher-Gnedenko theorem), is the limiting distribution of rescaled
    maximum of iid random variables. This is the distribution of -X
    if X is from the `weibull_min` function.
    %(before_notes)s
    See Also
    --------
    weibull_min
    Notes
    -----
    The probability density function for `weibull_max` is:
    .. math::
        f(x, c) = c (-x)^{c-1} \exp(-(-x)^c)
    for :math:`x < 0`, :math:`c > 0`.
    `weibull_max` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    References
    ----------
    https://en.wikipedia.org/wiki/Weibull_distribution
    https://en.wikipedia.org/wiki/Fisher-Tippett-Gnedenko_theorem
    %(example)s
    """
    # All formulas below are weibull_min formulas reflected through x -> -x;
    # the support is x <= 0 (b=0.0 at instantiation).
    def _pdf(self, x, c):
        # weibull_max.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
        return c*pow(-x, c-1)*np.exp(-pow(-x, c))
    def _logpdf(self, x, c):
        return np.log(c) + sc.xlogy(c-1, -x) - pow(-x, c)
    def _cdf(self, x, c):
        return np.exp(-pow(-x, c))
    def _logcdf(self, x, c):
        return -pow(-x, c)
    def _sf(self, x, c):
        # expm1 keeps accuracy when the cdf is close to 1.
        return -sc.expm1(-pow(-x, c))
    def _ppf(self, q, c):
        return -pow(-np.log(q), 1.0/c)
    def _munp(self, n, c):
        # Moments of weibull_min with a sign flip: odd moments of the
        # reflected variable are negated.
        val = sc.gamma(1.0+n*1.0/c)
        if int(n) % 2:
            sgn = -1
        else:
            sgn = 1
        return sgn * val
    def _entropy(self, c):
        # Entropy is invariant under reflection, so this matches weibull_min.
        return -_EULER / c - np.log(c) + _EULER + 1
weibull_max = weibull_max_gen(b=0.0, name='weibull_max')
class genlogistic_gen(rv_continuous):
    r"""A generalized logistic continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `genlogistic` is:
    .. math::
        f(x, c) = c \frac{\exp(-x)}
                         {(1 + \exp(-x))^{c+1}}
    for :math:`x >= 0`, :math:`c > 0`.
    `genlogistic` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, c):
        # genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
        # Evaluated through the log-density for numerical stability.
        return np.exp(self._logpdf(x, c))
    def _logpdf(self, x, c):
        # Two mathematically equivalent expressions for log(pdf(x, c)):
        #   log(pdf(x, c)) = log(c) - x - (c + 1)*log(1 + exp(-x))
        #                  = log(c) + c*x - (c + 1)*log(1 + exp(x))
        # The sign trick below selects between them based on sign(x), so
        # that exp() is only evaluated at non-positive arguments and never
        # overflows.
        mult = -(c - 1) * (x < 0) - 1
        absx = np.abs(x)
        return np.log(c) + mult*absx - (c+1) * sc.log1p(np.exp(-absx))
    def _cdf(self, x, c):
        Cx = (1+np.exp(-x))**(-c)
        return Cx
    def _ppf(self, q, c):
        # Invert the cdf: x = -log(q**(-1/c) - 1).
        vals = -np.log(pow(q, -1.0/c)-1)
        return vals
    def _stats(self, c):
        # Moments in terms of the digamma/zeta functions.
        mu = _EULER + sc.psi(c)
        mu2 = np.pi*np.pi/6.0 + sc.zeta(2, c)
        g1 = -2*sc.zeta(3, c) + 2*_ZETA3
        g1 /= np.power(mu2, 1.5)
        g2 = np.pi**4/15.0 + 6*sc.zeta(4, c)
        g2 /= mu2**2.0
        return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic')
class genpareto_gen(rv_continuous):
    r"""A generalized Pareto continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `genpareto` is:
    .. math::
        f(x, c) = (1 + c x)^{-1 - 1/c}
    defined for :math:`x \ge 0` if :math:`c \ge 0`, and for
    :math:`0 \le x \le -1/c` if :math:`c < 0`.
    `genpareto` takes ``c`` as a shape parameter for :math:`c`.
    For :math:`c=0`, `genpareto` reduces to the exponential
    distribution, `expon`:
    .. math::
        f(x, 0) = \exp(-x)
    For :math:`c=-1`, `genpareto` is uniform on ``[0, 1]``:
    .. math::
        f(x, -1) = 1
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, c):
        # Any finite c is valid, including c == 0 (exponential limit).
        return np.isfinite(c)
    def _get_support(self, c):
        c = np.asarray(c)
        # Upper endpoint is -1/c for c < 0, otherwise unbounded.
        b = _lazywhere(c < 0, (c,),
                       lambda c: -1. / c,
                       np.inf)
        # Both branches are self.a; np.where is used here only to broadcast
        # the lower bound to the shape of c.
        a = np.where(c >= 0, self.a, self.a)
        return a, b
    def _pdf(self, x, c):
        # genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
        return np.exp(self._logpdf(x, c))
    def _logpdf(self, x, c):
        # (x == x) filters nan; the c == 0 limit is -x (exponential).
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: -sc.xlog1py(c + 1., c*x) / c,
                          -x)
    def _cdf(self, x, c):
        # cdf = 1 - (1 + c*x)**(-1/c), expressed via the inverse Box-Cox
        # transform, which handles the c -> 0 limit continuously.
        return -sc.inv_boxcox1p(-x, -c)
    def _sf(self, x, c):
        return sc.inv_boxcox(-x, -c)
    def _logsf(self, x, c):
        # log sf = -log(1 + c*x)/c, with the c == 0 limit equal to -x.
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: -sc.log1p(c*x) / c,
                          -x)
    def _ppf(self, q, c):
        return -sc.boxcox1p(-q, -c)
    def _isf(self, q, c):
        return -sc.boxcox(q, -c)
    def _stats(self, c, moments='mv'):
        # Each standardized moment exists only below a threshold on c
        # (1, 1/2, 1/3, 1/4 for mean/variance/skew/kurtosis respectively).
        if 'm' not in moments:
            m = None
        else:
            m = _lazywhere(c < 1, (c,),
                           lambda xi: 1/(1 - xi),
                           np.inf)
        if 'v' not in moments:
            v = None
        else:
            v = _lazywhere(c < 1/2, (c,),
                           lambda xi: 1 / (1 - xi)**2 / (1 - 2*xi),
                           np.nan)
        if 's' not in moments:
            s = None
        else:
            s = _lazywhere(c < 1/3, (c,),
                           lambda xi: 2 * (1 + xi) * np.sqrt(1 - 2*xi) /
                           (1 - 3*xi),
                           np.nan)
        if 'k' not in moments:
            k = None
        else:
            k = _lazywhere(c < 1/4, (c,),
                           lambda xi: 3 * (1 - 2*xi) * (2*xi**2 + xi + 3) /
                           (1 - 3*xi) / (1 - 4*xi) - 3,
                           np.nan)
        return m, v, s, k
    def _munp(self, n, c):
        # Raw moments via a finite binomial sum; diverges when c*n >= 1.
        def __munp(n, c):
            val = 0.0
            k = np.arange(0, n + 1)
            for ki, cnk in zip(k, sc.comb(n, k)):
                val = val + cnk * (-1) ** ki / (1.0 - c * ki)
            return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf)
        # c == 0 limit: exponential moments, E[X**n] = n!.
        return _lazywhere(c != 0, (c,),
                          lambda c: __munp(n, c),
                          sc.gamma(n + 1))
    def _entropy(self, c):
        return 1. + c
genpareto = genpareto_gen(a=0.0, name='genpareto')
class genexpon_gen(rv_continuous):
    r"""A generalized exponential continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `genexpon` is:
    .. math::
        f(x, a, b, c) = (a + b (1 - \exp(-c x)))
                        \exp(-a x - b x + \frac{b}{c} (1-\exp(-c x)))
    for :math:`x \ge 0`, :math:`a, b, c > 0`.
    `genexpon` takes :math:`a`, :math:`b` and :math:`c` as shape parameters.
    %(after_notes)s
    References
    ----------
    H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
    Distribution", Journal of the American Statistical Association, 1993.
    N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
    Applications", Asit P. Basu.
    %(example)s
    """
    # Each method computes ecx = 1 - exp(-c*x) once (via expm1 for accuracy)
    # and reuses it in both factors of the density/cdf expressions.
    def _pdf(self, x, a, b, c):
        # genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
        #                            exp(-a*x - b*x + b/c * (1-exp(-c*x)))
        ecx = -sc.expm1(-c*x)
        return (a + b*ecx) * np.exp((-a-b)*x + b*ecx/c)
    def _logpdf(self, x, a, b, c):
        ecx = -sc.expm1(-c*x)
        return np.log(a + b*ecx) + (-a-b)*x + b*ecx/c
    def _cdf(self, x, a, b, c):
        ecx = -sc.expm1(-c*x)
        return -sc.expm1((-a-b)*x + b*ecx/c)
    def _sf(self, x, a, b, c):
        ecx = -sc.expm1(-c*x)
        return np.exp((-a-b)*x + b*ecx/c)
genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
    r"""A generalized extreme value continuous random variable.
    %(before_notes)s
    See Also
    --------
    gumbel_r
    Notes
    -----
    For :math:`c=0`, `genextreme` is equal to `gumbel_r`.
    The probability density function for `genextreme` is:
    .. math::
        f(x, c) = \begin{cases}
                    \exp(-\exp(-x)) \exp(-x) &\text{for } c = 0\\
                    \exp(-(1-c x)^{1/c}) (1-c x)^{1/c-1} &\text{for }
                    x \le 1/c, c > 0
                  \end{cases}
    Note that several sources and software packages use the opposite
    convention for the sign of the shape parameter :math:`c`.
    `genextreme` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, c):
        # Any finite c (including 0) is accepted; only +/-inf is rejected.
        return np.where(abs(c) == np.inf, 0, 1)
    def _get_support(self, c):
        # The support is bounded at 1/c on one side depending on sign(c);
        # _XMIN guards against division by (numerical) zero.
        _b = np.where(c > 0, 1.0 / np.maximum(c, _XMIN), np.inf)
        _a = np.where(c < 0, 1.0 / np.minimum(c, -_XMIN), -np.inf)
        return _a, _b
    def _loglogcdf(self, x, c):
        # Returns log(-log(cdf(x, c)))
        # The c == 0 (Gumbel) limit of log1p(-c*x)/c is -x.
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: sc.log1p(-c*x)/c, -x)
    def _pdf(self, x, c):
        # genextreme.pdf(x, c) =
        #     exp(-exp(-x))*exp(-x),                    for c==0
        #     exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1),    for x \le 1/c, c > 0
        return np.exp(self._logpdf(x, c))
    def _logpdf(self, x, c):
        # cx = c*x with the c == 0 limit mapped to 0.
        cx = _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: c*x, 0.0)
        logex2 = sc.log1p(-cx)
        logpex2 = self._loglogcdf(x, c)
        pex2 = np.exp(logpex2)
        # Handle special cases
        np.putmask(logpex2, (c == 0) & (x == -np.inf), 0.0)
        # Outside the support (cx == 1 or cx == -inf) the density is 0,
        # hence log-density -inf.
        logpdf = _lazywhere(~((cx == 1) | (cx == -np.inf)),
                            (pex2, logpex2, logex2),
                            lambda pex2, lpex2, lex2: -pex2 + lpex2 - lex2,
                            fillvalue=-np.inf)
        np.putmask(logpdf, (c == 1) & (x == 1), 0.0)
        return logpdf
    def _logcdf(self, x, c):
        return -np.exp(self._loglogcdf(x, c))
    def _cdf(self, x, c):
        return np.exp(self._logcdf(x, c))
    def _sf(self, x, c):
        # expm1 keeps accuracy when the cdf is close to 1.
        return -sc.expm1(self._logcdf(x, c))
    def _ppf(self, q, c):
        # Invert via the Gumbel quantile, then transform for c != 0.
        x = -np.log(-np.log(q))
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: -sc.expm1(-c * x) / c, x)
    def _isf(self, q, c):
        x = -np.log(-sc.log1p(-q))
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: -sc.expm1(-c * x) / c, x)
    def _stats(self, c):
        # Moments in terms of g(n) = Gamma(n*c + 1); the small-|c| branches
        # below substitute the analytic c -> 0 (Gumbel) limits to avoid
        # catastrophic cancellation.
        g = lambda n: sc.gamma(n*c + 1)
        g1 = g(1)
        g2 = g(2)
        g3 = g(3)
        g4 = g(4)
        g2mg12 = np.where(abs(c) < 1e-7, (c*np.pi)**2.0/6.0, g2-g1**2.0)
        gam2k = np.where(abs(c) < 1e-7, np.pi**2.0/6.0,
                         sc.expm1(sc.gammaln(2.0*c+1.0)-2*sc.gammaln(c + 1.0))/c**2.0)
        eps = 1e-14
        gamk = np.where(abs(c) < eps, -_EULER, sc.expm1(sc.gammaln(c + 1))/c)
        # Mean exists for c > -1, variance for c > -1/2.
        m = np.where(c < -1.0, np.nan, -gamk)
        v = np.where(c < -0.5, np.nan, g1**2.0*gam2k)
        # skewness
        sk1 = _lazywhere(c >= -1./3,
                         (c, g1, g2, g3, g2mg12),
                         lambda c, g1, g2, g3, g2gm12:
                         np.sign(c)*(-g3 + (g2 + 2*g2mg12)*g1)/g2mg12**1.5,
                         fillvalue=np.nan)
        sk = np.where(abs(c) <= eps**0.29, 12*np.sqrt(6)*_ZETA3/np.pi**3, sk1)
        # kurtosis
        ku1 = _lazywhere(c >= -1./4,
                         (g1, g2, g3, g4, g2mg12),
                         lambda g1, g2, g3, g4, g2mg12:
                         (g4 + (-4*g3 + 3*(g2 + g2mg12)*g1)*g1)/g2mg12**2,
                         fillvalue=np.nan)
        ku = np.where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
        return m, v, sk, ku
    def _fitstart(self, data):
        # This is better than the default shape of (1,).
        # Start on the side of c = 0 indicated by the sample skewness.
        g = _skew(data)
        if g < 0:
            a = 0.5
        else:
            a = -0.5
        return super()._fitstart(data, args=(a,))
    def _munp(self, n, c):
        # Raw moments via a binomial sum; diverge unless c*n > -1.
        k = np.arange(0, n+1)
        vals = 1.0/c**n * np.sum(
            sc.comb(n, k) * (-1)**k * sc.gamma(c*k + 1),
            axis=0)
        return np.where(c*n > -1, vals, np.inf)
    def _entropy(self, c):
        return _EULER*(1 - c) + 1
genextreme = genextreme_gen(name='genextreme')
def _digammainv(y):
"""Inverse of the digamma function (real positive arguments only).
This function is used in the `fit` method of `gamma_gen`.
The function uses either optimize.fsolve or optimize.newton
to solve `sc.digamma(x) - y = 0`. There is probably room for
improvement, but currently it works over a wide range of y:
>>> rng = np.random.default_rng()
>>> y = 64*rng.standard_normal(1000000)
>>> y.min(), y.max()
(-311.43592651416662, 351.77388222276869)
>>> x = [_digammainv(t) for t in y]
>>> np.abs(sc.digamma(x) - y).max()
1.1368683772161603e-13
"""
_em = 0.5772156649015328606065120
func = lambda x: sc.digamma(x) - y
if y > -0.125:
x0 = np.exp(y) + 0.5
if y < 10:
# Some experimentation shows that newton reliably converges
# must faster than fsolve in this y range. For larger y,
# newton sometimes fails to converge.
value = optimize.newton(func, x0, tol=1e-10)
return value
elif y > -3:
x0 = np.exp(y/2.332) + 0.08661
else:
x0 = 1.0 / (-y - _em)
value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
full_output=True)
if ier != 1:
raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
return value[0]
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
    r"""A gamma continuous random variable.
    %(before_notes)s
    See Also
    --------
    erlang, expon
    Notes
    -----
    The probability density function for `gamma` is:
    .. math::
        f(x, a) = \frac{x^{a-1} e^{-x}}{\Gamma(a)}
    for :math:`x \ge 0`, :math:`a > 0`. Here :math:`\Gamma(a)` refers to the
    gamma function.
    `gamma` takes ``a`` as a shape parameter for :math:`a`.
    When :math:`a` is an integer, `gamma` reduces to the Erlang
    distribution, and when :math:`a=1` to the exponential distribution.
    Gamma distributions are sometimes parameterized with two variables,
    with a probability density function of:
    .. math::
        f(x, \alpha, \beta) = \frac{\beta^\alpha x^{\alpha - 1} e^{-\beta x }}{\Gamma(\alpha)}
    Note that this parameterization is equivalent to the above, with
    ``scale = 1 / beta``.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, a, size=None, random_state=None):
        return random_state.standard_gamma(a, size)
    def _pdf(self, x, a):
        # gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
        # Evaluated through the log-density for numerical stability.
        return np.exp(self._logpdf(x, a))
    def _logpdf(self, x, a):
        # xlogy handles the x == 0 boundary; gammaln avoids overflow.
        return sc.xlogy(a-1.0, x) - x - sc.gammaln(a)
    def _cdf(self, x, a):
        return sc.gammainc(a, x)
    def _sf(self, x, a):
        return sc.gammaincc(a, x)
    def _ppf(self, q, a):
        return sc.gammaincinv(a, q)
    def _isf(self, q, a):
        return sc.gammainccinv(a, q)
    def _stats(self, a):
        # mean, variance, skewness, excess kurtosis of the standard gamma.
        return a, a, 2.0/np.sqrt(a), 6.0/a
    def _entropy(self, a):
        return sc.psi(a)*(1-a) + a + sc.gammaln(a)
    def _fitstart(self, data):
        # The skewness of the gamma distribution is `2 / np.sqrt(a)`.
        # We invert that to estimate the shape `a` using the skewness
        # of the data.  The formula is regularized with 1e-8 in the
        # denominator to allow for degenerate data where the skewness
        # is close to 0.
        a = 4 / (1e-8 + _skew(data)**2)
        return super()._fitstart(data, args=(a,))
    @extend_notes_in_docstring(rv_continuous, notes="""\
        When the location is fixed by using the argument `floc`
        and `method='MLE'`, this
        function uses explicit formulas or solves a simpler numerical
        problem than the full ML optimization problem.  So in that case,
        the `optimizer`, `loc` and `scale` arguments are ignored.
        \n\n""")
    def fit(self, data, *args, **kwds):
        floc = kwds.get('floc', None)
        method = kwds.get('method', 'mle')
        if floc is None or method.lower() == 'mm':
            # loc is not fixed.  Use the default fit method.
            return super().fit(data, *args, **kwds)
        # We already have this value, so just pop it from kwds.
        kwds.pop('floc', None)
        f0 = _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
        fscale = kwds.pop('fscale', None)
        _remove_optimizer_parameters(kwds)
        # Special case: loc is fixed.
        if f0 is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            # Without this check, this function would just return the
            # parameters that were given.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")
        # Fixed location is handled by shifting the data.
        data = np.asarray(data)
        if not np.isfinite(data).all():
            raise RuntimeError("The data contains non-finite values.")
        if np.any(data <= floc):
            raise FitDataError("gamma", lower=floc, upper=np.inf)
        if floc != 0:
            # Don't do the subtraction in-place, because `data` might be a
            # view of the input array.
            data = data - floc
        xbar = data.mean()
        # Three cases to handle:
        # * shape and scale both free
        # * shape fixed, scale free
        # * shape free, scale fixed
        if fscale is None:
            # scale is free
            if f0 is not None:
                # shape is fixed
                a = f0
            else:
                # shape and scale are both free.
                # The MLE for the shape parameter `a` is the solution to:
                #     np.log(a) - sc.digamma(a) - np.log(xbar) +
                #                             np.log(data).mean() = 0
                s = np.log(xbar) - np.log(data).mean()
                func = lambda a: np.log(a) - sc.digamma(a) - s
                # Closed-form approximation to the shape MLE gives a
                # bracket [0.6*aest, 1.4*aest] for the root finder.
                aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
                xa = aest*(1-0.4)
                xb = aest*(1+0.4)
                a = optimize.brentq(func, xa, xb, disp=0)
            # The MLE for the scale parameter is just the data mean
            # divided by the shape parameter.
            scale = xbar / a
        else:
            # scale is fixed, shape is free
            # The MLE for the shape parameter `a` is the solution to:
            #     sc.digamma(a) - np.log(data).mean() + np.log(fscale) = 0
            c = np.log(data).mean() - np.log(fscale)
            a = _digammainv(c)
            scale = fscale
        return a, floc, scale
gamma = gamma_gen(a=0.0, name='gamma')
class erlang_gen(gamma_gen):
    """An Erlang continuous random variable.
    %(before_notes)s
    See Also
    --------
    gamma
    Notes
    -----
    The Erlang distribution is a special case of the Gamma distribution, with
    the shape parameter `a` an integer.  Note that this restriction is not
    enforced by `erlang`. It will, however, generate a warning the first time
    a non-integer value is used for the shape parameter.
    Refer to `gamma` for examples.
    """
    def _argcheck(self, a):
        # Accept any positive shape, but warn when it is not an integer,
        # since the Erlang distribution is defined for integer shapes.
        allint = np.all(np.floor(a) == a)
        if not allint:
            # An Erlang distribution shouldn't really have a non-integer
            # shape parameter, so warn the user.
            warnings.warn(
                'The shape parameter of the erlang distribution '
                'has been given a non-integer value %r.' % (a,),
                RuntimeWarning)
        return a > 0
    def _fitstart(self, data):
        # Override gamma_gen_fitstart so that an integer initial value is
        # used. (Also regularize the division, to avoid issues when
        # _skew(data) is 0 or close to 0.)
        a = int(4.0 / (1e-8 + _skew(data)**2))
        # super(gamma_gen, self) deliberately skips gamma_gen._fitstart and
        # calls rv_continuous._fitstart directly.
        return super(gamma_gen, self)._fitstart(data, args=(a,))
    # Trivial override of the fit method, so we can monkey-patch its
    # docstring.
    def fit(self, data, *args, **kwds):
        return super().fit(data, *args, **kwds)
    # fit.__doc__ is None when Python runs with docstrings stripped (-OO);
    # only append the Erlang-specific note when a docstring exists.
    if fit.__doc__:
        fit.__doc__ = (rv_continuous.fit.__doc__ +
                       """
        Notes
        -----
        The Erlang distribution is generally defined to have integer values
        for the shape parameter.  This is not enforced by the `erlang` class.
        When fitting the distribution, it will generally return a non-integer
        value for the shape parameter.  By using the keyword argument
        `f0=<integer>`, the fit method can be constrained to fit the data to
        a specific integer shape parameter.
        """)
erlang = erlang_gen(a=0.0, name='erlang')
class gengamma_gen(rv_continuous):
    r"""A generalized gamma continuous random variable.
    %(before_notes)s
    See Also
    --------
    gamma, invgamma, weibull_min
    Notes
    -----
    The probability density function for `gengamma` is ([1]_):
    .. math::
        f(x, a, c) = \frac{|c| x^{c a-1} \exp(-x^c)}{\Gamma(a)}
    for :math:`x \ge 0`, :math:`a > 0`, and :math:`c \ne 0`.
    :math:`\Gamma` is the gamma function (`scipy.special.gamma`).
    `gengamma` takes :math:`a` and :math:`c` as shape parameters.
    %(after_notes)s
    References
    ----------
    .. [1] E.W. Stacy, "A Generalization of the Gamma Distribution",
       Annals of Mathematical Statistics, Vol 33(3), pp. 1187--1192.
    %(example)s
    """
    def _argcheck(self, a, c):
        # c may be negative but not zero.
        return (a > 0) & (c != 0)
    def _pdf(self, x, a, c):
        # Evaluated through the log-density for numerical stability.
        return np.exp(self._logpdf(x, a, c))
    def _logpdf(self, x, a, c):
        # The density at x == 0 with c < 0 is zero (log-density -inf).
        return _lazywhere((x != 0) | (c > 0), (x, c),
                          lambda x, c: (np.log(abs(c)) + sc.xlogy(c*a - 1, x)
                                        - x**c - sc.gammaln(a)),
                          fillvalue=-np.inf)
    def _cdf(self, x, a, c):
        # For c < 0 the transform x -> x**c is decreasing, so the roles of
        # the regularized incomplete gamma functions swap.
        xc = x**c
        val1 = sc.gammainc(a, xc)
        val2 = sc.gammaincc(a, xc)
        return np.where(c > 0, val1, val2)
    def _rvs(self, a, c, size=None, random_state=None):
        # If G ~ gamma(a) then G**(1/c) ~ gengamma(a, c).
        r = random_state.standard_gamma(a, size=size)
        return r**(1./c)
    def _sf(self, x, a, c):
        xc = x**c
        val1 = sc.gammainc(a, xc)
        val2 = sc.gammaincc(a, xc)
        return np.where(c > 0, val2, val1)
    def _ppf(self, q, a, c):
        val1 = sc.gammaincinv(a, q)
        val2 = sc.gammainccinv(a, q)
        return np.where(c > 0, val1, val2)**(1.0/c)
    def _isf(self, q, a, c):
        val1 = sc.gammaincinv(a, q)
        val2 = sc.gammainccinv(a, q)
        return np.where(c > 0, val2, val1)**(1.0/c)
    def _munp(self, n, a, c):
        # Pochhammer symbol: sc.poch(a, n) = gamma(a+n)/gamma(a)
        return sc.poch(a, n*1.0/c)
    def _entropy(self, a, c):
        val = sc.psi(a)
        return a*(1-val) + 1.0/c*val + sc.gammaln(a) - np.log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma')
class genhalflogistic_gen(rv_continuous):
    r"""A generalized half-logistic continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `genhalflogistic` is:
    .. math::
        f(x, c) = \frac{2 (1 - c x)^{1/(c-1)}}{[1 + (1 - c x)^{1/c}]^2}
    for :math:`0 \le x \le 1/c`, and :math:`c > 0`.
    `genhalflogistic` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, c):
        return c > 0
    def _get_support(self, c):
        # Support is the interval [self.a, 1/c].
        return self.a, 1.0/c
    def _pdf(self, x, c):
        # genhalflogistic.pdf(x, c) =
        #     2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
        inv_c = 1.0/c
        base = np.asarray(1-c*x)
        num = base**(inv_c-1)
        powed = num*base  # == (1 - c*x)**(1/c)
        return 2*num / (1+powed)**2
    def _cdf(self, x, c):
        inv_c = 1.0/c
        base = np.asarray(1-c*x)
        powed = base**(inv_c)
        return (1.0-powed) / (1+powed)
    def _ppf(self, q, c):
        # Invert the cdf in closed form.
        return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
    def _entropy(self, c):
        return 2 - (2*c+1)*np.log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class genhyperbolic_gen(rv_continuous):
    r"""A generalized hyperbolic continuous random variable.
    %(before_notes)s
    See Also
    --------
    t, norminvgauss, geninvgauss, laplace, cauchy
    Notes
    -----
    The probability density function for `genhyperbolic` is:
    .. math::
        f(x, p, a, b) =
            \frac{(a^2 - b^2)^{p/2}}
            {\sqrt{2\pi}a^{p-0.5}
            K_p\Big(\sqrt{a^2 - b^2}\Big)}
            e^{bx} \times \frac{K_{p - 1/2}
            (a \sqrt{1 + x^2})}
            {(\sqrt{1 + x^2})^{1/2 - p}}
    for :math:`x, p \in ( - \infty; \infty)`,
    :math:`|b| < a` if :math:`p \ge 0`,
    :math:`|b| \le a` if :math:`p < 0`.
    :math:`K_{p}(.)` denotes the modified Bessel function of the second
    kind and order :math:`p` (`scipy.special.kn`)
    `genhyperbolic` takes ``p`` as a tail parameter,
    ``a`` as a shape parameter,
    ``b`` as a skewness parameter.
    %(after_notes)s
    The original parameterization of the Generalized Hyperbolic Distribution
    is found in [1]_ as follows
    .. math::
        f(x, \lambda, \alpha, \beta, \delta, \mu) =
           \frac{(\gamma/\delta)^\lambda}{\sqrt{2\pi}K_\lambda(\delta \gamma)}
           e^{\beta (x - \mu)} \times \frac{K_{\lambda - 1/2}
           (\alpha \sqrt{\delta^2 + (x - \mu)^2})}
           {(\sqrt{\delta^2 + (x - \mu)^2} / \alpha)^{1/2 - \lambda}}
    for :math:`x \in ( - \infty; \infty)`,
    :math:`\gamma := \sqrt{\alpha^2 - \beta^2}`,
    :math:`\lambda, \mu \in ( - \infty; \infty)`,
    :math:`\delta \ge 0, |\beta| < \alpha` if :math:`\lambda \ge 0`,
    :math:`\delta > 0, |\beta| \le \alpha` if :math:`\lambda < 0`.
    The location-scale-based parameterization implemented in
    SciPy is based on [2]_, where :math:`a = \alpha\delta`,
    :math:`b = \beta\delta`, :math:`p = \lambda`,
    :math:`scale=\delta` and :math:`loc=\mu`
    Moments are implemented based on [3]_ and [4]_.
    For the distributions that are a special case such as Student's t,
    it is not recommended to rely on the implementation of genhyperbolic.
    To avoid potential numerical problems and for performance reasons,
    the methods of the specific distributions should be used.
    References
    ----------
    .. [1] O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions
       on Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3),
       pp. 151-157, 1978. https://www.jstor.org/stable/4615705
    .. [2] Eberlein E., Prause K. (2002) The Generalized Hyperbolic Model:
        Financial Derivatives and Risk Measures. In: Geman H., Madan D.,
        Pliska S.R., Vorst T. (eds) Mathematical Finance - Bachelier
        Congress 2000. Springer Finance. Springer, Berlin, Heidelberg.
        :doi:`10.1007/978-3-662-12429-1_12`
    .. [3] Scott, David J, Würtz, Diethelm, Dong, Christine and Tran,
       Thanh Tam, (2009), Moments of the generalized hyperbolic
       distribution, MPRA Paper, University Library of Munich, Germany,
       https://EconPapers.repec.org/RePEc:pra:mprapa:19081.
    .. [4] E. Eberlein and E. A. von Hammerstein. Generalized hyperbolic
       and inverse Gaussian distributions: Limiting cases and approximation
       of processes. FDM Preprint 80, April 2003. University of Freiburg.
       https://freidok.uni-freiburg.de/fedora/objects/freidok:7974/datastreams/FILE1/content
    %(example)s
    """
    def _argcheck(self, p, a, b):
        # |b| < a is required when p >= 0; |b| <= a suffices when p < 0.
        return (np.logical_and(np.abs(b) < a, p >= 0)
                | np.logical_and(np.abs(b) <= a, p < 0))
    def _logpdf(self, x, p, a, b):
        # kve instead of kv works better for large values of p
        # and smaller values of sqrt(a^2 - b^2)
        # NOTE: `_stats` here is the compiled module-level helper module
        # providing genhyperbolic_logpdf, not this class's _stats method.
        @np.vectorize
        def _logpdf_single(x, p, a, b):
            return _stats.genhyperbolic_logpdf(x, p, a, b)
        return _logpdf_single(x, p, a, b)
    def _pdf(self, x, p, a, b):
        # kve instead of kv works better for large values of p
        # and smaller values of sqrt(a^2 - b^2)
        @np.vectorize
        def _pdf_single(x, p, a, b):
            return _stats.genhyperbolic_pdf(x, p, a, b)
        return _pdf_single(x, p, a, b)
    def _cdf(self, x, p, a, b):
        # No closed form: integrate the compiled pdf kernel up to x via a
        # LowLevelCallable to avoid Python-level per-point overhead.
        @np.vectorize
        def _cdf_single(x, p, a, b):
            user_data = np.array(
                [p, a, b], float
            ).ctypes.data_as(ctypes.c_void_p)
            llc = LowLevelCallable.from_cython(
                _stats, '_genhyperbolic_pdf', user_data
            )
            t1 = integrate.quad(llc, -np.inf, x)[0]
            if np.isnan(t1):
                msg = ("Infinite values encountered in scipy.special.kve. "
                       "Values replaced by NaN to avoid incorrect results.")
                warnings.warn(msg, RuntimeWarning)
            return t1
        return _cdf_single(x, p, a, b)
    def _rvs(self, p, a, b, size=None, random_state=None):
        # note: X = b * V + sqrt(V) * X  has a
        # generalized hyperbolic distribution
        # if X is standard normal and V is
        # geninvgauss(p = p, b = t2, loc = loc, scale = t3)
        t1 = np.float_power(a, 2) - np.float_power(b, 2)
        # b in the GIG
        t2 = np.float_power(t1, 0.5)
        # scale in the GIG
        t3 = np.float_power(t1, - 0.5)
        gig = geninvgauss.rvs(
            p=p,
            b=t2,
            scale=t3,
            size=size,
            random_state=random_state
            )
        normst = norm.rvs(size=size, random_state=random_state)
        return b * gig + np.sqrt(gig) * normst
    def _stats(self, p, a, b):
        # https://mpra.ub.uni-muenchen.de/19081/1/MPRA_paper_19081.pdf
        # https://freidok.uni-freiburg.de/fedora/objects/freidok:7974/datastreams/FILE1/content
        # standardized moments
        p, a, b = np.broadcast_arrays(p, a, b)
        t1 = np.float_power(a, 2) - np.float_power(b, 2)
        t1 = np.float_power(t1, 0.5)
        # np.float_power(1, 2) appears to be delta**2 with delta = 1 in the
        # standardized parameterization, kept for symmetry with the
        # reference formulas -- TODO confirm against [3]/[4].
        t2 = np.float_power(1, 2) * np.float_power(t1, - 1)
        integers = np.linspace(0, 4, 5)
        # make integers perpendicular to existing dimensions
        integers = integers.reshape(integers.shape + (1,) * p.ndim)
        # Bessel-K ratios r_k = K_{p+k}(t1) / K_p(t1) drive all moments.
        b0, b1, b2, b3, b4 = sc.kv(p + integers, t1)
        r1, r2, r3, r4 = [b / b0 for b in (b1, b2, b3, b4)]
        m = b * t2 * r1
        v = (
            t2 * r1 + np.float_power(b, 2) * np.float_power(t2, 2) *
            (r2 - np.float_power(r1, 2))
            )
        # Third central moment, then standardized skewness.
        m3e = (
            np.float_power(b, 3) * np.float_power(t2, 3) *
            (r3 - 3 * b2 * b1 * np.float_power(b0, -2) +
             2 * np.float_power(r1, 3)) +
            3 * b * np.float_power(t2, 2) *
            (r2 - np.float_power(r1, 2))
            )
        s = m3e * np.float_power(v, - 3 / 2)
        # Fourth central moment, then excess kurtosis.
        m4e = (
            np.float_power(b, 4) * np.float_power(t2, 4) *
            (r4 - 4 * b3 * b1 * np.float_power(b0, - 2) +
             6 * b2 * np.float_power(b1, 2) * np.float_power(b0, - 3) -
             3 * np.float_power(r1, 4)) +
            np.float_power(b, 2) * np.float_power(t2, 3) *
            (6 * r3 - 12 * b2 * b1 * np.float_power(b0, - 2) +
             6 * np.float_power(r1, 3)) +
            3 * np.float_power(t2, 2) * r2
            )
        k = m4e * np.float_power(v, -2) - 3
        return m, v, s, k
genhyperbolic = genhyperbolic_gen(name='genhyperbolic')
class gompertz_gen(rv_continuous):
    r"""A Gompertz (or truncated Gumbel) continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `gompertz` is:
    .. math::
        f(x, c) = c \exp(x) \exp(-c (e^x-1))
    for :math:`x \ge 0`, :math:`c > 0`.
    `gompertz` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, c):
        # gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
        # Evaluated through the log-density for numerical stability.
        return np.exp(self._logpdf(x, c))
    def _logpdf(self, x, c):
        # log(c) + x - c*(exp(x) - 1); expm1 keeps accuracy near x = 0.
        return x + np.log(c) - c * sc.expm1(x)
    def _cdf(self, x, c):
        growth = sc.expm1(x)  # exp(x) - 1
        return -sc.expm1(-c * growth)
    def _ppf(self, q, c):
        # Invert the cdf: x = log(1 - log(1 - q)/c).
        scaled = -1.0 / c * sc.log1p(-q)
        return sc.log1p(scaled)
    def _entropy(self, c):
        return 1.0 - np.log(c) - np.exp(c)*sc.expn(1, c)
gompertz = gompertz_gen(a=0.0, name='gompertz')
def _average_with_log_weights(x, logweights):
    """Weighted average of ``x`` with weights supplied on the log scale.

    The maximum log-weight is subtracted before exponentiating, which
    prevents overflow; the common factor cancels in the normalized average.
    """
    x = np.asarray(x)
    logweights = np.asarray(logweights)
    shifted = np.exp(logweights - logweights.max())
    return np.average(x, weights=shifted)
class gumbel_r_gen(rv_continuous):
    r"""A right-skewed Gumbel continuous random variable.
    %(before_notes)s
    See Also
    --------
    gumbel_l, gompertz, genextreme
    Notes
    -----
    The probability density function for `gumbel_r` is:
    .. math::
        f(x) = \exp(-(x + e^{-x}))
    The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
    distribution. It is also related to the extreme value distribution,
    log-Weibull and Gompertz distributions.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x):
        # gumbel_r.pdf(x) = exp(-(x + exp(-x)))
        return np.exp(self._logpdf(x))
    def _logpdf(self, x):
        return -x - np.exp(-x)
    def _cdf(self, x):
        return np.exp(-np.exp(-x))
    def _logcdf(self, x):
        return -np.exp(-x)
    def _ppf(self, q):
        return -np.log(-np.log(q))
    def _sf(self, x):
        # expm1 keeps precision where the cdf is close to 1
        return -sc.expm1(-np.exp(-x))
    def _isf(self, p):
        return -np.log(-np.log1p(-p))
    def _stats(self):
        # mean, variance, skewness, excess kurtosis in closed form
        return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
    def _entropy(self):
        # https://en.wikipedia.org/wiki/Gumbel_distribution
        return _EULER + 1.
    @_call_super_mom
    def fit(self, data, *args, **kwds):
        # Analytic MLE: solve a 1-d root problem for the scale, then the
        # location follows in closed form.
        data, floc, fscale = _check_fit_input_parameters(self, data,
                                                         args, kwds)
        # if user has provided `floc` or `fscale`, fall back on super fit
        # method. This scenario is not suitable for solving a system of
        # equations
        if floc is not None or fscale is not None:
            return super().fit(data, *args, **kwds)
        # rv_continuous provided guesses
        loc, scale = self._fitstart(data)
        # account for user provided guesses
        loc = kwds.pop('loc', loc)
        scale = kwds.pop('scale', scale)
        # By the method of maximum likelihood, the estimators of the
        # location and scale are the roots of the equation defined in
        # `func` and the value of the expression for `loc` that follows.
        # Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
        # and Peacock (2000), Page 101
        def func(scale, data):
            sdata = -data / scale
            wavg = _average_with_log_weights(data, logweights=sdata)
            return data.mean() - wavg - scale
        soln = optimize.root(func, scale, args=(data,),
                             options={'xtol': 1e-14})
        scale = soln.x[0]
        # loc = -scale * log(mean(exp(-data/scale))), computed stably via
        # logsumexp
        loc = -scale * (sc.logsumexp(-data/scale) - np.log(len(data)))
        return loc, scale
gumbel_r = gumbel_r_gen(name='gumbel_r')
class gumbel_l_gen(rv_continuous):
    r"""A left-skewed Gumbel continuous random variable.
    %(before_notes)s
    See Also
    --------
    gumbel_r, gompertz, genextreme
    Notes
    -----
    The probability density function for `gumbel_l` is:
    .. math::
        f(x) = \exp(x - e^x)
    The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
    distribution. It is also related to the extreme value distribution,
    log-Weibull and Gompertz distributions.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x):
        # gumbel_l.pdf(x) = exp(x - exp(x))
        return np.exp(self._logpdf(x))
    def _logpdf(self, x):
        return x - np.exp(x)
    def _cdf(self, x):
        # expm1 keeps precision where the cdf is close to 0
        return -sc.expm1(-np.exp(x))
    def _ppf(self, q):
        return np.log(-sc.log1p(-q))
    def _logsf(self, x):
        return -np.exp(x)
    def _sf(self, x):
        return np.exp(-np.exp(x))
    def _isf(self, x):
        return np.log(-np.log(x))
    def _stats(self):
        # mirror image of gumbel_r: mean and skewness change sign
        return -_EULER, np.pi*np.pi/6.0, \
               -12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
    def _entropy(self):
        return _EULER + 1.
    @_call_super_mom
    def fit(self, data, *args, **kwds):
        # The fit method of `gumbel_r` can be used for this distribution with
        # small modifications. The process to do this is
        # 1. pass the sign negated data into `gumbel_r.fit`
        # 2. negate the sign of the resulting location, leaving the scale
        #    unmodified.
        # `gumbel_r.fit` holds necessary input checks.
        loc_r, scale_r, = gumbel_r.fit(-np.asarray(data), *args, **kwds)
        return (-loc_r, scale_r)
gumbel_l = gumbel_l_gen(name='gumbel_l')
class halfcauchy_gen(rv_continuous):
    r"""A Half-Cauchy continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `halfcauchy` is:
    .. math::
        f(x) = \frac{2}{\pi (1 + x^2)}
    for :math:`x \ge 0`.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x):
        # pdf(x) = 2 / (pi * (1 + x**2))
        denom = 1.0+x*x
        return 2.0/np.pi/denom
    def _logpdf(self, x):
        # log1p keeps the log-density accurate for small x
        return np.log(2.0/np.pi) - sc.log1p(x*x)
    def _cdf(self, x):
        # F(x) = (2/pi) * arctan(x)
        return 2.0/np.pi*np.arctan(x)
    def _ppf(self, q):
        # exact inverse of the cdf
        return np.tan(np.pi/2*q)
    def _stats(self):
        # mean and variance diverge; skewness/kurtosis are undefined
        return np.inf, np.inf, np.nan, np.nan
    def _entropy(self):
        return np.log(2*np.pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
class halflogistic_gen(rv_continuous):
    r"""A half-logistic continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `halflogistic` is:
    .. math::
        f(x) = \frac{ 2 e^{-x} }{ (1+e^{-x})^2 }
             = \frac{1}{2} \text{sech}(x/2)^2
    for :math:`x \ge 0`.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x):
        # halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2
        #                     = 1/2 * sech(x/2)**2
        return np.exp(self._logpdf(x))
    def _logpdf(self, x):
        # log1p(exp(-x)) keeps the log-density stable for large x
        return np.log(2) - x - 2. * sc.log1p(np.exp(-x))
    def _cdf(self, x):
        return np.tanh(x/2.0)
    def _ppf(self, q):
        # exact inverse of tanh(x/2)
        return 2*np.arctanh(q)
    def _munp(self, n):
        # closed forms for the first four raw moments; the general case
        # uses the (Hurwitz) zeta function
        if n == 1:
            return 2*np.log(2)
        if n == 2:
            return np.pi*np.pi/3.0
        if n == 3:
            return 9*_ZETA3
        if n == 4:
            return 7*np.pi**4 / 15.0
        return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1)
    def _entropy(self):
        return 2-np.log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
class halfnorm_gen(rv_continuous):
    r"""A half-normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `halfnorm` is:
    .. math::
        f(x) = \sqrt{2/\pi} \exp(-x^2 / 2)
    for :math:`x >= 0`.
    `halfnorm` is a special case of `chi` with ``df=1``.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, size=None, random_state=None):
        # fold standard normal draws onto the nonnegative half-line
        draws = random_state.standard_normal(size=size)
        return abs(draws)
    def _pdf(self, x):
        # pdf(x) = sqrt(2/pi) * exp(-x**2/2)
        return np.sqrt(2.0/np.pi)*np.exp(-x*x/2.0)
    def _logpdf(self, x):
        return 0.5 * np.log(2.0/np.pi) - x*x/2.0
    def _cdf(self, x):
        # F(x) = 2*Phi(x) - 1
        return _norm_cdf(x)*2-1.0
    def _ppf(self, q):
        # invert F through the normal quantile function
        return sc.ndtri((1+q)/2.0)
    def _stats(self):
        # mean, variance, skewness, excess kurtosis in closed form
        mean = np.sqrt(2.0/np.pi)
        var = 1-2.0/np.pi
        skew = np.sqrt(2)*(4-np.pi)/(np.pi-2)**1.5
        kurt = 8*(np.pi-3)/(np.pi-2)**2
        return mean, var, skew, kurt
    def _entropy(self):
        return 0.5*np.log(np.pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
class hypsecant_gen(rv_continuous):
    r"""A hyperbolic secant continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `hypsecant` is:
    .. math::
        f(x) = \frac{1}{\pi} \text{sech}(x)
    for a real number :math:`x`.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x):
        # pdf(x) = sech(x) / pi
        return 1.0/(np.pi*np.cosh(x))
    def _cdf(self, x):
        # F(x) = (2/pi) * arctan(exp(x))
        return 2.0/np.pi*np.arctan(np.exp(x))
    def _ppf(self, q):
        # invert the cdf: x = log(tan(pi*q/2))
        return np.log(np.tan(np.pi*q/2.0))
    def _stats(self):
        # mean, variance, skewness, excess kurtosis
        variance = np.pi*np.pi/4
        return 0, variance, 0, 2
    def _entropy(self):
        return np.log(2*np.pi)
hypsecant = hypsecant_gen(name='hypsecant')
class gausshyper_gen(rv_continuous):
    r"""A Gauss hypergeometric continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `gausshyper` is:
    .. math::
        f(x, a, b, c, z) = C x^{a-1} (1-x)^{b-1} (1+zx)^{-c}
    for :math:`0 \le x \le 1`, :math:`a > 0`, :math:`b > 0`, :math:`z > -1`,
    and :math:`C = \frac{1}{B(a, b) F[2, 1](c, a; a+b; -z)}`.
    :math:`F[2, 1]` is the Gauss hypergeometric function
    `scipy.special.hyp2f1`.
    `gausshyper` takes :math:`a`, :math:`b`, :math:`c` and :math:`z` as shape
    parameters.
    %(after_notes)s
    References
    ----------
    .. [1] Armero, C., and M. J. Bayarri. "Prior Assessments for Prediction in
           Queues." *Journal of the Royal Statistical Society*. Series D (The
           Statistician) 43, no. 1 (1994): 139-53. doi:10.2307/2348939
    %(example)s
    """
    def _argcheck(self, a, b, c, z):
        # z > -1 per gh-10134; `c == c` only rejects NaN (any real c is ok)
        return (a > 0) & (b > 0) & (c == c) & (z > -1)
    def _pdf(self, x, a, b, c, z):
        # gausshyper.pdf(x, a, b, c, z) =
        # C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
        # Cinv = B(a, b) * 2F1(c, a; a+b; -z) is the normalizing constant
        Cinv = sc.gamma(a)*sc.gamma(b)/sc.gamma(a+b)*sc.hyp2f1(c, a, a+b, -z)
        return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
    def _munp(self, n, a, b, c, z):
        # raw moments: ratio of beta and hypergeometric factors, see [1]
        fac = sc.beta(n+a, b) / sc.beta(a, b)
        num = sc.hyp2f1(c, a+n, a+b+n, -z)
        den = sc.hyp2f1(c, a, a+b, -z)
        return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
class invgamma_gen(rv_continuous):
    r"""An inverted gamma continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `invgamma` is:
    .. math::
        f(x, a) = \frac{x^{-a-1}}{\Gamma(a)} \exp(-\frac{1}{x})
    for :math:`x >= 0`, :math:`a > 0`. :math:`\Gamma` is the gamma function
    (`scipy.special.gamma`).
    `invgamma` takes ``a`` as a shape parameter for :math:`a`.
    `invgamma` is a special case of `gengamma` with ``c=-1``, and it is a
    different parameterization of the scaled inverse chi-squared distribution.
    Specifically, if the scaled inverse chi-squared distribution is
    parameterized with degrees of freedom :math:`\nu` and scaling parameter
    :math:`\tau^2`, then it can be modeled using `invgamma` with
    ``a=`` :math:`\nu/2` and ``scale=`` :math:`\nu \tau^2/2`.
    %(after_notes)s
    %(example)s
    """
    _support_mask = rv_continuous._open_support_mask
    def _pdf(self, x, a):
        # invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
        return np.exp(self._logpdf(x, a))
    def _logpdf(self, x, a):
        return -(a+1) * np.log(x) - sc.gammaln(a) - 1.0/x
    def _cdf(self, x, a):
        # the cdf/sf pairs below are the gamma sf/cdf evaluated at 1/x
        return sc.gammaincc(a, 1.0 / x)
    def _ppf(self, q, a):
        return 1.0 / sc.gammainccinv(a, q)
    def _sf(self, x, a):
        return sc.gammainc(a, 1.0 / x)
    def _isf(self, q, a):
        return 1.0 / sc.gammaincinv(a, q)
    def _stats(self, a, moments='mvsk'):
        # moments exist only above thresholds on a; _lazywhere substitutes
        # inf (mean/variance) or nan (skewness/kurtosis) otherwise
        m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
        m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
                        np.inf)
        g1, g2 = None, None
        if 's' in moments:
            g1 = _lazywhere(
                a > 3, (a,),
                lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
        if 'k' in moments:
            g2 = _lazywhere(
                a > 4, (a,),
                lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
        return m1, m2, g1, g2
    def _entropy(self, a):
        return a - (a+1.0) * sc.psi(a) + sc.gammaln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma')
class invgauss_gen(rv_continuous):
    r"""An inverse Gaussian continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `invgauss` is:
    .. math::
        f(x, \mu) = \frac{1}{\sqrt{2 \pi x^3}}
                    \exp(-\frac{(x-\mu)^2}{2 x \mu^2})
    for :math:`x >= 0` and :math:`\mu > 0`.
    `invgauss` takes ``mu`` as a shape parameter for :math:`\mu`.
    %(after_notes)s
    %(example)s
    """
    _support_mask = rv_continuous._open_support_mask
    def _rvs(self, mu, size=None, random_state=None):
        # numpy's Wald sampler is the standard inverse Gaussian generator
        return random_state.wald(mu, 1.0, size=size)
    def _pdf(self, x, mu):
        # invgauss.pdf(x, mu) =
        #                  1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
        return 1.0/np.sqrt(2*np.pi*x**3.0)*np.exp(-1.0/(2*x)*((x-mu)/mu)**2)
    def _logpdf(self, x, mu):
        return -0.5*np.log(2*np.pi) - 1.5*np.log(x) - ((x-mu)/mu)**2/(2*x)
    # approach adapted from equations in
    # https://journal.r-project.org/archive/2016-1/giner-smyth.pdf,
    # not R code. see gh-13616
    def _logcdf(self, x, mu):
        # combine two normal log-cdf terms with a log-sum-exp so neither
        # tail underflows
        fac = 1 / np.sqrt(x)
        a = _norm_logcdf(fac * ((x / mu) - 1))
        b = 2 / mu + _norm_logcdf(-fac * ((x / mu) + 1))
        return a + np.log1p(np.exp(b - a))
    def _logsf(self, x, mu):
        # same combination trick as _logcdf, with a subtraction instead
        fac = 1 / np.sqrt(x)
        a = _norm_logsf(fac * ((x / mu) - 1))
        b = 2 / mu + _norm_logcdf(-fac * (x + mu) / mu)
        return a + np.log1p(-np.exp(b - a))
    def _sf(self, x, mu):
        return np.exp(self._logsf(x, mu))
    def _cdf(self, x, mu):
        return np.exp(self._logcdf(x, mu))
    def _stats(self, mu):
        return mu, mu**3.0, 3*np.sqrt(mu), 15*mu
    def fit(self, data, *args, **kwds):
        # route wald (exact type check below -- presumably wald_gen reuses
        # this fit; confirm against its definition) and method-of-moments
        # requests through the generic machinery
        method = kwds.get('method', 'mle')
        if type(self) == wald_gen or method.lower() == 'mm':
            return super().fit(data, *args, **kwds)
        data, fshape_s, floc, fscale = _check_fit_input_parameters(self, data,
                                                                   args, kwds)
        '''
        Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
        and Peacock (2000), Page 121. Their shape parameter is equivilent to
        SciPy's with the conversion `fshape_s = fshape / scale`.
        MLE formulas are not used in 3 condtions:
        - `loc` is not fixed
        - `mu` is fixed
        These cases fall back on the superclass fit method.
        - `loc` is fixed but translation results in negative data raises
          a `FitDataError`.
        '''
        if floc is None or fshape_s is not None:
            return super().fit(data, *args, **kwds)
        elif np.any(data - floc < 0):
            raise FitDataError("invgauss", lower=0, upper=np.inf)
        else:
            data = data - floc
            fshape_n = np.mean(data)
            if fscale is None:
                fscale = len(data) / (np.sum(data ** -1 - fshape_n ** -1))
            fshape_s = fshape_n / fscale
            return fshape_s, floc, fscale
invgauss = invgauss_gen(a=0.0, name='invgauss')
class geninvgauss_gen(rv_continuous):
    r"""A Generalized Inverse Gaussian continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `geninvgauss` is:
    .. math::
        f(x, p, b) = x^{p-1} \exp(-b (x + 1/x) / 2) / (2 K_p(b))
    where `x > 0`, and the parameters `p, b` satisfy `b > 0` ([1]_).
    :math:`K_p` is the modified Bessel function of second kind of order `p`
    (`scipy.special.kv`).
    %(after_notes)s
    The inverse Gaussian distribution `stats.invgauss(mu)` is a special case of
    `geninvgauss` with `p = -1/2`, `b = 1 / mu` and `scale = mu`.
    Generating random variates is challenging for this distribution. The
    implementation is based on [2]_.
    References
    ----------
    .. [1] O. Barndorff-Nielsen, P. Blaesild, C. Halgreen, "First hitting time
           models for the generalized inverse gaussian distribution",
           Stochastic Processes and their Applications 7, pp. 49--54, 1978.
    .. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian
           random variates", Statistics and Computing, 24(4), p. 547--557, 2014.
    %(example)s
    """
    def _argcheck(self, p, b):
        # `p == p` rejects NaN; any finite real order p is allowed
        return (p == p) & (b > 0)
    def _logpdf(self, x, p, b):
        # kve instead of kv works better for large values of b
        # warn if kve produces infinite values and replace by nan
        # otherwise c = -inf and the results are often incorrect
        @np.vectorize
        def logpdf_single(x, p, b):
            return _stats.geninvgauss_logpdf(x, p, b)
        z = logpdf_single(x, p, b)
        if np.isnan(z).any():
            msg = ("Infinite values encountered in scipy.special.kve(p, b). "
                   "Values replaced by NaN to avoid incorrect results.")
            warnings.warn(msg, RuntimeWarning)
        return z
    def _pdf(self, x, p, b):
        # relying on logpdf avoids overflow of x**(p-1) for large x and p
        return np.exp(self._logpdf(x, p, b))
    def _cdf(self, x, *args):
        # numerically integrate the pdf from the lower support endpoint;
        # the compiled pdf is wrapped as a LowLevelCallable for speed
        _a, _b = self._get_support(*args)
        @np.vectorize
        def _cdf_single(x, *args):
            p, b = args
            user_data = np.array([p, b], float).ctypes.data_as(ctypes.c_void_p)
            llc = LowLevelCallable.from_cython(_stats, '_geninvgauss_pdf',
                                               user_data)
            return integrate.quad(llc, _a, x)[0]
        return _cdf_single(x, *args)
    def _logquasipdf(self, x, p, b):
        # log of the quasi-density (w/o normalizing constant) used in _rvs
        return _lazywhere(x > 0, (x, p, b),
                          lambda x, p, b: (p - 1)*np.log(x) - b*(x + 1/x)/2,
                          -np.inf)
    def _rvs(self, p, b, size=None, random_state=None):
        # if p and b are scalar, use _rvs_scalar, otherwise need to create
        # output by iterating over parameters
        if np.isscalar(p) and np.isscalar(b):
            out = self._rvs_scalar(p, b, size, random_state)
        elif p.size == 1 and b.size == 1:
            out = self._rvs_scalar(p.item(), b.item(), size, random_state)
        else:
            # When this method is called, size will be a (possibly empty)
            # tuple of integers. It will not be None; if `size=None` is passed
            # to `rvs()`, size will be the empty tuple ().
            p, b = np.broadcast_arrays(p, b)
            # p and b now have the same shape.
            # `shp` is the shape of the blocks of random variates that are
            # generated for each combination of parameters associated with
            # broadcasting p and b.
            # bc is a tuple the same length as size. The values
            # in bc are bools. If bc[j] is True, it means that
            # entire axis is filled in for a given combination of the
            # broadcast arguments.
            shp, bc = _check_shape(p.shape, size)
            # `numsamples` is the total number of variates to be generated
            # for each combination of the input arguments.
            numsamples = int(np.prod(shp))
            # `out` is the array to be returned. It is filled in in the
            # loop below.
            out = np.empty(size)
            it = np.nditer([p, b],
                           flags=['multi_index'],
                           op_flags=[['readonly'], ['readonly']])
            while not it.finished:
                # Convert the iterator's multi_index into an index into the
                # `out` array where the call to _rvs_scalar() will be stored.
                # Where bc is True, we use a full slice; otherwise we use the
                # index value from it.multi_index. len(it.multi_index) might
                # be less than len(bc), and in that case we want to align these
                # two sequences to the right, so the loop variable j runs from
                # -len(size) to 0. This doesn't cause an IndexError, as
                # bc[j] will be True in those cases where it.multi_index[j]
                # would cause an IndexError.
                idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
                            for j in range(-len(size), 0))
                out[idx] = self._rvs_scalar(it[0], it[1], numsamples,
                                            random_state).reshape(shp)
                it.iternext()
        if size == ():
            out = out.item()
        return out
    def _rvs_scalar(self, p, b, numsamples, random_state):
        # following [2], the quasi-pdf is used instead of the pdf for the
        # generation of rvs
        invert_res = False
        if not(numsamples):
            numsamples = 1
        if p < 0:
            # note: if X is geninvgauss(p, b), then 1/X is geninvgauss(-p, b)
            p = -p
            invert_res = True
        m = self._mode(p, b)
        # determine method to be used following [2]
        ratio_unif = True
        if p >= 1 or b > 1:
            # ratio of uniforms with mode shift below
            mode_shift = True
        elif b >= min(0.5, 2 * np.sqrt(1 - p) / 3):
            # ratio of uniforms without mode shift below
            mode_shift = False
        else:
            # new algorithm in [2]
            ratio_unif = False
        # prepare sampling of rvs
        size1d = tuple(np.atleast_1d(numsamples))
        N = np.prod(size1d)  # number of rvs needed, reshape upon return
        x = np.zeros(N)
        simulated = 0
        if ratio_unif:
            # use ratio of uniforms method
            if mode_shift:
                a2 = -2 * (p + 1) / b - m
                a1 = 2 * m * (p - 1) / b - 1
                # find roots of x**3 + a2*x**2 + a1*x + m (Cardano's formula)
                p1 = a1 - a2**2 / 3
                q1 = 2 * a2**3 / 27 - a2 * a1 / 3 + m
                phi = np.arccos(-q1 * np.sqrt(-27 / p1**3) / 2)
                s1 = -np.sqrt(-4 * p1 / 3)
                root1 = s1 * np.cos(phi / 3 + np.pi / 3) - a2 / 3
                root2 = -s1 * np.cos(phi / 3) - a2 / 3
                # root3 = s1 * np.cos(phi / 3 - np.pi / 3) - a2 / 3
                # if g is the quasipdf, rescale: g(x) / g(m) which we can write
                # as exp(log(g(x)) - log(g(m))). This is important
                # since for large values of p and b, g cannot be evaluated.
                # denote the rescaled quasipdf by h
                lm = self._logquasipdf(m, p, b)
                d1 = self._logquasipdf(root1, p, b) - lm
                d2 = self._logquasipdf(root2, p, b) - lm
                # compute the bounding rectangle w.r.t. h. Note that
                # np.exp(0.5*d1) = np.sqrt(g(root1)/g(m)) = np.sqrt(h(root1))
                vmin = (root1 - m) * np.exp(0.5 * d1)
                vmax = (root2 - m) * np.exp(0.5 * d2)
                umax = 1  # umax = sqrt(h(m)) = 1
                logqpdf = lambda x: self._logquasipdf(x, p, b) - lm
                c = m
            else:
                # ratio of uniforms without mode shift
                # compute np.sqrt(quasipdf(m))
                umax = np.exp(0.5*self._logquasipdf(m, p, b))
                xplus = ((1 + p) + np.sqrt((1 + p)**2 + b**2))/b
                vmin = 0
                # compute xplus * np.sqrt(quasipdf(xplus))
                vmax = xplus * np.exp(0.5 * self._logquasipdf(xplus, p, b))
                c = 0
                logqpdf = lambda x: self._logquasipdf(x, p, b)
            if vmin >= vmax:
                raise ValueError("vmin must be smaller than vmax.")
            if umax <= 0:
                raise ValueError("umax must be positive.")
            i = 1
            while simulated < N:
                k = N - simulated
                # simulate uniform rvs on [0, umax] and [vmin, vmax]
                u = umax * random_state.uniform(size=k)
                v = random_state.uniform(size=k)
                v = vmin + (vmax - vmin) * v
                rvs = v / u + c
                # rewrite acceptance condition u**2 <= pdf(rvs) by taking logs
                accept = (2*np.log(u) <= logqpdf(rvs))
                num_accept = np.sum(accept)
                if num_accept > 0:
                    x[simulated:(simulated + num_accept)] = rvs[accept]
                    simulated += num_accept
                if (simulated == 0) and (i*N >= 50000):
                    msg = ("Not a single random variate could be generated "
                           "in {} attempts. Sampling does not appear to "
                           "work for the provided parameters.".format(i*N))
                    raise RuntimeError(msg)
                i += 1
        else:
            # use new algorithm in [2]: rejection from a three-part hat
            # covering (0, x0), (x0, 2/b) and (xs, infinity)
            x0 = b / (1 - p)
            xs = np.max((x0, 2 / b))
            k1 = np.exp(self._logquasipdf(m, p, b))
            A1 = k1 * x0
            if x0 < 2 / b:
                k2 = np.exp(-b)
                if p > 0:
                    A2 = k2 * ((2 / b)**p - x0**p) / p
                else:
                    A2 = k2 * np.log(2 / b**2)
            else:
                k2, A2 = 0, 0
            k3 = xs**(p - 1)
            A3 = 2 * k3 * np.exp(-xs * b / 2) / b
            A = A1 + A2 + A3
            # [2]: rejection constant is < 2.73; so expected runtime is finite
            while simulated < N:
                k = N - simulated
                h, rvs = np.zeros(k), np.zeros(k)
                # simulate uniform rvs on [x1, x2] and [0, y2]
                u = random_state.uniform(size=k)
                v = A * random_state.uniform(size=k)
                cond1 = v <= A1
                cond2 = np.logical_not(cond1) & (v <= A1 + A2)
                cond3 = np.logical_not(cond1 | cond2)
                # subdomain (0, x0)
                rvs[cond1] = x0 * v[cond1] / A1
                h[cond1] = k1
                # subdomain (x0, 2 / b)
                if p > 0:
                    rvs[cond2] = (x0**p + (v[cond2] - A1) * p / k2)**(1 / p)
                else:
                    rvs[cond2] = b * np.exp((v[cond2] - A1) * np.exp(b))
                h[cond2] = k2 * rvs[cond2]**(p - 1)
                # subdomain (xs, infinity)
                z = np.exp(-xs * b / 2) - b * (v[cond3] - A1 - A2) / (2 * k3)
                rvs[cond3] = -2 / b * np.log(z)
                h[cond3] = k3 * np.exp(-rvs[cond3] * b / 2)
                # apply rejection method
                accept = (np.log(u * h) <= self._logquasipdf(rvs, p, b))
                num_accept = sum(accept)
                if num_accept > 0:
                    x[simulated:(simulated + num_accept)] = rvs[accept]
                    simulated += num_accept
        rvs = np.reshape(x, size1d)
        if invert_res:
            rvs = 1 / rvs
        return rvs
    def _mode(self, p, b):
        # distinguish cases to avoid catastrophic cancellation (see [2])
        if p < 1:
            return b / (np.sqrt((p - 1)**2 + b**2) + 1 - p)
        else:
            return (np.sqrt((1 - p)**2 + b**2) - (1 - p)) / b
    def _munp(self, n, p, b):
        # raw moments are ratios K_{p+n}(b)/K_p(b); kve (exponentially
        # scaled) is used so the scaling factors cancel
        num = sc.kve(p + n, b)
        denom = sc.kve(p, b)
        inf_vals = np.isinf(num) | np.isinf(denom)
        if inf_vals.any():
            msg = ("Infinite values encountered in the moment calculation "
                   "involving scipy.special.kve. Values replaced by NaN to "
                   "avoid incorrect results.")
            warnings.warn(msg, RuntimeWarning)
            m = np.full_like(num, np.nan, dtype=np.double)
            m[~inf_vals] = num[~inf_vals] / denom[~inf_vals]
        else:
            m = num / denom
        return m
geninvgauss = geninvgauss_gen(a=0.0, name="geninvgauss")
class norminvgauss_gen(rv_continuous):
    r"""A Normal Inverse Gaussian continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `norminvgauss` is:
    .. math::
        f(x, a, b) = \frac{a \, K_1(a \sqrt{1 + x^2})}{\pi \sqrt{1 + x^2}} \,
                     \exp(\sqrt{a^2 - b^2} + b x)
    where :math:`x` is a real number, the parameter :math:`a` is the tail
    heaviness and :math:`b` is the asymmetry parameter satisfying
    :math:`a > 0` and :math:`|b| <= a`.
    :math:`K_1` is the modified Bessel function of second kind
    (`scipy.special.k1`).
    %(after_notes)s
    A normal inverse Gaussian random variable `Y` with parameters `a` and `b`
    can be expressed as a normal mean-variance mixture:
    `Y = b * V + sqrt(V) * X` where `X` is `norm(0,1)` and `V` is
    `invgauss(mu=1/sqrt(a**2 - b**2))`. This representation is used
    to generate random variates.
    Another common parametrization of the distribution (see Equation 2.1 in
    [2]_) is given by the following expression of the pdf:
    .. math::
        g(x, \alpha, \beta, \delta, \mu) =
        \frac{\alpha\delta K_1\left(\alpha\sqrt{\delta^2 + (x - \mu)^2}\right)}
        {\pi \sqrt{\delta^2 + (x - \mu)^2}} \,
        e^{\delta \sqrt{\alpha^2 - \beta^2} + \beta (x - \mu)}
    In SciPy, this corresponds to
    `a = alpha * delta, b = beta * delta, loc = mu, scale=delta`.
    References
    ----------
    .. [1] O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions on
           Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3),
           pp. 151-157, 1978.
    .. [2] O. Barndorff-Nielsen, "Normal Inverse Gaussian Distributions and
           Stochastic Volatility Modelling", Scandinavian Journal of
           Statistics, Vol. 24, pp. 1-13, 1997.
    %(example)s
    """
    _support_mask = rv_continuous._open_support_mask
    def _argcheck(self, a, b):
        # tail heaviness a must strictly dominate the asymmetry |b|
        return (a > 0) & (np.absolute(b) < a)
    def _pdf(self, x, a, b):
        gamma = np.sqrt(a**2 - b**2)
        fac1 = a / np.pi * np.exp(gamma)
        sq = np.hypot(1, x)  # reduce overflows
        # k1e(z) = exp(z)*K1(z); the exp factors recombine as
        # exp(gamma + b*x - a*sq), avoiding overflow of K1 for large a*sq
        return fac1 * sc.k1e(a * sq) * np.exp(b*x - a*sq) / sq
    def _rvs(self, a, b, size=None, random_state=None):
        # note: X = b * V + sqrt(V) * X is norminvgaus(a,b) if X is standard
        # normal and V is invgauss(mu=1/sqrt(a**2 - b**2))
        gamma = np.sqrt(a**2 - b**2)
        ig = invgauss.rvs(mu=1/gamma, size=size, random_state=random_state)
        return b * ig + np.sqrt(ig) * norm.rvs(size=size,
                                               random_state=random_state)
    def _stats(self, a, b):
        # closed-form moments in terms of gamma = sqrt(a**2 - b**2)
        gamma = np.sqrt(a**2 - b**2)
        mean = b / gamma
        variance = a**2 / gamma**3
        skewness = 3.0 * b / (a * np.sqrt(gamma))
        kurtosis = 3.0 * (1 + 4 * b**2 / a**2) / gamma
        return mean, variance, skewness, kurtosis
norminvgauss = norminvgauss_gen(name="norminvgauss")
class invweibull_gen(rv_continuous):
    u"""An inverted Weibull continuous random variable.
    This distribution is also known as the Fréchet distribution or the
    type II extreme value distribution.
    %(before_notes)s
    Notes
    -----
    The probability density function for `invweibull` is:
    .. math::
        f(x, c) = c x^{-c-1} \\exp(-x^{-c})
    for :math:`x > 0`, :math:`c > 0`.
    `invweibull` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    References
    ----------
    F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
    Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
    %(example)s
    """
    _support_mask = rv_continuous._open_support_mask
    def _pdf(self, x, c):
        # pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
        poly = np.power(x, -c - 1.0)
        expo = np.exp(-np.power(x, -c))
        return c * poly * expo
    def _cdf(self, x, c):
        # F(x) = exp(-x**(-c))
        return np.exp(-np.power(x, -c))
    def _ppf(self, q, c):
        # exact inverse of the cdf
        return np.power(-np.log(q), -1.0/c)
    def _munp(self, n, c):
        # n-th raw moment: Gamma(1 - n/c)
        return sc.gamma(1 - n / c)
    def _entropy(self, c):
        return 1+_EULER + _EULER / c - np.log(c)
    def _fitstart(self, data, args=None):
        # invweibull requires c > 1 for the first moment to exist, so use 2.0
        default = (2.0,)
        args = default if args is None else args
        return super(invweibull_gen, self)._fitstart(data, args=args)
invweibull = invweibull_gen(a=0, name='invweibull')
class johnsonsb_gen(rv_continuous):
    r"""A Johnson SB continuous random variable.
    %(before_notes)s
    See Also
    --------
    johnsonsu
    Notes
    -----
    The probability density function for `johnsonsb` is:
    .. math::
        f(x, a, b) = \frac{b}{x(1-x)} \phi(a + b \log \frac{x}{1-x} )
    where :math:`x`, :math:`a`, and :math:`b` are real scalars; :math:`b > 0`
    and :math:`x \in [0,1]`. :math:`\phi` is the pdf of the normal
    distribution.
    `johnsonsb` takes :math:`a` and :math:`b` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    _support_mask = rv_continuous._open_support_mask
    def _argcheck(self, a, b):
        # b must be positive; `a == a` only rejects NaN
        return (b > 0) & (a == a)
    def _pdf(self, x, a, b):
        # pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
        logit = np.log(x/(1.0-x))
        bell = _norm_pdf(a + b*logit)
        return b*1.0/(x*(1-x))*bell
    def _cdf(self, x, a, b):
        # push the logit transform through the normal cdf
        return _norm_cdf(a + b*np.log(x/(1.0-x)))
    def _ppf(self, q, a, b):
        # invert: logistic of the rescaled normal quantile
        z = _norm_ppf(q)
        return 1.0 / (1 + np.exp(-1.0 / b * (z - a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
class johnsonsu_gen(rv_continuous):
    r"""A Johnson SU continuous random variable.
    %(before_notes)s
    See Also
    --------
    johnsonsb
    Notes
    -----
    The probability density function for `johnsonsu` is:
    .. math::
        f(x, a, b) = \frac{b}{\sqrt{x^2 + 1}}
                     \phi(a + b \log(x + \sqrt{x^2 + 1}))
    where :math:`x`, :math:`a`, and :math:`b` are real scalars; :math:`b > 0`.
    :math:`\phi` is the pdf of the normal distribution.
    `johnsonsu` takes :math:`a` and :math:`b` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, a, b):
        # b must be positive; `a == a` only rejects NaN
        return (b > 0) & (a == a)
    def _pdf(self, x, a, b):
        # pdf(x, a, b) = b / sqrt(x**2 + 1) *
        #               phi(a + b * log(x + sqrt(x**2 + 1)))
        x2 = x*x
        arcsinh_x = np.log(x + np.sqrt(x2+1))
        bell = _norm_pdf(a + b * arcsinh_x)
        return b*1.0/np.sqrt(x2+1.0)*bell
    def _cdf(self, x, a, b):
        # push the arcsinh transform through the normal cdf
        return _norm_cdf(a + b * np.log(x + np.sqrt(x*x + 1)))
    def _ppf(self, q, a, b):
        # exact inverse: x = sinh((Phi^{-1}(q) - a) / b)
        return np.sinh((_norm_ppf(q) - a) / b)
johnsonsu = johnsonsu_gen(name='johnsonsu')
class laplace_gen(rv_continuous):
    r"""A Laplace continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `laplace` is
    .. math::
        f(x) = \frac{1}{2} \exp(-|x|)
    for a real number :math:`x`.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, size=None, random_state=None):
        return random_state.laplace(0, 1, size=size)
    def _pdf(self, x):
        # laplace.pdf(x) = 1/2 * exp(-abs(x))
        return 0.5*np.exp(-abs(x))
    def _cdf(self, x):
        # piecewise closed form; exp can overflow harmlessly in the branch
        # that np.where discards, hence the errstate guard
        with np.errstate(over='ignore'):
            return np.where(x > 0, 1.0 - 0.5*np.exp(-x), 0.5*np.exp(x))
    def _sf(self, x):
        # By symmetry...
        return self._cdf(-x)
    def _ppf(self, q):
        return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q))
    def _isf(self, q):
        # By symmetry...
        return -self._ppf(q)
    def _stats(self):
        return 0, 2, 0, 3
    def _entropy(self):
        return np.log(2)+1
    @_call_super_mom
    @replace_notes_in_docstring(rv_continuous, notes="""\
        This function uses explicit formulas for the maximum likelihood
        estimation of the Laplace distribution parameters, so the keyword
        arguments `loc`, `scale`, and `optimizer` are ignored.\n\n""")
    def fit(self, data, *args, **kwds):
        data, floc, fscale = _check_fit_input_parameters(self, data,
                                                         args, kwds)
        # Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
        # and Peacock (2000), Page 124
        # MLE: location = sample median, scale = mean absolute deviation
        # about that location
        if floc is None:
            floc = np.median(data)
        if fscale is None:
            fscale = (np.sum(np.abs(data - floc))) / len(data)
        return floc, fscale
laplace = laplace_gen(name='laplace')
class laplace_asymmetric_gen(rv_continuous):
    r"""An asymmetric Laplace continuous random variable.
    %(before_notes)s
    See Also
    --------
    laplace : Laplace distribution
    Notes
    -----
    The probability density function for `laplace_asymmetric` is
    .. math::
       f(x, \kappa) &= \frac{1}{\kappa+\kappa^{-1}}\exp(-x\kappa),\quad x\ge0\\
                    &= \frac{1}{\kappa+\kappa^{-1}}\exp(x/\kappa),\quad x<0\\
    for :math:`-\infty < x < \infty`, :math:`\kappa > 0`.
    `laplace_asymmetric` takes ``kappa`` as a shape parameter for
    :math:`\kappa`. For :math:`\kappa = 1`, it is identical to a
    Laplace distribution.
    %(after_notes)s
    References
    ----------
    .. [1] "Asymmetric Laplace distribution", Wikipedia
            https://en.wikipedia.org/wiki/Asymmetric_Laplace_distribution
    .. [2] Kozubowski TJ and Podgórski K. A Multivariate and
           Asymmetric Generalization of Laplace Distribution,
           Computational Statistics 15, 531--540 (2000).
           :doi:`10.1007/PL00022717`
    %(example)s
    """
    def _pdf(self, x, kappa):
        return np.exp(self._logpdf(x, kappa))
    def _logpdf(self, x, kappa):
        # piecewise-linear log-density: slope -kappa to the right of 0,
        # 1/kappa to the left, minus the log normalizing constant
        inv = 1/kappa
        slope = np.where(x >= 0, -kappa, inv)
        return x * slope - np.log(kappa + inv)
    def _cdf(self, x, kappa):
        inv = 1/kappa
        denom = kappa + inv
        right = 1 - np.exp(-x*kappa)*(inv/denom)
        left = np.exp(x*inv)*(kappa/denom)
        return np.where(x >= 0, right, left)
    def _sf(self, x, kappa):
        inv = 1/kappa
        denom = kappa + inv
        right = np.exp(-x*kappa)*(inv/denom)
        left = 1 - np.exp(x*inv)*(kappa/denom)
        return np.where(x >= 0, right, left)
    def _ppf(self, q, kappa):
        # the split point kappa/(kappa + 1/kappa) is F(0)
        inv = 1/kappa
        denom = kappa + inv
        upper = -np.log((1 - q)*denom*kappa)*inv
        lower = np.log(q*denom/kappa)*kappa
        return np.where(q >= kappa/denom, upper, lower)
    def _isf(self, q, kappa):
        inv = 1/kappa
        denom = kappa + inv
        upper = -np.log(q*denom*kappa)*inv
        lower = np.log((1 - q)*denom/kappa)*kappa
        return np.where(q <= inv/denom, upper, lower)
    def _stats(self, kappa):
        # closed-form mean/variance/skewness/kurtosis
        inv = 1/kappa
        mn = inv - kappa
        var = inv*inv + kappa*kappa
        k4 = np.power(kappa, 4)
        g1 = 2.0*(1-np.power(kappa, 6))/np.power(1+k4, 1.5)
        g2 = 6.0*(1+np.power(kappa, 8))/np.power(1+k4, 2)
        return mn, var, g1, g2
    def _entropy(self, kappa):
        return 1 + np.log(kappa+1/kappa)
laplace_asymmetric = laplace_asymmetric_gen(name='laplace_asymmetric')
def _check_fit_input_parameters(dist, data, args, kwds):
    # Shared validation helper for analytical `fit` overrides.
    # Normalizes the several user spellings for fixing a shape parameter
    # ('f0', 'f<name>', 'fix_<name>') into canonical 'f0', 'f1', ... keys and
    # returns `(data, *fixed_shapes, floc, fscale)` where each fixed entry is
    # the user-supplied value or None when left free.
    # Raises TypeError for unknown/extra arguments and RuntimeError when every
    # parameter is fixed or the data contain non-finite values.
    data = np.asarray(data)
    floc = kwds.get('floc', None)
    fscale = kwds.get('fscale', None)
    num_shapes = len(dist.shapes.split(",")) if dist.shapes else 0
    fshape_keys = []
    fshapes = []
    # user has many options for fixing the shape, so here we standardize it
    # into 'f' + the number of the shape.
    # Adapted from `_reduce_func` in `_distn_infrastructure.py`:
    if dist.shapes:
        shapes = dist.shapes.replace(',', ' ').split()
        for j, s in enumerate(shapes):
            key = 'f' + str(j)
            names = [key, 'f' + s, 'fix_' + s]
            val = _get_fixed_fit_value(kwds, names)
            fshape_keys.append(key)
            fshapes.append(val)
            if val is not None:
                # Re-record under the canonical key so a fallback to
                # `super().fit` sees one consistent spelling.
                kwds[key] = val
    # determine if there are any unknown arguments in kwds
    known_keys = {'loc', 'scale', 'optimizer', 'method',
                  'floc', 'fscale', *fshape_keys}
    unknown_keys = set(kwds).difference(known_keys)
    if unknown_keys:
        raise TypeError(f"Unknown keyword arguments: {unknown_keys}.")
    if len(args) > num_shapes:
        raise TypeError("Too many positional arguments.")
    if None not in {floc, fscale, *fshapes}:
        # This check is for consistency with `rv_continuous.fit`.
        # Without this check, this function would just return the
        # parameters that were given.
        raise RuntimeError("All parameters fixed. There is nothing to "
                           "optimize.")
    if not np.isfinite(data).all():
        raise RuntimeError("The data contains non-finite values.")
    return (data, *fshapes, floc, fscale)
class levy_gen(rv_continuous):
    r"""A Levy continuous random variable.

    %(before_notes)s

    See Also
    --------
    levy_stable, levy_l

    Notes
    -----
    The probability density function for `levy` is:

    .. math::

        f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp\left(-\frac{1}{2x}\right)

    for :math:`x >= 0`.

    This is the same as the Levy-stable distribution with :math:`a=1/2` and
    :math:`b=1`.

    %(after_notes)s

    %(example)s
    """
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x):
        # levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
        return 1 / np.sqrt(2*np.pi*x) / x * np.exp(-1/(2*x))

    def _cdf(self, x):
        # Equivalent to 2*norm.sf(np.sqrt(1/x))
        return sc.erfc(np.sqrt(0.5 / x))

    def _sf(self, x):
        return sc.erf(np.sqrt(0.5 / x))

    def _ppf(self, q):
        # Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
        val = -sc.ndtri(q/2)
        return 1.0 / (val * val)

    def _isf(self, p):
        # Analytic inverse of _sf:  p = erf(sqrt(1/(2x)))  =>
        # x = 1/(2*erfinv(p)**2).  Added for consistency with `levy_l`,
        # which defines _isf; without it the slow generic numerical
        # inversion of the survival function is used.
        return 1/(2*sc.erfinv(p)**2)

    def _stats(self):
        # Mean and variance diverge; standardized skew/kurtosis are undefined.
        return np.inf, np.inf, np.nan, np.nan


levy = levy_gen(a=0.0, name="levy")
class levy_l_gen(rv_continuous):
    r"""A left-skewed Levy continuous random variable.

    %(before_notes)s

    See Also
    --------
    levy, levy_stable

    Notes
    -----
    The probability density function for `levy_l` is:

    .. math::
        f(x) = \frac{1}{|x| \sqrt{2\pi |x|}} \exp{ \left(-\frac{1}{2|x|} \right)}

    for :math:`x <= 0`.  It is the mirror image of `levy`, i.e. the
    Levy-stable distribution with :math:`a=1/2` and :math:`b=-1`.

    %(after_notes)s

    %(example)s
    """
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x):
        # Density depends on |x| only: mirror image of `levy`.
        magnitude = abs(x)
        return np.exp(-1/(2*magnitude)) / (magnitude*np.sqrt(2*np.pi*magnitude))

    def _cdf(self, x):
        magnitude = abs(x)
        return 2 * _norm_cdf(1 / np.sqrt(magnitude)) - 1

    def _sf(self, x):
        magnitude = abs(x)
        return 2 * _norm_sf(1 / np.sqrt(magnitude))

    def _ppf(self, q):
        root = _norm_ppf((q + 1.0) / 2)
        return -1.0 / (root * root)

    def _isf(self, p):
        return -1/_norm_isf(p/2)**2

    def _stats(self):
        # First two moments diverge; higher standardized moments undefined.
        return np.inf, np.inf, np.nan, np.nan


levy_l = levy_l_gen(b=0.0, name="levy_l")
class logistic_gen(rv_continuous):
    r"""A logistic (or Sech-squared) continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `logistic` is:
    .. math::
        f(x) = \frac{\exp(-x)}
                    {(1+\exp(-x))^2}
    `logistic` is a special case of `genlogistic` with ``c=1``.
    Remark that the survival function (``logistic.sf``) is equal to the
    Fermi-Dirac distribution describing fermionic statistics.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, size=None, random_state=None):
        return random_state.logistic(size=size)
    def _pdf(self, x):
        # logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
        return np.exp(self._logpdf(x))
    def _logpdf(self, x):
        # Written in terms of y = -|x| so np.exp never overflows; the density
        # is symmetric, so this equals log f(x) for both signs of x.
        y = -np.abs(x)
        return y - 2. * sc.log1p(np.exp(y))
    def _cdf(self, x):
        # CDF is exactly the sigmoid function.
        return sc.expit(x)
    def _logcdf(self, x):
        return sc.log_expit(x)
    def _ppf(self, q):
        # Quantile function is the logit, the inverse of expit.
        return sc.logit(q)
    def _sf(self, x):
        # Symmetry: sf(x) = cdf(-x).
        return sc.expit(-x)
    def _logsf(self, x):
        return sc.log_expit(-x)
    def _isf(self, q):
        return -sc.logit(q)
    def _stats(self):
        return 0, np.pi*np.pi/3.0, 0, 6.0/5.0
    def _entropy(self):
        # https://en.wikipedia.org/wiki/Logistic_distribution
        return 2.0
    @_call_super_mom
    def fit(self, data, *args, **kwds):
        # MLE fit that solves the two score equations directly instead of
        # running the generic numerical optimizer.
        data, floc, fscale = _check_fit_input_parameters(self, data,
                                                         args, kwds)
        # if user has provided `floc` or `fscale`, fall back on super fit
        # method. This scenario is not suitable for solving a system of
        # equations
        if floc is not None or fscale is not None:
            return super().fit(data, *args, **kwds)
        # rv_continuous provided guesses
        loc, scale = self._fitstart(data)
        # account for user provided guesses
        loc = kwds.pop('loc', loc)
        scale = kwds.pop('scale', scale)
        # the maximum likelihood estimators `a` and `b` of the location and
        # scale parameters are roots of the two equations described in `func`.
        # Source: Statistical Distributions, 3rd Edition. Evans, Hastings, and
        # Peacock (2000), Page 130
        def func(params, data):
            # Score equations for (location a, scale b); both are zero at
            # the MLE.
            a, b = params
            n = len(data)
            c = (data - a) / b
            x1 = np.sum(sc.expit(c)) - n/2
            x2 = np.sum(c*np.tanh(c/2)) - n
            return x1, x2
        return tuple(optimize.root(func, (loc, scale), args=(data,)).x)
logistic = logistic_gen(name='logistic')
class loggamma_gen(rv_continuous):
    r"""A log gamma continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `loggamma` is:

    .. math::

        f(x, c) = \frac{\exp(c x - \exp(x))}
                       {\Gamma(c)}

    for all :math:`x, c > 0`. Here, :math:`\Gamma` is the
    gamma function (`scipy.special.gamma`).

    `loggamma` takes ``c`` as a shape parameter for :math:`c`.

    %(after_notes)s

    %(example)s
    """

    def _rvs(self, c, size=None, random_state=None):
        # The log of a standard gamma(c) draw is loggamma(c) distributed.
        return np.log(random_state.gamma(c, size=size))

    def _pdf(self, x, c):
        # Exponentiate the log-density.
        return np.exp(c*x-np.exp(x)-sc.gammaln(c))

    def _logpdf(self, x, c):
        # log f(x, c) = c*x - exp(x) - log Gamma(c)
        return c*x - np.exp(x) - sc.gammaln(c)

    def _cdf(self, x, c):
        # P(X <= x) = P(Gamma(c) <= e^x): regularized lower incomplete gamma.
        return sc.gammainc(c, np.exp(x))

    def _ppf(self, q, c):
        return np.log(sc.gammaincinv(c, q))

    def _sf(self, x, c):
        return sc.gammaincc(c, np.exp(x))

    def _isf(self, q, c):
        return np.log(sc.gammainccinv(c, q))

    def _stats(self, c):
        # Moments expressed through polygamma functions; see, for example,
        # "A Statistical Study of Log-Gamma Distribution", by Ping Shing Chan
        # (thesis, McMaster University, 1993).
        mean = sc.digamma(c)
        variance = sc.polygamma(1, c)
        skew = sc.polygamma(2, c) / np.power(variance, 1.5)
        kurt = sc.polygamma(3, c) / (variance*variance)
        return mean, variance, skew, kurt


loggamma = loggamma_gen(name='loggamma')
class loglaplace_gen(rv_continuous):
r"""A log-Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loglaplace` is:
.. math::
f(x, c) = \begin{cases}\frac{c}{2} x^{ c-1} &\text{for } 0 < x < 1\\
\frac{c}{2} x^{-c-1} &\text{for } x \ge 1
\end{cases}
for :math:`c > 0`.
`loglaplace` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
%(example)s
"""
def _pdf(self, x, c):
# loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
# = c / 2 * x**(-c-1), for x >= 1
cd2 = c/2.0
c = np.where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return np.where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return np.where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _munp(self, n, c):
return c**2 / (c**2 - n**2)
def _entropy(self, c):
return np.log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
def _lognorm_logpdf(x, s):
    # Log-density of the lognormal distribution with shape s; the density
    # vanishes at x == 0, so the log-density is -inf there (and _lazywhere
    # keeps the log out of that branch entirely).
    def interior(x, s):
        return -np.log(x)**2 / (2*s**2) - np.log(s*x*np.sqrt(2*np.pi))
    return _lazywhere(x != 0, (x, s), interior, -np.inf)
class lognorm_gen(rv_continuous):
    r"""A lognormal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `lognorm` is:
    .. math::
        f(x, s) = \frac{1}{s x \sqrt{2\pi}}
                  \exp\left(-\frac{\log^2(x)}{2s^2}\right)
    for :math:`x > 0`, :math:`s > 0`.
    `lognorm` takes ``s`` as a shape parameter for :math:`s`.
    %(after_notes)s
    A common parametrization for a lognormal random variable ``Y`` is in
    terms of the mean, ``mu``, and standard deviation, ``sigma``, of the
    unique normally distributed random variable ``X`` such that exp(X) = Y.
    This parametrization corresponds to setting ``s = sigma`` and ``scale =
    exp(mu)``.
    %(example)s
    """
    _support_mask = rv_continuous._open_support_mask
    def _rvs(self, s, size=None, random_state=None):
        # exp of a scaled standard normal draw.
        return np.exp(s * random_state.standard_normal(size))
    def _pdf(self, x, s):
        # lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
        return np.exp(self._logpdf(x, s))
    def _logpdf(self, x, s):
        return _lognorm_logpdf(x, s)
    def _cdf(self, x, s):
        # If X ~ lognorm(s) then log X ~ N(0, s): delegate to the normal CDF.
        return _norm_cdf(np.log(x) / s)
    def _logcdf(self, x, s):
        return _norm_logcdf(np.log(x) / s)
    def _ppf(self, q, s):
        return np.exp(s * _norm_ppf(q))
    def _sf(self, x, s):
        return _norm_sf(np.log(x) / s)
    def _logsf(self, x, s):
        return _norm_logsf(np.log(x) / s)
    def _stats(self, s):
        # Standard lognormal moment formulas in terms of p = exp(s**2).
        p = np.exp(s*s)
        mu = np.sqrt(p)
        mu2 = p*(p-1)
        g1 = np.sqrt((p-1))*(2+p)
        g2 = np.polyval([1, 2, 3, 0, -6.0], p)
        return mu, mu2, g1, g2
    def _entropy(self, s):
        return 0.5 * (1 + np.log(2*np.pi) + 2 * np.log(s))
    @_call_super_mom
    @extend_notes_in_docstring(rv_continuous, notes="""\
        When `method='MLE'` and
        the location parameter is fixed by using the `floc` argument,
        this function uses explicit formulas for the maximum likelihood
        estimation of the log-normal shape and scale parameters, so the
        `optimizer`, `loc` and `scale` keyword arguments are ignored.
        \n\n""")
    def fit(self, data, *args, **kwds):
        floc = kwds.get('floc', None)
        if floc is None:
            # loc is free: the explicit MLE formulas below don't apply, so
            # fall back on the default fit method.
            return super().fit(data, *args, **kwds)
        # Accept any of the user spellings for fixing the shape parameter.
        f0 = (kwds.get('f0', None) or kwds.get('fs', None) or
              kwds.get('fix_s', None))
        fscale = kwds.get('fscale', None)
        if len(args) > 1:
            raise TypeError("Too many input arguments.")
        # Consume every recognized keyword so anything left over is an error.
        for name in ['f0', 'fs', 'fix_s', 'floc', 'fscale', 'loc', 'scale',
                     'optimizer', 'method']:
            kwds.pop(name, None)
        if kwds:
            raise TypeError("Unknown arguments: %s." % kwds)
        # Special case: loc is fixed.  Use the maximum likelihood formulas
        # instead of the numerical solver.
        if f0 is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")
        data = np.asarray(data)
        if not np.isfinite(data).all():
            raise RuntimeError("The data contains non-finite values.")
        floc = float(floc)
        if floc != 0:
            # Shifting the data by floc. Don't do the subtraction in-place,
            # because `data` might be a view of the input array.
            data = data - floc
        if np.any(data <= 0):
            # Support of lognorm(loc=floc) is (floc, inf).
            raise FitDataError("lognorm", lower=floc, upper=np.inf)
        lndata = np.log(data)
        # Three cases to handle:
        # * shape and scale both free
        # * shape fixed, scale free
        # * shape free, scale fixed
        if fscale is None:
            # scale is free: MLE of scale is exp(mean of log-data).
            scale = np.exp(lndata.mean())
            if f0 is None:
                # shape is free: MLE of shape is std of log-data.
                shape = lndata.std()
            else:
                # shape is fixed.
                shape = float(f0)
        else:
            # scale is fixed, shape is free
            scale = float(fscale)
            shape = np.sqrt(((lndata - np.log(scale))**2).mean())
        return shape, floc, scale
lognorm = lognorm_gen(a=0.0, name='lognorm')
class gilbrat_gen(rv_continuous):
    r"""A Gilbrat continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `gilbrat` is:

    .. math::
        f(x) = \frac{1}{x \sqrt{2\pi}} \exp(-\frac{1}{2} (\log(x))^2)

    `gilbrat` is a special case of `lognorm` with ``s=1``.

    %(after_notes)s

    %(example)s
    """
    _support_mask = rv_continuous._open_support_mask

    def _rvs(self, size=None, random_state=None):
        # exp of a standard normal draw.
        return np.exp(random_state.standard_normal(size))

    def _pdf(self, x):
        # Density via the shared lognormal log-density at s = 1.
        return np.exp(self._logpdf(x))

    def _logpdf(self, x):
        return _lognorm_logpdf(x, 1.0)

    def _cdf(self, x):
        return _norm_cdf(np.log(x))

    def _ppf(self, q):
        return np.exp(_norm_ppf(q))

    def _stats(self):
        # Lognormal moment formulas specialized to s = 1, i.e. p = e.
        p = np.e
        mean = np.sqrt(p)
        variance = p * (p - 1)
        skew = (2 + p) * np.sqrt(p - 1)
        kurt = np.polyval([1, 2, 3, 0, -6.0], p)
        return mean, variance, skew, kurt

    def _entropy(self):
        return 0.5 + 0.5 * np.log(2 * np.pi)


gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
class maxwell_gen(rv_continuous):
    r"""A Maxwell continuous random variable.

    %(before_notes)s

    Notes
    -----
    A special case of a `chi` distribution, with ``df=3``, ``loc=0.0``,
    and given ``scale = a``, where ``a`` is the parameter used in the
    Mathworld description [1]_.

    The probability density function for `maxwell` is:

    .. math::
        f(x) = \sqrt{2/\pi}x^2 \exp(-x^2/2)

    for :math:`x >= 0`.

    %(after_notes)s

    References
    ----------
    .. [1] http://mathworld.wolfram.com/MaxwellDistribution.html

    %(example)s
    """

    def _rvs(self, size=None, random_state=None):
        # Maxwell variates are chi variates with 3 degrees of freedom.
        return chi.rvs(3.0, size=size, random_state=random_state)

    def _pdf(self, x):
        # maxwell.pdf(x) = sqrt(2/pi)x**2 * exp(-x**2/2)
        return _SQRT_2_OVER_PI*x*x*np.exp(-x*x/2.0)

    def _logpdf(self, x):
        # log(x) -> -inf is the correct limit at x = 0; silence the warning.
        with np.errstate(divide='ignore'):
            return _LOG_SQRT_2_OVER_PI + 2*np.log(x) - 0.5*x*x

    def _cdf(self, x):
        # Regularized lower incomplete gamma with parameter 3/2.
        return sc.gammainc(1.5, x*x/2.0)

    def _ppf(self, q):
        return np.sqrt(2*sc.gammaincinv(1.5, q))

    def _stats(self):
        d = 3*np.pi - 8
        mean = 2*np.sqrt(2.0/np.pi)
        variance = 3 - 8/np.pi
        skew = np.sqrt(2)*(32 - 10*np.pi)/d**1.5
        kurt = (-12*np.pi*np.pi + 160*np.pi - 384) / d**2.0
        return mean, variance, skew, kurt

    def _entropy(self):
        return _EULER + 0.5*np.log(2*np.pi) - 0.5


maxwell = maxwell_gen(a=0.0, name='maxwell')
class mielke_gen(rv_continuous):
    r"""A Mielke Beta-Kappa / Dagum continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `mielke` is:

    .. math::
        f(x, k, s) = \frac{k x^{k-1}}{(1+x^s)^{1+k/s}}

    for :math:`x > 0` and :math:`k, s > 0`. The distribution is sometimes
    called Dagum distribution ([2]_). It was already defined in [3]_, called
    a Burr Type III distribution (`burr` with parameters ``c=s`` and
    ``d=k/s``).

    `mielke` takes ``k`` and ``s`` as shape parameters.

    %(after_notes)s

    References
    ----------
    .. [1] Mielke, P.W., 1973 "Another Family of Distributions for Describing
           and Analyzing Precipitation Data." J. Appl. Meteor., 12, 275-280
    .. [2] Dagum, C., 1977 "A new model for personal income distribution."
           Economie Appliquee, 33, 327-367.
    .. [3] Burr, I. W. "Cumulative frequency functions", Annals of
           Mathematical Statistics, 13(2), pp 215-232 (1942).

    %(example)s
    """

    def _argcheck(self, k, s):
        # Both shape parameters must be strictly positive.
        return (k > 0) & (s > 0)

    def _pdf(self, x, k, s):
        return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)

    def _logpdf(self, x, k, s):
        # log(x) -> -inf is the correct limit at x = 0; silence the warning.
        with np.errstate(divide='ignore'):
            return np.log(k) + np.log(x)*(k - 1) - np.log1p(x**s)*(1 + k/s)

    def _cdf(self, x, k, s):
        return x**k / (1.0+x**s)**(k*1.0/s)

    def _ppf(self, q, k, s):
        # Invert the CDF in closed form.
        t = pow(q, s*1.0/k)
        return pow(t/(1.0-t), 1.0/s)

    def _munp(self, n, k, s):
        def nth_moment(n, k, s):
            # n-th moment is defined for -k < n < s; infinite otherwise.
            return sc.gamma((k+n)/s)*sc.gamma(1-n/s)/sc.gamma(k/s)
        return _lazywhere(n < s, (n, k, s), nth_moment, np.inf)


mielke = mielke_gen(a=0.0, name='mielke')
class kappa4_gen(rv_continuous):
    r"""Kappa 4 parameter distribution.
    %(before_notes)s
    Notes
    -----
    The probability density function for kappa4 is:
    .. math::
        f(x, h, k) = (1 - k x)^{1/k - 1} (1 - h (1 - k x)^{1/k})^{1/h-1}
    if :math:`h` and :math:`k` are not equal to 0.
    If :math:`h` or :math:`k` are zero then the pdf can be simplified:
    h = 0 and k != 0::
        kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
                              exp(-(1.0 - k*x)**(1.0/k))
    h != 0 and k = 0::
        kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0)
    h = 0 and k = 0::
        kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x))
    kappa4 takes :math:`h` and :math:`k` as shape parameters.
    The kappa4 distribution returns other distributions when certain
    :math:`h` and :math:`k` values are used.
    +------+-------------+----------------+------------------+
    | h    | k=0.0       | k=1.0          | -inf<=k<=inf     |
    +======+=============+================+==================+
    | -1.0 | Logistic    |                | Generalized      |
    |      |             |                | Logistic(1)      |
    |      |             |                |                  |
    |      | logistic(x) |                |                  |
    +------+-------------+----------------+------------------+
    | 0.0  | Gumbel      | Reverse        | Generalized      |
    |      |             | Exponential(2) | Extreme Value    |
    |      |             |                |                  |
    |      | gumbel_r(x) |                | genextreme(x, k) |
    +------+-------------+----------------+------------------+
    | 1.0  | Exponential | Uniform        | Generalized      |
    |      |             |                | Pareto           |
    |      |             |                |                  |
    |      | expon(x)    | uniform(x)     | genpareto(x, -k) |
    +------+-------------+----------------+------------------+
    (1) There are at least five generalized logistic distributions.
        Four are described here:
        https://en.wikipedia.org/wiki/Generalized_logistic_distribution
        The "fifth" one is the one kappa4 should match which currently
        isn't implemented in scipy:
        https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution
        https://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html
    (2) This distribution is currently not in scipy.
    References
    ----------
    J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect
    to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate
    Faculty of the Louisiana State University and Agricultural and Mechanical
    College, (August, 2004),
    https://digitalcommons.lsu.edu/gradschool_dissertations/3672
    J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res.
    Develop. 38 (3), 25 1-258 (1994).
    B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao
    Site in the Chi River Basin, Thailand", Journal of Water Resource and
    Protection, vol. 4, 866-869, (2012).
    :doi:`10.4236/jwarp.2012.410101`
    C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A
    Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March
    2000).
    http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, h, k):
        # Both h and k may be any real number, so always True, broadcast to
        # the common shape of (h, k).
        shape = np.broadcast_arrays(h, k)[0].shape
        return np.full(shape, fill_value=True)
    def _get_support(self, h, k):
        # The support endpoints depend on the signs of h and k; the six
        # condition/branch pairs follow Hosking (1994).
        condlist = [np.logical_and(h > 0, k > 0),
                    np.logical_and(h > 0, k == 0),
                    np.logical_and(h > 0, k < 0),
                    np.logical_and(h <= 0, k > 0),
                    np.logical_and(h <= 0, k == 0),
                    np.logical_and(h <= 0, k < 0)]
        def f0(h, k):
            # Finite lower endpoint (1 - h**(-k))/k.
            return (1.0 - np.float_power(h, -k))/k
        def f1(h, k):
            return np.log(h)
        def f3(h, k):
            # Unbounded below.
            a = np.empty(np.shape(h))
            a[:] = -np.inf
            return a
        def f5(h, k):
            return 1.0/k
        _a = _lazyselect(condlist,
                         [f0, f1, f0, f3, f3, f5],
                         [h, k],
                         default=np.nan)
        def f0(h, k):
            # Finite upper endpoint 1/k.
            return 1.0/k
        def f1(h, k):
            # Unbounded above.
            a = np.empty(np.shape(h))
            a[:] = np.inf
            return a
        _b = _lazyselect(condlist,
                         [f0, f1, f1, f0, f1, f1],
                         [h, k],
                         default=np.nan)
        return _a, _b
    def _pdf(self, x, h, k):
        # kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
        #                       (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1)
        return np.exp(self._logpdf(x, h, k))
    def _logpdf(self, x, h, k):
        # Four analytic regimes depending on whether h and/or k vanish.
        condlist = [np.logical_and(h != 0, k != 0),
                    np.logical_and(h == 0, k != 0),
                    np.logical_and(h != 0, k == 0),
                    np.logical_and(h == 0, k == 0)]
        def f0(x, h, k):
            '''pdf = (1.0 - k*x)**(1.0/k - 1.0)*(
                      1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0)
               logpdf = ...
            '''
            return (sc.xlog1py(1.0/k - 1.0, -k*x) +
                    sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k)))
        def f1(x, h, k):
            '''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-(
                      1.0 - k*x)**(1.0/k))
               logpdf = ...
            '''
            return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k)
        def f2(x, h, k):
            '''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0)
               logpdf = ...
            '''
            return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x))
        def f3(x, h, k):
            '''pdf = np.exp(-x-np.exp(-x))
               logpdf = ...
            '''
            return -x - np.exp(-x)
        return _lazyselect(condlist,
                           [f0, f1, f2, f3],
                           [x, h, k],
                           default=np.nan)
    def _cdf(self, x, h, k):
        return np.exp(self._logcdf(x, h, k))
    def _logcdf(self, x, h, k):
        # Same four regimes as _logpdf.
        condlist = [np.logical_and(h != 0, k != 0),
                    np.logical_and(h == 0, k != 0),
                    np.logical_and(h != 0, k == 0),
                    np.logical_and(h == 0, k == 0)]
        def f0(x, h, k):
            '''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h)
               logcdf = ...
            '''
            return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k))
        def f1(x, h, k):
            '''cdf = np.exp(-(1.0 - k*x)**(1.0/k))
               logcdf = ...
            '''
            return -(1.0 - k*x)**(1.0/k)
        def f2(x, h, k):
            '''cdf = (1.0 - h*np.exp(-x))**(1.0/h)
               logcdf = ...
            '''
            return (1.0/h)*sc.log1p(-h*np.exp(-x))
        def f3(x, h, k):
            '''cdf = np.exp(-np.exp(-x))
               logcdf = ...
            '''
            return -np.exp(-x)
        return _lazyselect(condlist,
                           [f0, f1, f2, f3],
                           [x, h, k],
                           default=np.nan)
    def _ppf(self, q, h, k):
        # Closed-form quantile function in each of the four regimes.
        condlist = [np.logical_and(h != 0, k != 0),
                    np.logical_and(h == 0, k != 0),
                    np.logical_and(h != 0, k == 0),
                    np.logical_and(h == 0, k == 0)]
        def f0(q, h, k):
            return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k)
        def f1(q, h, k):
            return 1.0/k*(1.0 - (-np.log(q))**k)
        def f2(q, h, k):
            '''ppf = -np.log((1.0 - (q**h))/h)
            '''
            return -sc.log1p(-(q**h)) + np.log(h)
        def f3(q, h, k):
            return -np.log(-np.log(q))
        return _lazyselect(condlist,
                           [f0, f1, f2, f3],
                           [q, h, k],
                           default=np.nan)
    def _get_stats_info(self, h, k):
        # Largest moment order that is finite, as an integer; 5 means all of
        # the first four moments exist.
        condlist = [
            np.logical_and(h < 0, k >= 0),
            k < 0,
        ]
        def f0(h, k):
            return (-1.0/h*k).astype(int)
        def f1(h, k):
            return (-1.0/k).astype(int)
        return _lazyselect(condlist, [f0, f1], [h, k], default=5)
    def _stats(self, h, k):
        # Return None (compute numerically) for finite moments, NaN for
        # divergent ones.
        maxr = self._get_stats_info(h, k)
        outputs = [None if np.any(r < maxr) else np.nan for r in range(1, 5)]
        return outputs[:]
    def _mom1_sc(self, m, *args):
        # Numerical moment, guarded against divergent orders.
        maxr = self._get_stats_info(args[0], args[1])
        if m >= maxr:
            return np.nan
        return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
kappa4 = kappa4_gen(name='kappa4')
class kappa3_gen(rv_continuous):
    r"""Kappa 3 parameter distribution.

    %(before_notes)s

    Notes
    -----
    The probability density function for `kappa3` is:

    .. math::
        f(x, a) = a (a + x^a)^{-(a + 1)/a}

    for :math:`x > 0` and :math:`a > 0`.

    `kappa3` takes ``a`` as a shape parameter for :math:`a`.

    References
    ----------
    P.W. Mielke and E.S. Johnson, "Three-Parameter Kappa Distribution Maximum
    Likelihood and Likelihood Ratio Tests", Methods in Weather Research,
    701-707, (September, 1973),
    :doi:`10.1175/1520-0493(1973)101<0701:TKDMLE>2.3.CO;2`

    B. Kumphon, "Maximum Entropy and Maximum Likelihood Estimation for the
    Three-Parameter Kappa Distribution", Open Journal of Statistics, vol 2,
    415-419 (2012), :doi:`10.4236/ojs.2012.24050`

    %(after_notes)s

    %(example)s
    """

    def _argcheck(self, a):
        return a > 0

    def _pdf(self, x, a):
        # kappa3.pdf(x, a) = a*(a + x**a)**(-(a + 1)/a), for x > 0
        return a*(a + x**a)**(-1.0/a-1)

    def _cdf(self, x, a):
        return x*(a + x**a)**(-1.0/a)

    def _ppf(self, q, a):
        # Closed-form inverse of the CDF.
        return (a/(q**-a - 1.0))**(1.0/a)

    def _stats(self, a):
        # Moment of order i exists only for i < a: None means "compute
        # numerically", NaN means "divergent".
        return [None if np.any(i < a) else np.nan for i in range(1, 5)]

    def _mom1_sc(self, m, *args):
        # Guard the numerical moment against divergent orders.
        if np.any(m >= args[0]):
            return np.nan
        return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]


kappa3 = kappa3_gen(a=0.0, name='kappa3')
class moyal_gen(rv_continuous):
    r"""A Moyal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `moyal` is:

    .. math::
        f(x) = \exp(-(x + \exp(-x))/2) / \sqrt{2\pi}

    for a real number :math:`x`.

    %(after_notes)s

    This distribution has utility in high-energy physics and radiation
    detection. It describes the energy loss of a charged relativistic
    particle due to ionization of the medium [1]_. It also provides an
    approximation for the Landau distribution. For an in depth description
    see [2]_. For additional description, see [3]_.

    References
    ----------
    .. [1] J.E. Moyal, "XXX. Theory of ionization fluctuations",
           The London, Edinburgh, and Dublin Philosophical Magazine
           and Journal of Science, vol 46, 263-280, (1955).
           :doi:`10.1080/14786440308521076` (gated)
    .. [2] G. Cordeiro et al., "The beta Moyal: a useful skew distribution",
           International Journal of Research and Reviews in Applied Sciences,
           vol 10, 171-192, (2012).
           http://www.arpapress.com/Volumes/Vol10Issue2/IJRRAS_10_2_02.pdf
    .. [3] C. Walck, "Handbook on Statistical Distributions for
           Experimentalists; International Report SUF-PFY/96-01", Chapter 26,
           University of Stockholm: Stockholm, Sweden, (2007).
           http://www.stat.rice.edu/~dobelman/textfiles/DistributionsHandbook.pdf

    .. versionadded:: 1.1.0

    %(example)s
    """

    def _rvs(self, size=None, random_state=None):
        # -log of a gamma(1/2, scale=2) draw is Moyal distributed.
        draw = gamma.rvs(a=0.5, scale=2, size=size,
                         random_state=random_state)
        return -np.log(draw)

    def _pdf(self, x):
        return np.exp(-0.5 * (x + np.exp(-x))) / np.sqrt(2*np.pi)

    def _cdf(self, x):
        return sc.erfc(np.exp(-0.5 * x) / np.sqrt(2))

    def _sf(self, x):
        return sc.erf(np.exp(-0.5 * x) / np.sqrt(2))

    def _ppf(self, x):
        return -np.log(2 * sc.erfcinv(x)**2)

    def _stats(self):
        mean = np.log(2) + np.euler_gamma
        variance = np.pi**2 / 2
        skew = 28 * np.sqrt(2) * sc.zeta(3) / np.pi**3
        kurt = 4.
        return mean, variance, skew, kurt

    def _munp(self, n):
        # Closed forms are known for the first four raw moments.
        base = np.log(2) + np.euler_gamma
        if n == 1.0:
            return base
        elif n == 2.0:
            return np.pi**2 / 2 + base**2
        elif n == 3.0:
            return 1.5 * np.pi**2 * base + base**3 + 14 * sc.zeta(3)
        elif n == 4.0:
            return (4 * 14 * sc.zeta(3) * base
                    + 3 * np.pi**2 * base**2
                    + base**4
                    + 7 * np.pi**4 / 4)
        else:
            # Higher moments fall back to numerical integration.
            return self._mom1_sc(n)


moyal = moyal_gen(name="moyal")
class nakagami_gen(rv_continuous):
    r"""A Nakagami continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `nakagami` is:

    .. math::
        f(x, \nu) = \frac{2 \nu^\nu}{\Gamma(\nu)} x^{2\nu-1} \exp(-\nu x^2)

    for :math:`x >= 0`, :math:`\nu > 0`. The distribution was introduced in
    [2]_, see also [1]_ for further information.

    `nakagami` takes ``nu`` as a shape parameter for :math:`\nu`.

    %(after_notes)s

    References
    ----------
    .. [1] "Nakagami distribution", Wikipedia
           https://en.wikipedia.org/wiki/Nakagami_distribution
    .. [2] M. Nakagami, "The m-distribution - A general formula of intensity
           distribution of rapid fading", Statistical methods in radio wave
           propagation, Pergamon Press, 1960, 3-36.
           :doi:`10.1016/B978-0-08-009306-2.50005-4`

    %(example)s
    """

    def _pdf(self, x, nu):
        return np.exp(self._logpdf(x, nu))

    def _logpdf(self, x, nu):
        # nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
        #                       x**(2*nu-1) * exp(-nu*x**2)
        return (np.log(2) + sc.xlogy(nu, nu) - sc.gammaln(nu) +
                sc.xlogy(2*nu - 1, x) - nu*x**2)

    def _cdf(self, x, nu):
        # X**2 is gamma(nu, scale=1/nu) distributed.
        return sc.gammainc(nu, nu*x*x)

    def _ppf(self, q, nu):
        return np.sqrt(1.0/nu*sc.gammaincinv(nu, q))

    def _sf(self, x, nu):
        return sc.gammaincc(nu, nu*x*x)

    def _isf(self, p, nu):
        return np.sqrt(1/nu * sc.gammainccinv(nu, p))

    def _stats(self, nu):
        m = sc.gamma(nu+0.5)/sc.gamma(nu)/np.sqrt(nu)
        m2 = 1.0-m*m
        skew = m * (1 - 4*nu*m2) / 2.0 / nu / np.power(m2, 1.5)
        kurt = -6*m**4*nu + (8*nu-2)*m**2-2*nu + 1
        kurt /= nu*m2**2.0
        return m, m2, skew, kurt

    def _rvs(self, nu, size=None, random_state=None):
        # this relationship can be found in [1] or by a direct calculation
        return np.sqrt(random_state.standard_gamma(nu, size=size) / nu)

    def _fitstart(self, data, args=None):
        if args is None:
            args = (1.0,) * self.numargs
        # Analytical justified estimates
        # see: https://docs.scipy.org/doc/scipy/reference/tutorial/stats/continuous_nakagami.html
        loc_guess = np.min(data)
        scale_guess = np.sqrt(np.sum((data - loc_guess)**2) / len(data))
        return args + (loc_guess, scale_guess)


nakagami = nakagami_gen(a=0.0, name="nakagami")
class ncx2_gen(rv_continuous):
    r"""A non-central chi-squared continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `ncx2` is:

    .. math::
        f(x, k, \lambda) = \frac{1}{2} \exp(-(\lambda+x)/2)
            (x/\lambda)^{(k-2)/4}  I_{(k-2)/2}(\sqrt{\lambda x})

    for :math:`x >= 0` and :math:`k, \lambda > 0`. :math:`k` specifies the
    degrees of freedom (denoted ``df`` in the implementation) and
    :math:`\lambda` is the non-centrality parameter (denoted ``nc`` in the
    implementation). :math:`I_\nu` denotes the modified Bessel function of
    first order of degree :math:`\nu` (`scipy.special.iv`).

    `ncx2` takes ``df`` and ``nc`` as shape parameters.

    %(after_notes)s

    %(example)s
    """

    def _argcheck(self, df, nc):
        return (df > 0) & (nc >= 0)

    def _rvs(self, df, nc, size=None, random_state=None):
        return random_state.noncentral_chisquare(df, nc, size)

    def _logpdf(self, x, df, nc):
        # Delegate to the central chi2 wherever nc == 0.
        mask = np.ones_like(x, dtype=bool) & (nc != 0)
        return _lazywhere(mask, (x, df, nc), f=_ncx2_log_pdf, f2=chi2.logpdf)

    def _pdf(self, x, df, nc):
        # ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4)
        #                       * I[(df-2)/2](sqrt(nc*x))
        mask = np.ones_like(x, dtype=bool) & (nc != 0)
        return _lazywhere(mask, (x, df, nc), f=_ncx2_pdf, f2=chi2.pdf)

    def _cdf(self, x, df, nc):
        mask = np.ones_like(x, dtype=bool) & (nc != 0)
        return _lazywhere(mask, (x, df, nc), f=_ncx2_cdf, f2=chi2.cdf)

    def _ppf(self, q, df, nc):
        mask = np.ones_like(q, dtype=bool) & (nc != 0)
        return _lazywhere(mask, (q, df, nc), f=sc.chndtrix, f2=chi2.ppf)

    def _stats(self, df, nc):
        # Closed-form moments of the noncentral chi-squared distribution.
        t = df + 2.0*nc
        return (df + nc,
                2*t,
                np.sqrt(8)*(t+nc)/t**1.5,
                12.0*(t+2*nc)/t**2.0)


ncx2 = ncx2_gen(a=0.0, name='ncx2')
class ncf_gen(rv_continuous):
    r"""A non-central F distribution continuous random variable.
    %(before_notes)s
    See Also
    --------
    scipy.stats.f : Fisher distribution
    Notes
    -----
    The probability density function for `ncf` is:
    .. math::
        f(x, n_1, n_2, \lambda) =
            \exp\left(\frac{\lambda}{2} +
                      \lambda n_1 \frac{x}{2(n_1 x + n_2)}
                \right)
            n_1^{n_1/2} n_2^{n_2/2} x^{n_1/2 - 1} \\
            (n_2 + n_1 x)^{-(n_1 + n_2)/2}
            \gamma(n_1/2) \gamma(1 + n_2/2) \\
            \frac{L^{\frac{n_1}{2}-1}_{n_2/2}
                \left(-\lambda n_1 \frac{x}{2(n_1 x + n_2)}\right)}
            {B(n_1/2, n_2/2)
                \gamma\left(\frac{n_1 + n_2}{2}\right)}
    for :math:`n_1, n_2 > 0`, :math:`\lambda \ge 0`.  Here :math:`n_1` is the
    degrees of freedom in the numerator, :math:`n_2` the degrees of freedom in
    the denominator, :math:`\lambda` the non-centrality parameter,
    :math:`\gamma` is the logarithm of the Gamma function, :math:`L_n^k` is a
    generalized Laguerre polynomial and :math:`B` is the beta function.
    `ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters. If ``nc=0``,
    the distribution becomes equivalent to the Fisher distribution.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, df1, df2, nc):
        # Both degrees of freedom strictly positive; non-centrality >= 0.
        return (df1 > 0) & (df2 > 0) & (nc >= 0)
    def _rvs(self, dfn, dfd, nc, size=None, random_state=None):
        return random_state.noncentral_f(dfn, dfd, nc, size)
    def _pdf_skip(self, x, dfn, dfd, nc):
        # Intentionally disabled (note the `_skip` suffix): the generic pdf
        # machinery is used instead.
        # ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2))) *
        #             df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) *
        #             (df2+df1*x)**(-(df1+df2)/2) *
        #             gamma(df1/2)*gamma(1+df2/2) *
        #             L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) /
        #             (B(v1/2, v2/2) * gamma((v1+v2)/2))
        n1, n2 = dfn, dfd
        term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + sc.gammaln(n1/2.)+sc.gammaln(1+n2/2.)
        term -= sc.gammaln((n1+n2)/2.0)
        Px = np.exp(term)
        Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
        Px *= (n2+n1*x)**(-(n1+n2)/2)
        Px *= sc.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1)
        Px /= sc.beta(n1/2, n2/2)
        # This function does not have a return.  Drop it for now, the generic
        # function seems to work OK.
    def _cdf(self, x, dfn, dfd, nc):
        return sc.ncfdtr(dfn, dfd, nc, x)
    def _ppf(self, q, dfn, dfd, nc):
        return sc.ncfdtri(dfn, dfd, nc, q)
    def _munp(self, n, dfn, dfd, nc):
        # Raw moment formula involving the confluent hypergeometric 1F1.
        val = (dfn * 1.0/dfd)**n
        term = sc.gammaln(n+0.5*dfn) + sc.gammaln(0.5*dfd-n) - sc.gammaln(dfd*0.5)
        val *= np.exp(-nc / 2.0+term)
        val *= sc.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
        return val
    def _stats(self, dfn, dfd, nc):
        # Note: the rv_continuous class ensures that dfn > 0 when this function
        # is called, so we don't have to check for division by zero with dfn
        # in the following.
        # Mean is finite only for dfd > 2 (inf otherwise, via the masked
        # divide); variance requires dfd > 4.
        mu_num = dfd * (dfn + nc)
        mu_den = dfn * (dfd - 2)
        mu = np.full_like(mu_num, dtype=np.float64, fill_value=np.inf)
        np.true_divide(mu_num, mu_den, where=dfd > 2, out=mu)
        mu2_num = 2*((dfn + nc)**2 + (dfn + 2*nc)*(dfd - 2))*(dfd/dfn)**2
        mu2_den = (dfd - 2)**2 * (dfd - 4)
        mu2 = np.full_like(mu2_num, dtype=np.float64, fill_value=np.inf)
        np.true_divide(mu2_num, mu2_den, where=dfd > 4, out=mu2)
        return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
    r"""A Student's t continuous random variable.
    For the noncentral t distribution, see `nct`.
    %(before_notes)s
    See Also
    --------
    nct
    Notes
    -----
    The probability density function for `t` is:
    .. math::
        f(x, \nu) = \frac{\Gamma((\nu+1)/2)}
                        {\sqrt{\pi \nu} \Gamma(\nu/2)}
                    (1+x^2/\nu)^{-(\nu+1)/2}
    where :math:`x` is a real number and the degrees of freedom parameter
    :math:`\nu` (denoted ``df`` in the implementation) satisfies
    :math:`\nu > 0`. :math:`\Gamma` is the gamma function
    (`scipy.special.gamma`).
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, df):
        # The single shape parameter (degrees of freedom) must be positive.
        return df > 0

    def _rvs(self, df, size=None, random_state=None):
        # Delegate directly to NumPy's Student's t sampler.
        return random_state.standard_t(df, size=size)

    def _pdf(self, x, df):
        #                                gamma((df+1)/2)
        # t.pdf(x, df) = ---------------------------------------------------
        #                sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
        nu = np.asarray(df*1.0)
        # Evaluate the gamma-function ratio in log space for stability.
        numerator = np.exp(sc.gammaln((nu+1)/2) - sc.gammaln(nu/2))
        denominator = np.sqrt(nu*np.pi)*(1+(x**2)/nu)**((nu+1)/2)
        return numerator / denominator

    def _logpdf(self, x, df):
        nu = df*1.0
        shape_term = sc.gammaln((nu+1)/2) - sc.gammaln(nu/2)
        x_term = 0.5*np.log(nu*np.pi) + (nu+1)/2*np.log(1+(x**2)/nu)
        return shape_term - x_term

    def _cdf(self, x, df):
        # Student's t CDF via the dedicated special function.
        return sc.stdtr(df, x)

    def _sf(self, x, df):
        # Survival function by symmetry of the distribution: sf(x) = cdf(-x).
        return sc.stdtr(df, -x)

    def _ppf(self, q, df):
        return sc.stdtrit(df, q)

    def _isf(self, q, df):
        # Inverse survival function, again via symmetry.
        return -sc.stdtrit(df, q)

    def _stats(self, df):
        # Mean exists for df > 1 and equals 0 by symmetry; inf otherwise.
        mu = np.where(df > 1, 0.0, np.inf)
        # Variance: df/(df-2) for df > 2, infinite for 1 < df <= 2,
        # undefined (nan) for df <= 1.
        mu2 = _lazywhere(df > 2, (df,),
                         lambda d: d / (d-2.0),
                         np.inf)
        mu2 = np.where(df <= 1, np.nan, mu2)
        # Skewness is 0 where defined (df > 3), undefined otherwise.
        g1 = np.where(df > 3, 0.0, np.nan)
        # Excess kurtosis: 6/(df-4) for df > 4, infinite for
        # 2 < df <= 4, undefined for df <= 2.
        g2 = _lazywhere(df > 4, (df,),
                        lambda d: 6.0 / (d-4.0),
                        np.inf)
        g2 = np.where(df <= 2, np.nan, g2)
        return mu, mu2, g1, g2

    def _entropy(self, df):
        half = df/2
        half_plus = (df + 1)/2
        return (half_plus*(sc.digamma(half_plus) - sc.digamma(half))
                + np.log(np.sqrt(df)*sc.beta(half, 0.5)))

t = t_gen(name='t')
class nct_gen(rv_continuous):
    r"""A non-central Student's t continuous random variable.
    %(before_notes)s
    Notes
    -----
    If :math:`Y` is a standard normal random variable and :math:`V` is
    an independent chi-square random variable (`chi2`) with :math:`k` degrees
    of freedom, then
    .. math::
        X = \frac{Y + c}{\sqrt{V/k}}
    has a non-central Student's t distribution on the real line.
    The degrees of freedom parameter :math:`k` (denoted ``df`` in the
    implementation) satisfies :math:`k > 0` and the noncentrality parameter
    :math:`c` (denoted ``nc`` in the implementation) is a real number.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, df, nc):
        # `nc == nc` is a NaN check: any real (non-NaN) noncentrality is valid.
        return (df > 0) & (nc == nc)
    def _rvs(self, df, nc, size=None, random_state=None):
        # Sample via the definition: X = (Y + nc) / sqrt(V/df),
        # Y ~ N(0, 1) shifted by nc, V ~ chi2(df).
        n = norm.rvs(loc=nc, size=size, random_state=random_state)
        c2 = chi2.rvs(df, size=size, random_state=random_state)
        return n * np.sqrt(df) / np.sqrt(c2)
    def _pdf(self, x, df, nc):
        # Analytic density built from two confluent hypergeometric (1F1)
        # terms; the common prefactor is assembled in log space (trm1)
        # to limit overflow.
        n = df*1.0
        nc = nc*1.0
        x2 = x*x
        ncx2 = nc*nc*x2
        fac1 = n + x2
        trm1 = (n/2.*np.log(n) + sc.gammaln(n+1)
                - (n*np.log(2) + nc*nc/2 + (n/2)*np.log(fac1)
                   + sc.gammaln(n/2)))
        Px = np.exp(trm1)
        valF = ncx2 / (2*fac1)
        trm1 = (np.sqrt(2)*nc*x*sc.hyp1f1(n/2+1, 1.5, valF)
                / np.asarray(fac1*sc.gamma((n+1)/2)))
        trm2 = (sc.hyp1f1((n+1)/2, 0.5, valF)
                / np.asarray(np.sqrt(fac1)*sc.gamma(n/2+1)))
        Px *= trm1+trm2
        return Px
    def _cdf(self, x, df, nc):
        # Noncentral t CDF via the dedicated special function.
        return sc.nctdtr(df, nc, x)
    def _ppf(self, q, df, nc):
        return sc.nctdtrit(df, nc, q)
    def _stats(self, df, nc, moments='mv'):
        #
        # See D. Hogben, R.S. Pinkham, and M.B. Wilk,
        # 'The moments of the non-central t-distribution'
        # Biometrika 48, p. 465 (1961).
        # e.g. https://www.jstor.org/stable/2332772 (gated)
        #
        # The c_ij coefficients below follow that paper; moments that do
        # not exist for the given df are reported as nan via np.where.
        mu, mu2, g1, g2 = None, None, None, None
        gfac = np.exp(sc.betaln(df/2-0.5, 0.5) - sc.gammaln(0.5))
        c11 = np.sqrt(df/2.) * gfac
        c20 = np.where(df > 2., df / (df-2.), np.nan)
        c22 = c20 - c11*c11
        mu = np.where(df > 1, nc*c11, np.nan)
        mu2 = np.where(df > 2, c22*nc*nc + c20, np.nan)
        if 's' in moments:
            c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11
            c31t = 3.*df / (df-2.) / (df-3.)
            mu3 = (c33t*nc*nc + c31t) * c11*nc
            g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan)
        # kurtosis
        if 'k' in moments:
            c44 = df*df / (df-2.) / (df-4.)
            c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.)
            c44 -= 3.*c11**4
            c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.)
            c42 *= 6.*df / (df-2.)
            c40 = 3.*df*df / (df-2.) / (df-4.)
            mu4 = c44 * nc**4 + c42*nc**2 + c40
            g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan)
        return mu, mu2, g1, g2
nct = nct_gen(name="nct")
class pareto_gen(rv_continuous):
    r"""A Pareto continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `pareto` is:
    .. math::
        f(x, b) = \frac{b}{x^{b+1}}
    for :math:`x \ge 1`, :math:`b > 0`.
    `pareto` takes ``b`` as a shape parameter for :math:`b`.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, b):
        # pareto.pdf(x, b) = b / x**(b+1)
        return b * x**(-b-1)
    def _cdf(self, x, b):
        return 1 - x**(-b)
    def _ppf(self, q, b):
        return pow(1-q, -1.0/b)
    def _sf(self, x, b):
        return x**(-b)
    def _stats(self, b, moments='mv'):
        # The k-th moment exists only for b > k; where a moment does not
        # exist the mean/variance arrays are filled with inf, and the
        # skewness/kurtosis arrays with nan.
        mu, mu2, g1, g2 = None, None, None, None
        if 'm' in moments:
            mask = b > 1
            bt = np.extract(mask, b)
            mu = np.full(np.shape(b), fill_value=np.inf)
            np.place(mu, mask, bt / (bt-1.0))
        if 'v' in moments:
            mask = b > 2
            bt = np.extract(mask, b)
            mu2 = np.full(np.shape(b), fill_value=np.inf)
            np.place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
        if 's' in moments:
            mask = b > 3
            bt = np.extract(mask, b)
            g1 = np.full(np.shape(b), fill_value=np.nan)
            vals = 2 * (bt + 1.0) * np.sqrt(bt - 2.0) / ((bt - 3.0) * np.sqrt(bt))
            np.place(g1, mask, vals)
        if 'k' in moments:
            mask = b > 4
            bt = np.extract(mask, b)
            g2 = np.full(np.shape(b), fill_value=np.nan)
            vals = (6.0*np.polyval([1.0, 1.0, -6, -2], bt) /
                    np.polyval([1.0, -7.0, 12.0, 0.0], bt))
            np.place(g2, mask, vals)
        return mu, mu2, g1, g2
    def _entropy(self, c):
        return 1 + 1.0/c - np.log(c)
    @_call_super_mom
    def fit(self, data, *args, **kwds):
        # Closed-form MLE when `loc` is fixed; otherwise fall back to the
        # generic numerical fit of the superclass.
        parameters = _check_fit_input_parameters(self, data, args, kwds)
        data, fshape, floc, fscale = parameters
        if floc is None:
            return super().fit(data, **kwds)
        # Shifted data must lie above the (fixed or implicit) scale.
        # NOTE(review): `fscale if fscale else 0` treats a fixed scale of
        # exactly 0 the same as an unspecified scale — confirm intended.
        if np.any(data - floc < (fscale if fscale else 0)):
            raise FitDataError("pareto", lower=1, upper=np.inf)
        data = data - floc
        # Source: Evans, Hastings, and Peacock (2000), Statistical
        # Distributions, 3rd. Ed., John Wiley and Sons. Page 149.
        if fscale is None:
            fscale = np.min(data)
        if fshape is None:
            fshape = 1/((1/len(data)) * np.sum(np.log(data/fscale)))
        return fshape, floc, fscale
pareto = pareto_gen(a=1.0, name="pareto")
class lomax_gen(rv_continuous):
    r"""A Lomax (Pareto of the second kind) continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `lomax` is:
    .. math::
        f(x, c) = \frac{c}{(1+x)^{c+1}}
    for :math:`x \ge 0`, :math:`c > 0`.
    `lomax` takes ``c`` as a shape parameter for :math:`c`.
    `lomax` is a special case of `pareto` with ``loc=-1.0``.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, c):
        # lomax.pdf(x, c) = c / (1+x)**(c+1)
        return c*1.0/(1.0+x)**(c+1.0)

    def _logpdf(self, x, c):
        # log(c) - (c+1)*log(1+x); log1p keeps accuracy for small x.
        return np.log(c) - (c+1)*sc.log1p(x)

    def _cdf(self, x, c):
        # 1 - (1+x)**(-c), written via expm1 of the log survival function
        # for numerical accuracy.
        return -sc.expm1(self._logsf(x, c))

    def _sf(self, x, c):
        # (1+x)**(-c), exponentiating the log survival function.
        return np.exp(self._logsf(x, c))

    def _logsf(self, x, c):
        return -c*sc.log1p(x)

    def _ppf(self, q, c):
        # Inverse CDF: (1-q)**(-1/c) - 1, via expm1/log1p.
        return sc.expm1(-sc.log1p(-q)/c)

    def _stats(self, c):
        # Shift the Pareto moments by loc=-1 (special-case relation).
        return pareto.stats(c, loc=-1.0, moments='mvsk')

    def _entropy(self, c):
        return 1+1.0/c-np.log(c)

lomax = lomax_gen(a=0.0, name="lomax")
class pearson3_gen(rv_continuous):
    r"""A pearson type III continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `pearson3` is:
    .. math::
        f(x, \kappa) = \frac{|\beta|}{\Gamma(\alpha)}
                       (\beta (x - \zeta))^{\alpha - 1}
                       \exp(-\beta (x - \zeta))
    where:
    .. math::
            \beta = \frac{2}{\kappa}
            \alpha = \beta^2 = \frac{4}{\kappa^2}
            \zeta = -\frac{\alpha}{\beta} = -\beta
    :math:`\Gamma` is the gamma function (`scipy.special.gamma`).
    Pass the skew :math:`\kappa` into `pearson3` as the shape parameter
    ``skew``.
    %(after_notes)s
    %(example)s
    References
    ----------
    R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
    Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
    Resources Research, Vol.27, 3149-3158 (1991).
    L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
    Vol.1, 191-198 (1930).
    "Using Modern Computing Tools to Fit the Pearson Type III Distribution to
    Aviation Loads Data", Office of Aviation Research (2003).
    """
    def _preprocess(self, x, skew):
        # Convert the skew parameterization to gamma-distribution
        # parameters (alpha, beta, zeta) and split the broadcast inputs
        # into a near-zero-skew branch (handled with the normal
        # distribution) and the general gamma branch.
        # The real 'loc' and 'scale' are handled in the calling pdf(...). The
        # local variables 'loc' and 'scale' within pearson3._pdf are set to
        # the defaults just to keep them as part of the equations for
        # documentation.
        loc = 0.0
        scale = 1.0
        # If skew is small, return _norm_pdf. The divide between pearson3
        # and norm was found by brute force and is approximately a skew of
        # 0.000016. No one, I hope, would actually use a skew value even
        # close to this small.
        norm2pearson_transition = 0.000016
        ans, x, skew = np.broadcast_arrays(1.0, x, skew)
        ans = ans.copy()
        # mask is True where skew is small enough to use the normal approx.
        mask = np.absolute(skew) < norm2pearson_transition
        invmask = ~mask
        # beta/alpha/zeta are computed only on the general branch, so they
        # have the (possibly smaller) shape of x[invmask].
        beta = 2.0 / (skew[invmask] * scale)
        alpha = (scale * beta)**2
        zeta = loc - alpha / beta
        transx = beta * (x[invmask] - zeta)
        return ans, x, transx, mask, invmask, beta, alpha, zeta
    def _argcheck(self, skew):
        # The _argcheck function in rv_continuous only allows positive
        # arguments. The skew argument for pearson3 can be zero (which I want
        # to handle inside pearson3._pdf) or negative. So just return True
        # for all skew args.
        return np.ones(np.shape(skew), dtype=bool)
    def _stats(self, skew):
        # Standardized form: mean 0, variance 1; skewness is the shape
        # parameter itself and excess kurtosis is 1.5*skew**2.
        m = 0.0
        v = 1.0
        s = skew
        k = 1.5*skew**2
        return m, v, s, k
    def _pdf(self, x, skew):
        # pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
        #     (beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
        # Do the calculation in _logpdf since helps to limit
        # overflow/underflow problems
        ans = np.exp(self._logpdf(x, skew))
        if ans.ndim == 0:
            if np.isnan(ans):
                # Outside the support the log-density is -inf/nan; report
                # a zero density there.
                return 0.0
            return ans
        ans[np.isnan(ans)] = 0.0
        return ans
    def _logpdf(self, x, skew):
        #   PEARSON3 logpdf                          GAMMA logpdf
        #   np.log(abs(beta))
        #   + (alpha - 1)*np.log(beta*(x - zeta))    + (a - 1)*np.log(x)
        #   - beta*(x - zeta)                        - x
        #   - sc.gammaln(alpha)                      - sc.gammaln(a)
        ans, x, transx, mask, invmask, beta, alpha, _ = (
            self._preprocess(x, skew))
        ans[mask] = np.log(_norm_pdf(x[mask]))
        # use logpdf instead of _logpdf to fix issue mentioned in gh-12640
        # (_logpdf does not return correct result for alpha = 1)
        ans[invmask] = np.log(abs(beta)) + gamma.logpdf(transx, alpha)
        return ans
    def _cdf(self, x, skew):
        ans, x, transx, mask, invmask, _, alpha, _ = (
            self._preprocess(x, skew))
        ans[mask] = _norm_cdf(x[mask])
        skew = np.broadcast_to(skew, invmask.shape)
        # invmask*a index into the full-size ans array; invmask*b index
        # into the compressed transx/alpha arrays.
        invmask1a = np.logical_and(invmask, skew > 0)
        invmask1b = skew[invmask] > 0
        # use cdf instead of _cdf to fix issue mentioned in gh-12640
        # (_cdf produces NaNs for inputs outside support)
        ans[invmask1a] = gamma.cdf(transx[invmask1b], alpha[invmask1b])
        # The gamma._cdf approach wasn't working with negative skew.
        # Note that multiplying the skew by -1 reflects about x=0.
        # So instead of evaluating the CDF with negative skew at x,
        # evaluate the SF with positive skew at -x.
        invmask2a = np.logical_and(invmask, skew < 0)
        invmask2b = skew[invmask] < 0
        # gamma._sf produces NaNs when transx < 0, so use gamma.sf
        ans[invmask2a] = gamma.sf(transx[invmask2b], alpha[invmask2b])
        return ans
    def _rvs(self, skew, size=None, random_state=None):
        # Sample from the normal branch where |skew| is tiny and from a
        # shifted/scaled gamma elsewhere.
        skew = np.broadcast_to(skew, size)
        ans, _, _, mask, invmask, beta, alpha, zeta = (
            self._preprocess([0], skew))
        nsmall = mask.sum()
        nbig = mask.size - nsmall
        ans[mask] = random_state.standard_normal(nsmall)
        ans[invmask] = random_state.standard_gamma(alpha, nbig)/beta + zeta
        if size == ():
            ans = ans[0]
        return ans
    def _ppf(self, q, skew):
        ans, q, _, mask, invmask, beta, alpha, zeta = (
            self._preprocess(q, skew))
        ans[mask] = _norm_ppf(q[mask])
        ans[invmask] = sc.gammaincinv(alpha, q[invmask])/beta + zeta
        return ans
    @_call_super_mom
    @extend_notes_in_docstring(rv_continuous, notes="""\
        Note that method of moments (`method='MM'`) is not
        available for this distribution.\n\n""")
    def fit(self, data, *args, **kwds):
        if kwds.get("method", None) == 'MM':
            raise NotImplementedError("Fit `method='MM'` is not available for "
                                      "the Pearson3 distribution. Please try "
                                      "the default `method='MLE'`.")
        else:
            return super(type(self), self).fit(data, *args, **kwds)
pearson3 = pearson3_gen(name="pearson3")
class powerlaw_gen(rv_continuous):
    r"""A power-function continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `powerlaw` is:
    .. math::
        f(x, a) = a x^{a-1}
    for :math:`0 \le x \le 1`, :math:`a > 0`.
    `powerlaw` takes ``a`` as a shape parameter for :math:`a`.
    %(after_notes)s
    `powerlaw` is a special case of `beta` with ``b=1``.
    %(example)s
    """
    def _pdf(self, x, a):
        # powerlaw.pdf(x, a) = a * x**(a-1)
        return a * x**(a - 1.0)

    def _logpdf(self, x, a):
        # xlogy evaluates (a-1)*log(x) with the conventional limit at x=0.
        return np.log(a) + sc.xlogy(a - 1, x)

    def _cdf(self, x, a):
        return x**(a*1.0)

    def _logcdf(self, x, a):
        return a*np.log(x)

    def _ppf(self, q, a):
        # Inverse of x**a on [0, 1].
        return q**(1.0/a)

    def _stats(self, a):
        mean = a / (a + 1.0)
        variance = a / (a + 2.0) / (a + 1.0) ** 2
        skewness = -2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a)
        kurtosis = 6 * np.polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4))
        return mean, variance, skewness, kurtosis

    def _entropy(self, a):
        return 1 - 1.0/a - np.log(a)

powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
class powerlognorm_gen(rv_continuous):
    r"""A power log-normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `powerlognorm` is:
    .. math::
        f(x, c, s) = \frac{c}{x s} \phi(\log(x)/s)
                     (\Phi(-\log(x)/s))^{c-1}
    where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
    and :math:`x > 0`, :math:`s, c > 0`.
    `powerlognorm` takes :math:`c` and :math:`s` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    # Density is positive only strictly inside the support.
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x, c, s):
        # powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
        #                                         (Phi(-log(x)/s))**(c-1)
        z = np.log(x)/s
        return c/(x*s) * _norm_pdf(z) * pow(_norm_cdf(-z), c*1.0-1.0)

    def _cdf(self, x, c, s):
        return 1.0 - pow(_norm_cdf(-np.log(x)/s), c*1.0)

    def _ppf(self, q, c, s):
        return np.exp(-s * _norm_ppf(pow(1.0 - q, 1.0 / c)))

powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
class powernorm_gen(rv_continuous):
    r"""A power normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `powernorm` is:
    .. math::
        f(x, c) = c \phi(x) (\Phi(-x))^{c-1}
    where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
    and :math:`x >= 0`, :math:`c > 0`.
    `powernorm` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, c):
        # powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
        tail = _norm_cdf(-x)
        return c*_norm_pdf(x) * (tail**(c-1.0))

    def _logpdf(self, x, c):
        # Same density evaluated in log space.
        return np.log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)

    def _cdf(self, x, c):
        return 1.0-_norm_cdf(-x)**(c*1.0)

    def _ppf(self, q, c):
        return -_norm_ppf(pow(1.0 - q, 1.0 / c))

powernorm = powernorm_gen(name='powernorm')
class rdist_gen(rv_continuous):
    r"""An R-distributed (symmetric beta) continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `rdist` is:
    .. math::
        f(x, c) = \frac{(1-x^2)^{c/2-1}}{B(1/2, c/2)}
    for :math:`-1 \le x \le 1`, :math:`c > 0`. `rdist` is also called the
    symmetric beta distribution: if B has a `beta` distribution with
    parameters (c/2, c/2), then X = 2*B - 1 follows a R-distribution with
    parameter c.
    `rdist` takes ``c`` as a shape parameter for :math:`c`.
    This distribution includes the following distribution kernels as
    special cases::
        c = 2:  uniform
        c = 3:  `semicircular`
        c = 4:  Epanechnikov (parabolic)
        c = 6:  quartic (biweight)
        c = 8:  triweight
    %(after_notes)s
    %(example)s
    """
    # Everything below is delegated to the beta distribution through the
    # affine map X = 2*B - 1 with B ~ beta(c/2, c/2).
    def _pdf(self, x, c):
        return np.exp(self._logpdf(x, c))

    def _logpdf(self, x, c):
        # Log density of beta at (x+1)/2 plus the log-Jacobian log(1/2).
        return -np.log(2) + beta._logpdf((x + 1)/2, c/2, c/2)

    def _cdf(self, x, c):
        return beta._cdf((x + 1)/2, c/2, c/2)

    def _ppf(self, q, c):
        return 2*beta._ppf(q, c/2, c/2) - 1

    def _rvs(self, c, size=None, random_state=None):
        return 2 * random_state.beta(c/2, c/2, size) - 1

    def _munp(self, n, c):
        # Odd raw moments vanish by symmetry; even ones reduce to a
        # ratio of beta functions.
        parity = 1 - (n % 2)
        numerator = parity * sc.beta((n + 1.0) / 2, c / 2.0)
        return numerator / sc.beta(1. / 2, c / 2.)

rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
def _rayleigh_fit_check_error(ier, msg):
    # Translate a failed scipy.optimize.fsolve status (any ier != 1 means
    # the root finder did not converge) into a RuntimeError for
    # rayleigh.fit; msg is fsolve's human-readable diagnostic.
    if ier != 1:
        raise RuntimeError('rayleigh.fit: fsolve failed to find the root of '
                           'the first-order conditions of the log-likelihood '
                           f'function: {msg} (ier={ier})')
class rayleigh_gen(rv_continuous):
    r"""A Rayleigh continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `rayleigh` is:
    .. math::
        f(x) = x \exp(-x^2/2)
    for :math:`x \ge 0`.
    `rayleigh` is a special case of `chi` with ``df=2``.
    %(after_notes)s
    %(example)s
    """
    # Density is positive only strictly inside the support.
    _support_mask = rv_continuous._open_support_mask
    def _rvs(self, size=None, random_state=None):
        # Rayleigh is chi with 2 degrees of freedom.
        return chi.rvs(2, size=size, random_state=random_state)
    def _pdf(self, r):
        # rayleigh.pdf(r) = r * exp(-r**2/2)
        return np.exp(self._logpdf(r))
    def _logpdf(self, r):
        return np.log(r) - 0.5 * r * r
    def _cdf(self, r):
        # 1 - exp(-r**2/2), via expm1 for accuracy at small r.
        return -sc.expm1(-0.5 * r**2)
    def _ppf(self, q):
        return np.sqrt(-2 * sc.log1p(-q))
    def _sf(self, r):
        return np.exp(self._logsf(r))
    def _logsf(self, r):
        return -0.5 * r * r
    def _isf(self, q):
        return np.sqrt(-2 * np.log(q))
    def _stats(self):
        val = 4 - np.pi
        return (np.sqrt(np.pi/2),
                val/2,
                2*(np.pi-3)*np.sqrt(np.pi)/val**1.5,
                6*np.pi/val-16/val**2)
    def _entropy(self):
        return _EULER/2.0 + 1 - 0.5*np.log(2)
    @_call_super_mom
    @extend_notes_in_docstring(rv_continuous, notes="""\
        Notes specifically for ``rayleigh.fit``: If the location is fixed with
        the `floc` parameter, this method uses an analytical formula to find
        the scale. Otherwise, this function uses a numerical root finder on
        the first order conditions of the log-likelihood function to find the
        MLE. Only the (optional) `loc` parameter is used as the initial guess
        for the root finder; the `scale` parameter and any other parameters
        for the optimizer are ignored.\n\n""")
    def fit(self, data, *args, **kwds):
        data, floc, fscale = _check_fit_input_parameters(self, data,
                                                         args, kwds)
        def scale_mle(loc, data):
            # Closed-form MLE of the scale for a known location.
            # Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
            # and Peacock (2000), Page 175
            return (np.sum((data - loc) ** 2) / (2 * len(data))) ** .5
        def loc_mle(loc, data):
            # This implicit equation for `loc` is used when
            # both `loc` and `scale` are free.
            xm = data - loc
            s1 = xm.sum()
            s2 = (xm**2).sum()
            s3 = (1/xm).sum()
            return s1 - s2/(2*len(data))*s3
        def loc_mle_scale_fixed(loc, scale, data):
            # This implicit equation for `loc` is used when
            # `scale` is fixed but `loc` is not.
            xm = data - loc
            return xm.sum() - scale**2 * (1/xm).sum()
        if floc is not None:
            # `loc` is fixed, analytically determine `scale`.
            if np.any(data - floc <= 0):
                raise FitDataError("rayleigh", lower=1, upper=np.inf)
            else:
                return floc, scale_mle(floc, data)
        # Account for user provided guess of `loc`.
        loc0 = kwds.get('loc')
        if loc0 is None:
            # Use _fitstart to estimate loc; ignore the returned scale.
            loc0 = self._fitstart(data)[0]
        if fscale is not None:
            # `scale` is fixed
            x, info, ier, msg = optimize.fsolve(loc_mle_scale_fixed, x0=loc0,
                                                args=(fscale, data,),
                                                xtol=1e-10, full_output=True)
            _rayleigh_fit_check_error(ier, msg)
            return x[0], fscale
        else:
            # Neither `loc` nor `scale` are fixed.
            x, info, ier, msg = optimize.fsolve(loc_mle, x0=loc0, args=(data,),
                                                xtol=1e-10, full_output=True)
            _rayleigh_fit_check_error(ier, msg)
            return x[0], scale_mle(x[0], data)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
class reciprocal_gen(rv_continuous):
    r"""A loguniform or reciprocal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for this class is:
    .. math::
        f(x, a, b) = \frac{1}{x \log(b/a)}
    for :math:`a \le x \le b`, :math:`b > a > 0`. This class takes
    :math:`a` and :math:`b` as shape parameters.
    %(after_notes)s
    %(example)s
    This doesn't show the equal probability of ``0.01``, ``0.1`` and
    ``1``. This is best when the x-axis is log-scaled:
    >>> import numpy as np
    >>> fig, ax = plt.subplots(1, 1)
    >>> ax.hist(np.log10(r))
    >>> ax.set_ylabel("Frequency")
    >>> ax.set_xlabel("Value of random variable")
    >>> ax.xaxis.set_major_locator(plt.FixedLocator([-2, -1, 0]))
    >>> ticks = ["$10^{{ {} }}$".format(i) for i in [-2, -1, 0]]
    >>> ax.set_xticklabels(ticks)  # doctest: +SKIP
    >>> plt.show()
    This random variable will be log-uniform regardless of the base chosen for
    ``a`` and ``b``. Let's specify with base ``2`` instead:
    >>> rvs = %(name)s(2**-2, 2**0).rvs(size=1000)
    Values of ``1/4``, ``1/2`` and ``1`` are equally likely with this random
    variable.  Here's the histogram:
    >>> fig, ax = plt.subplots(1, 1)
    >>> ax.hist(np.log2(rvs))
    >>> ax.set_ylabel("Frequency")
    >>> ax.set_xlabel("Value of random variable")
    >>> ax.xaxis.set_major_locator(plt.FixedLocator([-2, -1, 0]))
    >>> ticks = ["$2^{{ {} }}$".format(i) for i in [-2, -1, 0]]
    >>> ax.set_xticklabels(ticks)  # doctest: +SKIP
    >>> plt.show()
    """
    def _argcheck(self, a, b):
        # Both endpoints positive with a nonempty interval.
        return (a > 0) & (b > a)

    def _get_support(self, a, b):
        # The shape parameters themselves delimit the support.
        return a, b

    def _pdf(self, x, a, b):
        # reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
        log_span = np.log(b * 1.0 / a)
        return 1.0 / (x * log_span)

    def _logpdf(self, x, a, b):
        log_span = np.log(b * 1.0 / a)
        return -np.log(x) - np.log(log_span)

    def _cdf(self, x, a, b):
        log_span = np.log(b * 1.0 / a)
        return (np.log(x)-np.log(a)) / log_span

    def _ppf(self, q, a, b):
        # Geometric interpolation between the endpoints.
        return a*pow(b*1.0/a, q)

    def _munp(self, n, a, b):
        return 1.0/np.log(b*1.0/a) / n * (pow(b*1.0, n) - pow(a*1.0, n))

    def _entropy(self, a, b):
        return 0.5*np.log(a*b)+np.log(np.log(b*1.0/a))

loguniform = reciprocal_gen(name="loguniform")
reciprocal = reciprocal_gen(name="reciprocal")
class rice_gen(rv_continuous):
    r"""A Rice continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `rice` is:
    .. math::
        f(x, b) = x \exp(- \frac{x^2 + b^2}{2}) I_0(x b)
    for :math:`x >= 0`, :math:`b > 0`. :math:`I_0` is the modified Bessel
    function of order zero (`scipy.special.i0`).
    `rice` takes ``b`` as a shape parameter for :math:`b`.
    %(after_notes)s
    The Rice distribution describes the length, :math:`r`, of a 2-D vector with
    components :math:`(U+u, V+v)`, where :math:`U, V` are constant, :math:`u,
    v` are independent Gaussian random variables with standard deviation
    :math:`s`.  Let :math:`R = \sqrt{U^2 + V^2}`. Then the pdf of :math:`r` is
    ``rice.pdf(x, R/s, scale=s)``.
    %(example)s
    """
    def _argcheck(self, b):
        return b >= 0

    def _rvs(self, b, size=None, random_state=None):
        # Length of a 2-D Gaussian vector with a shifted mean.
        # https://en.wikipedia.org/wiki/Rice_distribution
        shifted = b/np.sqrt(2) + random_state.standard_normal(size=(2,) + size)
        return np.sqrt((shifted*shifted).sum(axis=0))

    def _cdf(self, x, b):
        # Via the noncentral chi-square CDF with 2 degrees of freedom.
        return sc.chndtr(np.square(x), 2, np.square(b))

    def _ppf(self, q, b):
        return np.sqrt(sc.chndtrix(q, 2, np.square(b)))

    def _pdf(self, x, b):
        # rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
        #
        # Because (x**2 + b**2)/2 = ((x-b)**2)/2 + x*b, the factor
        # exp(-x*b) can be folded into i0e (= exp(-|z|)*i0(z)), which
        # keeps the product finite for large values of x*b.
        diff = x - b
        return x * np.exp(-diff*diff/2.0) * sc.i0e(x*b)

    def _munp(self, n, b):
        # Raw moments via the confluent hypergeometric function 1F1.
        half_n = n/2.0
        shape = 1 + half_n
        half_b2 = b*b/2.0
        return (2.0**half_n * np.exp(-half_b2) * sc.gamma(shape) *
                sc.hyp1f1(shape, 1, half_b2))

rice = rice_gen(a=0.0, name="rice")
class recipinvgauss_gen(rv_continuous):
r"""A reciprocal inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `recipinvgauss` is:
.. math::
f(x, \mu) = \frac{1}{\sqrt{2\pi x}}
\exp\left(\frac{-(1-\mu x)^2}{2\mu^2x}\right)
for :math:`x \ge 0`.
`recipinvgauss` takes ``mu`` as a shape parameter for :math:`\mu`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, mu):
# recipinvgauss.pdf(x, mu) =
# 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
return np.exp(self._logpdf(x, mu))
def _logpdf(self, x, mu):
return _lazywhere(x > 0, (x, mu),
lambda x, mu: (-(1 - mu*x)**2.0 / (2*x*mu**2.0)
- 0.5*np.log(2*np.pi*x)),
fillvalue=-np.inf)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/np.sqrt(x)
return _norm_cdf(-isqx*trm1) - np.exp(2.0/mu)*_norm_cdf(-isqx*trm2)
def _sf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/np.sqrt(x)
return _norm_cdf(isqx*trm1) + np.exp(2.0/mu)*_norm_cdf(-isqx*trm2)
def _rvs(self, mu, size=None, random_state=None):
return 1.0/random_state.wald(mu, 1.0, size=size)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
class semicircular_gen(rv_continuous):
    r"""A semicircular continuous random variable.
    %(before_notes)s
    See Also
    --------
    rdist
    Notes
    -----
    The probability density function for `semicircular` is:
    .. math::
        f(x) = \frac{2}{\pi} \sqrt{1-x^2}
    for :math:`-1 \le x \le 1`.
    The distribution is a special case of `rdist` with `c = 3`.
    %(after_notes)s
    References
    ----------
    .. [1] "Wigner semicircle distribution",
           https://en.wikipedia.org/wiki/Wigner_semicircle_distribution
    %(example)s
    """
    def _pdf(self, x):
        return 2.0/np.pi*np.sqrt(1-x*x)

    def _logpdf(self, x):
        # log1p(-x*x) keeps precision near the support edges.
        return np.log(2/np.pi) + 0.5*sc.log1p(-x*x)

    def _cdf(self, x):
        return 0.5+1.0/np.pi*(x*np.sqrt(1-x*x) + np.arcsin(x))

    def _ppf(self, q):
        # Shares its quantile function with rdist(c=3).
        return rdist._ppf(q, 3)

    def _rvs(self, size=None, random_state=None):
        # Sample a point uniformly from the half-disc under the pdf and
        # keep its abscissa: radius ~ sqrt(U), angle uniform over [0, pi].
        radius = np.sqrt(random_state.uniform(size=size))
        abscissa = np.cos(np.pi * random_state.uniform(size=size))
        return radius * abscissa

    def _stats(self):
        return 0, 0.25, 0, -1.0

    def _entropy(self):
        return 0.64472988584940017414

semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
class skewcauchy_gen(rv_continuous):
    r"""A skewed Cauchy random variable.
    %(before_notes)s
    See Also
    --------
    cauchy : Cauchy distribution
    Notes
    -----
    The probability density function for `skewcauchy` is:
    .. math::
        f(x) = \frac{1}{\pi \left(\frac{x^2}{\left(a\, \text{sign}(x) + 1
                                                   \right)^2} + 1 \right)}
    for a real number :math:`x` and skewness parameter :math:`-1 < a < 1`.
    When :math:`a=0`, the distribution reduces to the usual Cauchy
    distribution.
    %(after_notes)s
    References
    ----------
    .. [1] "Skewed generalized *t* distribution", Wikipedia
       https://en.wikipedia.org/wiki/Skewed_generalized_t_distribution#Skewed_Cauchy_distribution
    %(example)s
    """
    def _argcheck(self, a):
        return np.abs(a) < 1

    def _pdf(self, x, a):
        # Side-dependent scale: (1+a) on the right, (1-a) on the left.
        side_scale = a * np.sign(x) + 1
        return 1 / (np.pi * (x**2 / side_scale**2 + 1))

    def _cdf(self, x, a):
        left_branch = (1 - a) / 2 + (1 - a) / np.pi * np.arctan(x / (1 - a))
        right_branch = (1 - a) / 2 + (1 + a) / np.pi * np.arctan(x / (1 + a))
        return np.where(x <= 0, left_branch, right_branch)

    def _ppf(self, x, a):
        # Invert whichever CDF branch the probability x falls in.
        below_mode = x < self._cdf(0, a)
        left_branch = np.tan(np.pi / (1 - a) * (x - (1 - a) / 2)) * (1 - a)
        right_branch = np.tan(np.pi / (1 + a) * (x - (1 - a) / 2)) * (1 + a)
        return np.where(below_mode, left_branch, right_branch)

    def _stats(self, a, moments='mvsk'):
        # No moments exist: Cauchy-type heavy tails on both sides.
        return np.nan, np.nan, np.nan, np.nan

    def _fitstart(self, data):
        # Use 0 as the initial guess of the skewness shape parameter.
        # Location and scale are estimated from the median and the
        # interquartile half-range.
        q1, med, q3 = np.percentile(data, [25, 50, 75])
        return 0.0, med, (q3 - q1)/2

skewcauchy = skewcauchy_gen(name='skewcauchy')
class skew_norm_gen(rv_continuous):
    r"""A skew-normal random variable.
    %(before_notes)s
    Notes
    -----
    The pdf is::
        skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x)
    `skewnorm` takes a real number :math:`a` as a skewness parameter
    When ``a = 0`` the distribution is identical to a normal distribution
    (`norm`). `rvs` implements the method of [1]_.
    %(after_notes)s
    %(example)s
    References
    ----------
    .. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of the
        multivariate skew-normal distribution. J. Roy. Statist. Soc., B 61, 579-602.
        :arxiv:`0911.2093`
    """
    def _argcheck(self, a):
        # Any finite skewness is admissible.
        return np.isfinite(a)

    def _pdf(self, x, a):
        return 2.*_norm_pdf(x)*_norm_cdf(a*x)

    def _cdf_single(self, x, *args):
        _a, _b = self._get_support(*args)
        # Split the quadrature at 0 for better accuracy when x > 0.
        if x <= 0:
            cdf = integrate.quad(self._pdf, _a, x, args=args)[0]
        else:
            cdf = (integrate.quad(self._pdf, _a, 0, args=args)[0]
                   + integrate.quad(self._pdf, 0, x, args=args)[0])
        # Clip numerical noise such as 1.0000000000000002.
        return 1.0 if cdf > 1 else cdf

    def _sf(self, x, a):
        # Reflection identity: sf(x, a) = cdf(-x, -a).
        return self._cdf(-x, -a)

    def _rvs(self, a, size=None, random_state=None):
        # Conditioning representation of Azzalini & Capitanio (1999).
        u0 = random_state.normal(size=size)
        v = random_state.normal(size=size)
        delta = a/np.sqrt(1 + a**2)
        u1 = delta*u0 + v*np.sqrt(1 - delta**2)
        return np.where(u0 >= 0, u1, -u1)

    def _stats(self, a, moments='mvsk'):
        out = [None] * 4
        delta_term = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2)
        if 'm' in moments:
            out[0] = delta_term
        if 'v' in moments:
            out[1] = 1 - delta_term**2
        if 's' in moments:
            out[2] = ((4 - np.pi)/2) * (delta_term/np.sqrt(1 - delta_term**2))**3
        if 'k' in moments:
            out[3] = (2*(np.pi - 3)) * (delta_term**4/(1 - delta_term**2)**2)
        return out

skewnorm = skew_norm_gen(name='skewnorm')
class trapezoid_gen(rv_continuous):
r"""A trapezoidal continuous random variable.
%(before_notes)s
Notes
-----
The trapezoidal distribution can be represented with an up-sloping line
from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)``
and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``. This
defines the trapezoid base from ``loc`` to ``(loc+scale)`` and the flat
top from ``c`` to ``d`` proportional to the position along the base
with ``0 <= c <= d <= 1``. When ``c=d``, this is equivalent to `triang`
with the same values for `loc`, `scale` and `c`.
The method of [1]_ is used for computing moments.
`trapezoid` takes :math:`c` and :math:`d` as shape parameters.
%(after_notes)s
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
References
----------
.. [1] Kacker, R.N. and Lawrence, J.F. (2007). Trapezoidal and triangular
distributions for Type B evaluation of standard uncertainty.
Metrologia 44, 117-127. :doi:`10.1088/0026-1394/44/2/003`
"""
def _argcheck(self, c, d):
return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c)
def _pdf(self, x, c, d):
u = 2 / (d-c+1)
return _lazyselect([x < c,
(c <= x) & (x <= d),
x > d],
[lambda x, c, d, u: u * x / c,
lambda x, c, d, u: u,
lambda x, c, d, u: u * (1-x) / (1-d)],
(x, c, d, u))
def _cdf(self, x, c, d):
return _lazyselect([x < c,
(c <= x) & (x <= d),
x > d],
[lambda x, c, d: x**2 / c / (d-c+1),
lambda x, c, d: (c + 2 * (x-c)) / (d-c+1),
lambda x, c, d: 1-((1-x) ** 2
/ (d-c+1) / (1-d))],
(x, c, d))
def _ppf(self, q, c, d):
qc, qd = self._cdf(c, c, d), self._cdf(d, c, d)
condlist = [q < qc, q <= qd, q > qd]
choicelist = [np.sqrt(q * c * (1 + d - c)),
0.5 * q * (1 + d - c) + 0.5 * c,
1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))]
return np.select(condlist, choicelist)
def _munp(self, n, c, d):
# Using the parameterization from Kacker, 2007, with
# a=bottom left, c=top left, d=top right, b=bottom right, then
# E[X^n] = h/(n+1)/(n+2) [(b^{n+2}-d^{n+2})/(b-d)
# - ((c^{n+2} - a^{n+2})/(c-a)]
# with h = 2/((b-a) - (d-c)). The corresponding parameterization
# in scipy, has a'=loc, c'=loc+c*scale, d'=loc+d*scale, b'=loc+scale,
# which for standard form reduces to a'=0, b'=1, c'=c, d'=d.
# Substituting into E[X^n] gives the bd' term as (1 - d^{n+2})/(1 - d)
# and the ac' term as c^{n-1} for the standard form. The bd' term has
# numerical difficulties near d=1, so replace (1 - d^{n+2})/(1-d)
# with expm1((n+2)*log(d))/(d-1).
# Testing with n=18 for c=(1e-30,1-eps) shows that this is stable.
# We still require an explicit test for d=1 to prevent divide by zero,
# and now a test for d=0 to prevent log(0).
ab_term = c**(n+1)
dc_term = _lazyselect(
[d == 0.0, (0.0 < d) & (d < 1.0), d == 1.0],
[lambda d: 1.0,
lambda d: np.expm1((n+2) * np.log(d)) / (d-1.0),
lambda d: n+2],
[d])
val = 2.0 / (1.0+d-c) * (dc_term - ab_term) / ((n+1) * (n+2))
return val
def _entropy(self, c, d):
# Using the parameterization from Wikipedia (van Dorp, 2003)
# with a=bottom left, c=top left, d=top right, b=bottom right
# gives a'=loc, b'=loc+c*scale, c'=loc+d*scale, d'=loc+scale,
# which for loc=0, scale=1 is a'=0, b'=c, c'=d, d'=1.
# Substituting into the entropy formula from Wikipedia gives
# the following result.
return 0.5 * (1.0-d+c) / (1.0+d-c) + np.log(0.5 * (1.0+d-c))
# Frozen standard-form instance on [0, 1].
trapezoid = trapezoid_gen(a=0.0, b=1.0, name="trapezoid")
# Note: alias kept for backwards compatibility. Rename was done
# because trapz is a slur in colloquial English (see gh-12924).
trapz = trapezoid_gen(a=0.0, b=1.0, name="trapz")
if trapz.__doc__:
    # Docstrings may be stripped (python -OO); guard before overwriting.
    trapz.__doc__ = "trapz is an alias for `trapezoid`"
class triang_gen(rv_continuous):
    r"""A triangular continuous random variable.
    %(before_notes)s
    Notes
    -----
    The triangular distribution can be represented with an up-sloping line from
    ``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
    to ``(loc + scale)``.
    `triang` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    The standard form is in the range [0, 1] with c the mode.
    The location parameter shifts the start to `loc`.
    The scale parameter changes the width from 1 to `scale`.
    %(example)s
    """
    def _rvs(self, c, size=None, random_state=None):
        # numpy's generator implements the triangular sampler directly.
        return random_state.triangular(0, c, 1, size)
    def _argcheck(self, c):
        # The mode must lie inside the closed support [0, 1].
        return (c >= 0) & (c <= 1)
    def _pdf(self, x, c):
        # 0: edge case where c=0
        # 1: generalised case for x < c, don't use x <= c, as it doesn't cope
        # with c = 0.
        # 2: generalised case for x >= c, but doesn't cope with c = 1
        # 3: edge case where c=1
        r = _lazyselect([c == 0,
                         x < c,
                         (x >= c) & (c != 1),
                         c == 1],
                        [lambda x, c: 2 - 2 * x,
                         lambda x, c: 2 * x / c,
                         lambda x, c: 2 * (1 - x) / (1 - c),
                         lambda x, c: 2 * x],
                        (x, c))
        return r
    def _cdf(self, x, c):
        # Same four-way split as _pdf (see the comments there).
        r = _lazyselect([c == 0,
                         x < c,
                         (x >= c) & (c != 1),
                         c == 1],
                        [lambda x, c: 2*x - x*x,
                         lambda x, c: x * x / c,
                         lambda x, c: (x*x - 2*x + c) / (c-1),
                         lambda x, c: x * x],
                        (x, c))
        return r
    def _ppf(self, q, c):
        # Invert the two quadratic CDF branches; q < c selects the rising leg.
        return np.where(q < c, np.sqrt(c * q), 1-np.sqrt((1-c) * (1-q)))
    def _stats(self, c):
        # Closed-form mean, variance, skewness and (constant) excess kurtosis.
        return ((c+1.0)/3.0,
                (1.0-c+c*c)/18,
                np.sqrt(2)*(2*c-1)*(c+1)*(c-2) / (5*np.power((1.0-c+c*c), 1.5)),
                -3.0/5.0)
    def _entropy(self, c):
        # The differential entropy of a triangular distribution on [0, 1]
        # is 1/2 - log(2), independent of the mode c.
        return 0.5-np.log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang")
class truncexpon_gen(rv_continuous):
    r"""A truncated exponential continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `truncexpon` is:
    .. math::
        f(x, b) = \frac{\exp(-x)}{1 - \exp(-b)}
    for :math:`0 <= x <= b`.
    `truncexpon` takes ``b`` as a shape parameter for :math:`b`.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, b):
        # The truncation point must be strictly positive.
        return b > 0
    def _get_support(self, b):
        # Support is [self.a, b]; the lower bound comes from the constructor.
        return self.a, b
    def _pdf(self, x, b):
        # truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
        # -expm1(-b) computes 1-exp(-b) without cancellation for small b.
        return np.exp(-x)/(-sc.expm1(-b))
    def _logpdf(self, x, b):
        return -x - np.log(-sc.expm1(-b))
    def _cdf(self, x, b):
        return sc.expm1(-x)/sc.expm1(-b)
    def _ppf(self, q, b):
        return -sc.log1p(q*sc.expm1(-b))
    def _munp(self, n, b):
        # wrong answer with formula, same as in continuous.pdf
        # return sc.gamma(n+1) - sc.gammainc(n+1, b)
        if n == 1:
            return (1-(b+1)*np.exp(-b))/(-sc.expm1(-b))
        elif n == 2:
            return 2*(1-0.5*(b*b+2*b+2)*np.exp(-b))/(-sc.expm1(-b))
        else:
            # return generic for higher moments
            # return rv_continuous._mom1_sc(self, n, b)
            return self._mom1_sc(n, b)
    def _entropy(self, b):
        eB = np.exp(b)
        return np.log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
# Truncation points beyond this many standard deviations are treated as
# unreachable tails (the corresponding normal mass underflows to zero).
TRUNCNORM_TAIL_X = 30
# Iteration cap for the Brent root solves in _truncnorm_ppf_scalar.
TRUNCNORM_MAX_BRENT_ITERS = 40
def _truncnorm_get_delta_scalar(a, b):
    """Return the standard-normal probability mass on [a, b] (scalars).

    Returns 0 when the interval lies entirely in a far tail, where the
    mass underflows anyway.
    """
    if a > TRUNCNORM_TAIL_X or b < -TRUNCNORM_TAIL_X:
        return 0
    # Subtract in whichever tail keeps the difference well conditioned:
    # survival functions on the right, CDFs on the left.
    if a > 0:
        mass = _norm_sf(a) - _norm_sf(b)
    else:
        mass = _norm_cdf(b) - _norm_cdf(a)
    # Clip tiny negative round-off to zero.
    return max(mass, 0)
def _truncnorm_get_delta(a, b):
    """Vectorized counterpart of _truncnorm_get_delta_scalar."""
    if np.isscalar(a) and np.isscalar(b):
        return _truncnorm_get_delta_scalar(a, b)
    a, b = np.atleast_1d(a), np.atleast_1d(b)
    if a.size == 1 and b.size == 1:
        return _truncnorm_get_delta_scalar(a.item(), b.item())
    delta = np.zeros(np.shape(a))
    # Entries outside this mask lie entirely in a far tail; leave them 0.
    condinner = (a <= TRUNCNORM_TAIL_X) & (b >= -TRUNCNORM_TAIL_X)
    # Use survival functions in the right tail, CDFs otherwise, whichever
    # keeps the subtraction well conditioned.
    conda = (a > 0) & condinner
    condb = (a <= 0) & condinner
    if np.any(conda):
        np.place(delta, conda, _norm_sf(a[conda]) - _norm_sf(b[conda]))
    if np.any(condb):
        np.place(delta, condb, _norm_cdf(b[condb]) - _norm_cdf(a[condb]))
    # Clip tiny negative round-off to zero.
    delta[delta < 0] = 0
    return delta
def _truncnorm_get_logdelta_scalar(a, b):
    """Log of the standard-normal probability mass on [a, b] (scalars).

    Falls back to a pure log-space computation when the direct mass
    underflows to zero.
    """
    if (a <= TRUNCNORM_TAIL_X) and (b >= -TRUNCNORM_TAIL_X):
        if a > 0:
            delta = _norm_sf(a) - _norm_sf(b)
        else:
            delta = _norm_cdf(b) - _norm_cdf(a)
        delta = max(delta, 0)
        if delta > 0:
            return np.log(delta)
    # delta underflowed (or the interval is far out in a tail): compute
    # log(Phi(b) - Phi(a)) entirely in log space, anchoring on whichever
    # endpoint carries the larger log-probability magnitude.
    if b < 0 or (np.abs(a) >= np.abs(b)):
        nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
        logdelta = nlb + np.log1p(-np.exp(nla - nlb))
    else:
        sla, slb = _norm_logsf(a), _norm_logsf(b)
        logdelta = sla + np.log1p(-np.exp(slb - sla))
    return logdelta
def _truncnorm_logpdf_scalar(x, a, b):
    """log-PDF of the standard normal truncated to [a, b]; a, b scalars."""
    with np.errstate(invalid='ignore'):
        # Scalar fast paths for points outside the support.
        if np.isscalar(x):
            if x < a:
                return -np.inf
            if x > b:
                return -np.inf
        shp = np.shape(x)
        x = np.atleast_1d(x)
        out = np.full_like(x, np.nan, dtype=np.double)
        condlta, condgtb = (x < a), (x > b)
        if np.any(condlta):
            np.place(out, condlta, -np.inf)
        if np.any(condgtb):
            np.place(out, condgtb, -np.inf)
        cond_inner = ~condlta & ~condgtb
        if np.any(cond_inner):
            # log pdf = log phi(x) - log(Phi(b) - Phi(a))
            _logdelta = _truncnorm_get_logdelta_scalar(a, b)
            np.place(out, cond_inner, _norm_logpdf(x[cond_inner]) - _logdelta)
        # Preserve scalar-in/scalar-out behaviour.
        return (out[0] if (shp == ()) else out)
def _truncnorm_pdf_scalar(x, a, b):
    """PDF of the standard normal truncated to [a, b]; a, b scalars."""
    with np.errstate(invalid='ignore'):
        # Scalar fast paths for points outside the support.
        if np.isscalar(x):
            if x < a:
                return 0.0
            if x > b:
                return 0.0
        shp = np.shape(x)
        x = np.atleast_1d(x)
        out = np.full_like(x, np.nan, dtype=np.double)
        condlta, condgtb = (x < a), (x > b)
        if np.any(condlta):
            np.place(out, condlta, 0.0)
        if np.any(condgtb):
            np.place(out, condgtb, 0.0)
        cond_inner = ~condlta & ~condgtb
        if np.any(cond_inner):
            delta = _truncnorm_get_delta_scalar(a, b)
            if delta > 0:
                np.place(out, cond_inner, _norm_pdf(x[cond_inner]) / delta)
            else:
                # Mass underflowed: fall back to the log-space computation.
                np.place(out, cond_inner,
                         np.exp(_truncnorm_logpdf_scalar(x[cond_inner], a, b)))
        # Preserve scalar-in/scalar-out behaviour.
        return (out[0] if (shp == ()) else out)
def _truncnorm_logcdf_scalar(x, a, b):
    """log-CDF of the standard normal truncated to [a, b]; a, b scalars."""
    with np.errstate(invalid='ignore'):
        # Scalar fast paths for points outside the support.
        if np.isscalar(x):
            if x <= a:
                return -np.inf
            if x >= b:
                return 0
        shp = np.shape(x)
        x = np.atleast_1d(x)
        out = np.full_like(x, np.nan, dtype=np.double)
        condlea, condgeb = (x <= a), (x >= b)
        if np.any(condlea):
            np.place(out, condlea, -np.inf)
        if np.any(condgeb):
            np.place(out, condgeb, 0.0)
        cond_inner = ~condlea & ~condgeb
        if np.any(cond_inner):
            delta = _truncnorm_get_delta_scalar(a, b)
            if delta > 0:
                np.place(out, cond_inner,
                         np.log((_norm_cdf(x[cond_inner]) - _norm_cdf(a))
                                / delta))
            else:
                # Mass underflowed: compute entirely in log space, from the
                # left tail (log-CDF) or right tail (log-SF), whichever is
                # better conditioned.
                with np.errstate(divide='ignore'):
                    if a < 0:
                        nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
                        tab = np.log1p(-np.exp(nla - nlb))
                        nlx = _norm_logcdf(x[cond_inner])
                        tax = np.log1p(-np.exp(nla - nlx))
                        np.place(out, cond_inner, nlx + tax - (nlb + tab))
                    else:
                        sla = _norm_logsf(a)
                        slb = _norm_logsf(b)
                        np.place(out, cond_inner,
                                 np.log1p(-np.exp(_norm_logsf(x[cond_inner])
                                                  - sla))
                                 - np.log1p(-np.exp(slb - sla)))
        return (out[0] if (shp == ()) else out)
def _truncnorm_cdf_scalar(x, a, b):
    """CDF of the standard normal truncated to [a, b]; a, b scalars."""
    with np.errstate(invalid='ignore'):
        # Scalar fast paths for points outside the support (-0 == 0).
        if np.isscalar(x):
            if x <= a:
                return -0
            if x >= b:
                return 1
        shp = np.shape(x)
        x = np.atleast_1d(x)
        out = np.full_like(x, np.nan, dtype=np.double)
        condlea, condgeb = (x <= a), (x >= b)
        if np.any(condlea):
            np.place(out, condlea, 0)
        if np.any(condgeb):
            np.place(out, condgeb, 1.0)
        cond_inner = ~condlea & ~condgeb
        if np.any(cond_inner):
            delta = _truncnorm_get_delta_scalar(a, b)
            if delta > 0:
                np.place(out, cond_inner,
                         (_norm_cdf(x[cond_inner]) - _norm_cdf(a)) / delta)
            else:
                # Mass underflowed: exponentiate the log-space computation.
                with np.errstate(divide='ignore'):
                    np.place(out, cond_inner,
                             np.exp(_truncnorm_logcdf_scalar(x[cond_inner],
                                                             a, b)))
        return (out[0] if (shp == ()) else out)
def _truncnorm_logsf_scalar(x, a, b):
    """log-SF of the standard normal truncated to [a, b]; a, b scalars."""
    with np.errstate(invalid='ignore'):
        # Scalar fast paths for points outside the support.
        if np.isscalar(x):
            if x <= a:
                return 0.0
            if x >= b:
                return -np.inf
        shp = np.shape(x)
        x = np.atleast_1d(x)
        out = np.full_like(x, np.nan, dtype=np.double)
        condlea, condgeb = (x <= a), (x >= b)
        if np.any(condlea):
            np.place(out, condlea, 0)
        if np.any(condgeb):
            np.place(out, condgeb, -np.inf)
        cond_inner = ~condlea & ~condgeb
        if np.any(cond_inner):
            delta = _truncnorm_get_delta_scalar(a, b)
            if delta > 0:
                np.place(out, cond_inner,
                         np.log((_norm_sf(x[cond_inner]) - _norm_sf(b))
                                / delta))
            else:
                # Mass underflowed: mirror image of _truncnorm_logcdf_scalar,
                # anchored on the better-conditioned tail.
                with np.errstate(divide='ignore'):
                    if b < 0:
                        nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
                        np.place(out, cond_inner,
                                 np.log1p(-np.exp(_norm_logcdf(x[cond_inner])
                                                  - nlb))
                                 - np.log1p(-np.exp(nla - nlb)))
                    else:
                        sla, slb = _norm_logsf(a), _norm_logsf(b)
                        tab = np.log1p(-np.exp(slb - sla))
                        slx = _norm_logsf(x[cond_inner])
                        tax = np.log1p(-np.exp(slb - slx))
                        np.place(out, cond_inner, slx + tax - (sla + tab))
        return (out[0] if (shp == ()) else out)
def _truncnorm_sf_scalar(x, a, b):
    """SF of the standard normal truncated to [a, b]; a, b scalars."""
    with np.errstate(invalid='ignore'):
        # Scalar fast paths for points outside the support.
        if np.isscalar(x):
            if x <= a:
                return 1.0
            if x >= b:
                return 0.0
        shp = np.shape(x)
        x = np.atleast_1d(x)
        out = np.full_like(x, np.nan, dtype=np.double)
        condlea, condgeb = (x <= a), (x >= b)
        if np.any(condlea):
            np.place(out, condlea, 1.0)
        if np.any(condgeb):
            np.place(out, condgeb, 0.0)
        cond_inner = ~condlea & ~condgeb
        if np.any(cond_inner):
            delta = _truncnorm_get_delta_scalar(a, b)
            if delta > 0:
                np.place(out, cond_inner,
                         (_norm_sf(x[cond_inner]) - _norm_sf(b)) / delta)
            else:
                # Mass underflowed: exponentiate the log-space computation.
                np.place(out, cond_inner,
                         np.exp(_truncnorm_logsf_scalar(x[cond_inner], a, b)))
        return (out[0] if (shp == ()) else out)
def _norm_logcdfprime(z):
# derivative of special.log_ndtr (See special/cephes/ndtr.c)
# Differentiate formula for log Phi(z)_truncnorm_ppf
# log Phi(z) = -z^2/2 - log(-z) - log(2pi)/2
# + log(1 + sum (-1)^n (2n-1)!! / z^(2n))
# Convergence of series is slow for |z| < 10, but can use
# d(log Phi(z))/dz = dPhi(z)/dz / Phi(z)
# Just take the first 10 terms because that is sufficient for use
# in _norm_ilogcdf
assert np.all(z <= -10)
lhs = -z - 1/z
denom_cons = 1/z**2
numerator = 1
pwr = 1.0
denom_total, numerator_total = 0, 0
sign = -1
for i in range(1, 11):
pwr *= denom_cons
numerator *= 2 * i - 1
term = sign * numerator * pwr
denom_total += term
numerator_total += term * (2 * i) / z
sign = -sign
return lhs - numerator_total / (1 + denom_total)
def _norm_ilogcdf(y):
    """Inverse function to _norm_logcdf==sc.log_ndtr.

    Approximate Newton-Raphson; intended only for very negative y.
    At minimum requires y <= -(log(2pi)+2^2)/2 ~= -2.9; convergence is
    much better for y <= -10.
    """
    # Initial guess from the leading term log Phi(z) ~ -z^2/2 - log(2pi)/2.
    guess = -np.sqrt(-2 * (y + np.log(2*np.pi)/2))
    # Four Newton steps are ample in the intended range.
    for _ in range(4):
        guess = guess - (_norm_logcdf(guess) - y) / _norm_logcdfprime(guess)
    return guess
def _truncnorm_ppf_scalar(q, a, b):
    """Percent-point function (inverse CDF) of the standard normal
    truncated to [a, b], for scalar a, b and scalar or array q.

    While the interval's probability mass is representable, inverts
    directly through _norm_ppf/_norm_isf. Otherwise it works in log
    space: through _norm_ilogcdf when one endpoint is infinite, or a
    Brent root solve on the log-CDF/log-SF when both endpoints are
    finite but far out in one tail.
    """
    shp = np.shape(q)
    q = np.atleast_1d(q)
    out = np.zeros(np.shape(q))
    # Probabilities at/below 0 map to the lower bound, at/above 1 to the
    # upper bound.
    condle0, condge1 = (q <= 0), (q >= 1)
    if np.any(condle0):
        out[condle0] = a
    if np.any(condge1):
        out[condge1] = b
    delta = _truncnorm_get_delta_scalar(a, b)
    cond_inner = ~condle0 & ~condge1
    if np.any(cond_inner):
        qinner = q[cond_inner]
        if delta > 0:
            # Mass did not underflow: invert directly, via the survival
            # function in the right tail for numerical stability.
            if a > 0:
                sa, sb = _norm_sf(a), _norm_sf(b)
                np.place(out, cond_inner,
                         _norm_isf(qinner * sb + sa * (1.0 - qinner)))
            else:
                na, nb = _norm_cdf(a), _norm_cdf(b)
                np.place(out, cond_inner,
                         _norm_ppf(qinner * nb + na * (1.0 - qinner)))
        elif np.isinf(b):
            np.place(out, cond_inner,
                     -_norm_ilogcdf(np.log1p(-qinner) + _norm_logsf(a)))
        elif np.isinf(a):
            # Use qinner (the entries actually being filled), not the full
            # q array: q may contain clamped entries (q <= 0 or q >= 1),
            # which would misalign the values placed under cond_inner and
            # evaluate log(0).
            np.place(out, cond_inner,
                     _norm_ilogcdf(np.log(qinner) + _norm_logcdf(b)))
        else:
            if b < 0:
                # Solve
                # norm_logcdf(x)
                #  = norm_logcdf(a) + log1p(q * (expm1(norm_logcdf(b)
                #                                - norm_logcdf(a)))
                #  = nla + log1p(q * expm1(nlb - nla))
                #  = nlb + log(q) + log1p((1-q) * exp(nla - nlb)/q)
                def _f_cdf(x, c):
                    return _norm_logcdf(x) - c
                nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
                values = nlb + np.log(q[cond_inner])
                C = np.exp(nla - nlb)
                if C:
                    one_minus_q = (1 - q)[cond_inner]
                    values += np.log1p(one_minus_q * C / q[cond_inner])
                x = [optimize._zeros_py.brentq(_f_cdf, a, b, args=(c,),
                                               maxiter=TRUNCNORM_MAX_BRENT_ITERS)
                     for c in values]
                np.place(out, cond_inner, x)
            else:
                # Solve the mirrored problem on the survival function:
                # norm_logsf(x)
                #  = norm_logsf(b) + log1p((1-q) * (expm1(norm_logsf(a)
                #                                   - norm_logsf(b)))
                #  = sla + log(1-q) + log1p(q * exp(slb - sla)/(1-q))
                def _f_sf(x, c):
                    return _norm_logsf(x) - c
                sla, slb = _norm_logsf(a), _norm_logsf(b)
                one_minus_q = (1-q)[cond_inner]
                values = sla + np.log(one_minus_q)
                C = np.exp(slb - sla)
                if C:
                    values += np.log1p(q[cond_inner] * C / one_minus_q)
                x = [optimize._zeros_py.brentq(_f_sf, a, b, args=(c,),
                                               maxiter=TRUNCNORM_MAX_BRENT_ITERS)
                     for c in values]
                np.place(out, cond_inner, x)
    # Clip round-off excursions back into the support.
    out[out < a] = a
    out[out > b] = b
    return (out[0] if (shp == ()) else out)
class truncnorm_gen(rv_continuous):
    r"""A truncated normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The standard form of this distribution is a standard normal truncated to
    the range [a, b] --- notice that a and b are defined over the domain of the
    standard normal. To convert clip values for a specific mean and standard
    deviation, use::
        a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
    `truncnorm` takes :math:`a` and :math:`b` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, a, b):
        # The truncation interval must be non-degenerate.
        return a < b
    def _get_support(self, a, b):
        # The support is exactly the truncation interval.
        return a, b
    def _pdf(self, x, a, b):
        # Scalar shape parameters: go straight to the scalar kernel.
        if np.isscalar(a) and np.isscalar(b):
            return _truncnorm_pdf_scalar(x, a, b)
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        if a.size == 1 and b.size == 1:
            return _truncnorm_pdf_scalar(x, a.item(), b.item())
        # Array shapes: broadcast (x, a, b) elementwise via nditer, which
        # also allocates the output operand.
        it = np.nditer([x, a, b, None], [],
                       [['readonly'], ['readonly'], ['readonly'],
                        ['writeonly', 'allocate']])
        for (_x, _a, _b, _ld) in it:
            _ld[...] = _truncnorm_pdf_scalar(_x, _a, _b)
        return it.operands[3]
    def _logpdf(self, x, a, b):
        # Same dispatch pattern as _pdf, using the log-space kernel.
        if np.isscalar(a) and np.isscalar(b):
            return _truncnorm_logpdf_scalar(x, a, b)
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        if a.size == 1 and b.size == 1:
            return _truncnorm_logpdf_scalar(x, a.item(), b.item())
        it = np.nditer([x, a, b, None], [],
                       [['readonly'], ['readonly'], ['readonly'],
                        ['writeonly', 'allocate']])
        for (_x, _a, _b, _ld) in it:
            _ld[...] = _truncnorm_logpdf_scalar(_x, _a, _b)
        return it.operands[3]
    def _cdf(self, x, a, b):
        # Same dispatch pattern as _pdf, using the CDF kernel.
        if np.isscalar(a) and np.isscalar(b):
            return _truncnorm_cdf_scalar(x, a, b)
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        if a.size == 1 and b.size == 1:
            return _truncnorm_cdf_scalar(x, a.item(), b.item())
        out = None
        it = np.nditer([x, a, b, out], [],
                       [['readonly'], ['readonly'], ['readonly'],
                        ['writeonly', 'allocate']])
        for (_x, _a, _b, _p) in it:
            _p[...] = _truncnorm_cdf_scalar(_x, _a, _b)
        return it.operands[3]
    def _logcdf(self, x, a, b):
        # Same dispatch pattern as _pdf, using the log-CDF kernel.
        if np.isscalar(a) and np.isscalar(b):
            return _truncnorm_logcdf_scalar(x, a, b)
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        if a.size == 1 and b.size == 1:
            return _truncnorm_logcdf_scalar(x, a.item(), b.item())
        it = np.nditer([x, a, b, None], [],
                       [['readonly'], ['readonly'], ['readonly'],
                        ['writeonly', 'allocate']])
        for (_x, _a, _b, _p) in it:
            _p[...] = _truncnorm_logcdf_scalar(_x, _a, _b)
        return it.operands[3]
    def _sf(self, x, a, b):
        # Same dispatch pattern as _pdf, using the survival-function kernel.
        if np.isscalar(a) and np.isscalar(b):
            return _truncnorm_sf_scalar(x, a, b)
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        if a.size == 1 and b.size == 1:
            return _truncnorm_sf_scalar(x, a.item(), b.item())
        out = None
        it = np.nditer([x, a, b, out], [],
                       [['readonly'], ['readonly'], ['readonly'],
                        ['writeonly', 'allocate']])
        for (_x, _a, _b, _p) in it:
            _p[...] = _truncnorm_sf_scalar(_x, _a, _b)
        return it.operands[3]
    def _logsf(self, x, a, b):
        # Same dispatch pattern as _pdf, using the log-SF kernel.
        if np.isscalar(a) and np.isscalar(b):
            return _truncnorm_logsf_scalar(x, a, b)
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        if a.size == 1 and b.size == 1:
            return _truncnorm_logsf_scalar(x, a.item(), b.item())
        out = None
        it = np.nditer([x, a, b, out], [],
                       [['readonly'], ['readonly'], ['readonly'],
                        ['writeonly', 'allocate']])
        for (_x, _a, _b, _p) in it:
            _p[...] = _truncnorm_logsf_scalar(_x, _a, _b)
        return it.operands[3]
    def _ppf(self, q, a, b):
        # Same dispatch pattern as _pdf, using the inverse-CDF kernel.
        if np.isscalar(a) and np.isscalar(b):
            return _truncnorm_ppf_scalar(q, a, b)
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        if a.size == 1 and b.size == 1:
            return _truncnorm_ppf_scalar(q, a.item(), b.item())
        out = None
        it = np.nditer([q, a, b, out], [],
                       [['readonly'], ['readonly'], ['readonly'],
                        ['writeonly', 'allocate']])
        for (_q, _a, _b, _x) in it:
            _x[...] = _truncnorm_ppf_scalar(_q, _a, _b)
        return it.operands[3]
    def _munp(self, n, a, b):
        def n_th_moment(n, a, b):
            """
            Returns n-th moment. Defined only if n >= 0.
            Function cannot broadcast due to the loop over n
            """
            pA, pB = self._pdf([a, b], a, b)
            probs = [pA, -pB]
            # Raw moments follow the recurrence
            #   m_k = (k-1) m_{k-2} + a^{k-1} pdf(a) - b^{k-1} pdf(b).
            moments = [0, 1]
            for k in range(1, n+1):
                # a or b might be infinite, and the corresponding pdf value
                # is 0 in that case, but nan is returned for the
                # multiplication. However, as b->infinity, pdf(b)*b**k -> 0.
                # So it is safe to use _lazywhere to avoid the nan.
                vals = _lazywhere(probs, [probs, [a, b]],
                                  lambda x, y: x * y**(k-1), fillvalue=0)
                mk = np.sum(vals) + (k-1) * moments[-2]
                moments.append(mk)
            return moments[-1]
        # (a == a) and (b == b) filter out nan shape parameters.
        return _lazywhere((n >= 0) & (a == a) & (b == b), (n, a, b),
                          np.vectorize(n_th_moment, otypes=[np.float64]),
                          np.nan)
    def _stats(self, a, b, moments='mv'):
        # pdf values at the truncation points drive all the raw moments.
        pA, pB = self.pdf(np.array([a, b]), a, b)
        def _truncnorm_stats_scalar(a, b, pA, pB, moments):
            m1 = pA - pB
            mu = m1
            # use _lazywhere to avoid nan (See detailed comment in _munp)
            probs = [pA, -pB]
            vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y,
                              fillvalue=0)
            m2 = 1 + np.sum(vals)
            vals = _lazywhere(probs, [probs, [a-mu, b-mu]], lambda x, y: x*y,
                              fillvalue=0)
            # mu2 = m2 - mu**2, but not as numerically stable as:
            # mu2 = (a-mu)*pA - (b-mu)*pB + 1
            mu2 = 1 + np.sum(vals)
            vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y**2,
                              fillvalue=0)
            m3 = 2*m1 + np.sum(vals)
            vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y**3,
                              fillvalue=0)
            m4 = 3*m2 + np.sum(vals)
            # Convert raw moments to central moments, then to skewness g1
            # and excess kurtosis g2.
            mu3 = m3 + m1 * (-3*m2 + 2*m1**2)
            g1 = mu3 / np.power(mu2, 1.5)
            mu4 = m4 + m1*(-4*m3 + 3*m1*(2*m2 - m1**2))
            g2 = mu4 / mu2**2 - 3
            return mu, mu2, g1, g2
        _truncnorm_stats = np.vectorize(_truncnorm_stats_scalar,
                                        excluded=('moments',))
        return _truncnorm_stats(a, b, pA, pB, moments)
    def _rvs(self, a, b, size=None, random_state=None):
        # if a and b are scalar, use _rvs_scalar, otherwise need to create
        # output by iterating over parameters
        if np.isscalar(a) and np.isscalar(b):
            out = self._rvs_scalar(a, b, size, random_state=random_state)
        elif a.size == 1 and b.size == 1:
            out = self._rvs_scalar(a.item(), b.item(), size,
                                   random_state=random_state)
        else:
            # When this method is called, size will be a (possibly empty)
            # tuple of integers. It will not be None; if `size=None` is passed
            # to `rvs()`, size will be the empty tuple ().
            a, b = np.broadcast_arrays(a, b)
            # a and b now have the same shape.
            # `shp` is the shape of the blocks of random variates that are
            # generated for each combination of parameters associated with
            # broadcasting a and b.
            # bc is a tuple the same length as size. The values
            # in bc are bools. If bc[j] is True, it means that
            # entire axis is filled in for a given combination of the
            # broadcast arguments.
            shp, bc = _check_shape(a.shape, size)
            # `numsamples` is the total number of variates to be generated
            # for each combination of the input arguments.
            numsamples = int(np.prod(shp))
            # `out` is the array to be returned. It is filled in in the
            # loop below.
            out = np.empty(size)
            it = np.nditer([a, b],
                           flags=['multi_index'],
                           op_flags=[['readonly'], ['readonly']])
            while not it.finished:
                # Convert the iterator's multi_index into an index into the
                # `out` array where the call to _rvs_scalar() will be stored.
                # Where bc is True, we use a full slice; otherwise we use the
                # index value from it.multi_index. len(it.multi_index) might
                # be less than len(bc), and in that case we want to align these
                # two sequences to the right, so the loop variable j runs from
                # -len(size) to 0. This doesn't cause an IndexError, as
                # bc[j] will be True in those cases where it.multi_index[j]
                # would cause an IndexError.
                idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
                            for j in range(-len(size), 0))
                out[idx] = self._rvs_scalar(it[0], it[1], numsamples,
                                            random_state).reshape(shp)
                it.iternext()
        if size == ():
            out = out.item()
        return out
    def _rvs_scalar(self, a, b, numsamples=None, random_state=None):
        # Inverse-transform sampling: uniform draws pushed through the ppf.
        if not numsamples:
            numsamples = 1
        # prepare sampling of rvs
        size1d = tuple(np.atleast_1d(numsamples))
        N = np.prod(size1d)  # number of rvs needed, reshape upon return
        # Calculate some rvs
        U = random_state.uniform(low=0, high=1, size=N)
        x = self._ppf(U, a, b)
        rvs = np.reshape(x, size1d)
        return rvs
truncnorm = truncnorm_gen(name='truncnorm', momtype=1)
class tukeylambda_gen(rv_continuous):
    r"""A Tukey-Lambda continuous random variable.
    %(before_notes)s
    Notes
    -----
    A flexible distribution, able to represent and interpolate between the
    following distributions:
    - Cauchy (:math:`\lambda = -1`)
    - logistic (:math:`\lambda = 0`)
    - approx Normal (:math:`\lambda = 0.14`)
    - uniform from -1 to 1 (:math:`\lambda = 1`)
    `tukeylambda` takes a real number :math:`\lambda` (denoted ``lam``
    in the implementation) as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, lam):
        # Any real lambda is a valid shape parameter.
        return np.ones(np.shape(lam), dtype=bool)
    def _pdf(self, x, lam):
        # pdf = 1 / (F^(lam-1) + (1-F)^(lam-1)) with F = cdf(x); the density
        # is zero outside |x| < 1/lam when lam > 0 (finite support).
        Fx = np.asarray(sc.tklmbda(x, lam))
        Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0)
        Px = 1.0/np.asarray(Px)
        return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0)
    def _cdf(self, x, lam):
        return sc.tklmbda(x, lam)
    def _ppf(self, q, lam):
        # Quantile (q^lam - (1-q)^lam)/lam, written via Box-Cox transforms
        # for numerical stability near lam = 0.
        return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam)
    def _stats(self, lam):
        # Symmetric about 0: odd moments vanish.
        return 0, _tlvar(lam), 0, _tlkurt(lam)
    def _entropy(self, lam):
        # Numerical integration of log(pdf(ppf(p))^-1) over p in (0, 1).
        def integ(p):
            return np.log(pow(p, lam-1)+pow(1-p, lam-1))
        return integrate.quad(integ, 0, 1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda')
class FitUniformFixedScaleDataError(FitDataError):
    """Raised by `uniform.fit` when `fscale` is fixed but smaller than the
    peak-to-peak spread of the data, making the MLE infeasible."""
    def __init__(self, ptp, fscale):
        message = ("Invalid values in `data`. Maximum likelihood estimation "
                   "with the uniform distribution and fixed scale requires "
                   f"that data.ptp() <= fscale, but data.ptp() = {ptp!r} "
                   f"and fscale = {fscale!r}.")
        self.args = (message,)
class uniform_gen(rv_continuous):
    r"""A uniform continuous random variable.
    In the standard form, the distribution is uniform on ``[0, 1]``. Using
    the parameters ``loc`` and ``scale``, one obtains the uniform distribution
    on ``[loc, loc + scale]``.
    %(before_notes)s
    %(example)s
    """
    def _rvs(self, size=None, random_state=None):
        return random_state.uniform(0.0, 1.0, size)
    def _pdf(self, x):
        # Density is 1 on the support; (x == x) keeps nan inputs nan.
        return 1.0*(x == x)
    def _cdf(self, x):
        return x
    def _ppf(self, q):
        return q
    def _stats(self):
        # mean, variance, skewness, excess kurtosis of U(0, 1).
        return 0.5, 1.0/12, 0, -1.2
    def _entropy(self):
        return 0.0
    @_call_super_mom
    def fit(self, data, *args, **kwds):
        """
        Maximum likelihood estimate for the location and scale parameters.
        `uniform.fit` uses only the following parameters. Because exact
        formulas are used, the parameters related to optimization that are
        available in the `fit` method of other distributions are ignored
        here. The only positional argument accepted is `data`.
        Parameters
        ----------
        data : array_like
            Data to use in calculating the maximum likelihood estimate.
        floc : float, optional
            Hold the location parameter fixed to the specified value.
        fscale : float, optional
            Hold the scale parameter fixed to the specified value.
        Returns
        -------
        loc, scale : float
            Maximum likelihood estimates for the location and scale.
        Notes
        -----
        An error is raised if `floc` is given and any values in `data` are
        less than `floc`, or if `fscale` is given and `fscale` is less
        than ``data.max() - data.min()``. An error is also raised if both
        `floc` and `fscale` are given.
        Examples
        --------
        >>> from scipy.stats import uniform
        We'll fit the uniform distribution to `x`:
        >>> x = np.array([2, 2.5, 3.1, 9.5, 13.0])
        For a uniform distribution MLE, the location is the minimum of the
        data, and the scale is the maximum minus the minimum.
        >>> loc, scale = uniform.fit(x)
        >>> loc
        2.0
        >>> scale
        11.0
        If we know the data comes from a uniform distribution where the support
        starts at 0, we can use `floc=0`:
        >>> loc, scale = uniform.fit(x, floc=0)
        >>> loc
        0.0
        >>> scale
        13.0
        Alternatively, if we know the length of the support is 12, we can use
        `fscale=12`:
        >>> loc, scale = uniform.fit(x, fscale=12)
        >>> loc
        1.5
        >>> scale
        12.0
        In that last example, the support interval is [1.5, 13.5]. This
        solution is not unique. For example, the distribution with ``loc=2``
        and ``scale=12`` has the same likelihood as the one above. When
        `fscale` is given and it is larger than ``data.max() - data.min()``,
        the parameters returned by the `fit` method center the support over
        the interval ``[data.min(), data.max()]``.
        """
        if len(args) > 0:
            raise TypeError("Too many arguments.")
        floc = kwds.pop('floc', None)
        fscale = kwds.pop('fscale', None)
        _remove_optimizer_parameters(kwds)
        if floc is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")
        data = np.asarray(data)
        if not np.isfinite(data).all():
            raise RuntimeError("The data contains non-finite values.")
        # MLE for the uniform distribution
        # --------------------------------
        # The PDF is
        #
        # f(x, loc, scale) = {1/scale  for loc <= x <= loc + scale
        #                    {0 otherwise}
        #
        # The likelihood function is
        #     L(x, loc, scale) = (1/scale)**n
        # where n is len(x), assuming loc <= x <= loc + scale for all x.
        # The log-likelihood is
        #     l(x, loc, scale) = -n*log(scale)
        # The log-likelihood is maximized by making scale as small as possible,
        # while keeping loc <= x <= loc + scale. So if neither loc nor scale
        # are fixed, the log-likelihood is maximized by choosing
        #     loc = x.min()
        #     scale = x.ptp()
        # If loc is fixed, it must be less than or equal to x.min(), and then
        # the scale is
        #     scale = x.max() - loc
        # If scale is fixed, it must not be less than x.ptp(). If scale is
        # greater than x.ptp(), the solution is not unique. Note that the
        # likelihood does not depend on loc, except for the requirement that
        # loc <= x <= loc + scale. All choices of loc for which
        #     x.max() - scale <= loc <= x.min()
        # have the same log-likelihood. In this case, we choose loc such that
        # the support is centered over the interval [data.min(), data.max()]:
        #     loc = x.min() - 0.5*(scale - x.ptp())
        if fscale is None:
            # scale is not fixed.
            if floc is None:
                # loc is not fixed, scale is not fixed.
                loc = data.min()
                scale = data.ptp()
            else:
                # loc is fixed, scale is not fixed.
                loc = floc
                scale = data.max() - loc
                if data.min() < loc:
                    raise FitDataError("uniform", lower=loc, upper=loc + scale)
        else:
            # loc is not fixed, scale is fixed.
            ptp = data.ptp()
            if ptp > fscale:
                raise FitUniformFixedScaleDataError(ptp=ptp, fscale=fscale)
            # If ptp < fscale, the ML estimate is not unique; see the comments
            # above. We choose the distribution for which the support is
            # centered over the interval [data.min(), data.max()].
            loc = data.min() - 0.5*(fscale - ptp)
            scale = fscale
        # We expect the return values to be floating point, so ensure it
        # by explicitly converting to float.
        return float(loc), float(scale)
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
class vonmises_gen(rv_continuous):
    r"""A Von Mises continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `vonmises` and `vonmises_line` is:
    .. math::
        f(x, \kappa) = \frac{ \exp(\kappa \cos(x)) }{ 2 \pi I_0(\kappa) }
    for :math:`-\pi \le x \le \pi`, :math:`\kappa > 0`. :math:`I_0` is the
    modified Bessel function of order zero (`scipy.special.i0`).
    `vonmises` is a circular distribution which does not restrict the
    distribution to a fixed interval. Currently, there is no circular
    distribution framework in scipy. The ``cdf`` is implemented such that
    ``cdf(x + 2*np.pi) == cdf(x) + 1``.
    `vonmises_line` is the same distribution, defined on :math:`[-\pi, \pi]`
    on the real line. This is a regular (i.e. non-circular) distribution.
    `vonmises` and `vonmises_line` take ``kappa`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, kappa, size=None, random_state=None):
        # numpy's generator implements the von Mises sampler directly.
        return random_state.vonmises(0.0, kappa, size=size)
    def _pdf(self, x, kappa):
        # vonmises.pdf(x, kappa) = exp(kappa * cos(x)) / (2*pi*I[0](kappa))
        #                        = exp(kappa * (cos(x) - 1)) /
        #                          (2*pi*exp(-kappa)*I[0](kappa))
        #                        = exp(kappa * cosm1(x)) / (2*pi*i0e(kappa))
        # The cosm1/i0e form avoids overflow of exp(kappa) for large kappa.
        return np.exp(kappa*sc.cosm1(x)) / (2*np.pi*sc.i0e(kappa))
    def _cdf(self, x, kappa):
        return _stats.von_mises_cdf(kappa, x)
    def _stats_skip(self, kappa):
        # Mean and skewness are 0 by symmetry; variance/kurtosis skipped.
        return 0, None, 0, None
    def _entropy(self, kappa):
        return (-kappa * sc.i1(kappa) / sc.i0(kappa) +
                np.log(2 * np.pi * sc.i0(kappa)))
vonmises = vonmises_gen(name='vonmises')
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
class wald_gen(invgauss_gen):
    r"""A Wald continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `wald` is:
    .. math::
        f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp(- \frac{ (x-1)^2 }{ 2x })
    for :math:`x >= 0`.
    `wald` is a special case of `invgauss` with ``mu=1``.
    %(after_notes)s
    %(example)s
    """
    # Support is open at 0 (density is not defined there).
    _support_mask = rv_continuous._open_support_mask
    def _rvs(self, size=None, random_state=None):
        return random_state.wald(1.0, 1.0, size=size)
    def _pdf(self, x):
        # wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
        # All methods below delegate to invgauss with mu fixed at 1.
        return invgauss._pdf(x, 1.0)
    def _cdf(self, x):
        return invgauss._cdf(x, 1.0)
    def _sf(self, x):
        return invgauss._sf(x, 1.0)
    def _logpdf(self, x):
        return invgauss._logpdf(x, 1.0)
    def _logcdf(self, x):
        return invgauss._logcdf(x, 1.0)
    def _logsf(self, x):
        return invgauss._logsf(x, 1.0)
    def _stats(self):
        # mean, variance, skewness, excess kurtosis for mu=1.
        return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald")
class wrapcauchy_gen(rv_continuous):
    r"""A wrapped Cauchy continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `wrapcauchy` is:
    .. math::
        f(x, c) = \frac{1-c^2}{2\pi (1+c^2 - 2c \cos(x))}
    for :math:`0 \le x \le 2\pi`, :math:`0 < c < 1`.
    `wrapcauchy` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, c):
        # The concentration parameter must lie strictly inside (0, 1).
        return (c > 0) & (c < 1)
    def _pdf(self, x, c):
        # wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
        return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x)))
    def _cdf(self, x, c):
        def f1(x, cr):
            # CDF for 0 <= x < pi
            return 1/np.pi * np.arctan(cr*np.tan(x/2))
        def f2(x, cr):
            # CDF for pi <= x <= 2*pi
            return 1 - 1/np.pi * np.arctan(cr*np.tan((2*np.pi - x)/2))
        cr = (1 + c)/(1 - c)
        return _lazywhere(x < np.pi, (x, cr), f=f1, f2=f2)
    def _ppf(self, q, c):
        # Invert the two CDF branches; q < 1/2 corresponds to x < pi.
        val = (1.0-c)/(1.0+c)
        rcq = 2*np.arctan(val*np.tan(np.pi*q))
        rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q)))
        return np.where(q < 1.0/2, rcq, rcmq)
    def _entropy(self, c):
        return np.log(2*np.pi*(1-c*c))
    def _fitstart(self, data):
        # Use 0.5 as the initial guess of the shape parameter.
        # For the location and scale, use the minimum and
        # peak-to-peak/(2*pi), respectively.
        return 0.5, np.min(data), np.ptp(data)/(2*np.pi)
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy')
class gennorm_gen(rv_continuous):
    r"""A generalized normal continuous random variable.
    %(before_notes)s
    See Also
    --------
    laplace : Laplace distribution
    norm : normal distribution
    Notes
    -----
    The probability density function for `gennorm` is [1]_:
    .. math::
        f(x, \beta) = \frac{\beta}{2 \Gamma(1/\beta)} \exp(-|x|^\beta)
    :math:`\Gamma` is the gamma function (`scipy.special.gamma`).
    `gennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
    For :math:`\beta = 1`, it is identical to a Laplace distribution.
    For :math:`\beta = 2`, it is identical to a normal distribution
    (with ``scale=1/sqrt(2)``).
    References
    ----------
    .. [1] "Generalized normal distribution, Version 1",
           https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
    .. [2] Nardon, Martina, and Paolo Pianca. "Simulation techniques for
           generalized Gaussian densities." Journal of Statistical
           Computation and Simulation 79.11 (2009): 1317-1329
    .. [3] Wicklin, Rick. "Simulate data from a generalized Gaussian
           distribution" in The DO Loop blog, September 21, 2016,
           https://blogs.sas.com/content/iml/2016/09/21/simulate-generalized-gaussian-sas.html
    %(example)s
    """
    def _pdf(self, x, beta):
        return np.exp(self._logpdf(x, beta))
    def _logpdf(self, x, beta):
        # log pdf = log(beta/2) - log Gamma(1/beta) - |x|^beta
        return np.log(0.5*beta) - sc.gammaln(1.0/beta) - abs(x)**beta
    def _cdf(self, x, beta):
        c = 0.5 * np.sign(x)
        # evaluating (.5 + c) first prevents numerical cancellation
        return (0.5 + c) - c * sc.gammaincc(1.0/beta, abs(x)**beta)
    def _ppf(self, x, beta):
        c = np.sign(x - 0.5)
        # evaluating (1. + c) first prevents numerical cancellation
        return c * sc.gammainccinv(1.0/beta, (1.0 + c) - 2.0*c*x)**(1.0/beta)
    def _sf(self, x, beta):
        # The distribution is symmetric about 0, so sf(x) = cdf(-x).
        return self._cdf(-x, beta)
    def _isf(self, x, beta):
        return -self._ppf(x, beta)
    def _stats(self, beta):
        # Even moments via ratios of gamma functions; odd moments vanish.
        c1, c3, c5 = sc.gammaln([1.0/beta, 3.0/beta, 5.0/beta])
        return 0., np.exp(c3 - c1), 0., np.exp(c5 + c1 - 2.0*c3) - 3.
    def _entropy(self, beta):
        return 1. / beta - np.log(.5 * beta) + sc.gammaln(1. / beta)
    def _rvs(self, beta, size=None, random_state=None):
        # see [2]_ for the algorithm
        # see [3]_ for reference implementation in SAS
        z = random_state.gamma(1/beta, size=size)
        y = z ** (1/beta)
        # convert y to array to ensure masking support
        y = np.asarray(y)
        # Attach a random sign, exploiting the symmetry of the density.
        mask = random_state.random(size=y.shape) < 0.5
        y[mask] = -y[mask]
        return y
gennorm = gennorm_gen(name='gennorm')
class halfgennorm_gen(rv_continuous):
    r"""The upper half of a generalized normal continuous random variable.
    %(before_notes)s
    See Also
    --------
    gennorm : generalized normal distribution
    expon : exponential distribution
    halfnorm : half normal distribution
    Notes
    -----
    The probability density function for `halfgennorm` is:
    .. math::
        f(x, \beta) = \frac{\beta}{\Gamma(1/\beta)} \exp(-|x|^\beta)
    for :math:`x > 0`. :math:`\Gamma` is the gamma function
    (`scipy.special.gamma`).
    `gennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
    For :math:`\beta = 1`, it is identical to an exponential distribution.
    For :math:`\beta = 2`, it is identical to a half normal distribution
    (with ``scale=1/sqrt(2)``).
    References
    ----------
    .. [1] "Generalized normal distribution, Version 1",
           https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
    %(example)s
    """
    def _pdf(self, x, beta):
        # pdf(x) = beta / gamma(1/beta) * exp(-x**beta), evaluated by
        # exponentiating the log-density for numerical robustness.
        return np.exp(self._logpdf(x, beta))
    def _logpdf(self, x, beta):
        inv_beta = 1.0 / beta
        return np.log(beta) - sc.gammaln(inv_beta) - x**beta
    def _cdf(self, x, beta):
        # Regularized lower incomplete gamma function of x**beta.
        return sc.gammainc(1.0/beta, x**beta)
    def _ppf(self, x, beta):
        inv_beta = 1.0 / beta
        return sc.gammaincinv(inv_beta, x)**inv_beta
    def _sf(self, x, beta):
        # Complementary incomplete gamma gives the survival function.
        return sc.gammaincc(1.0/beta, x**beta)
    def _isf(self, x, beta):
        inv_beta = 1.0 / beta
        return sc.gammainccinv(inv_beta, x)**inv_beta
    def _entropy(self, beta):
        inv_beta = 1.0 / beta
        return inv_beta - np.log(beta) + sc.gammaln(inv_beta)
halfgennorm = halfgennorm_gen(a=0, name='halfgennorm')
class crystalball_gen(rv_continuous):
    r"""
    Crystalball distribution
    %(before_notes)s
    Notes
    -----
    The probability density function for `crystalball` is:
    .. math::
        f(x, \beta, m) = \begin{cases}
                            N \exp(-x^2 / 2),  &\text{for } x > -\beta\\
                            N A (B - x)^{-m}  &\text{for } x \le -\beta
                          \end{cases}
    where :math:`A = (m / |\beta|)^m \exp(-\beta^2 / 2)`,
    :math:`B = m/|\beta| - |\beta|` and :math:`N` is a normalisation constant.
    `crystalball` takes :math:`\beta > 0` and :math:`m > 1` as shape
    parameters. :math:`\beta` defines the point where the pdf changes
    from a power-law to a Gaussian distribution. :math:`m` is the power
    of the power-law tail.
    References
    ----------
    .. [1] "Crystal Ball Function",
           https://en.wikipedia.org/wiki/Crystal_Ball_function
    %(after_notes)s
    .. versionadded:: 0.19.0
    %(example)s
    """
    def _pdf(self, x, beta, m):
        """
        Return PDF of the crystalball function.
                                            --
                                           | exp(-x**2 / 2),  for x > -beta
        crystalball.pdf(x, beta, m) =  N * |
                                           | A * (B - x)**(-m), for x <= -beta
                                            --
        """
        # N normalizes the total mass: 1 / (power-law tail + Gaussian core).
        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
                   _norm_pdf_C * _norm_cdf(beta))
        def rhs(x, beta, m):
            # Gaussian core, used for x > -beta.
            return np.exp(-x**2 / 2)
        def lhs(x, beta, m):
            # Power-law tail, used for x <= -beta.
            return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
                    (m/beta - beta - x)**(-m))
        return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
    def _logpdf(self, x, beta, m):
        """
        Return the log of the PDF of the crystalball function.
        """
        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
                   _norm_pdf_C * _norm_cdf(beta))
        def rhs(x, beta, m):
            # log of the Gaussian core.
            return -x**2/2
        def lhs(x, beta, m):
            # log of the power-law tail, expanded to avoid under/overflow.
            return m*np.log(m/beta) - beta**2/2 - m*np.log(m/beta - beta - x)
        return np.log(N) + _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
    def _cdf(self, x, beta, m):
        """
        Return CDF of the crystalball function
        """
        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
                   _norm_pdf_C * _norm_cdf(beta))
        def rhs(x, beta, m):
            # Tail mass up to -beta plus Gaussian mass from -beta to x.
            return ((m/beta) * np.exp(-beta**2 / 2.0) / (m-1) +
                    _norm_pdf_C * (_norm_cdf(x) - _norm_cdf(-beta)))
        def lhs(x, beta, m):
            # Integrated power-law tail up to x (x <= -beta).
            return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
                    (m/beta - beta - x)**(-m+1) / (m-1))
        return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
    def _ppf(self, p, beta, m):
        # pbeta is the CDF value at the crossover point x = -beta; it
        # decides which branch of the piecewise CDF must be inverted.
        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
                   _norm_pdf_C * _norm_cdf(beta))
        pbeta = N * (m/beta) * np.exp(-beta**2/2) / (m - 1)
        def ppf_less(p, beta, m):
            # Inverse of the power-law branch.
            eb2 = np.exp(-beta**2/2)
            C = (m/beta) * eb2 / (m-1)
            N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
            return (m/beta - beta -
                    ((m - 1)*(m/beta)**(-m)/eb2*p/N)**(1/(1-m)))
        def ppf_greater(p, beta, m):
            # Inverse of the Gaussian branch.
            eb2 = np.exp(-beta**2/2)
            C = (m/beta) * eb2 / (m-1)
            N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
            return _norm_ppf(_norm_cdf(-beta) + (1/_norm_pdf_C)*(p/N - C))
        return _lazywhere(p < pbeta, (p, beta, m), f=ppf_less, f2=ppf_greater)
    def _munp(self, n, beta, m):
        """
        Returns the n-th non-central moment of the crystalball function.
        """
        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
                   _norm_pdf_C * _norm_cdf(beta))
        def n_th_moment(n, beta, m):
            """
            Returns n-th moment. Defined only if n+1 < m
            Function cannot broadcast due to the loop over n
            """
            A = (m/beta)**m * np.exp(-beta**2 / 2.0)
            B = m/beta - beta
            # Gaussian-core contribution to the moment.
            rhs = (2**((n-1)/2.0) * sc.gamma((n+1)/2) *
                   (1.0 + (-1)**n * sc.gammainc((n+1)/2, beta**2 / 2)))
            # Tail contribution via a binomial expansion of (B - x)**(-m).
            lhs = np.zeros(rhs.shape)
            for k in range(n + 1):
                lhs += (sc.binom(n, k) * B**(n-k) * (-1)**k / (m - k - 1) *
                        (m/beta)**(-m + k + 1))
            return A * lhs + rhs
        # Moments with n + 1 >= m diverge; report them as infinite.
        return N * _lazywhere(n + 1 < m, (n, beta, m),
                              np.vectorize(n_th_moment, otypes=[np.float64]),
                              np.inf)
    def _argcheck(self, beta, m):
        """
        Shape parameter bounds are m > 1 and beta > 0.
        """
        return (m > 1) & (beta > 0)
crystalball = crystalball_gen(name='crystalball', longname="A Crystalball Function")
def _argus_phi(chi):
"""
Utility function for the argus distribution used in the pdf, sf and
moment calculation.
Note that for all x > 0:
gammainc(1.5, x**2/2) = 2 * (_norm_cdf(x) - x * _norm_pdf(x) - 0.5).
This can be verified directly by noting that the cdf of Gamma(1.5) can
be written as erf(sqrt(x)) - 2*sqrt(x)*exp(-x)/sqrt(Pi).
We use gammainc instead of the usual definition because it is more precise
for small chi.
"""
return sc.gammainc(1.5, chi**2/2) / 2
class argus_gen(rv_continuous):
    r"""
    Argus distribution
    %(before_notes)s
    Notes
    -----
    The probability density function for `argus` is:
    .. math::
        f(x, \chi) = \frac{\chi^3}{\sqrt{2\pi} \Psi(\chi)} x \sqrt{1-x^2}
                     \exp(-\chi^2 (1 - x^2)/2)
    for :math:`0 < x < 1` and :math:`\chi > 0`, where
    .. math::
        \Psi(\chi) = \Phi(\chi) - \chi \phi(\chi) - 1/2
    with :math:`\Phi` and :math:`\phi` being the CDF and PDF of a standard
    normal distribution, respectively.
    `argus` takes :math:`\chi` as shape a parameter.
    %(after_notes)s
    References
    ----------
    .. [1] "ARGUS distribution",
           https://en.wikipedia.org/wiki/ARGUS_distribution
    .. versionadded:: 0.19.0
    %(example)s
    """
    def _logpdf(self, x, chi):
        # for x = 0 or 1, logpdf returns -np.inf
        with np.errstate(divide='ignore'):
            y = 1.0 - x*x
            # A collects the x-independent log-normalisation terms.
            A = 3*np.log(chi) - _norm_pdf_logC - np.log(_argus_phi(chi))
            return A + np.log(x) + 0.5*np.log1p(-x*x) - chi**2 * y / 2
    def _pdf(self, x, chi):
        # Exponentiate the log-density for numerical stability.
        return np.exp(self._logpdf(x, chi))
    def _cdf(self, x, chi):
        return 1.0 - self._sf(x, chi)
    def _sf(self, x, chi):
        # Closed-form survival function expressed through Psi.
        return _argus_phi(chi * np.sqrt(1 - x**2)) / _argus_phi(chi)
    def _rvs(self, chi, size=None, random_state=None):
        chi = np.asarray(chi)
        if chi.size == 1:
            # Single shape value: draw everything with the scalar sampler.
            out = self._rvs_scalar(chi, numsamples=size,
                                   random_state=random_state)
        else:
            # Broadcast chi against the requested output shape and fill
            # each slice of the output with a scalar-chi draw.
            shp, bc = _check_shape(chi.shape, size)
            numsamples = int(np.prod(shp))
            out = np.empty(size)
            it = np.nditer([chi],
                           flags=['multi_index'],
                           op_flags=[['readonly']])
            while not it.finished:
                idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
                            for j in range(-len(size), 0))
                r = self._rvs_scalar(it[0], numsamples=numsamples,
                                     random_state=random_state)
                out[idx] = r.reshape(shp)
                it.iternext()
        if size == ():
            # Scalar request: unwrap the 0-d array.
            out = out[()]
        return out
    def _rvs_scalar(self, chi, numsamples=None, random_state=None):
        # if chi <= 1.8:
        # use rejection method, see Devroye:
        # Non-Uniform Random Variate Generation, 1986, section II.3.2.
        # write: PDF f(x) = c * g(x) * h(x), where
        # h is [0,1]-valued and g is a density
        # we use two ways to write f
        #
        # Case 1:
        # write g(x) = 3*x*sqrt(1-x**2), h(x) = exp(-chi**2 (1-x**2) / 2)
        # If X has a distribution with density g its ppf G_inv is given by:
        # G_inv(u) = np.sqrt(1 - u**(2/3))
        #
        # Case 2:
        # g(x) = chi**2 * x * exp(-chi**2 * (1-x**2)/2) / (1 - exp(-chi**2 /2))
        # h(x) = sqrt(1 - x**2), 0 <= x <= 1
        # one can show that
        # G_inv(u) = np.sqrt(2*np.log(u*(np.exp(chi**2/2)-1)+1))/chi
        #          = np.sqrt(1 + 2*np.log(np.exp(-chi**2/2)*(1-u)+u)/chi**2)
        # the latter expression is used for precision with small chi
        #
        # In both cases, the inverse cdf of g can be written analytically, and
        # we can apply the rejection method:
        #
        # REPEAT
        #    Generate U uniformly distributed on [0, 1]
        #    Generate X with density g (e.g. via inverse transform sampling:
        #    X = G_inv(V) with V uniformly distributed on [0, 1])
        # UNTIL X <= h(X)
        # RETURN X
        #
        # We use case 1 for chi <= 0.5 as it maintains precision for small chi
        # and case 2 for 0.5 < chi <= 1.8 due to its speed for moderate chi.
        #
        # if chi > 1.8:
        # use relation to the Gamma distribution: if X is ARGUS with parameter
        # chi), then Y = chi**2 * (1 - X**2) / 2 has density proportional to
        # sqrt(u) * exp(-u) on [0, chi**2 / 2], i.e. a Gamma(3/2) distribution
        # conditioned on [0, chi**2 / 2]). Therefore, to sample X from the
        # ARGUS distribution, we sample Y from the gamma distribution, keeping
        # only samples on [0, chi**2 / 2], and apply the inverse
        # transformation X = (1 - 2*Y/chi**2)**(1/2). Since we only
        # look at chi > 1.8, gamma(1.5).cdf(chi**2/2) is large enough such
        # Y falls in the inteval [0, chi**2 / 2] with a high probability:
        # stats.gamma(1.5).cdf(1.8**2/2) = 0.644...
        #
        # The points to switch between the different methods are determined
        # by a comparison of the runtime of the different methods. However,
        # the runtime is platform-dependent. The implemented values should
        # ensure a good overall performance and are supported by an analysis
        # of the rejection constants of different methods.
        size1d = tuple(np.atleast_1d(numsamples))
        N = int(np.prod(size1d))
        x = np.zeros(N)
        simulated = 0
        chi2 = chi * chi
        if chi <= 0.5:
            d = -chi2 / 2
            while simulated < N:
                k = N - simulated
                u = random_state.uniform(size=k)
                v = random_state.uniform(size=k)
                z = v**(2/3)
                # acceptance condition: u <= h(G_inv(v)). This simplifies to
                accept = (np.log(u) <= d * z)
                num_accept = np.sum(accept)
                if num_accept > 0:
                    # we still need to transform z=v**(2/3) to X = G_inv(v)
                    rvs = np.sqrt(1 - z[accept])
                    x[simulated:(simulated + num_accept)] = rvs
                    simulated += num_accept
        elif chi <= 1.8:
            echi = np.exp(-chi2 / 2)
            while simulated < N:
                k = N - simulated
                u = random_state.uniform(size=k)
                v = random_state.uniform(size=k)
                z = 2 * np.log(echi * (1 - v) + v) / chi2
                # as in case one, simplify u <= h(G_inv(v)) and then transform
                # z to the target distribution X = G_inv(v)
                accept = (u**2 + z <= 0)
                num_accept = np.sum(accept)
                if num_accept > 0:
                    rvs = np.sqrt(1 + z[accept])
                    x[simulated:(simulated + num_accept)] = rvs
                    simulated += num_accept
        else:
            # conditional Gamma for chi > 1.8
            while simulated < N:
                k = N - simulated
                g = random_state.standard_gamma(1.5, size=k)
                accept = (g <= chi2 / 2)
                num_accept = np.sum(accept)
                if num_accept > 0:
                    x[simulated:(simulated + num_accept)] = g[accept]
                    simulated += num_accept
            # Map the accepted Gamma draws back to ARGUS variates.
            x = np.sqrt(1 - 2 * x / chi2)
        return np.reshape(x, size1d)
    def _stats(self, chi):
        # need to ensure that dtype is float
        # otherwise the mask below does not work for integers
        chi = np.asarray(chi, dtype=float)
        phi = _argus_phi(chi)
        m = np.sqrt(np.pi/8) * chi * sc.ive(1, chi**2/4) / phi
        # compute second moment, use Taylor expansion for small chi (<= 0.1)
        mu2 = np.empty_like(chi)
        mask = chi > 0.1
        c = chi[mask]
        mu2[mask] = 1 - 3 / c**2 + c * _norm_pdf(c) / phi[mask]
        c = chi[~mask]
        # Polynomial coefficients of the Taylor series, highest power first.
        coef = [-358/65690625, 0, -94/1010625, 0, 2/2625, 0, 6/175, 0, 0.4]
        mu2[~mask] = np.polyval(coef, c)
        return m, mu2 - m**2, None, None
argus = argus_gen(name='argus', longname="An Argus Function", a=0.0, b=1.0)
class rv_histogram(rv_continuous):
    """
    Generates a distribution given by a histogram.
    This is useful to generate a template distribution from a binned
    datasample.
    As a subclass of the `rv_continuous` class, `rv_histogram` inherits from it
    a collection of generic methods (see `rv_continuous` for the full list),
    and implements them based on the properties of the provided binned
    datasample.
    Parameters
    ----------
    histogram : tuple of array_like
      Tuple containing two array_like objects
      The first containing the content of n bins
      The second containing the (n+1) bin boundaries
      In particular the return value np.histogram is accepted
    Notes
    -----
    There are no additional shape parameters except for the loc and scale.
    The pdf is defined as a stepwise function from the provided histogram
    The cdf is a linear interpolation of the pdf.
    .. versionadded:: 0.19.0
    Examples
    --------
    Create a scipy.stats distribution from a numpy histogram
    >>> import scipy.stats
    >>> import numpy as np
    >>> data = scipy.stats.norm.rvs(size=100000, loc=0, scale=1.5, random_state=123)
    >>> hist = np.histogram(data, bins=100)
    >>> hist_dist = scipy.stats.rv_histogram(hist)
    Behaves like an ordinary scipy rv_continuous distribution
    >>> hist_dist.pdf(1.0)
    0.20538577847618705
    >>> hist_dist.cdf(2.0)
    0.90818568543056499
    PDF is zero above (below) the highest (lowest) bin of the histogram,
    defined by the max (min) of the original dataset
    >>> hist_dist.pdf(np.max(data))
    0.0
    >>> hist_dist.cdf(np.max(data))
    1.0
    >>> hist_dist.pdf(np.min(data))
    7.7591907244498314e-05
    >>> hist_dist.cdf(np.min(data))
    0.0
    PDF and CDF follow the histogram
    >>> import matplotlib.pyplot as plt
    >>> X = np.linspace(-5.0, 5.0, 100)
    >>> plt.title("PDF from Template")
    >>> plt.hist(data, density=True, bins=100)
    >>> plt.plot(X, hist_dist.pdf(X), label='PDF')
    >>> plt.plot(X, hist_dist.cdf(X), label='CDF')
    >>> plt.show()
    """
    # Reuse the base-class support mask (it is overridden in some siblings).
    _support_mask = rv_continuous._support_mask
    def __init__(self, histogram, *args, **kwargs):
        """
        Create a new distribution using the given histogram
        Parameters
        ----------
        histogram : tuple of array_like
          Tuple containing two array_like objects
          The first containing the content of n bins
          The second containing the (n+1) bin boundaries
          In particular the return value np.histogram is accepted
        """
        self._histogram = histogram
        if len(histogram) != 2:
            raise ValueError("Expected length 2 for parameter histogram")
        self._hpdf = np.asarray(histogram[0])
        self._hbins = np.asarray(histogram[1])
        if len(self._hpdf) + 1 != len(self._hbins):
            raise ValueError("Number of elements in histogram content "
                             "and histogram boundaries do not match, "
                             "expected n and n+1.")
        self._hbin_widths = self._hbins[1:] - self._hbins[:-1]
        # Normalize bin contents so the histogram integrates to one.
        self._hpdf = self._hpdf / float(np.sum(self._hpdf * self._hbin_widths))
        self._hcdf = np.cumsum(self._hpdf * self._hbin_widths)
        # Pad with zero-density sentinel bins outside the support so that
        # searchsorted-based lookups in _pdf return 0 out of range.
        self._hpdf = np.hstack([0.0, self._hpdf, 0.0])
        self._hcdf = np.hstack([0.0, self._hcdf])
        # Set support
        kwargs['a'] = self.a = self._hbins[0]
        kwargs['b'] = self.b = self._hbins[-1]
        super().__init__(*args, **kwargs)
    def _pdf(self, x):
        """
        PDF of the histogram
        """
        # side='right' maps bin edges to the bin on their right, and maps
        # x outside the support into the zero sentinel bins.
        return self._hpdf[np.searchsorted(self._hbins, x, side='right')]
    def _cdf(self, x):
        """
        CDF calculated from the histogram
        """
        return np.interp(x, self._hbins, self._hcdf)
    def _ppf(self, x):
        """
        Percentile function calculated from the histogram
        """
        # Inverse of the piecewise-linear CDF: swap the interp axes.
        return np.interp(x, self._hcdf, self._hbins)
    def _munp(self, n):
        """Compute the n-th non-central moment."""
        # Exact integral of x**n over each constant-density bin.
        integrals = (self._hbins[1:]**(n+1) - self._hbins[:-1]**(n+1)) / (n+1)
        return np.sum(self._hpdf[1:-1] * integrals)
    def _entropy(self):
        """Compute entropy of distribution"""
        # _lazywhere avoids evaluating log(0) for empty bins.
        res = _lazywhere(self._hpdf[1:-1] > 0.0,
                         (self._hpdf[1:-1],),
                         np.log,
                         0.0)
        return -np.sum(self._hpdf[1:-1] * res * self._hbin_widths)
    def _updated_ctor_param(self):
        """
        Set the histogram as additional constructor argument
        """
        dct = super()._updated_ctor_param()
        dct['histogram'] = self._histogram
        return dct
class studentized_range_gen(rv_continuous):
    r"""A studentized range continuous random variable.
    %(before_notes)s
    See Also
    --------
    t: Student's t distribution
    Notes
    -----
    The probability density function for `studentized_range` is:
    .. math::
         f(x; k, \nu) = \frac{k(k-1)\nu^{\nu/2}}{\Gamma(\nu/2)
                        2^{\nu/2-1}} \int_{0}^{\infty} \int_{-\infty}^{\infty}
                        s^{\nu} e^{-\nu s^2/2} \phi(z) \phi(sx + z)
                        [\Phi(sx + z) - \Phi(z)]^{k-2} \,dz \,ds
    for :math:`x ≥ 0`, :math:`k > 1`, and :math:`\nu > 0`.
    `studentized_range` takes ``k`` for :math:`k` and ``df`` for :math:`\nu`
    as shape parameters.
    When :math:`\nu` exceeds 100,000, an asymptotic approximation (infinite
    degrees of freedom) is used to compute the cumulative distribution
    function [4]_.
    %(after_notes)s
    References
    ----------
    .. [1] "Studentized range distribution",
           https://en.wikipedia.org/wiki/Studentized_range_distribution
    .. [2] Batista, Ben Dêivide, et al. "Externally Studentized Normal Midrange
           Distribution." Ciência e Agrotecnologia, vol. 41, no. 4, 2017, pp.
           378-389., doi:10.1590/1413-70542017414047716.
    .. [3] Harter, H. Leon. "Tables of Range and Studentized Range." The Annals
           of Mathematical Statistics, vol. 31, no. 4, 1960, pp. 1122-1147.
           JSTOR, www.jstor.org/stable/2237810. Accessed 18 Feb. 2021.
    .. [4] Lund, R. E., and J. R. Lund. "Algorithm AS 190: Probabilities and
           Upper Quantiles for the Studentized Range." Journal of the Royal
           Statistical Society. Series C (Applied Statistics), vol. 32, no. 2,
           1983, pp. 204-210. JSTOR, www.jstor.org/stable/2347300. Accessed 18
           Feb. 2021.
    Examples
    --------
    >>> from scipy.stats import studentized_range
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots(1, 1)
    Calculate the first four moments:
    >>> k, df = 3, 10
    >>> mean, var, skew, kurt = studentized_range.stats(k, df, moments='mvsk')
    Display the probability density function (``pdf``):
    >>> x = np.linspace(studentized_range.ppf(0.01, k, df),
    ...                 studentized_range.ppf(0.99, k, df), 100)
    >>> ax.plot(x, studentized_range.pdf(x, k, df),
    ...         'r-', lw=5, alpha=0.6, label='studentized_range pdf')
    Alternatively, the distribution object can be called (as a function)
    to fix the shape, location and scale parameters. This returns a "frozen"
    RV object holding the given parameters fixed.
    Freeze the distribution and display the frozen ``pdf``:
    >>> rv = studentized_range(k, df)
    >>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
    Check accuracy of ``cdf`` and ``ppf``:
    >>> vals = studentized_range.ppf([0.001, 0.5, 0.999], k, df)
    >>> np.allclose([0.001, 0.5, 0.999], studentized_range.cdf(vals, k, df))
    True
    Rather than using (``studentized_range.rvs``) to generate random variates,
    which is very slow for this distribution, we can approximate the inverse
    CDF using an interpolator, and then perform inverse transform sampling
    with this approximate inverse CDF.
    This distribution has an infinite but thin right tail, so we focus our
    attention on the leftmost 99.9 percent.
    >>> a, b = studentized_range.ppf([0, .999], k, df)
    >>> a, b
    0, 7.41058083802274
    >>> from scipy.interpolate import interp1d
    >>> rng = np.random.default_rng()
    >>> xs = np.linspace(a, b, 50)
    >>> cdf = studentized_range.cdf(xs, k, df)
    # Create an interpolant of the inverse CDF
    >>> ppf = interp1d(cdf, xs, fill_value='extrapolate')
    # Perform inverse transform sampling using the interpolant
    >>> r = ppf(rng.uniform(size=1000))
    And compare the histogram:
    >>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
    >>> ax.legend(loc='best', frameon=False)
    >>> plt.show()
    """
    def _argcheck(self, k, df):
        # Valid for more than one group and positive degrees of freedom.
        return (k > 1) & (df > 0)
    def _fitstart(self, data):
        # Default is k=1, but that is not a valid value of the parameter.
        # Use the zero-argument super() form, consistent with the rest of
        # this module (behavior is identical on Python 3).
        return super()._fitstart(data, args=(2, 1))
    def _munp(self, K, k, df):
        # Non-central moments via a triple numerical integral evaluated by
        # a compiled (Cython) integrand through LowLevelCallable.
        cython_symbol = '_studentized_range_moment'
        _a, _b = self._get_support()
        # all three of these are used to create a numpy array so they must
        # be the same shape.
        def _single_moment(K, k, df):
            log_const = _stats._studentized_range_pdf_logconst(k, df)
            arg = [K, k, df, log_const]
            usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
            llc = LowLevelCallable.from_cython(_stats, cython_symbol, usr_data)
            ranges = [(-np.inf, np.inf), (0, np.inf), (_a, _b)]
            opts = dict(epsabs=1e-11, epsrel=1e-12)
            return integrate.nquad(llc, ranges=ranges, opts=opts)[0]
        # frompyfunc broadcasts the scalar integrator over array arguments.
        ufunc = np.frompyfunc(_single_moment, 3, 1)
        return np.float64(ufunc(K, k, df))
    def _pdf(self, x, k, df):
        cython_symbol = '_studentized_range_pdf'
        def _single_pdf(q, k, df):
            log_const = _stats._studentized_range_pdf_logconst(k, df)
            arg = [q, k, df, log_const]
            usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
            llc = LowLevelCallable.from_cython(_stats, cython_symbol, usr_data)
            ranges = [(-np.inf, np.inf), (0, np.inf)]
            opts = dict(epsabs=1e-11, epsrel=1e-12)
            return integrate.nquad(llc, ranges=ranges, opts=opts)[0]
        ufunc = np.frompyfunc(_single_pdf, 3, 1)
        return np.float64(ufunc(x, k, df))
    def _cdf(self, x, k, df):
        def _single_cdf(q, k, df):
            # "When the degrees of freedom V are infinite the probability
            # integral takes [on a] simpler form," and a single asymptotic
            # integral is evaluated rather than the standard double integral.
            # (Lund, Lund, page 205)
            if df < 100000:
                cython_symbol = '_studentized_range_cdf'
                log_const = _stats._studentized_range_cdf_logconst(k, df)
                arg = [q, k, df, log_const]
                usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
                ranges = [(-np.inf, np.inf), (0, np.inf)]
            else:
                cython_symbol = '_studentized_range_cdf_asymptotic'
                arg = [q, k]
                usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
                ranges = [(-np.inf, np.inf)]
            llc = LowLevelCallable.from_cython(_stats, cython_symbol, usr_data)
            opts = dict(epsabs=1e-11, epsrel=1e-12)
            return integrate.nquad(llc, ranges=ranges, opts=opts)[0]
        ufunc = np.frompyfunc(_single_cdf, 3, 1)
        return np.float64(ufunc(x, k, df))
# Public instance; support restricted to the non-negative half line.
studentized_range = studentized_range_gen(name='studentized_range', a=0,
                                          b=np.inf)
# Collect names of classes and objects in this module.
pairs = list(globals().copy().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
# Export both the frozen instances and their generator classes.
__all__ = _distn_names + _distn_gen_names + ['rv_histogram']
|
rgommers/scipy
|
scipy/stats/_continuous_distns.py
|
Python
|
bsd-3-clause
| 280,760
|
[
"CRYSTAL",
"DIRAC",
"Gaussian"
] |
369c968aa68b38dbc2ea9c15d6c48f542b442637ade3d97f11b1398f0c88b9cd
|
"""scandir, a better directory iterator and faster os.walk(), now in the Python 3.5 stdlib
scandir() is a generator version of os.listdir() that returns an
iterator over files in a directory, and also exposes the extra
information most OSes provide while iterating files in a directory
(such as type and stat information).
This module also includes a version of os.walk() that uses scandir()
to speed it up significantly.
See README.md or https://github.com/benhoyt/scandir for rationale and
docs, or read PEP 471 (https://www.python.org/dev/peps/pep-0471/) for
more details on its inclusion into Python 3.5
scandir is released under the new BSD 3-clause license. See
LICENSE.txt for the full license text.
"""
from __future__ import division
from errno import ENOENT
from os import listdir, lstat, stat, strerror
from os.path import join, islink
from stat import S_IFDIR, S_IFLNK, S_IFREG
import collections
import sys
# Prefer the compiled C speedups module when it is available.
try:
    import _scandir
except ImportError:
    _scandir = None
# ctypes is needed for the pure-Python Windows implementation.
try:
    import ctypes
except ImportError:
    ctypes = None
# Warn once at import time when only the slow listdir()-based generic
# fallback is usable (no C extension and no ctypes).
if _scandir is None and ctypes is None:
    import warnings
    warnings.warn("scandir can't find the compiled _scandir C module "
                  "or ctypes, using slow generic fallback")
__version__ = '1.10.1'
__all__ = ['scandir', 'walk']
# Windows FILE_ATTRIBUTE constants for interpreting the
# FIND_DATA.dwFileAttributes member
FILE_ATTRIBUTE_ARCHIVE = 32
FILE_ATTRIBUTE_COMPRESSED = 2048
FILE_ATTRIBUTE_DEVICE = 64
FILE_ATTRIBUTE_DIRECTORY = 16
FILE_ATTRIBUTE_ENCRYPTED = 16384
FILE_ATTRIBUTE_HIDDEN = 2
FILE_ATTRIBUTE_INTEGRITY_STREAM = 32768
FILE_ATTRIBUTE_NORMAL = 128
FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 8192
FILE_ATTRIBUTE_NO_SCRUB_DATA = 131072
FILE_ATTRIBUTE_OFFLINE = 4096
FILE_ATTRIBUTE_READONLY = 1
FILE_ATTRIBUTE_REPARSE_POINT = 1024
FILE_ATTRIBUTE_SPARSE_FILE = 512
FILE_ATTRIBUTE_SYSTEM = 4
FILE_ATTRIBUTE_TEMPORARY = 256
FILE_ATTRIBUTE_VIRTUAL = 65536
# This module supports both Python 2 and 3; branch on the major version.
IS_PY3 = sys.version_info >= (3, 0)
if IS_PY3:
    unicode = str  # Because Python <= 3.2 doesn't have u'unicode' syntax
class GenericDirEntry(object):
    """Portable DirEntry built from a bare filename.

    Used by the generic fallback scandir implementation: stat results are
    fetched lazily on first use and cached for the entry's lifetime.
    """

    __slots__ = ('name', '_stat', '_lstat', '_scandir_path', '_path')

    def __init__(self, scandir_path, name):
        self._scandir_path = scandir_path
        self.name = name
        self._stat = self._lstat = self._path = None

    @property
    def path(self):
        # Join lazily and cache; many callers never touch .path at all.
        cached = self._path
        if cached is None:
            cached = self._path = join(self._scandir_path, self.name)
        return cached

    def stat(self, follow_symlinks=True):
        # Two independent caches: one for stat(), one for lstat().
        if not follow_symlinks:
            if self._lstat is None:
                self._lstat = lstat(self.path)
            return self._lstat
        if self._stat is None:
            self._stat = stat(self.path)
        return self._stat

    # The code duplication below is intentional: this is for slightly
    # better performance on systems that fall back to GenericDirEntry.
    # It avoids an additional attribute lookup and method call, which
    # are relatively slow on CPython.
    def is_dir(self, follow_symlinks=True):
        try:
            mode = self.stat(follow_symlinks=follow_symlinks).st_mode
        except OSError as e:
            if e.errno != ENOENT:
                raise
            return False  # Path doesn't exist or is a broken symlink
        return mode & 0o170000 == S_IFDIR

    def is_file(self, follow_symlinks=True):
        try:
            mode = self.stat(follow_symlinks=follow_symlinks).st_mode
        except OSError as e:
            if e.errno != ENOENT:
                raise
            return False  # Path doesn't exist or is a broken symlink
        return mode & 0o170000 == S_IFREG

    def is_symlink(self):
        try:
            mode = self.stat(follow_symlinks=False).st_mode
        except OSError as e:
            if e.errno != ENOENT:
                raise
            return False  # Path doesn't exist or is a broken symlink
        return mode & 0o170000 == S_IFLNK

    def inode(self):
        return self.stat(follow_symlinks=False).st_ino

    def __str__(self):
        return '<{0}: {1!r}>'.format(self.__class__.__name__, self.name)

    __repr__ = __str__
def _scandir_generic(path=unicode('.')):
    """Like os.listdir(), but yield DirEntry objects instead of returning
    a list of names.
    """
    # Pure-Python fallback: listdir() yields names only, so any type/stat
    # information is fetched lazily by GenericDirEntry when requested.
    for name in listdir(path):
        yield GenericDirEntry(path, name)
if IS_PY3 and sys.platform == 'win32':
    def scandir_generic(path=unicode('.')):
        # Mirror os.scandir()'s behaviour: bytes paths are rejected on
        # Windows under Python 3.
        if isinstance(path, bytes):
            raise TypeError("os.scandir() doesn't support bytes path on Windows, use Unicode instead")
        return _scandir_generic(path)
    scandir_generic.__doc__ = _scandir_generic.__doc__
else:
    scandir_generic = _scandir_generic
# Filled in below by the platform-specific branches when available.
scandir_c = None
scandir_python = None
if sys.platform == 'win32':
    if ctypes is not None:
        from ctypes import wintypes
        # Various constants from windows.h
        INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value
        ERROR_FILE_NOT_FOUND = 2
        ERROR_NO_MORE_FILES = 18
        IO_REPARSE_TAG_SYMLINK = 0xA000000C
        # Number of seconds between 1601-01-01 and 1970-01-01
        SECONDS_BETWEEN_EPOCHS = 11644473600
        kernel32 = ctypes.windll.kernel32
        # ctypes wrappers for (wide string versions of) FindFirstFile,
        # FindNextFile, and FindClose
        FindFirstFile = kernel32.FindFirstFileW
        FindFirstFile.argtypes = [
            wintypes.LPCWSTR,
            ctypes.POINTER(wintypes.WIN32_FIND_DATAW),
        ]
        FindFirstFile.restype = wintypes.HANDLE
        FindNextFile = kernel32.FindNextFileW
        FindNextFile.argtypes = [
            wintypes.HANDLE,
            ctypes.POINTER(wintypes.WIN32_FIND_DATAW),
        ]
        FindNextFile.restype = wintypes.BOOL
        FindClose = kernel32.FindClose
        FindClose.argtypes = [wintypes.HANDLE]
        FindClose.restype = wintypes.BOOL
        # os.stat_result lookalike with the Windows-only extra field
        # st_file_attributes (matching Python 3.5+'s stat_result).
        Win32StatResult = collections.namedtuple('Win32StatResult', [
            'st_mode',
            'st_ino',
            'st_dev',
            'st_nlink',
            'st_uid',
            'st_gid',
            'st_size',
            'st_atime',
            'st_mtime',
            'st_ctime',
            'st_atime_ns',
            'st_mtime_ns',
            'st_ctime_ns',
            'st_file_attributes',
        ])
        def filetime_to_time(filetime):
            """Convert Win32 FILETIME to time since Unix epoch in seconds."""
            # FILETIME counts 100-nanosecond intervals since 1601-01-01,
            # split across two 32-bit halves.
            total = filetime.dwHighDateTime << 32 | filetime.dwLowDateTime
            return total / 10000000 - SECONDS_BETWEEN_EPOCHS
        def find_data_to_stat(data):
            """Convert Win32 FIND_DATA struct to stat_result."""
            # First convert Win32 dwFileAttributes to st_mode
            attributes = data.dwFileAttributes
            st_mode = 0
            if attributes & FILE_ATTRIBUTE_DIRECTORY:
                # Directories are marked searchable (x bits) for everyone.
                st_mode |= S_IFDIR | 0o111
            else:
                st_mode |= S_IFREG
            if attributes & FILE_ATTRIBUTE_READONLY:
                st_mode |= 0o444
            else:
                st_mode |= 0o666
            if (attributes & FILE_ATTRIBUTE_REPARSE_POINT and
                    data.dwReserved0 == IO_REPARSE_TAG_SYMLINK):
                # Clear the file-type bits and re-tag the entry as a symlink.
                st_mode ^= st_mode & 0o170000
                st_mode |= S_IFLNK
            st_size = data.nFileSizeHigh << 32 | data.nFileSizeLow
            st_atime = filetime_to_time(data.ftLastAccessTime)
            st_mtime = filetime_to_time(data.ftLastWriteTime)
            st_ctime = filetime_to_time(data.ftCreationTime)
            # Some fields set to zero per CPython's posixmodule.c: st_ino, st_dev,
            # st_nlink, st_uid, st_gid
            return Win32StatResult(st_mode, 0, 0, 0, 0, 0, st_size,
                                   st_atime, st_mtime, st_ctime,
                                   int(st_atime * 1000000000),
                                   int(st_mtime * 1000000000),
                                   int(st_ctime * 1000000000),
                                   attributes)
        class Win32DirEntryPython(object):
            # DirEntry backed by the FIND_DATA struct that FindFirst/NextFile
            # already produced, so most queries need no extra system calls.
            __slots__ = ('name', '_stat', '_lstat', '_find_data', '_scandir_path', '_path', '_inode')
            def __init__(self, scandir_path, name, find_data):
                self._scandir_path = scandir_path
                self.name = name
                self._stat = None
                self._lstat = None
                self._find_data = find_data
                self._path = None
                self._inode = None
            @property
            def path(self):
                # Join lazily and cache the full path.
                if self._path is None:
                    self._path = join(self._scandir_path, self.name)
                return self._path
            def stat(self, follow_symlinks=True):
                if follow_symlinks:
                    if self._stat is None:
                        if self.is_symlink():
                            # It's a symlink, call link-following stat()
                            self._stat = stat(self.path)
                        else:
                            # Not a symlink, stat is same as lstat value
                            if self._lstat is None:
                                self._lstat = find_data_to_stat(self._find_data)
                            self._stat = self._lstat
                    return self._stat
                else:
                    if self._lstat is None:
                        # Lazily convert to stat object, because it's slow
                        # in Python, and often we only need is_dir() etc
                        self._lstat = find_data_to_stat(self._find_data)
                    return self._lstat
            def is_dir(self, follow_symlinks=True):
                is_symlink = self.is_symlink()
                if follow_symlinks and is_symlink:
                    try:
                        return self.stat().st_mode & 0o170000 == S_IFDIR
                    except OSError as e:
                        if e.errno != ENOENT:
                            raise
                        return False
                elif is_symlink:
                    return False
                else:
                    # Answer directly from the cached FIND_DATA attributes.
                    return (self._find_data.dwFileAttributes &
                            FILE_ATTRIBUTE_DIRECTORY != 0)
            def is_file(self, follow_symlinks=True):
                is_symlink = self.is_symlink()
                if follow_symlinks and is_symlink:
                    try:
                        return self.stat().st_mode & 0o170000 == S_IFREG
                    except OSError as e:
                        if e.errno != ENOENT:
                            raise
                        return False
                elif is_symlink:
                    return False
                else:
                    # Anything that isn't a directory counts as a file here.
                    return (self._find_data.dwFileAttributes &
                            FILE_ATTRIBUTE_DIRECTORY == 0)
            def is_symlink(self):
                # A symlink is a reparse point whose tag says "symlink".
                return (self._find_data.dwFileAttributes &
                        FILE_ATTRIBUTE_REPARSE_POINT != 0 and
                        self._find_data.dwReserved0 == IO_REPARSE_TAG_SYMLINK)
            def inode(self):
                # FIND_DATA carries no inode; fall back to one lstat() call.
                if self._inode is None:
                    self._inode = lstat(self.path).st_ino
                return self._inode
            def __str__(self):
                return '<{0}: {1!r}>'.format(self.__class__.__name__, self.name)
            __repr__ = __str__
        def win_error(error, filename):
            # Build a WindowsError carrying the Win32 error code, its
            # human-readable message, and the offending path.
            exc = WindowsError(error, ctypes.FormatError(error))
            exc.filename = filename
            return exc
def _scandir_python(path=unicode('.')):
    """Like os.listdir(), but yield DirEntry objects instead of returning
    a list of names.
    """
    # Call FindFirstFile and handle errors
    if isinstance(path, bytes):
        is_bytes = True
        # The wide-char Win32 API needs text: decode strictly so bad
        # bytes fail loudly; yielded names are re-encoded below.
        filename = join(path.decode('mbcs', 'strict'), '*.*')
    else:
        is_bytes = False
        filename = join(path, '*.*')
    data = wintypes.WIN32_FIND_DATAW()
    data_p = ctypes.byref(data)
    handle = FindFirstFile(filename, data_p)
    if handle == INVALID_HANDLE_VALUE:
        error = ctypes.GetLastError()
        if error == ERROR_FILE_NOT_FOUND:
            # No files, don't yield anything
            return
        raise win_error(error, path)

    # Call FindNextFile in a loop, stopping when no more files
    try:
        while True:
            # Skip '.' and '..' (current and parent directory), but
            # otherwise yield (filename, stat_result) tuple
            name = data.cFileName
            if name not in ('.', '..'):
                if is_bytes:
                    name = name.encode('mbcs', 'replace')
                yield Win32DirEntryPython(path, name, data)

            # Allocate a fresh struct for each iteration: the previous
            # one is owned by the DirEntry that was just yielded.
            data = wintypes.WIN32_FIND_DATAW()
            data_p = ctypes.byref(data)
            success = FindNextFile(handle, data_p)
            if not success:
                error = ctypes.GetLastError()
                if error == ERROR_NO_MORE_FILES:
                    break
                raise win_error(error, path)
    finally:
        # Always release the find handle, even on error or early close.
        if not FindClose(handle):
            raise win_error(ctypes.GetLastError(), path)
# On Python 3, bytes paths are rejected to match os.scandir()'s Windows
# behaviour; on Python 2 the ctypes generator is used directly.
if IS_PY3:
    def scandir_python(path=unicode('.')):
        if isinstance(path, bytes):
            raise TypeError("os.scandir() doesn't support bytes path on Windows, use Unicode instead")
        return _scandir_python(path)
    scandir_python.__doc__ = _scandir_python.__doc__
else:
    scandir_python = _scandir_python
# Bind the C extension's implementations when it imported successfully.
if _scandir is not None:
    scandir_c = _scandir.scandir
    DirEntry_c = _scandir.DirEntry

# Pick the best available implementation: C extension first, then the
# ctypes version, then the generic (listdir-based) fallback.
if _scandir is not None:
    scandir = scandir_c
    DirEntry = DirEntry_c
elif ctypes is not None:
    scandir = scandir_python
    DirEntry = Win32DirEntryPython
else:
    scandir = scandir_generic
    DirEntry = GenericDirEntry
# Linux, OS X, and BSD implementation
elif sys.platform.startswith(('linux', 'darwin', 'sunos5')) or 'bsd' in sys.platform:
    # Solaris dirents carry no d_type field, so the fast path is skipped.
    have_dirent_d_type = (sys.platform != 'sunos5')

    if ctypes is not None and have_dirent_d_type:
        import ctypes.util

        # Opaque DIR* returned by opendir().
        DIR_p = ctypes.c_void_p

        # Rather annoying how the dirent struct is slightly different on each
        # platform. The only fields we care about are d_name and d_type.
        class Dirent(ctypes.Structure):
            if sys.platform.startswith('linux'):
                _fields_ = (
                    ('d_ino', ctypes.c_ulong),
                    ('d_off', ctypes.c_long),
                    ('d_reclen', ctypes.c_ushort),
                    ('d_type', ctypes.c_byte),
                    ('d_name', ctypes.c_char * 256),
                )
            elif 'openbsd' in sys.platform:
                _fields_ = (
                    ('d_ino', ctypes.c_uint64),
                    ('d_off', ctypes.c_uint64),
                    ('d_reclen', ctypes.c_uint16),
                    ('d_type', ctypes.c_uint8),
                    ('d_namlen', ctypes.c_uint8),
                    ('__d_padding', ctypes.c_uint8 * 4),
                    ('d_name', ctypes.c_char * 256),
                )
            else:
                _fields_ = (
                    ('d_ino', ctypes.c_uint32),  # must be uint32, not ulong
                    ('d_reclen', ctypes.c_ushort),
                    ('d_type', ctypes.c_byte),
                    ('d_namlen', ctypes.c_byte),
                    ('d_name', ctypes.c_char * 256),
                )

        # d_type values (from <dirent.h>).
        DT_UNKNOWN = 0
        DT_DIR = 4
        DT_REG = 8
        DT_LNK = 10

        Dirent_p = ctypes.POINTER(Dirent)
        Dirent_pp = ctypes.POINTER(Dirent_p)

        # use_errno=True so posix_error() can read errno after a failure.
        libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
        opendir = libc.opendir
        opendir.argtypes = [ctypes.c_char_p]
        opendir.restype = DIR_p

        readdir_r = libc.readdir_r
        readdir_r.argtypes = [DIR_p, Dirent_p, Dirent_pp]
        readdir_r.restype = ctypes.c_int

        closedir = libc.closedir
        closedir.argtypes = [DIR_p]
        closedir.restype = ctypes.c_int

        # Used to decode d_name bytes into text for unicode callers.
        file_system_encoding = sys.getfilesystemencoding()
class PosixDirEntry(object):
    """DirEntry built from a dirent's d_type/d_ino, calling stat() lazily.

    d_type answers is_dir/is_file/is_symlink without a syscall except when
    the filesystem reports DT_UNKNOWN, in which case stat()/lstat() are
    used (and cached).
    """
    __slots__ = ('name', '_d_type', '_stat', '_lstat', '_scandir_path', '_path', '_inode')

    def __init__(self, scandir_path, name, d_type, inode):
        self._scandir_path = scandir_path
        self.name = name
        self._d_type = d_type
        self._inode = inode
        self._stat = None   # cached follow-symlinks stat result
        self._lstat = None  # cached no-follow stat result
        self._path = None   # lazily-built full path

    @property
    def path(self):
        # Built lazily and cached; most callers only need `name`.
        if self._path is None:
            self._path = join(self._scandir_path, self.name)
        return self._path

    def stat(self, follow_symlinks=True):
        # Both variants are cached; for non-symlinks the follow result is
        # shared with the lstat() result to avoid a second syscall.
        if follow_symlinks:
            if self._stat is None:
                if self.is_symlink():
                    self._stat = stat(self.path)
                else:
                    if self._lstat is None:
                        self._lstat = lstat(self.path)
                    self._stat = self._lstat
            return self._stat
        else:
            if self._lstat is None:
                self._lstat = lstat(self.path)
            return self._lstat

    def is_dir(self, follow_symlinks=True):
        # DT_UNKNOWN (some filesystems) forces a stat() round-trip; so
        # does following a symlink.
        if (self._d_type == DT_UNKNOWN or
                (follow_symlinks and self.is_symlink())):
            try:
                st = self.stat(follow_symlinks=follow_symlinks)
            except OSError as e:
                if e.errno != ENOENT:
                    raise
                # Entry vanished or dangling symlink: not a directory.
                return False
            # 0o170000 is the S_IFMT mask isolating the file-type bits.
            return st.st_mode & 0o170000 == S_IFDIR
        else:
            return self._d_type == DT_DIR

    def is_file(self, follow_symlinks=True):
        if (self._d_type == DT_UNKNOWN or
                (follow_symlinks and self.is_symlink())):
            try:
                st = self.stat(follow_symlinks=follow_symlinks)
            except OSError as e:
                if e.errno != ENOENT:
                    raise
                return False
            return st.st_mode & 0o170000 == S_IFREG
        else:
            return self._d_type == DT_REG

    def is_symlink(self):
        if self._d_type == DT_UNKNOWN:
            try:
                st = self.stat(follow_symlinks=False)
            except OSError as e:
                if e.errno != ENOENT:
                    raise
                return False
            return st.st_mode & 0o170000 == S_IFLNK
        else:
            return self._d_type == DT_LNK

    def inode(self):
        # d_ino was captured at readdir time, so no syscall is needed.
        return self._inode

    def __str__(self):
        return '<{0}: {1!r}>'.format(self.__class__.__name__, self.name)

    __repr__ = __str__
def posix_error(filename):
    """Build an OSError from the current ctypes errno, attaching filename."""
    errno = ctypes.get_errno()
    exc = OSError(errno, strerror(errno))
    exc.filename = filename
    return exc
def scandir_python(path=unicode('.')):
    """Like os.listdir(), but yield DirEntry objects instead of returning
    a list of names.
    """
    if isinstance(path, bytes):
        opendir_path = path
        is_bytes = True
    else:
        # opendir() takes bytes; yielded names are decoded back below.
        opendir_path = path.encode(file_system_encoding)
        is_bytes = False
    dir_p = opendir(opendir_path)
    if not dir_p:
        raise posix_error(path)
    try:
        result = Dirent_p()
        while True:
            entry = Dirent()
            # readdir_r returns non-zero on error...
            if readdir_r(dir_p, entry, result):
                raise posix_error(path)
            # ...and a NULL result pointer at end of directory.
            if not result:
                break
            name = entry.d_name
            if name not in (b'.', b'..'):
                if not is_bytes:
                    name = name.decode(file_system_encoding)
                yield PosixDirEntry(path, name, entry.d_type, entry.d_ino)
    finally:
        # Always release the DIR handle, even on error or early close.
        if closedir(dir_p):
            raise posix_error(path)
# Bind the C extension's implementations when it imported successfully.
if _scandir is not None:
    scandir_c = _scandir.scandir
    DirEntry_c = _scandir.DirEntry

# Pick the best available implementation: C extension first, then the
# ctypes version (needs dirent d_type support), then the generic fallback.
if _scandir is not None:
    scandir = scandir_c
    DirEntry = DirEntry_c
elif ctypes is not None and have_dirent_d_type:
    scandir = scandir_python
    DirEntry = PosixDirEntry
else:
    scandir = scandir_generic
    DirEntry = GenericDirEntry
# Some other system -- no d_type or stat information
else:
    scandir = scandir_generic
    DirEntry = GenericDirEntry
def _walk(top, topdown=True, onerror=None, followlinks=False):
    """Like Python 3.5's implementation of os.walk() -- faster than
    the pre-Python 3.5 version as it uses scandir() internally.

    Yields (dirpath, dirnames, filenames) tuples, top-down by default.
    """
    dirs = []
    nondirs = []

    # We may not have read permission for top, in which case we can't
    # get a list of the files the directory contains. os.walk
    # always suppressed the exception then, rather than blow up for a
    # minor reason when (say) a thousand readable directories are still
    # left to visit. That logic is copied here.
    try:
        scandir_it = scandir(top)
    except OSError as error:
        if onerror is not None:
            onerror(error)
        return

    while True:
        try:
            try:
                entry = next(scandir_it)
            except StopIteration:
                break
        except OSError as error:
            # The iterator itself can fail mid-listing (e.g. I/O error).
            if onerror is not None:
                onerror(error)
            return

        try:
            is_dir = entry.is_dir()
        except OSError:
            # If is_dir() raises an OSError, consider that the entry is not
            # a directory, same behaviour than os.path.isdir().
            is_dir = False

        if is_dir:
            dirs.append(entry.name)
        else:
            nondirs.append(entry.name)

        if not topdown and is_dir:
            # Bottom-up: recurse into sub-directory, but exclude symlinks to
            # directories if followlinks is False
            if followlinks:
                walk_into = True
            else:
                try:
                    is_symlink = entry.is_symlink()
                except OSError:
                    # If is_symlink() raises an OSError, consider that the
                    # entry is not a symbolic link, same behaviour than
                    # os.path.islink().
                    is_symlink = False
                walk_into = not is_symlink

            if walk_into:
                for entry in walk(entry.path, topdown, onerror, followlinks):
                    yield entry

    # Yield before recursion if going top down
    if topdown:
        yield top, dirs, nondirs

        # Recurse into sub-directories
        for name in dirs:
            new_path = join(top, name)
            # Issue #23605: os.path.islink() is used instead of caching
            # entry.is_symlink() result during the loop on os.scandir() because
            # the caller can replace the directory entry during the "yield"
            # above.
            if followlinks or not islink(new_path):
                for entry in walk(new_path, topdown, onerror, followlinks):
                    yield entry
    else:
        # Yield after recursion if going bottom up
        yield top, dirs, nondirs
# On Python 3 (or any non-Windows platform) _walk is used directly.
if IS_PY3 or sys.platform != 'win32':
    walk = _walk
else:
    # Fix for broken unicode handling on Windows on Python 2.x, see:
    # https://github.com/benhoyt/scandir/issues/54
    file_system_encoding = sys.getfilesystemencoding()

    def walk(top, topdown=True, onerror=None, followlinks=False):
        # Decode a bytes top path so the wide-char Win32 API is used
        # throughout the recursion.
        if isinstance(top, bytes):
            top = top.decode(file_system_encoding)
        return _walk(top, topdown, onerror, followlinks)
|
benhoyt/scandir
|
scandir.py
|
Python
|
bsd-3-clause
| 24,828
|
[
"VisIt"
] |
3d52067007c851dedff29a37f28e192fdb0406cf742ade8533c39bc1701dbf01
|
"""K-means clustering"""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.extmath import row_norms, squared_norm
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from . import _k_means
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
    """Init n_clusters seeds according to k-means++

    Parameters
    -----------
    X: array or sparse matrix, shape (n_samples, n_features)
        The data to pick seeds for. To avoid memory copy, the input data
        should be double precision (dtype=np.float64).

    n_clusters: integer
        The number of seeds to choose

    x_squared_norms: array, shape (n_samples,)
        Squared Euclidean norm of each data point.

    random_state: numpy.RandomState
        The generator used to initialize the centers.

    n_local_trials: integer, optional
        The number of seeding trials for each center (except the first),
        of which the one reducing inertia the most is greedily chosen.
        Set to None to make the number of trials depend logarithmically
        on the number of seeds (2+log(k)); this is the default.

    Notes
    -----
    Selects initial cluster centers for k-mean clustering in a smart way
    to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
    "k-means++: the advantages of careful seeding". ACM-SIAM symposium
    on Discrete algorithms. 2007

    Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
    which is the implementation used in the aforementioned paper.
    """
    n_samples, n_features = X.shape
    centers = np.empty((n_clusters, n_features))
    assert x_squared_norms is not None, 'x_squared_norms None in _k_init'

    # Set the number of local seeding trials if none is given
    if n_local_trials is None:
        # This is what Arthur/Vassilvitskii tried, but did not report
        # specific results for other than mentioning in the conclusion
        # that it helped.
        n_local_trials = 2 + int(np.log(n_clusters))

    # Pick first center randomly
    center_id = random_state.randint(n_samples)
    if sp.issparse(X):
        centers[0] = X[center_id].toarray()
    else:
        centers[0] = X[center_id]

    # Initialize list of closest distances and calculate current potential
    closest_dist_sq = euclidean_distances(
        centers[0], X, Y_norm_squared=x_squared_norms, squared=True)
    current_pot = closest_dist_sq.sum()

    # Pick the remaining n_clusters-1 points
    for c in range(1, n_clusters):
        # Choose center candidates by sampling with probability proportional
        # to the squared distance to the closest existing center
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        # NOTE(review): searchsorted can return len(cumsum) when a rand_val
        # equals the total potential; later scikit-learn versions clip
        # candidate_ids -- confirm whether that edge case matters here.
        candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals)

        # Compute distances to center candidates
        distance_to_candidates = euclidean_distances(
            X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)

        # Decide which candidate is the best
        best_candidate = None
        best_pot = None
        best_dist_sq = None
        for trial in range(n_local_trials):
            # Compute potential when including center candidate
            new_dist_sq = np.minimum(closest_dist_sq,
                                     distance_to_candidates[trial])
            new_pot = new_dist_sq.sum()

            # Store result if it is the best local trial so far
            if (best_candidate is None) or (new_pot < best_pot):
                best_candidate = candidate_ids[trial]
                best_pot = new_pot
                best_dist_sq = new_dist_sq

        # Permanently add best center candidate found in local tries
        if sp.issparse(X):
            centers[c] = X[best_candidate].toarray()
        else:
            centers[c] = X[best_candidate]
        current_pot = best_pot
        closest_dist_sq = best_dist_sq

    return centers
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
            n_init=10, max_iter=300, verbose=False,
            tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
            return_n_iter=False):
    """K-means clustering algorithm.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        The observations to cluster.

    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.

    max_iter : int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.

    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':

        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.

        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.

        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

        If a callable is passed, it should take arguments X, k and
        and a random state and return an initialization.

    precompute_distances : {'auto', True, False}
        Precompute distances (faster but takes more memory).

        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.

        True : always precompute distances

        False : never precompute distances

    tol : float, optional
        The relative increment in the results before declaring convergence.

    verbose : boolean, optional
        Verbosity mode.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    copy_x : boolean, optional
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.

        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    return_n_iter : bool, optional
        Whether or not to return the number of iterations.

    Returns
    -------
    centroid : float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.

    label : integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.

    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).

    best_n_iter: int
        Number of iterations corresponding to the best results.
        Returned only if `return_n_iter` is set to True.
    """
    if n_init <= 0:
        raise ValueError("Invalid number of initializations."
                         " n_init=%d must be bigger than zero." % n_init)
    random_state = check_random_state(random_state)

    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)

    # If the distances are precomputed every job will create a matrix of shape
    # (n_clusters, n_samples). To stop KMeans from eating up memory we only
    # activate this if the created matrix is guaranteed to be under 100MB. 12
    # million entries consume a little under 100MB if they are of type double.
    if precompute_distances == 'auto':
        n_samples = X.shape[0]
        precompute_distances = (n_clusters * n_samples) < 12e6
    elif isinstance(precompute_distances, bool):
        pass
    else:
        raise ValueError("precompute_distances should be 'auto' or True/False"
                         ", but a value of %r was passed" %
                         precompute_distances)

    # subtract of mean of x for more accurate distance computations
    if not sp.issparse(X) or hasattr(init, '__array__'):
        X_mean = X.mean(axis=0)
        if not sp.issparse(X):
            # The copy was already done above
            X -= X_mean

        if hasattr(init, '__array__'):
            # An explicit init array must be shifted by the same mean, and
            # makes repeated inits pointless (they would all be identical).
            init = np.asarray(init).copy()
            init -= X_mean
            if n_init != 1:
                warnings.warn(
                    'Explicit initial center position passed: '
                    'performing only one init in k-means instead of n_init=%d'
                    % n_init, RuntimeWarning, stacklevel=2)
                n_init = 1

    # precompute squared norms of data points
    x_squared_norms = row_norms(X, squared=True)

    best_labels, best_inertia, best_centers = None, None, None
    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # run a k-means once
            labels, inertia, centers, n_iter_ = _kmeans_single(
                X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
                precompute_distances=precompute_distances, tol=tol,
                x_squared_norms=x_squared_norms, random_state=random_state)
            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_labels = labels.copy()
                best_centers = centers.copy()
                best_inertia = inertia
                best_n_iter = n_iter_
    else:
        # parallelisation of k-means runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter,
                                    init=init, verbose=verbose, tol=tol,
                                    precompute_distances=precompute_distances,
                                    x_squared_norms=x_squared_norms,
                                    # Change seed to ensure variety
                                    random_state=seed)
            for seed in seeds)
        # Get results with the lowest inertia
        labels, inertia, centers, n_iters = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_n_iter = n_iters[best]

    if not sp.issparse(X):
        # Undo the mean-centering: restore X in place if requested, and
        # shift the centers back into the original coordinate system.
        if not copy_x:
            X += X_mean
        best_centers += X_mean

    if return_n_iter:
        return best_centers, best_labels, best_inertia, best_n_iter
    else:
        return best_centers, best_labels, best_inertia
def _kmeans_single(X, n_clusters, x_squared_norms, max_iter=300,
                   init='k-means++', verbose=False, random_state=None,
                   tol=1e-4, precompute_distances=True):
    """A single run of k-means, assumes preparation completed prior.

    Parameters
    ----------
    X: array-like of floats, shape (n_samples, n_features)
        The observations to cluster.

    n_clusters: int
        The number of clusters to form as well as the number of
        centroids to generate.

    max_iter: int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.

    init: {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':

        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.

        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.

        If an ndarray is passed, it should be of shape (k, p) and gives
        the initial centers.

        If a callable is passed, it should take arguments X, k and
        and a random state and return an initialization.

    tol: float, optional
        The relative increment in the results before declaring convergence.

    verbose: boolean, optional
        Verbosity mode

    x_squared_norms: array
        Precomputed x_squared_norms.

    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).

    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    Returns
    -------
    centroid: float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.

    label: integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.

    inertia: float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).

    n_iter : int
        Number of iterations run.
    """
    random_state = check_random_state(random_state)

    best_labels, best_inertia, best_centers = None, None, None
    # init
    centers = _init_centroids(X, n_clusters, init, random_state=random_state,
                              x_squared_norms=x_squared_norms)
    if verbose:
        print("Initialization complete")

    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = np.zeros(shape=(X.shape[0],), dtype=np.float64)

    # iterations
    for i in range(max_iter):
        centers_old = centers.copy()
        # labels assignment is also called the E-step of EM
        labels, inertia = \
            _labels_inertia(X, x_squared_norms, centers,
                            precompute_distances=precompute_distances,
                            distances=distances)

        # computation of the means is also called the M-step of EM
        if sp.issparse(X):
            centers = _k_means._centers_sparse(X, labels, n_clusters,
                                               distances)
        else:
            centers = _k_means._centers_dense(X, labels, n_clusters, distances)

        if verbose:
            print("Iteration %2d, inertia %.3f" % (i, inertia))

        # Track the best E-step seen so far (inertia can oscillate).
        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia

        # Converged when the centers barely moved.
        if squared_norm(centers_old - centers) <= tol:
            if verbose:
                print("Converged at iteration %d" % i)
            break

    return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
    """Assign each sample to its closest center via a full distance matrix.

    This will overwrite the 'distances' array in-place (when its length
    matches the number of samples).

    Parameters
    ----------
    X : numpy array, shape (n_sample, n_features)
        Input data.

    x_squared_norms : numpy array, shape (n_samples,)
        Precomputed squared norms of X.

    centers : numpy array, shape (n_clusters, n_features)
        Cluster centers which data is assigned to.

    distances : numpy array, shape (n_samples,)
        Pre-allocated array in which distances are stored.

    Returns
    -------
    labels : numpy array, dtype=np.int, shape (n_samples,)
        Indices of clusters that samples are assigned to.

    inertia : float
        Sum of distances of samples to their closest cluster center.
    """
    n_samples = X.shape[0]

    # One row of squared distances per center, computed in a single call.
    all_distances = euclidean_distances(centers, X, x_squared_norms,
                                        squared=True)

    labels = np.empty(n_samples, dtype=np.int32)
    labels.fill(-1)
    best_dist = np.empty(n_samples)
    best_dist.fill(np.infty)

    # Sweep the rows, keeping a running minimum; the first center to reach
    # a given minimum distance wins ties.
    for center_id, center_distances in enumerate(all_distances):
        labels[center_distances < best_dist] = center_id
        best_dist = np.minimum(center_distances, best_dist)

    if n_samples == distances.shape[0]:
        # distances will be changed in-place
        distances[:] = best_dist

    return labels, best_dist.sum()
def _labels_inertia(X, x_squared_norms, centers,
                    precompute_distances=True, distances=None):
    """E step of the K-means EM algorithm.

    Compute the labels and the inertia of the given samples and centers.
    This will compute the distances in-place.

    Parameters
    ----------
    X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
        The input samples to assign to the labels.

    x_squared_norms: array, shape (n_samples,)
        Precomputed squared euclidean norm of each data point, to speed up
        computations.

    centers: float64 array, shape (k, n_features)
        The cluster centers.

    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).

    distances: float64 array, shape (n_samples,)
        Pre-allocated array to be filled in with each sample's distance
        to the closest center.

    Returns
    -------
    labels: int array of shape(n)
        The resulting assignment

    inertia : float
        Sum of distances of samples to their closest cluster center.
    """
    n_samples = X.shape[0]
    # set the default value of centers to -1 to be able to detect any anomaly
    # easily
    labels = -np.ones(n_samples, np.int32)
    if distances is None:
        # Zero-length sentinel tells the Cython routines to skip filling.
        distances = np.zeros(shape=(0,), dtype=np.float64)
    # distances will be changed in-place
    if sp.issparse(X):
        inertia = _k_means._assign_labels_csr(
            X, x_squared_norms, centers, labels, distances=distances)
    else:
        if precompute_distances:
            return _labels_inertia_precompute_dense(X, x_squared_norms,
                                                    centers, distances)
        inertia = _k_means._assign_labels_array(
            X, x_squared_norms, centers, labels, distances=distances)
    return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
                    init_size=None):
    """Compute the initial centroids

    Parameters
    ----------

    X: array, shape (n_samples, n_features)

    k: int
        number of centroids

    init: {'k-means++', 'random' or ndarray or callable} optional
        Method for initialization

    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    x_squared_norms: array, shape (n_samples,), optional
        Squared euclidean norm of each data point. Pass it if you have it at
        hands already to avoid it being recomputed here. Default: None

    init_size : int, optional
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        only algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than k.

    Returns
    -------
    centers: array, shape(k, n_features)
    """
    random_state = check_random_state(random_state)
    n_samples = X.shape[0]

    if init_size is not None and init_size < n_samples:
        if init_size < k:
            warnings.warn(
                "init_size=%d should be larger than k=%d. "
                "Setting it to 3*k" % (init_size, k),
                RuntimeWarning, stacklevel=2)
            init_size = 3 * k
        # NOTE(review): random_integers is deprecated in newer NumPy in
        # favour of randint(0, n_samples, init_size) -- confirm before
        # upgrading the NumPy requirement.
        init_indices = random_state.random_integers(
            0, n_samples - 1, init_size)
        X = X[init_indices]
        x_squared_norms = x_squared_norms[init_indices]
        n_samples = X.shape[0]
    elif n_samples < k:
        raise ValueError(
            "n_samples=%d should be larger than k=%d" % (n_samples, k))

    if init == 'k-means++':
        centers = _k_init(X, k, random_state=random_state,
                          x_squared_norms=x_squared_norms)
    elif init == 'random':
        seeds = random_state.permutation(n_samples)[:k]
        centers = X[seeds]
    elif hasattr(init, '__array__'):
        centers = init
    elif callable(init):
        centers = init(X, k, random_state=random_state)
    else:
        raise ValueError("the init parameter for the k-means should "
                         "be 'k-means++' or 'random' or an ndarray, "
                         "'%s' (type '%s') was passed." % (init, type(init)))

    if sp.issparse(centers):
        centers = centers.toarray()

    if len(centers) != k:
        raise ValueError('The shape of the initial centers (%s) '
                         'does not match the number of clusters %i'
                         % (centers.shape, k))

    return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""K-Means clustering
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
n_jobs : int, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Notes
------
The k-means problem is solved using Lloyd's algorithm.
The average complexity is given by O(k n T), were n is the number of
samples and T is the number of iteration.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
clustering algorithms available), but it falls in local minima. That's why
it can be useful to restart it several times.
See also
--------
MiniBatchKMeans:
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
probably much faster to than the default batch implementation.
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300,
             tol=1e-4, precompute_distances='auto',
             verbose=0, random_state=None, copy_x=True, n_jobs=1):
    # An explicit init array fixes both the number of clusters and the
    # starting centers; coerce it to float64 up front.
    if hasattr(init, '__array__'):
        n_clusters = init.shape[0]
        init = np.asarray(init, dtype=np.float64)

    self.n_clusters = n_clusters
    self.init = init
    self.max_iter = max_iter
    self.tol = tol
    self.precompute_distances = precompute_distances
    self.n_init = n_init
    self.verbose = verbose
    self.random_state = random_state
    self.copy_x = copy_x
    self.n_jobs = n_jobs
def _check_fit_data(self, X):
    """Validate X for fitting: coerce to CSR/float64 and make sure there
    are at least as many samples as requested clusters."""
    X = check_array(X, accept_sparse='csr', dtype=np.float64)
    n_samples = X.shape[0]
    if n_samples >= self.n_clusters:
        return X
    raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
        n_samples, self.n_clusters))
def _check_test_data(self, X):
    """Validate test data against the fitted centers.

    Coerces ``X`` to a (possibly sparse CSR) array, checks that it has the
    same number of features as ``cluster_centers_``, and casts non-float
    data to float64 to avoid overflow in the distance computations.

    Raises
    ------
    ValueError
        If the number of features does not match the fitted centers.
    """
    X = check_array(X, accept_sparse='csr')
    n_samples, n_features = X.shape
    expected_n_features = self.cluster_centers_.shape[1]
    if not n_features == expected_n_features:
        raise ValueError("Incorrect number of features. "
                         "Got %d features, expected %d" % (
                             n_features, expected_n_features))
    if X.dtype.kind != 'f':
        warnings.warn("Got data type %s, converted to float "
                      "to avoid overflows" % X.dtype,
                      RuntimeWarning, stacklevel=2)
        # BUG FIX: np.float was a deprecated alias for the builtin float
        # and was removed in NumPy 1.24; use the explicit float64 dtype
        # (what np.float resolved to on astype) instead.
        X = X.astype(np.float64)
    return X
def fit(self, X, y=None):
    """Compute k-means clustering.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Training instances to cluster.

    Returns
    -------
    self
    """
    rng = check_random_state(self.random_state)
    X = self._check_fit_data(X)

    # Delegate the actual optimization to the module-level k_means helper.
    results = k_means(
        X, n_clusters=self.n_clusters, init=self.init,
        n_init=self.n_init, max_iter=self.max_iter,
        verbose=self.verbose, return_n_iter=True,
        precompute_distances=self.precompute_distances,
        tol=self.tol, random_state=rng, copy_x=self.copy_x,
        n_jobs=self.n_jobs)
    (self.cluster_centers_, self.labels_,
     self.inertia_, self.n_iter_) = results
    return self
def fit_predict(self, X, y=None):
    """Compute cluster centers and predict cluster index for each sample.

    Convenience method; equivalent to calling fit(X) followed by
    predict(X).
    """
    fitted = self.fit(X)
    return fitted.labels_
def fit_transform(self, X, y=None):
    """Compute clustering and transform X to cluster-distance space.

    Equivalent to fit(X).transform(X), but more efficiently implemented.
    """
    # Currently, this just skips a copy of the data if it is not in
    # np.array or CSR format already.
    # XXX This skips _check_test_data, which may change the dtype;
    # we should refactor the input validation.
    validated = self._check_fit_data(X)
    fitted = self.fit(validated)
    return fitted._transform(validated)
def transform(self, X, y=None):
    """Transform X to a cluster-distance space.

    In the new space, each dimension is the distance to the cluster
    centers.  Note that even if X is sparse, the array returned by
    `transform` will typically be dense.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to transform.

    Returns
    -------
    X_new : array, shape [n_samples, k]
        X transformed in the new space.
    """
    check_is_fitted(self, 'cluster_centers_')
    validated = self._check_test_data(X)
    return self._transform(validated)
def _transform(self, X):
    """Guts of the transform method; performs no input validation."""
    centers = self.cluster_centers_
    return euclidean_distances(X, centers)
def predict(self, X):
    """Predict the closest cluster each sample in X belongs to.

    In the vector quantization literature, `cluster_centers_` is called
    the code book and each value returned by `predict` is the index of
    the closest code in the code book.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to predict.

    Returns
    -------
    labels : array, shape [n_samples,]
        Index of the cluster each sample belongs to.
    """
    check_is_fitted(self, 'cluster_centers_')

    X = self._check_test_data(X)
    squared_norms = row_norms(X, squared=True)
    labels, _ = _labels_inertia(X, squared_norms, self.cluster_centers_)
    return labels
def score(self, X, y=None):
    """Opposite of the value of X on the K-means objective.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data.

    Returns
    -------
    score : float
        Opposite of the value of X on the K-means objective.
    """
    check_is_fitted(self, 'cluster_centers_')

    X = self._check_test_data(X)
    squared_norms = row_norms(X, squared=True)
    _, inertia = _labels_inertia(X, squared_norms, self.cluster_centers_)
    # Higher is better for sklearn scores, hence the negation.
    return -inertia
def _mini_batch_step(X, x_squared_norms, centers, counts,
                     old_center_buffer, compute_squared_diff,
                     distances, random_reassign=False,
                     random_state=None, reassignment_ratio=.01,
                     verbose=False):
    """Incremental update of the centers for the Minibatch K-Means algorithm.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        The original data array.

    x_squared_norms : array, shape (n_samples,)
        Squared euclidean norm of each data point.

    centers : array, shape (k, n_features)
        The cluster centers. This array is MODIFIED IN PLACE

    counts : array, shape (k,)
        The vector in which we keep track of the numbers of elements in a
        cluster. This array is MODIFIED IN PLACE

    distances : array, dtype float64, shape (n_samples), optional
        If not None, should be a pre-allocated array that will be used to store
        the distances of each sample to its closest center.
        May not be None when random_reassign is True.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    random_reassign : boolean, optional
        If True, centers with very low counts are randomly reassigned
        to observations.

    reassignment_ratio : float, optional
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more likely to be reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.

    verbose : bool, optional, default False
        Controls the verbosity.

    compute_squared_diff : bool
        If set to False, the squared diff computation is skipped.

    old_center_buffer : int
        Copy of old centers for monitoring convergence.

    Returns
    -------
    inertia : float
        Sum of distances of samples to their closest cluster center.

    squared_diff : numpy array, shape (n_clusters,)
        Squared distances between previous and updated cluster centers.
    """
    # Perform label assignment to nearest centers
    nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
                                              distances=distances)
    if random_reassign and reassignment_ratio > 0:
        random_state = check_random_state(random_state)
        # Reassign clusters that have very low counts
        to_reassign = counts < reassignment_ratio * counts.max()
        # pick at most .5 * batch_size samples as new centers
        if to_reassign.sum() > .5 * X.shape[0]:
            indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
            to_reassign[indices_dont_reassign] = False
        n_reassigns = to_reassign.sum()
        if n_reassigns:
            # Pick new clusters amongst observations with uniform probability
            new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
                                 random_state=random_state)
            if verbose:
                print("[MiniBatchKMeans] Reassigning %i cluster centers."
                      % n_reassigns)
            if sp.issparse(X) and not sp.issparse(centers):
                # Sparse data, dense centers: copy the chosen CSR rows into
                # the dense centers array without densifying X.
                assign_rows_csr(X,
                                astype(new_centers, np.intp),
                                astype(np.where(to_reassign)[0], np.intp),
                                centers)
            else:
                centers[to_reassign] = X[new_centers]
        # reset counts of reassigned centers, but don't reset them too small
        # to avoid instant reassignment. This is a pretty dirty hack as it
        # also modifies the learning rates.
        # NOTE(review): with reassignment_ratio <= 1 the max-count center is
        # never flagged, so counts[~to_reassign] is non-empty — confirm for
        # callers passing ratios > 1.
        counts[to_reassign] = np.min(counts[~to_reassign])
    # implementation for the sparse CSR representation completely written in
    # cython
    if sp.issparse(X):
        return inertia, _k_means._mini_batch_update_csr(
            X, x_squared_norms, centers, counts, nearest_center,
            old_center_buffer, compute_squared_diff)
    # dense variant in mostly numpy (not as memory efficient though)
    # Each center is maintained as a running mean: un-scale by the old
    # count, add the new members, then re-scale by the updated count.
    k = centers.shape[0]
    squared_diff = 0.0
    for center_idx in range(k):
        # find points from minibatch that are assigned to this center
        center_mask = nearest_center == center_idx
        count = center_mask.sum()
        if count > 0:
            if compute_squared_diff:
                old_center_buffer[:] = centers[center_idx]
            # inplace remove previous count scaling
            centers[center_idx] *= counts[center_idx]
            # inplace sum with new points members of this cluster
            centers[center_idx] += np.sum(X[center_mask], axis=0)
            # update the count statistics for this center
            counts[center_idx] += count
            # inplace rescale to compute mean of all points (old and new)
            centers[center_idx] /= counts[center_idx]
            # update the squared diff if necessary
            if compute_squared_diff:
                diff = centers[center_idx].ravel() - old_center_buffer.ravel()
                squared_diff += np.dot(diff, diff)
    return inertia, squared_diff
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
                            n_samples, centers_squared_diff, batch_inertia,
                            context, verbose=0):
    """Helper function encapsulating the early stopping logic.

    Returns True when the optimization should stop, and keeps its running
    statistics between calls in the mutable ``context`` dict.
    """
    # Normalize both statistics by the batch size so that runs with
    # different batch sizes remain comparable.
    inertia = batch_inertia / model.batch_size
    diff = centers_squared_diff / model.batch_size

    # Exponentially Weighted Averages smooth out minibatch-local stochastic
    # variability: https://en.wikipedia.org/wiki/Moving_average
    ewa_diff = context.get('ewa_diff')
    ewa_inertia = context.get('ewa_inertia')
    if ewa_diff is None:
        ewa_diff = diff
        ewa_inertia = inertia
    else:
        alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
        alpha = 1.0 if alpha > 1.0 else alpha
        ewa_diff = ewa_diff * (1 - alpha) + diff * alpha
        ewa_inertia = ewa_inertia * (1 - alpha) + inertia * alpha

    # Log progress to be able to monitor convergence
    if verbose:
        print('Minibatch iteration %d/%d:'
              ' mean batch inertia: %f, ewa inertia: %f ' % (
                  iteration_idx + 1, n_iter, inertia, ewa_inertia))

    # Early stopping based on absolute tolerance on squared change of
    # centers position (using EWA smoothing)
    if tol > 0.0 and ewa_diff <= tol:
        if verbose:
            print('Converged (small centers change) at iteration %d/%d'
                  % (iteration_idx + 1, n_iter))
        return True

    # Early stopping heuristic due to lack of improvement on smoothed inertia
    ewa_inertia_min = context.get('ewa_inertia_min')
    no_improvement = context.get('no_improvement', 0)
    if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
        ewa_inertia_min = ewa_inertia
        no_improvement = 0
    else:
        no_improvement += 1
    max_no_improvement = model.max_no_improvement
    if (max_no_improvement is not None
            and no_improvement >= max_no_improvement):
        if verbose:
            print('Converged (lack of improvement in inertia)'
                  ' at iteration %d/%d'
                  % (iteration_idx + 1, n_iter))
        return True

    # update the convergence context to maintain state across successive calls:
    context['ewa_diff'] = ewa_diff
    context['ewa_inertia'] = ewa_inertia
    context['ewa_inertia_min'] = ewa_inertia_min
    context['no_improvement'] = no_improvement
    return False
class MiniBatchKMeans(KMeans):
    """Mini-Batch K-Means clustering

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.

    max_iter : int, optional
        Maximum number of iterations over the complete dataset before
        stopping independently of any early stopping criterion heuristics.

    max_no_improvement : int, default: 10
        Control early stopping based on the consecutive number of mini
        batches that does not yield an improvement on the smoothed inertia.
        To disable convergence detection based on inertia, set
        max_no_improvement to None.

    tol : float, default: 0.0
        Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized of the mean center
        squared position changes. This early stopping heuristics is
        closer to the one used for the batch variant of the algorithms
        but induces a slight computational and memory overhead over the
        inertia heuristic.
        To disable convergence detection based on normalized center
        change, set tol to 0.0 (default).

    batch_size : int, optional, default: 100
        Size of the mini batches.

    init_size : int, optional, default: 3 * batch_size
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        only algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than n_clusters.

    init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
        Method for initialization, defaults to 'k-means++':

        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.

        'random': choose k observations (rows) at random from data for
        the initial centroids.

        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    n_init : int, default=3
        Number of random initializations that are tried.
        In contrast to KMeans, the algorithm is only run once, using the
        best of the ``n_init`` initializations as measured by inertia.

    compute_labels : boolean, default=True
        Compute label assignment and inertia for the complete dataset
        once the minibatch optimization has converged in fit.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    reassignment_ratio : float, default: 0.01
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more easily reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.

    verbose : boolean, optional
        Verbosity mode.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers

    labels_ :
        Labels of each point (if compute_labels is set to True).

    inertia_ : float
        The value of the inertia criterion associated with the chosen
        partition (if compute_labels is set to True). The inertia is
        defined as the sum of square distances of samples to their nearest
        neighbor.

    Notes
    -----
    See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
    """

    def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
                 batch_size=100, verbose=0, compute_labels=True,
                 random_state=None, tol=0.0, max_no_improvement=10,
                 init_size=None, n_init=3, reassignment_ratio=0.01):

        super(MiniBatchKMeans, self).__init__(
            n_clusters=n_clusters, init=init, max_iter=max_iter,
            verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)

        self.max_no_improvement = max_no_improvement
        self.batch_size = batch_size
        self.compute_labels = compute_labels
        self.init_size = init_size
        self.reassignment_ratio = reassignment_ratio

    def fit(self, X, y=None):
        """Compute the centroids on X by chunking it into mini-batches.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Coordinates of the data points to cluster

        Returns
        -------
        self
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X, accept_sparse="csr", order='C', dtype=np.float64)
        n_samples, n_features = X.shape
        if n_samples < self.n_clusters:
            raise ValueError("Number of samples smaller than number "
                             "of clusters.")

        n_init = self.n_init
        if hasattr(self.init, '__array__'):
            self.init = np.ascontiguousarray(self.init, dtype=np.float64)
            if n_init != 1:
                warnings.warn(
                    'Explicit initial center position passed: '
                    'performing only one init in MiniBatchKMeans instead of '
                    'n_init=%d'
                    % self.n_init, RuntimeWarning, stacklevel=2)
                n_init = 1

        x_squared_norms = row_norms(X, squared=True)

        if self.tol > 0.0:
            tol = _tolerance(X, self.tol)

            # using tol-based early stopping needs the allocation of a
            # dedicated before which can be expensive for high dim data:
            # hence we allocate it outside of the main loop
            old_center_buffer = np.zeros(n_features, np.double)
        else:
            tol = 0.0
            # no need for the center buffer if tol-based early stopping is
            # disabled
            old_center_buffer = np.zeros(0, np.double)

        distances = np.zeros(self.batch_size, dtype=np.float64)
        n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        n_iter = int(self.max_iter * n_batches)

        init_size = self.init_size
        if init_size is None:
            init_size = 3 * self.batch_size
        if init_size > n_samples:
            init_size = n_samples
        self.init_size_ = init_size

        # BUG FIX: RandomState.random_integers (inclusive upper bound) is
        # deprecated in NumPy; randint(0, n_samples) draws from the same
        # range [0, n_samples - 1] with an exclusive upper bound.
        validation_indices = random_state.randint(0, n_samples, init_size)
        X_valid = X[validation_indices]
        x_squared_norms_valid = x_squared_norms[validation_indices]

        # perform several inits with random sub-sets
        best_inertia = None
        for init_idx in range(n_init):
            if self.verbose:
                print("Init %d/%d with method: %s"
                      % (init_idx + 1, n_init, self.init))
            counts = np.zeros(self.n_clusters, dtype=np.int32)

            # TODO: once the `k_means` function works with sparse input we
            # should refactor the following init to use it instead.

            # Initialize the centers using only a fraction of the data as we
            # expect n_samples to be very large when using MiniBatchKMeans
            cluster_centers = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=random_state,
                x_squared_norms=x_squared_norms,
                init_size=init_size)

            # Compute the label assignment on the init dataset
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X_valid, x_squared_norms[validation_indices],
                cluster_centers, counts, old_center_buffer, False,
                distances=None, verbose=self.verbose)

            # Keep only the best cluster centers across independent inits on
            # the common validation set
            _, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
                                         cluster_centers)
            if self.verbose:
                print("Inertia for init %d/%d: %f"
                      % (init_idx + 1, n_init, inertia))
            if best_inertia is None or inertia < best_inertia:
                self.cluster_centers_ = cluster_centers
                self.counts_ = counts
                best_inertia = inertia

        # Empty context to be used inplace by the convergence check routine
        convergence_context = {}

        # Perform the iterative optimization until the final convergence
        # criterion
        for iteration_idx in range(n_iter):
            # Sample a minibatch from the full dataset (see BUG FIX above
            # for the randint range).
            minibatch_indices = random_state.randint(
                0, n_samples, self.batch_size)

            # Perform the actual update step on the minibatch data
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X[minibatch_indices], x_squared_norms[minibatch_indices],
                self.cluster_centers_, self.counts_,
                old_center_buffer, tol > 0.0, distances=distances,
                # Here we randomly choose whether to perform
                # random reassignment: the choice is done as a function
                # of the iteration index, and the minimum number of
                # counts, in order to force this reassignment to happen
                # every once in a while
                random_reassign=((iteration_idx + 1)
                                 % (10 + self.counts_.min()) == 0),
                random_state=random_state,
                reassignment_ratio=self.reassignment_ratio,
                verbose=self.verbose)

            # Monitor convergence and do early stopping if necessary
            if _mini_batch_convergence(
                    self, iteration_idx, n_iter, tol, n_samples,
                    centers_squared_diff, batch_inertia, convergence_context,
                    verbose=self.verbose):
                break

        self.n_iter_ = iteration_idx + 1

        if self.compute_labels:
            self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)

        return self

    def _labels_inertia_minibatch(self, X):
        """Compute labels and inertia using mini batches.

        This is slightly slower than doing everything at once but prevents
        memory errors / segfaults.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        labels : array, shape (n_samples,)
            Cluster labels for each point.

        inertia : float
            Sum of squared distances of points to nearest cluster.
        """
        if self.verbose:
            print('Computing label assignment and total inertia')
        x_squared_norms = row_norms(X, squared=True)
        slices = gen_batches(X.shape[0], self.batch_size)
        results = [_labels_inertia(X[s], x_squared_norms[s],
                                   self.cluster_centers_) for s in slices]
        labels, inertia = zip(*results)
        return np.hstack(labels), np.sum(inertia)

    def partial_fit(self, X, y=None):
        """Update k means estimate on a single mini-batch X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Coordinates of the data points to cluster.
        """
        X = check_array(X, accept_sparse="csr")
        n_samples, n_features = X.shape
        if hasattr(self.init, '__array__'):
            self.init = np.ascontiguousarray(self.init, dtype=np.float64)

        if n_samples == 0:
            return self

        x_squared_norms = row_norms(X, squared=True)
        self.random_state_ = getattr(self, "random_state_",
                                     check_random_state(self.random_state))
        if (not hasattr(self, 'counts_')
                or not hasattr(self, 'cluster_centers_')):
            # this is the first call partial_fit on this object:
            # initialize the cluster centers
            self.cluster_centers_ = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=self.random_state_,
                x_squared_norms=x_squared_norms, init_size=self.init_size)

            self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
            random_reassign = False
            distances = None
        else:
            # The lower the minimum count is, the more we do random
            # reassignment, however, we don't want to do random
            # reassignment too often, to allow for building up counts
            random_reassign = self.random_state_.randint(
                10 * (1 + self.counts_.min())) == 0
            distances = np.zeros(X.shape[0], dtype=np.float64)

        _mini_batch_step(X, x_squared_norms, self.cluster_centers_,
                         self.counts_, np.zeros(0, np.double), 0,
                         random_reassign=random_reassign, distances=distances,
                         random_state=self.random_state_,
                         reassignment_ratio=self.reassignment_ratio,
                         verbose=self.verbose)

        if self.compute_labels:
            self.labels_, self.inertia_ = _labels_inertia(
                X, x_squared_norms, self.cluster_centers_)

        return self

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, 'cluster_centers_')

        X = self._check_test_data(X)
        return self._labels_inertia_minibatch(X)[0]
|
abhishekkrthakur/scikit-learn
|
sklearn/cluster/k_means_.py
|
Python
|
bsd-3-clause
| 54,798
|
[
"Gaussian"
] |
0d20162d9c61ca1630814ba3b178130628c6bb8d8bfb88481f21591a42f06925
|
'''
Created on Aug 3, 2011
@author: sean
'''
import _ast
from ..visitors import Visitor
class Replacer(Visitor):
    '''
    Visitor that substitutes one AST node for another wherever the old
    node appears as a direct field value or as a list element of a
    visited node.
    '''

    def __init__(self, old, new):
        self.old = old
        self.new = new

    def visitDefault(self, node):
        for field in node._fields:
            value = getattr(node, field)
            # Direct field match: swap in the replacement node.
            if value == self.old:
                setattr(node, field, self.new)
            if isinstance(value, (list, tuple)):
                # Replace matching elements in place; recurse into the rest.
                for index, child in enumerate(value):
                    if child == self.old:
                        value[index] = self.new
                    elif isinstance(child, _ast.AST):
                        self.visit(child)
            elif isinstance(value, _ast.AST):
                # Deliberately not an elif of the first check: the
                # pre-replacement value is still traversed here.
                self.visit(value)
        return
def replace_nodes(root, old, new):
    '''
    Replace the old node with the new one.
    Old must be an indirect child of root

    :param root: ast node that contains an indirect reference to old
    :param old: node to replace
    :param new: node to replace `old` with
    '''
    Replacer(old, new).visit(root)
class NodeRemover(Visitor):
    '''
    Visitor that removes the given nodes wherever they appear as children
    of a visited node.

    :param to_remove: container of nodes to remove.
    '''

    def __init__(self, to_remove):
        # BUG FIX: the original read ``self.to_remove`` without assigning
        # it, so the attribute never existed and any later use raised
        # AttributeError.
        self.to_remove = to_remove

    def visitDefault(self, node):
        # BUG FIX: the original body was copy-pasted from Replacer and
        # referenced ``self.old``/``self.new``, which this class never
        # defines, so it crashed on the first matching child.
        for field in node._fields:
            value = getattr(node, field)
            if isinstance(value, list):
                # Drop matching children in place; recurse into the rest.
                kept = []
                for child in value:
                    if child in self.to_remove:
                        continue
                    if isinstance(child, _ast.AST):
                        self.visit(child)
                    kept.append(child)
                value[:] = kept
            elif isinstance(value, _ast.AST):
                if value in self.to_remove:
                    # A single-node field cannot simply disappear; detach
                    # it by clearing the field instead.
                    setattr(node, field, None)
                else:
                    self.visit(value)
        return
|
diana-hep/femtocode
|
lang/femtocode/thirdparty/meta/asttools/mutators/replace_mutator.py
|
Python
|
apache-2.0
| 2,046
|
[
"VisIt"
] |
a43d27460faee020e73b52bd72063422bc5e851cc1c0732ca0b9665f2d0e4c48
|
"""
A simple VTK widget for PyQt v4, the Qt v4 bindings for Python.
See http://www.trolltech.com for Qt documentation, and
http://www.riverbankcomputing.co.uk for PyQt.
This class is based on the vtkGenericRenderWindowInteractor and is
therefore fairly powerful. It should also play nicely with the
vtk3DWidget code.
Created by Prabhu Ramachandran, May 2002
Based on David Gobbi's QVTKRenderWidget.py
Changes by Gerard Vermeulen Feb. 2003
Win32 support.
Changes by Gerard Vermeulen, May 2003
Bug fixes and better integration with the Qt framework.
Changes by Phil Thompson, Nov. 2006
Ported to PyQt v4.
Added support for wheel events.
Changes by Phil Thompson, Oct. 2007
Bug fixes.
Changes by Phil Thompson, Mar. 2008
Added cursor support.
"""
import sys
import os
from pyface.qt import qt_api
if qt_api == 'pyqt':
from PyQt4 import QtGui, QtCore
else:
from PySide import QtGui, QtCore
from ctypes import pythonapi, c_void_p, py_object
pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
pythonapi.PyCObject_AsVoidPtr.argtypes = [ py_object ]
import vtk
from tvtk import messenger
class QVTKRenderWindowInteractor(QtGui.QWidget):
""" A QVTKRenderWindowInteractor for Python and Qt. Uses a
vtkGenericRenderWindowInteractor to handle the interactions. Use
GetRenderWindow() to get the vtkRenderWindow. Create with the
keyword stereo=1 in order to generate a stereo-capable window.
The user interface is summarized in vtkInteractorStyle.h:
- Keypress j / Keypress t: toggle between joystick (position
sensitive) and trackball (motion sensitive) styles. In joystick
style, motion occurs continuously as long as a mouse button is
pressed. In trackball style, motion occurs when the mouse button
is pressed and the mouse pointer moves.
- Keypress c / Keypress o: toggle between camera and object
(actor) modes. In camera mode, mouse events affect the camera
position and focal point. In object mode, mouse events affect
the actor that is under the mouse pointer.
- Button 1: rotate the camera around its focal point (if camera
mode) or rotate the actor around its origin (if actor mode). The
rotation is in the direction defined from the center of the
renderer's viewport towards the mouse position. In joystick mode,
the magnitude of the rotation is determined by the distance the
mouse is from the center of the render window.
- Button 2: pan the camera (if camera mode) or translate the actor
(if object mode). In joystick mode, the direction of pan or
translation is from the center of the viewport towards the mouse
position. In trackball mode, the direction of motion is the
direction the mouse moves. (Note: with 2-button mice, pan is
defined as <Shift>-Button 1.)
- Button 3: zoom the camera (if camera mode) or scale the actor
(if object mode). Zoom in/increase scale if the mouse position is
in the top half of the viewport; zoom out/decrease scale if the
mouse position is in the bottom half. In joystick mode, the amount
of zoom is controlled by the distance of the mouse pointer from
the horizontal centerline of the window.
- Keypress 3: toggle the render window into and out of stereo
mode. By default, red-blue stereo pairs are created. Some systems
support Crystal Eyes LCD stereo glasses; you have to invoke
SetStereoTypeToCrystalEyes() on the rendering window. Note: to
use stereo you also need to pass a stereo=1 keyword argument to
the constructor.
- Keypress e: exit the application.
- Keypress f: fly to the picked point
- Keypress p: perform a pick operation. The render window interactor
has an internal instance of vtkCellPicker that it uses to pick.
- Keypress r: reset the camera view along the current view
direction. Centers the actors and moves the camera so that all actors
are visible.
- Keypress s: modify the representation of all actors so that they
are surfaces.
- Keypress u: invoke the user-defined function. Typically, this
keypress will bring up an interactor that you can type commands in.
- Keypress w: modify the representation of all actors so that they
are wireframe.
"""
# Map between VTK and Qt cursors.
_CURSOR_MAP = {
0: QtCore.Qt.ArrowCursor, # VTK_CURSOR_DEFAULT
1: QtCore.Qt.ArrowCursor, # VTK_CURSOR_ARROW
2: QtCore.Qt.SizeBDiagCursor, # VTK_CURSOR_SIZENE
3: QtCore.Qt.SizeFDiagCursor, # VTK_CURSOR_SIZENWSE
4: QtCore.Qt.SizeBDiagCursor, # VTK_CURSOR_SIZESW
5: QtCore.Qt.SizeFDiagCursor, # VTK_CURSOR_SIZESE
6: QtCore.Qt.SizeVerCursor, # VTK_CURSOR_SIZENS
7: QtCore.Qt.SizeHorCursor, # VTK_CURSOR_SIZEWE
8: QtCore.Qt.SizeAllCursor, # VTK_CURSOR_SIZEALL
9: QtCore.Qt.PointingHandCursor, # VTK_CURSOR_HAND
10: QtCore.Qt.CrossCursor, # VTK_CURSOR_CROSSHAIR
}
# Map from Qt key codes to VTK key names
_KEY_MAP = {
QtCore.Qt.Key_Escape: "Esc",
QtCore.Qt.Key_Tab: "Tab",
QtCore.Qt.Key_Backtab: "Backtab",
QtCore.Qt.Key_Backspace: "Backspace",
QtCore.Qt.Key_Return: "Return",
QtCore.Qt.Key_Enter: "Enter",
QtCore.Qt.Key_Insert: "Insert",
QtCore.Qt.Key_Delete: "Delete",
QtCore.Qt.Key_Pause: "Pause",
QtCore.Qt.Key_Print: "Print",
QtCore.Qt.Key_SysReq: "Sysreq",
QtCore.Qt.Key_Clear: "Clear",
QtCore.Qt.Key_Home: "Home",
QtCore.Qt.Key_End: "End",
QtCore.Qt.Key_Left: "Left",
QtCore.Qt.Key_Up: "Up",
QtCore.Qt.Key_Right: "Right",
QtCore.Qt.Key_Down: "Down",
QtCore.Qt.Key_PageUp: "Prior",
QtCore.Qt.Key_PageDown: "Next",
QtCore.Qt.Key_Meta: "Meta",
QtCore.Qt.Key_CapsLock: "Caps_Lock",
QtCore.Qt.Key_NumLock: "Num_Lock",
QtCore.Qt.Key_ScrollLock: "Scroll_Lock",
QtCore.Qt.Key_F1: "F1",
QtCore.Qt.Key_F2: "F2",
QtCore.Qt.Key_F3: "F3",
QtCore.Qt.Key_F4: "F4",
QtCore.Qt.Key_F5: "F5",
QtCore.Qt.Key_F6: "F6",
QtCore.Qt.Key_F7: "F7",
QtCore.Qt.Key_F8: "F8",
QtCore.Qt.Key_F9: "F9",
QtCore.Qt.Key_F10: "F10",
QtCore.Qt.Key_F11: "F11",
QtCore.Qt.Key_F12: "F12",
}
def __init__(self, parent=None, wflags=QtCore.Qt.WindowFlags(), **kw):
# the current button
self._ActiveButton = QtCore.Qt.NoButton
# private attributes
self.__oldFocus = None
self.__saveX = 0
self.__saveY = 0
self.__saveModifiers = QtCore.Qt.NoModifier
self.__saveButtons = QtCore.Qt.NoButton
# do special handling of some keywords:
# stereo, rw
stereo = 0
if kw.has_key('stereo'):
if kw['stereo']:
stereo = 1
rw = None
if kw.has_key('rw'):
rw = kw['rw']
# create qt-level widget
QtGui.QWidget.__init__(self, parent, wflags|QtCore.Qt.MSWindowsOwnDC)
if rw: # user-supplied render window
self._RenderWindow = rw
else:
self._RenderWindow = vtk.vtkRenderWindow()
if qt_api == 'pyqt' or sys.platform != 'win32':
self._RenderWindow.SetWindowInfo(str(int(self.winId())))
else:
# On Windows PySide has a bug with winID() function, so this is fix:
self._RenderWindow.SetWindowInfo(
str(int(pythonapi.PyCObject_AsVoidPtr(self.winId()))))
self._should_set_parent_info = (sys.platform == 'win32')
if stereo: # stereo mode
self._RenderWindow.StereoCapableWindowOn()
self._RenderWindow.SetStereoTypeToCrystalEyes()
self._Iren = vtk.vtkGenericRenderWindowInteractor()
self._Iren.SetRenderWindow(self._RenderWindow)
# do all the necessary qt setup
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
self.setAttribute(QtCore.Qt.WA_PaintOnScreen)
self.setMouseTracking(True) # get all mouse events
self.setFocusPolicy(QtCore.Qt.WheelFocus)
self.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding))
self._Timer = QtCore.QTimer(self)
self.connect(self._Timer, QtCore.SIGNAL('timeout()'), self.TimerEvent)
# add wheel timer to fix scrolling issue with trackpad
self.wheel_timer = QtCore.QTimer()
self.wheel_timer.setSingleShot(True)
self.wheel_timer.setInterval(25)
self.wheel_timer.timeout.connect(self._emit_wheel_event)
self.wheel_accumulator = 0
self._saved_wheel_event_info = ()
self._Iren.AddObserver('CreateTimerEvent', messenger.send)
messenger.connect(self._Iren, 'CreateTimerEvent', self.CreateTimer)
self._Iren.AddObserver('DestroyTimerEvent', messenger.send)
messenger.connect(self._Iren, 'DestroyTimerEvent', self.DestroyTimer)
render_window = self._Iren.GetRenderWindow()
render_window.AddObserver('CursorChangedEvent', messenger.send)
messenger.connect(render_window, 'CursorChangedEvent', self.CursorChangedEvent)
def __getattr__(self, attr):
"""Makes the object behave like a vtkGenericRenderWindowInteractor"""
if attr == '__vtk__':
return lambda t=self._Iren: t
elif hasattr(self._Iren, attr):
return getattr(self._Iren, attr)
else:
raise AttributeError, self.__class__.__name__ + \
" has no attribute named " + attr
    def CreateTimer(self, obj, evt):
        """VTK CreateTimerEvent handler: start the Qt timer with a 10 ms
        period. (obj/evt are the standard VTK observer arguments, unused.)"""
        self._Timer.start(10)
    def DestroyTimer(self, obj, evt):
        """VTK DestroyTimerEvent handler: stop the Qt timer."""
        self._Timer.stop()
        # Returning 1 signals to VTK that the timer was destroyed.
        return 1
    def TimerEvent(self):
        """Qt timer timeout handler: forward the tick to the VTK interactor."""
        self._Iren.TimerEvent()
    def CursorChangedEvent(self, obj, evt):
        """Called when the CursorChangedEvent fires on the render window.
        (obj/evt are the standard VTK observer arguments, unused.)"""
        # This indirection is needed since when the event fires, the current
        # cursor is not yet set so we defer this by which time the current
        # cursor should have been set.
        QtCore.QTimer.singleShot(0, self.ShowCursor)
def HideCursor(self):
    """Hides the cursor by swapping in Qt's blank cursor."""
    self.setCursor(QtCore.Qt.BlankCursor)
def ShowCursor(self):
    """Shows the cursor."""
    # Map VTK's current cursor id onto a Qt cursor shape, defaulting to
    # the arrow when the id is not in _CURSOR_MAP.
    vtk_cursor = self._Iren.GetRenderWindow().GetCurrentCursor()
    qt_cursor = self._CURSOR_MAP.get(vtk_cursor, QtCore.Qt.ArrowCursor)
    self.setCursor(qt_cursor)
def sizeHint(self):
    """Return the widget's preferred default size (400x400 pixels)."""
    return QtCore.QSize(400, 400)
def paintEngine(self):
    """Return None so Qt does not paint this widget itself.

    NOTE(review): WA_PaintOnScreen is set in __init__, so rendering is
    done directly by VTK rather than through a Qt paint engine.
    """
    return None
def paintEvent(self, ev):
    """Re-render the VTK scene whenever Qt requests a repaint."""
    self._RenderWindow.Render()
def resizeEvent(self, ev):
    """Propagate the new widget size (and, on some platforms, window
    handles) to the VTK render window and interactor."""
    if self._should_set_parent_info:
        # Set the window info and parent info on every resize.
        # vtkWin32OpenGLRenderWindow will render using incorrect offsets if
        # the parent info is not given to it because it assumes that it
        # needs to make room for the title bar.
        if qt_api == 'pyqt' or sys.platform != 'win32':
            self._RenderWindow.SetWindowInfo(str(int(self.winId())))
        else:
            # On Windows PySide has a bug with winID() function, so this is fix:
            self._RenderWindow.SetWindowInfo(
                str(int(pythonapi.PyCObject_AsVoidPtr(self.winId()))))
        parent = self.parent()
        if parent is not None:
            if qt_api == 'pyqt' or sys.platform != 'win32':
                self._RenderWindow.SetParentInfo(str(int(self.winId())))
            else:
                # On Windows PySide has a bug with winID() function, so this is fix:
                self._RenderWindow.SetParentInfo(
                    str(int(pythonapi.PyCObject_AsVoidPtr(self.winId()))))
        else:
            # No parent widget: clear the parent info.
            self._RenderWindow.SetParentInfo('')
    w = self.width()
    h = self.height()
    self._RenderWindow.SetSize(w, h)
    self._Iren.SetSize(w, h)
def _GetCtrlShift(self, ev):
    """Return a ``(ctrl, shift)`` pair of booleans for the given event.

    Events that do not carry their own modifiers (e.g. enter/leave) fall
    back to the modifiers cached by mouseMoveEvent.
    """
    if hasattr(ev, 'modifiers'):
        mods = ev.modifiers()
    else:
        mods = self.__saveModifiers
    ctrl = bool(mods & QtCore.Qt.ControlModifier)
    shift = bool(mods & QtCore.Qt.ShiftModifier)
    return ctrl, shift
def enterEvent(self, ev):
    """Take keyboard focus on mouse enter and forward EnterEvent to VTK."""
    if not self.hasFocus():
        # Remember the previously focused widget so leaveEvent can restore it.
        self.__oldFocus = self.focusWidget()
        self.setFocus()
    ctrl, shift = self._GetCtrlShift(ev)
    # Enter events carry no coordinates; reuse the last saved mouse position.
    self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                        ctrl, shift, chr(0), 0, None)
    self._Iren.EnterEvent()
def leaveEvent(self, ev):
    """Restore the previous focus (if no button is held) and forward
    LeaveEvent to VTK."""
    if self.__saveButtons == QtCore.Qt.NoButton and self.__oldFocus:
        self.__oldFocus.setFocus()
        self.__oldFocus = None
    ctrl, shift = self._GetCtrlShift(ev)
    self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                        ctrl, shift, chr(0), 0, None)
    self._Iren.LeaveEvent()
def mousePressEvent(self, ev):
    """Forward a Qt mouse press (or double-click) to the VTK interactor."""
    ctrl, shift = self._GetCtrlShift(ev)
    repeat = 1 if ev.type() == QtCore.QEvent.MouseButtonDblClick else 0
    self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
                                        ctrl, shift, chr(0), repeat, None)
    # Remember which button started the interaction; the release handler
    # dispatches on this rather than on its own event's button.
    self._ActiveButton = ev.button()
    press_handlers = {
        QtCore.Qt.LeftButton: self._Iren.LeftButtonPressEvent,
        QtCore.Qt.RightButton: self._Iren.RightButtonPressEvent,
        QtCore.Qt.MidButton: self._Iren.MiddleButtonPressEvent,
    }
    handler = press_handlers.get(self._ActiveButton)
    if handler is not None:
        handler()
def mouseReleaseEvent(self, ev):
    """Forward a Qt mouse release to the matching VTK button-release event."""
    ctrl, shift = self._GetCtrlShift(ev)
    self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
                                        ctrl, shift, chr(0), 0, None)
    # Dispatch on the button recorded at press time (_ActiveButton).
    if self._ActiveButton == QtCore.Qt.LeftButton:
        self._Iren.LeftButtonReleaseEvent()
    elif self._ActiveButton == QtCore.Qt.RightButton:
        self._Iren.RightButtonReleaseEvent()
    elif self._ActiveButton == QtCore.Qt.MidButton:
        self._Iren.MiddleButtonReleaseEvent()
def mouseMoveEvent(self, ev):
    """Forward mouse motion to VTK, caching position/buttons/modifiers
    for events (enter/leave/key) that do not carry their own."""
    self.__saveModifiers = ev.modifiers()
    self.__saveButtons = ev.buttons()
    self.__saveX = ev.x()
    self.__saveY = ev.y()
    ctrl, shift = self._GetCtrlShift(ev)
    self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
                                        ctrl, shift, chr(0), 0, None)
    self._Iren.MouseMoveEvent()
def keyPressEvent(self, ev):
    """Translate a Qt key press into VTK KeyPress and Char events."""
    ctrl, shift = self._GetCtrlShift(ev)
    key_sym = self._KEY_MAP.get(ev.key(), None)
    if ev.key() < 256:
        # Sometimes, the OS allows a chord (e.g. Alt-T) to generate
        # a Unicode character outside of the 8-bit Latin-1 range. We will
        # try to pass along Latin-1 characters unchanged, since VTK expects
        # a single `char` byte. If not, we will try to pass on the root key
        # of the chord (e.g. 'T' above).
        if ev.text() and ev.text() <= u'\u00ff':
            key = ev.text().encode('latin-1')
        else:
            # Has modifiers, but an ASCII key code.
            key = chr(ev.key())
    else:
        key = chr(0)
    # Key events carry no coordinates; reuse the last saved mouse position.
    self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                        ctrl, shift, key, 0, key_sym)
    self._Iren.KeyPressEvent()
    self._Iren.CharEvent()
def keyReleaseEvent(self, ev):
    """Translate a Qt key release into a VTK KeyRelease event."""
    ctrl, shift = self._GetCtrlShift(ev)
    key_sym = self._KEY_MAP.get(ev.key(), None)
    if ev.key() < 256:
        # Same Latin-1 handling as keyPressEvent: pass the character byte
        # through when possible, otherwise fall back to the raw key code.
        if ev.text() and ev.text() <= u'\u00ff':
            key = ev.text().encode('latin-1')
        else:
            # Has modifiers, but an ASCII key code.
            key = chr(ev.key())
    else:
        key = chr(0)
    self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                        ctrl, shift, key, 0, key_sym)
    self._Iren.KeyReleaseEvent()
def wheelEvent(self, ev):
    """ Reimplemented to work around scrolling bug in Mac.
    Work around https://bugreports.qt-project.org/browse/QTBUG-22269.
    Accumulate wheel events that are within a period of 25ms into a single
    event. Changes in buttons or modifiers, while a scroll is going on,
    are not handled, since they seem to be too much of a corner case to be
    worth handling.
    """
    # Accumulate the delta; _emit_wheel_event consumes it when the
    # 25 ms single-shot wheel_timer (set up in __init__) fires.
    self.wheel_accumulator += ev.delta()
    self._saved_wheel_event_info = (
        ev.pos(),
        ev.globalPos(),
        self.wheel_accumulator,
        ev.buttons(),
        ev.modifiers(),
        ev.orientation()
    )
    ev.setAccepted(True)
    if not self.wheel_timer.isActive():
        self.wheel_timer.start()
def _emit_wheel_event(self):
    """Flush the accumulated wheel delta as a single VTK wheel event."""
    ev = QtGui.QWheelEvent(*self._saved_wheel_event_info)
    # Sign of the accumulated delta decides scroll direction.
    if ev.delta() >= 0:
        self._Iren.MouseWheelForwardEvent()
    else:
        self._Iren.MouseWheelBackwardEvent()
    # Reset for the next accumulation window.
    self.wheel_timer.stop()
    self.wheel_accumulator = 0
def GetRenderWindow(self):
    """Return the underlying vtkRenderWindow."""
    return self._RenderWindow
def Render(self):
    """Request a repaint; paintEvent performs the actual VTK render."""
    self.update()
def QVTKRenderWidgetConeExample():
    """A simple example that uses the QVTKRenderWindowInteractor class."""
    # every QT app needs an app
    app = QtGui.QApplication(['QVTKRenderWindowInteractor'])
    # create the widget
    widget = QVTKRenderWindowInteractor()
    widget.Initialize()
    widget.Start()
    # if you dont want the 'q' key to exit comment this.
    widget.AddObserver("ExitEvent", lambda o, e, a=app: a.quit())
    # build a renderer showing a single cone
    ren = vtk.vtkRenderer()
    widget.GetRenderWindow().AddRenderer(ren)
    cone = vtk.vtkConeSource()
    cone.SetResolution(8)
    coneMapper = vtk.vtkPolyDataMapper()
    coneMapper.SetInput(cone.GetOutput())
    coneActor = vtk.vtkActor()
    coneActor.SetMapper(coneMapper)
    ren.AddActor(coneActor)
    # show the widget
    widget.show()
    # start event processing
    app.exec_()
if __name__ == "__main__":
    QVTKRenderWidgetConeExample()
|
liulion/mayavi
|
tvtk/pyface/ui/qt4/QVTKRenderWindowInteractor.py
|
Python
|
bsd-3-clause
| 19,019
|
[
"CRYSTAL",
"VTK"
] |
6a06837374a88618e107c4ef3850bfa3dcd4e3fa80a158b75ecb0e591b389ca7
|
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
#
import os
import GEOM
import SALOMEDS
import hexablock
# Different possible filling configurations for cylindrical grids
# (translated from the original French comment).
print "test grille cylindrique..."
# Build a document with six grid origins laid out on a 2x3 lattice.
doc = hexablock.addDocument()
orig1 = doc.addVertex(0, 0, 0)
orig2 = doc.addVertex(10, 0, 0)
orig3 = doc.addVertex(0, 10, 0)
orig4 = doc.addVertex(10, 10, 0)
orig5 = doc.addVertex(0, 20, 0)
orig6 = doc.addVertex(10, 20, 0)
vz = doc.addVector(0, 0, 1)
vx = doc.addVector(1, 0, 0)
# NOTE(review): dr/dl presumably are radial/axial steps and nr/nl the
# radial/axial subdivision counts of makeCylindrical — confirm against
# the HexaBlock API documentation.
dr = 1
dl = 1
nr = 2
nl = 3
# Six grids varying the sweep angle (360 or 270 degrees) and the number
# of angular sectors (4 to 8).
c1 = doc.makeCylindrical(orig1, vx, vz, dr, 360, dl, nr, 4, nl, True)
c2 = doc.makeCylindrical(orig2, vx, vz, dr, 360, dl, nr, 8, nl, True)
c3 = doc.makeCylindrical(orig3, vx, vz, dr, 270, dl, nr, 8, nl, True)
c4 = doc.makeCylindrical(orig4, vx, vz, dr, 270, dl, nr, 7, nl, True)
c5 = doc.makeCylindrical(orig5, vx, vz, dr, 360, dl, nr, 5, nl, True)
c6 = doc.makeCylindrical(orig6, vx, vz, dr, 360, dl, nr, 6, nl, True)
# Tag two hexahedra of the second grid with scalar value 5.
base2 = nr * nl * 8
for i in range(2):
    c2.getHexa(base2 + i).setScalar(5)
file_name = os.path.join(os.environ['TMP'], 'grilles_cylindriques.vtk')
#### doc.saveVtk(file_name)
print "...test grille cylindrique OK"
|
FedoraScientific/salome-hexablock
|
doc/pyplots/test_make_cyl_grid.py
|
Python
|
lgpl-2.1
| 1,958
|
[
"VTK"
] |
54fa41155232989d63eea9719f9b7294d978ad5f491aa70805cdb4ad41fbcec0
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.asset_v1p2beta1.services.asset_service import AssetServiceAsyncClient
from google.cloud.asset_v1p2beta1.services.asset_service import AssetServiceClient
from google.cloud.asset_v1p2beta1.services.asset_service import transports
from google.cloud.asset_v1p2beta1.types import asset_service
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Dummy client-certificate source used by the mTLS tests."""
    cert_bytes, key_bytes = b"cert bytes", b"key bytes"
    return cert_bytes, key_bytes
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a stand-in default endpoint for *client*.

    When the client's DEFAULT_ENDPOINT mentions localhost, substitute
    "foo.googleapis.com" so mTLS endpoint derivation can still be
    exercised; otherwise keep the client's own default endpoint.
    """
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint inserts ".mtls" for googleapis.com hosts,
    is idempotent, and passes through None and non-Google endpoints."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"
    assert AssetServiceClient._get_default_mtls_endpoint(None) is None
    assert (
        AssetServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    )
    assert (
        AssetServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
        == api_mtls_endpoint
    )
    assert (
        AssetServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        AssetServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
        == sandbox_mtls_endpoint
    )
    assert AssetServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [AssetServiceClient, AssetServiceAsyncClient,])
def test_asset_service_client_from_service_account_info(client_class):
    """from_service_account_info builds a client with the factory-made
    credentials and the default host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "cloudasset.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.AssetServiceGrpcTransport, "grpc"),
        (transports.AssetServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_asset_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """Self-signed JWT access is requested from the credentials only when
    always_use_jwt_access=True is passed to the transport."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [AssetServiceClient, AssetServiceAsyncClient,])
def test_asset_service_client_from_service_account_file(client_class):
    """Both from_service_account_file and its _json alias build a client
    with the factory-made credentials and the default host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "cloudasset.googleapis.com:443"
def test_asset_service_client_get_transport_class():
    """get_transport_class returns the gRPC transport both by default and
    when requested by name."""
    transport = AssetServiceClient.get_transport_class()
    available_transports = [
        transports.AssetServiceGrpcTransport,
    ]
    assert transport in available_transports
    transport = AssetServiceClient.get_transport_class("grpc")
    assert transport == transports.AssetServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (AssetServiceClient, transports.AssetServiceGrpcTransport, "grpc"),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    AssetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AssetServiceClient)
)
@mock.patch.object(
    AssetServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(AssetServiceAsyncClient),
)
def test_asset_service_client_client_options(
    client_class, transport_class, transport_name
):
    """client_options and the GOOGLE_API_USE_MTLS_ENDPOINT /
    GOOGLE_API_USE_CLIENT_CERTIFICATE env vars control which endpoint and
    transport configuration the client is constructed with."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(AssetServiceClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(AssetServiceClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (AssetServiceClient, transports.AssetServiceGrpcTransport, "grpc", "true"),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (AssetServiceClient, transports.AssetServiceGrpcTransport, "grpc", "false"),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    AssetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AssetServiceClient)
)
@mock.patch.object(
    AssetServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(AssetServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_asset_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client switches to the
    mTLS endpoint exactly when a client certificate is available and
    GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class", [AssetServiceClient, AssetServiceAsyncClient])
@mock.patch.object(
    AssetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AssetServiceClient)
)
@mock.patch.object(
    AssetServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(AssetServiceAsyncClient),
)
def test_asset_service_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source resolves (endpoint, cert source)
    from client_options plus the mTLS-related environment variables."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (AssetServiceClient, transports.AssetServiceGrpcTransport, "grpc"),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_asset_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes passed via client_options are forwarded to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            AssetServiceClient,
            transports.AssetServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_asset_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file in client_options is forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_asset_service_client_client_options_from_dict():
    """client_options may be given as a plain dict instead of ClientOptions."""
    with mock.patch(
        "google.cloud.asset_v1p2beta1.services.asset_service.transports.AssetServiceGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = AssetServiceClient(client_options={"api_endpoint": "squid.clam.whelk"})
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            AssetServiceClient,
            transports.AssetServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_asset_service_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from a file are the ones used to create the
    gRPC channel (not the ADC default credentials)."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "cloudasset.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=None,
            default_host="cloudasset.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("request_type", [asset_service.CreateFeedRequest, dict,])
def test_create_feed(request_type, transport: str = "grpc"):
    """create_feed sends a CreateFeedRequest over gRPC and returns the Feed."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_feed), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = asset_service.Feed(
            name="name_value",
            asset_names=["asset_names_value"],
            asset_types=["asset_types_value"],
            content_type=asset_service.ContentType.RESOURCE,
        )
        response = client.create_feed(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.CreateFeedRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, asset_service.Feed)
    assert response.name == "name_value"
    assert response.asset_names == ["asset_names_value"]
    assert response.asset_types == ["asset_types_value"]
    assert response.content_type == asset_service.ContentType.RESOURCE
def test_create_feed_empty_call():
    """create_feed() with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_feed), "__call__") as call:
        client.create_feed()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.CreateFeedRequest()
@pytest.mark.asyncio
async def test_create_feed_async(
    transport: str = "grpc_asyncio", request_type=asset_service.CreateFeedRequest
):
    """Async variant: create_feed awaits the gRPC call and returns the Feed."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_feed), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.Feed(
                name="name_value",
                asset_names=["asset_names_value"],
                asset_types=["asset_types_value"],
                content_type=asset_service.ContentType.RESOURCE,
            )
        )
        response = await client.create_feed(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.CreateFeedRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, asset_service.Feed)
    assert response.name == "name_value"
    assert response.asset_names == ["asset_names_value"]
    assert response.asset_types == ["asset_types_value"]
    assert response.content_type == asset_service.ContentType.RESOURCE
@pytest.mark.asyncio
async def test_create_feed_async_from_dict():
    """The async create_feed also accepts the request as a plain dict."""
    await test_create_feed_async(request_type=dict)
def test_create_feed_field_headers():
    """The routing header for create_feed must carry the parent field."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Populate a URI-routed field so it has to surface in metadata.
    req = asset_service.CreateFeedRequest()
    req.parent = "parent/value"

    with mock.patch.object(type(client.transport.create_feed), "__call__") as rpc:
        rpc.return_value = asset_service.Feed()
        client.create_feed(req)

    # Exactly one stub call, made with the original request object.
    assert len(rpc.mock_calls) == 1
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == req

    # The x-goog-request-params header must encode the parent value.
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_create_feed_field_headers_async():
    """Async create_feed must also send the parent routing header."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Populate a URI-routed field so it has to surface in metadata.
    req = asset_service.CreateFeedRequest()
    req.parent = "parent/value"

    with mock.patch.object(type(client.transport.create_feed), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(asset_service.Feed())
        await client.create_feed(req)

    # The stub was called with the original request object.
    assert len(rpc.mock_calls)
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == req

    # The x-goog-request-params header must encode the parent value.
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
def test_create_feed_flattened():
    """Flattened keyword arguments must be packed into the request."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.create_feed), "__call__") as rpc:
        rpc.return_value = asset_service.Feed()
        # Invoke with a truthy value for each flattened field.
        client.create_feed(parent="parent_value",)

    # Exactly one call, with the keyword landing on request.parent.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0].parent == "parent_value"
def test_create_feed_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.create_feed(
            asset_service.CreateFeedRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_create_feed_flattened_async():
    """Async flattened keyword arguments must be packed into the request.

    Fix: removed a dead ``call.return_value = asset_service.Feed()``
    assignment that was immediately overwritten by the
    ``FakeUnaryUnaryCall`` assignment on the next line.
    """
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.create_feed), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(asset_service.Feed())
        # Invoke with a truthy value for each flattened field.
        await client.create_feed(parent="parent_value",)

    # The stub was called and the keyword landed on request.parent.
    assert len(rpc.mock_calls)
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_create_feed_flattened_error_async():
    """Async: mixing a request object with flattened fields must raise."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        await client.create_feed(
            asset_service.CreateFeedRequest(), parent="parent_value",
        )
@pytest.mark.parametrize("request_type", [asset_service.GetFeedRequest, dict,])
def test_get_feed(request_type, transport: str = "grpc"):
    """get_feed must forward the request and unpack the Feed response."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request object is sufficient.
    req = request_type()

    # Patch the gRPC stub method and supply a canned response.
    with mock.patch.object(type(client.transport.get_feed), "__call__") as rpc:
        rpc.return_value = asset_service.Feed(
            name="name_value",
            asset_names=["asset_names_value"],
            asset_types=["asset_types_value"],
            content_type=asset_service.ContentType.RESOURCE,
        )
        resp = client.get_feed(req)

    # Exactly one stub call, made with a GetFeedRequest.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == asset_service.GetFeedRequest()

    # The canned Feed fields must round-trip through the client.
    assert isinstance(resp, asset_service.Feed)
    assert resp.name == "name_value"
    assert resp.asset_names == ["asset_names_value"]
    assert resp.asset_types == ["asset_types_value"]
    assert resp.content_type == asset_service.ContentType.RESOURCE
def test_get_feed_empty_call():
    """Coverage failsafe: get_feed() with no request and no flattened
    fields must still hit the stub with a default GetFeedRequest."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub method so no real RPC is issued.
    with mock.patch.object(type(client.transport.get_feed), "__call__") as rpc:
        client.get_feed()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == asset_service.GetFeedRequest()
@pytest.mark.asyncio
async def test_get_feed_async(
    transport: str = "grpc_asyncio", request_type=asset_service.GetFeedRequest
):
    """Async get_feed must forward the request and unpack the Feed."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request object is sufficient.
    req = request_type()

    # Patch the async stub and wrap the canned Feed in a fake unary call.
    with mock.patch.object(type(client.transport.get_feed), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.Feed(
                name="name_value",
                asset_names=["asset_names_value"],
                asset_types=["asset_types_value"],
                content_type=asset_service.ContentType.RESOURCE,
            )
        )
        resp = await client.get_feed(req)

    # The stub must have been invoked with a GetFeedRequest.
    assert len(rpc.mock_calls)
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == asset_service.GetFeedRequest()

    # The canned fields must round-trip through the async client.
    assert isinstance(resp, asset_service.Feed)
    assert resp.name == "name_value"
    assert resp.asset_names == ["asset_names_value"]
    assert resp.asset_types == ["asset_types_value"]
    assert resp.content_type == asset_service.ContentType.RESOURCE
@pytest.mark.asyncio
async def test_get_feed_async_from_dict():
    """The async get_feed path must also accept a plain dict request."""
    await test_get_feed_async(request_type=dict)
def test_get_feed_field_headers():
    """The routing header for get_feed must carry the name field."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Populate a URI-routed field so it has to surface in metadata.
    req = asset_service.GetFeedRequest()
    req.name = "name/value"

    with mock.patch.object(type(client.transport.get_feed), "__call__") as rpc:
        rpc.return_value = asset_service.Feed()
        client.get_feed(req)

    # Exactly one stub call, made with the original request object.
    assert len(rpc.mock_calls) == 1
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == req

    # The x-goog-request-params header must encode the name value.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_get_feed_field_headers_async():
    """Async get_feed must also send the name routing header."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Populate a URI-routed field so it has to surface in metadata.
    req = asset_service.GetFeedRequest()
    req.name = "name/value"

    with mock.patch.object(type(client.transport.get_feed), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(asset_service.Feed())
        await client.get_feed(req)

    # The stub was called with the original request object.
    assert len(rpc.mock_calls)
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == req

    # The x-goog-request-params header must encode the name value.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
def test_get_feed_flattened():
    """Flattened keyword arguments must be packed into the request."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.get_feed), "__call__") as rpc:
        rpc.return_value = asset_service.Feed()
        # Invoke with a truthy value for each flattened field.
        client.get_feed(name="name_value",)

    # Exactly one call, with the keyword landing on request.name.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0].name == "name_value"
def test_get_feed_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.get_feed(
            asset_service.GetFeedRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_feed_flattened_async():
    """Async flattened keyword arguments must be packed into the request.

    Fix: removed a dead ``call.return_value = asset_service.Feed()``
    assignment that was immediately overwritten by the
    ``FakeUnaryUnaryCall`` assignment on the next line.
    """
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.get_feed), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(asset_service.Feed())
        # Invoke with a truthy value for each flattened field.
        await client.get_feed(name="name_value",)

    # The stub was called and the keyword landed on request.name.
    assert len(rpc.mock_calls)
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_feed_flattened_error_async():
    """Async: mixing a request object with flattened fields must raise."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        await client.get_feed(
            asset_service.GetFeedRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [asset_service.ListFeedsRequest, dict,])
def test_list_feeds(request_type, transport: str = "grpc"):
    """list_feeds must forward the request and return a ListFeedsResponse."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request object is sufficient.
    req = request_type()

    # Patch the gRPC stub method and supply a canned response.
    with mock.patch.object(type(client.transport.list_feeds), "__call__") as rpc:
        rpc.return_value = asset_service.ListFeedsResponse()
        resp = client.list_feeds(req)

    # Exactly one stub call, made with a ListFeedsRequest.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == asset_service.ListFeedsRequest()

    # The response type must be preserved by the client.
    assert isinstance(resp, asset_service.ListFeedsResponse)
def test_list_feeds_empty_call():
    """Coverage failsafe: list_feeds() with no request and no flattened
    fields must still hit the stub with a default ListFeedsRequest."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub method so no real RPC is issued.
    with mock.patch.object(type(client.transport.list_feeds), "__call__") as rpc:
        client.list_feeds()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == asset_service.ListFeedsRequest()
@pytest.mark.asyncio
async def test_list_feeds_async(
    transport: str = "grpc_asyncio", request_type=asset_service.ListFeedsRequest
):
    """Async list_feeds must forward the request and return the response."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request object is sufficient.
    req = request_type()

    # Patch the async stub and wrap the canned response in a fake call.
    with mock.patch.object(type(client.transport.list_feeds), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.ListFeedsResponse()
        )
        resp = await client.list_feeds(req)

    # The stub must have been invoked with a ListFeedsRequest.
    assert len(rpc.mock_calls)
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == asset_service.ListFeedsRequest()

    # The response type must be preserved by the async client.
    assert isinstance(resp, asset_service.ListFeedsResponse)
@pytest.mark.asyncio
async def test_list_feeds_async_from_dict():
    """The async list_feeds path must also accept a plain dict request."""
    await test_list_feeds_async(request_type=dict)
def test_list_feeds_field_headers():
    """The routing header for list_feeds must carry the parent field."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Populate a URI-routed field so it has to surface in metadata.
    req = asset_service.ListFeedsRequest()
    req.parent = "parent/value"

    with mock.patch.object(type(client.transport.list_feeds), "__call__") as rpc:
        rpc.return_value = asset_service.ListFeedsResponse()
        client.list_feeds(req)

    # Exactly one stub call, made with the original request object.
    assert len(rpc.mock_calls) == 1
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == req

    # The x-goog-request-params header must encode the parent value.
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_list_feeds_field_headers_async():
    """Async list_feeds must also send the parent routing header."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Populate a URI-routed field so it has to surface in metadata.
    req = asset_service.ListFeedsRequest()
    req.parent = "parent/value"

    with mock.patch.object(type(client.transport.list_feeds), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.ListFeedsResponse()
        )
        await client.list_feeds(req)

    # The stub was called with the original request object.
    assert len(rpc.mock_calls)
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == req

    # The x-goog-request-params header must encode the parent value.
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
def test_list_feeds_flattened():
    """Flattened keyword arguments must be packed into the request."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.list_feeds), "__call__") as rpc:
        rpc.return_value = asset_service.ListFeedsResponse()
        # Invoke with a truthy value for each flattened field.
        client.list_feeds(parent="parent_value",)

    # Exactly one call, with the keyword landing on request.parent.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0].parent == "parent_value"
def test_list_feeds_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.list_feeds(
            asset_service.ListFeedsRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_feeds_flattened_async():
    """Async flattened keyword arguments must be packed into the request.

    Fix: removed a dead ``call.return_value = asset_service.ListFeedsResponse()``
    assignment that was immediately overwritten by the
    ``FakeUnaryUnaryCall`` assignment on the next line.
    """
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.list_feeds), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.ListFeedsResponse()
        )
        # Invoke with a truthy value for each flattened field.
        await client.list_feeds(parent="parent_value",)

    # The stub was called and the keyword landed on request.parent.
    assert len(rpc.mock_calls)
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_feeds_flattened_error_async():
    """Async: mixing a request object with flattened fields must raise."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        await client.list_feeds(
            asset_service.ListFeedsRequest(), parent="parent_value",
        )
@pytest.mark.parametrize("request_type", [asset_service.UpdateFeedRequest, dict,])
def test_update_feed(request_type, transport: str = "grpc"):
    """update_feed must forward the request and unpack the Feed response."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request object is sufficient.
    req = request_type()

    # Patch the gRPC stub method and supply a canned response.
    with mock.patch.object(type(client.transport.update_feed), "__call__") as rpc:
        rpc.return_value = asset_service.Feed(
            name="name_value",
            asset_names=["asset_names_value"],
            asset_types=["asset_types_value"],
            content_type=asset_service.ContentType.RESOURCE,
        )
        resp = client.update_feed(req)

    # Exactly one stub call, made with an UpdateFeedRequest.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == asset_service.UpdateFeedRequest()

    # The canned Feed fields must round-trip through the client.
    assert isinstance(resp, asset_service.Feed)
    assert resp.name == "name_value"
    assert resp.asset_names == ["asset_names_value"]
    assert resp.asset_types == ["asset_types_value"]
    assert resp.content_type == asset_service.ContentType.RESOURCE
def test_update_feed_empty_call():
    """Coverage failsafe: update_feed() with no request and no flattened
    fields must still hit the stub with a default UpdateFeedRequest."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub method so no real RPC is issued.
    with mock.patch.object(type(client.transport.update_feed), "__call__") as rpc:
        client.update_feed()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == asset_service.UpdateFeedRequest()
@pytest.mark.asyncio
async def test_update_feed_async(
    transport: str = "grpc_asyncio", request_type=asset_service.UpdateFeedRequest
):
    """Async update_feed must forward the request and unpack the Feed."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request object is sufficient.
    req = request_type()

    # Patch the async stub and wrap the canned Feed in a fake unary call.
    with mock.patch.object(type(client.transport.update_feed), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.Feed(
                name="name_value",
                asset_names=["asset_names_value"],
                asset_types=["asset_types_value"],
                content_type=asset_service.ContentType.RESOURCE,
            )
        )
        resp = await client.update_feed(req)

    # The stub must have been invoked with an UpdateFeedRequest.
    assert len(rpc.mock_calls)
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == asset_service.UpdateFeedRequest()

    # The canned fields must round-trip through the async client.
    assert isinstance(resp, asset_service.Feed)
    assert resp.name == "name_value"
    assert resp.asset_names == ["asset_names_value"]
    assert resp.asset_types == ["asset_types_value"]
    assert resp.content_type == asset_service.ContentType.RESOURCE
@pytest.mark.asyncio
async def test_update_feed_async_from_dict():
    """The async update_feed path must also accept a plain dict request."""
    await test_update_feed_async(request_type=dict)
def test_update_feed_field_headers():
    """The routing header for update_feed must carry the feed.name field."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Populate a URI-routed (nested) field so it has to surface in metadata.
    req = asset_service.UpdateFeedRequest()
    req.feed.name = "feed.name/value"

    with mock.patch.object(type(client.transport.update_feed), "__call__") as rpc:
        rpc.return_value = asset_service.Feed()
        client.update_feed(req)

    # Exactly one stub call, made with the original request object.
    assert len(rpc.mock_calls) == 1
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == req

    # The x-goog-request-params header must encode the nested feed.name.
    assert ("x-goog-request-params", "feed.name=feed.name/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_update_feed_field_headers_async():
    """Async update_feed must also send the feed.name routing header."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Populate a URI-routed (nested) field so it has to surface in metadata.
    req = asset_service.UpdateFeedRequest()
    req.feed.name = "feed.name/value"

    with mock.patch.object(type(client.transport.update_feed), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(asset_service.Feed())
        await client.update_feed(req)

    # The stub was called with the original request object.
    assert len(rpc.mock_calls)
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == req

    # The x-goog-request-params header must encode the nested feed.name.
    assert ("x-goog-request-params", "feed.name=feed.name/value",) in call_kwargs["metadata"]
def test_update_feed_flattened():
    """Flattened keyword arguments must be packed into the request."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.update_feed), "__call__") as rpc:
        rpc.return_value = asset_service.Feed()
        # Invoke with a truthy value for each flattened field.
        client.update_feed(feed=asset_service.Feed(name="name_value"),)

    # Exactly one call, with the keyword landing on request.feed.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0].feed == asset_service.Feed(name="name_value")
def test_update_feed_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.update_feed(
            asset_service.UpdateFeedRequest(),
            feed=asset_service.Feed(name="name_value"),
        )
@pytest.mark.asyncio
async def test_update_feed_flattened_async():
    """Async flattened keyword arguments must be packed into the request.

    Fix: removed a dead ``call.return_value = asset_service.Feed()``
    assignment that was immediately overwritten by the
    ``FakeUnaryUnaryCall`` assignment on the next line.
    """
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.update_feed), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(asset_service.Feed())
        # Invoke with a truthy value for each flattened field.
        await client.update_feed(feed=asset_service.Feed(name="name_value"),)

    # The stub was called and the keyword landed on request.feed.
    assert len(rpc.mock_calls)
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0].feed == asset_service.Feed(name="name_value")
@pytest.mark.asyncio
async def test_update_feed_flattened_error_async():
    """Async: mixing a request object with flattened fields must raise."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        await client.update_feed(
            asset_service.UpdateFeedRequest(),
            feed=asset_service.Feed(name="name_value"),
        )
@pytest.mark.parametrize("request_type", [asset_service.DeleteFeedRequest, dict,])
def test_delete_feed(request_type, transport: str = "grpc"):
    """delete_feed must forward the request and return None."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request object is sufficient.
    req = request_type()

    # Patch the gRPC stub method; delete returns nothing.
    with mock.patch.object(type(client.transport.delete_feed), "__call__") as rpc:
        rpc.return_value = None
        resp = client.delete_feed(req)

    # Exactly one stub call, made with a DeleteFeedRequest.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == asset_service.DeleteFeedRequest()

    # A delete call has no payload.
    assert resp is None
def test_delete_feed_empty_call():
    """Coverage failsafe: delete_feed() with no request and no flattened
    fields must still hit the stub with a default DeleteFeedRequest."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub method so no real RPC is issued.
    with mock.patch.object(type(client.transport.delete_feed), "__call__") as rpc:
        client.delete_feed()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == asset_service.DeleteFeedRequest()
@pytest.mark.asyncio
async def test_delete_feed_async(
    transport: str = "grpc_asyncio", request_type=asset_service.DeleteFeedRequest
):
    """Async delete_feed must forward the request and return None."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request object is sufficient.
    req = request_type()

    # Patch the async stub; delete yields an empty (None) payload.
    with mock.patch.object(type(client.transport.delete_feed), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        resp = await client.delete_feed(req)

    # The stub must have been invoked with a DeleteFeedRequest.
    assert len(rpc.mock_calls)
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == asset_service.DeleteFeedRequest()

    # A delete call has no payload.
    assert resp is None
@pytest.mark.asyncio
async def test_delete_feed_async_from_dict():
    """The async delete_feed path must also accept a plain dict request."""
    await test_delete_feed_async(request_type=dict)
def test_delete_feed_field_headers():
    """The routing header for delete_feed must carry the name field."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Populate a URI-routed field so it has to surface in metadata.
    req = asset_service.DeleteFeedRequest()
    req.name = "name/value"

    with mock.patch.object(type(client.transport.delete_feed), "__call__") as rpc:
        rpc.return_value = None
        client.delete_feed(req)

    # Exactly one stub call, made with the original request object.
    assert len(rpc.mock_calls) == 1
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == req

    # The x-goog-request-params header must encode the name value.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_delete_feed_field_headers_async():
    """Async delete_feed must also send the name routing header."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Populate a URI-routed field so it has to surface in metadata.
    req = asset_service.DeleteFeedRequest()
    req.name = "name/value"

    with mock.patch.object(type(client.transport.delete_feed), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_feed(req)

    # The stub was called with the original request object.
    assert len(rpc.mock_calls)
    _, call_args, call_kwargs = rpc.mock_calls[0]
    assert call_args[0] == req

    # The x-goog-request-params header must encode the name value.
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
def test_delete_feed_flattened():
    """Flattened keyword arguments must be packed into the request."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.delete_feed), "__call__") as rpc:
        rpc.return_value = None
        # Invoke with a truthy value for each flattened field.
        client.delete_feed(name="name_value",)

    # Exactly one call, with the keyword landing on request.name.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0].name == "name_value"
def test_delete_feed_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.delete_feed(
            asset_service.DeleteFeedRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_feed_flattened_async():
    """Async flattened keyword arguments must be packed into the request.

    Fix: removed a dead ``call.return_value = None`` assignment that was
    immediately overwritten by the ``FakeUnaryUnaryCall`` assignment on
    the next line.
    """
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.delete_feed), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Invoke with a truthy value for each flattened field.
        await client.delete_feed(name="name_value",)

    # The stub was called and the keyword landed on request.name.
    assert len(rpc.mock_calls)
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_feed_flattened_error_async():
    """Async: mixing a request object with flattened fields must raise."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        await client.delete_feed(
            asset_service.DeleteFeedRequest(), name="name_value",
        )
def test_credentials_transport_error():
    """Mutually exclusive client-constructor arguments must raise ValueError."""

    def make_transport():
        # Each scenario gets its own fresh transport instance.
        return transports.AssetServiceGrpcTransport(
            credentials=ga_credentials.AnonymousCredentials()
        )

    # Credentials together with a transport instance.
    with pytest.raises(ValueError):
        AssetServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=make_transport(),
        )

    # A credentials file together with a transport instance.
    with pytest.raises(ValueError):
        AssetServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=make_transport(),
        )

    # An api_key together with a transport instance.
    key_options = client_options.ClientOptions()
    key_options.api_key = "api_key"
    with pytest.raises(ValueError):
        AssetServiceClient(client_options=key_options, transport=make_transport())

    # An api_key together with explicit credentials.
    mocked_options = mock.Mock()
    mocked_options.api_key = "api_key"
    with pytest.raises(ValueError):
        AssetServiceClient(
            client_options=mocked_options,
            credentials=ga_credentials.AnonymousCredentials(),
        )

    # Scopes together with a transport instance.
    with pytest.raises(ValueError):
        AssetServiceClient(
            client_options={"scopes": ["1", "2"]}, transport=make_transport()
        )
def test_transport_instance():
    """A client may be constructed around a caller-supplied transport."""
    grpc_transport = transports.AssetServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials()
    )
    client = AssetServiceClient(transport=grpc_transport)
    # The client must adopt exactly the instance it was given.
    assert client.transport is grpc_transport
def test_transport_get_channel():
    """Both sync and async gRPC transports expose a usable channel."""
    for transport_cls in (
        transports.AssetServiceGrpcTransport,
        transports.AssetServiceGrpcAsyncIOTransport,
    ):
        transport = transport_cls(credentials=ga_credentials.AnonymousCredentials())
        # The channel must be truthy once accessed.
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.AssetServiceGrpcTransport,
        transports.AssetServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to Application Default Credentials when none given."""
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        # ADC must have been consulted exactly once.
        adc.assert_called_once()
def test_transport_grpc_default():
    """With no transport argument, the sync client defaults to gRPC."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials())
    assert isinstance(client.transport, transports.AssetServiceGrpcTransport)
def test_asset_service_base_transport_error():
    """`credentials` and `credentials_file` are mutually exclusive."""
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.AssetServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_asset_service_base_transport():
    """Every RPC method on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport with its __init__ stubbed out.
    with mock.patch(
        "google.cloud.asset_v1p2beta1.services.asset_service.transports.AssetServiceTransport.__init__"
    ) as init_mock:
        init_mock.return_value = None
        transport = transports.AssetServiceTransport(
            credentials=ga_credentials.AnonymousCredentials()
        )

    # The base class only declares the interface; each method must refuse to run.
    for method_name in (
        "create_feed",
        "get_feed",
        "list_feeds",
        "update_feed",
        "delete_feed",
    ):
        with pytest.raises(NotImplementedError):
            getattr(transport, method_name)(request=object())

    # close() is part of the abstract interface as well.
    with pytest.raises(NotImplementedError):
        transport.close()
def test_asset_service_base_transport_with_credentials_file():
    """A credentials file is loaded via google.auth with the expected scopes."""
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.asset_v1p2beta1.services.asset_service.transports.AssetServiceTransport._prep_wrapped_messages"
    ) as prep:
        prep.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.AssetServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus"
        )
        # The file must be handed to google.auth along with the default
        # cloud-platform scope and the quota project.
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_asset_service_base_transport_with_adc():
    """Without explicit credentials the base transport consults ADC."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.asset_v1p2beta1.services.asset_service.transports.AssetServiceTransport._prep_wrapped_messages"
    ) as prep:
        prep.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.AssetServiceTransport()
        # ADC is the credential source of last resort.
        adc.assert_called_once()
def test_asset_service_auth_adc():
    """Constructing a client with no credentials falls back to ADC."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        AssetServiceClient()
        # ADC is queried with the service's default scope only.
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.AssetServiceGrpcTransport,
        transports.AssetServiceGrpcAsyncIOTransport,
    ],
)
def test_asset_service_transport_auth_adc(transport_class):
    """Transports pass caller-supplied scopes and quota project through to ADC."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # Explicit scopes win, but the default scope is still advertised.
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.AssetServiceGrpcTransport, grpc_helpers),
        (transports.AssetServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_asset_service_transport_create_channel(transport_class, grpc_helpers):
    """Channel creation forwards ADC credentials and defaults to the helpers.

    Both the sync and async transports must pass the service host, the
    ADC-derived credentials, the default cloud-platform scope and the
    unlimited message-size options into `create_channel`.
    """
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # The helper must receive the ADC credentials plus the caller's
        # explicit scopes/quota project alongside the service defaults.
        create_channel.assert_called_with(
            "cloudasset.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="cloudasset.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.AssetServiceGrpcTransport, transports.AssetServiceGrpcAsyncIOTransport],
)
def test_asset_service_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS setup: explicit SSL credentials win; otherwise the cert callback is used."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        # The supplied SSL credentials must be passed through unchanged.
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            # The cert/key pair yielded by the callback must be wrapped into
            # grpc SSL channel credentials.
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_asset_service_host_no_port():
    """An api_endpoint without a port gets the default :443 appended."""
    options = client_options.ClientOptions(api_endpoint="cloudasset.googleapis.com")
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options
    )
    assert client.transport._host == "cloudasset.googleapis.com:443"
def test_asset_service_host_with_port():
    """An api_endpoint with an explicit port is used verbatim."""
    options = client_options.ClientOptions(api_endpoint="cloudasset.googleapis.com:8000")
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options
    )
    assert client.transport._host == "cloudasset.googleapis.com:8000"
def test_asset_service_grpc_transport_channel():
    """A caller-supplied channel is adopted as-is by the sync gRPC transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.AssetServiceGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Identity comparison with None (was `== None`, a PEP 8 / E711 violation).
    assert transport._ssl_channel_credentials is None
def test_asset_service_grpc_asyncio_transport_channel():
    """A caller-supplied channel is adopted as-is by the asyncio gRPC transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.AssetServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Identity comparison with None (was `== None`, a PEP 8 / E711 violation).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.AssetServiceGrpcTransport, transports.AssetServiceGrpcAsyncIOTransport],
)
def test_asset_service_transport_channel_mtls_with_client_cert_source(transport_class):
    """Deprecated api_mtls_endpoint/client_cert_source still set up an mTLS channel.

    The client-certificate callback must be turned into SSL channel
    credentials and the channel created against the mTLS endpoint.
    """
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # The deprecated arguments must emit a DeprecationWarning but
            # still work, pulling credentials from ADC.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            # The cert/key pair asserted here is what the shared
            # client_cert_source_callback is expected to produce.
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            # The channel targets the mTLS endpoint with the standard
            # unlimited message-size options.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.AssetServiceGrpcTransport, transports.AssetServiceGrpcAsyncIOTransport],
)
def test_asset_service_transport_channel_mtls_with_adc(transport_class):
    """With api_mtls_endpoint but no cert callback, ADC-provided SSL creds are used.

    google.auth.transport.grpc.SslCredentials is stubbed so its
    ssl_credentials property returns a known mock, which must reach
    create_channel.
    """
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # The deprecated api_mtls_endpoint argument still works but warns.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            # The ADC-derived SSL credentials must be wired into the channel.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_feed_path():
    """feed_path renders the projects/{project}/feeds/{feed} template."""
    assert AssetServiceClient.feed_path("squid", "clam") == "projects/squid/feeds/clam"
def test_parse_feed_path():
    """parse_feed_path is the inverse of feed_path."""
    parts = {"project": "whelk", "feed": "octopus"}
    # Round-trip: build a path, then parse it back into its components.
    path = AssetServiceClient.feed_path(**parts)
    assert AssetServiceClient.parse_feed_path(path) == parts
def test_common_billing_account_path():
    """common_billing_account_path renders billingAccounts/{billing_account}."""
    assert (
        AssetServiceClient.common_billing_account_path("oyster")
        == "billingAccounts/oyster"
    )
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path inverts common_billing_account_path."""
    parts = {"billing_account": "nudibranch"}
    path = AssetServiceClient.common_billing_account_path(**parts)
    assert AssetServiceClient.parse_common_billing_account_path(path) == parts
def test_common_folder_path():
    """common_folder_path renders folders/{folder}."""
    assert AssetServiceClient.common_folder_path("cuttlefish") == "folders/cuttlefish"
def test_parse_common_folder_path():
    """parse_common_folder_path inverts common_folder_path."""
    parts = {"folder": "mussel"}
    path = AssetServiceClient.common_folder_path(**parts)
    assert AssetServiceClient.parse_common_folder_path(path) == parts
def test_common_organization_path():
    """common_organization_path renders organizations/{organization}."""
    assert (
        AssetServiceClient.common_organization_path("winkle") == "organizations/winkle"
    )
def test_parse_common_organization_path():
    """parse_common_organization_path inverts common_organization_path."""
    parts = {"organization": "nautilus"}
    path = AssetServiceClient.common_organization_path(**parts)
    assert AssetServiceClient.parse_common_organization_path(path) == parts
def test_common_project_path():
    """common_project_path renders projects/{project}."""
    assert AssetServiceClient.common_project_path("scallop") == "projects/scallop"
def test_parse_common_project_path():
    """parse_common_project_path inverts common_project_path."""
    parts = {"project": "abalone"}
    path = AssetServiceClient.common_project_path(**parts)
    assert AssetServiceClient.parse_common_project_path(path) == parts
def test_common_location_path():
    """common_location_path renders projects/{project}/locations/{location}."""
    assert (
        AssetServiceClient.common_location_path("squid", "clam")
        == "projects/squid/locations/clam"
    )
def test_parse_common_location_path():
    """parse_common_location_path inverts common_location_path."""
    parts = {"project": "whelk", "location": "octopus"}
    path = AssetServiceClient.common_location_path(**parts)
    assert AssetServiceClient.parse_common_location_path(path) == parts
def test_client_with_default_client_info():
    """client_info is forwarded to _prep_wrapped_messages for client and transport."""
    info = gapic_v1.client_info.ClientInfo()

    # Constructing a client wires client_info through to the transport prep step.
    with mock.patch.object(
        transports.AssetServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        AssetServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=info
        )
        prep.assert_called_once_with(info)

    # Constructing the transport directly behaves the same way.
    with mock.patch.object(
        transports.AssetServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = AssetServiceClient.get_transport_class()
        transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=info
        )
        prep.assert_called_once_with(info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Leaving the async client context closes the underlying channel."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio"
    )
    channel_type = type(client.transport.grpc_channel)
    with mock.patch.object(channel_type, "close") as close:
        async with client:
            # Not closed while the context is still open...
            close.assert_not_called()
        # ...but exactly once on exit.
        close.assert_called_once()
def test_transport_close():
    """Leaving the sync client context closes the transport's channel.

    Fix: the local mapping used to be named ``transports``, shadowing the
    imported ``transports`` module inside this test; renamed for clarity.
    """
    close_names = {
        "grpc": "_grpc_channel",
    }
    for transport_name, close_name in close_names.items():
        client = AssetServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager delegates close() to its transport.

    Fix: the local list used to be named ``transports``, shadowing the
    imported ``transports`` module inside this test; renamed for clarity.
    """
    transport_names = [
        "grpc",
    ]
    for transport_name in transport_names:
        client = AssetServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (AssetServiceClient, transports.AssetServiceGrpcTransport),
        (AssetServiceAsyncClient, transports.AssetServiceGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key in client options is exchanged for API-key credentials.

    The credentials returned by google.auth's get_api_key_credentials must
    be forwarded to the transport constructor along with the default host
    and client settings.
    """
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        # Stub out the transport constructor so only the forwarded keyword
        # arguments are observed.
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
|
googleapis/python-asset
|
tests/unit/gapic/asset_v1p2beta1/test_asset_service.py
|
Python
|
apache-2.0
| 86,657
|
[
"Octopus"
] |
fc4ba40f3cbe3325bee34dfb1e41ded5bd92e1bf6ee6fe37067fc51b1c5a43a9
|
# coding=utf-8
# from types import UnicodeType
from django.db import IntegrityError
from django.test import TestCase, TransactionTestCase
from django.utils.timezone import now
from freezegun import freeze_time
from feedback.models import get_model, Semester, Person, Veranstaltung, Einstellung, Mailvorlage
from feedback.models.base import AlternativVorname, Log, BarcodeScanner, Fachgebiet, FachgebietEmail, BarcodeAllowedState
from feedback.models import past_semester_orders
from feedback.models import ImportPerson, ImportCategory, ImportVeranstaltung, Kommentar
from feedback.models import Fragebogen2008, Fragebogen2009, Ergebnis2008, Ergebnis2009
from feedback.models import Fragebogen2012, Ergebnis2012
from feedback.tests.tools import get_veranstaltung
from django.urls import reverse
from django.contrib.auth.models import User
class InitTest(TestCase):
    """get_model must resolve the questionnaire/result class per semester."""

    def setUp(self):
        # One semester per supported questionnaire revision.
        self.s = [
            Semester.objects.create(semester=20110, fragebogen='2008'),
            Semester.objects.create(semester=20115, fragebogen='2009'),
            Semester.objects.create(semester=20125, fragebogen='2012'),
        ]

    def test_get_model(self):
        expected = [
            (Fragebogen2008, Ergebnis2008),
            (Fragebogen2009, Ergebnis2009),
            (Fragebogen2012, Ergebnis2012),
        ]
        for semester, (fragebogen_cls, ergebnis_cls) in zip(self.s, expected):
            self.assertEqual(get_model('Fragebogen', semester), fragebogen_cls)
            self.assertEqual(get_model('Ergebnis', semester), ergebnis_cls)
class pastOrdersTest(TestCase):
    """Tests for past_semester_orders: order/return counts of earlier runs of a course."""

    def setUp(self):
        # Three semesters: one past, one current, one future.
        current_sem = Semester.objects.create(semester=20125, fragebogen='2012')
        past_sem = Semester.objects.create(semester=20115, fragebogen='2009')
        future_sem = Semester.objects.create(semester=20130, fragebogen='2012')
        self.default_params = {'typ': 'v', 'name': 'Stoning I',
                               'grundstudium': False, 'evaluieren': True}
        # A course that exists only in the current semester (no history).
        self.singleLV = Veranstaltung.objects.create(semester=current_sem, lv_nr='20-00-0021-lv', **self.default_params)
        # Course 0042: past run with an order (anzahl=10) + current run.
        # Course 0043: past run without an order, current run, and a future
        # run with an order (the future run must never count as "past").
        self.lv = []
        self.lv.append(
            Veranstaltung.objects.create(semester=past_sem, lv_nr='20-00-0042-lv', anzahl=10, **self.default_params))
        self.lv.append(Veranstaltung.objects.create(semester=current_sem, lv_nr='20-00-0042-lv', **self.default_params))
        self.lv.append(Veranstaltung.objects.create(semester=past_sem, lv_nr='20-00-0043-lv', **self.default_params))
        self.lv.append(Veranstaltung.objects.create(semester=current_sem, lv_nr='20-00-0043-lv', **self.default_params))
        self.lv.append(
            Veranstaltung.objects.create(semester=future_sem, lv_nr='20-00-0043-lv', anzahl=10, **self.default_params))
        # One evaluation result for the past run of 0042 (5 returned sheets).
        self.erg = []
        self.erg.append(Ergebnis2009.objects.create(veranstaltung=self.lv[0], anzahl=5,
                                                    v_gesamt=1, v_gesamt_count=2,
                                                    ue_gesamt=2, ue_gesamt_count=3,
                                                    v_feedbackpreis=3, v_feedbackpreis_count=4))

    def test_no_past(self):
        """The course is held for the first time."""
        result = past_semester_orders(self.singleLV)
        self.assertEqual(len(result), 0)
        self.assertEqual(past_semester_orders(self.singleLV), [],
                         'Die Liste sollte leer sein. Die Veranstaltung wird das erste mal angeboten.')

    def test_one_befor(self):
        """The course was held before; sheets were ordered and results delivered."""
        result = past_semester_orders(self.lv[1])
        self.assertEqual(len(result), 1)
        expeted_dict = {'veranstaltung': self.lv[0], 'anzahl_bestellung': 10, 'anzahl_ruecklauf': 5}
        self.assertDictEqual(expeted_dict, result[0])

    def test_one_befor_current_ordert(self):
        """An order was placed for the current course.
        The course was held once in the past."""
        # An order on the *current* run must not change the reported history.
        self.lv[1].anzahl = 42
        self.lv[1].save()
        result = past_semester_orders(self.lv[1])
        self.assertEqual(len(result), 1)
        expeted_dict = {'veranstaltung': self.lv[0], 'anzahl_bestellung': 10, 'anzahl_ruecklauf': 5}
        self.assertDictEqual(expeted_dict, result[0])

    def test_not_exist(self):
        """The course was held before, but nothing was ordered."""
        result = past_semester_orders(self.lv[3])
        self.assertEqual(len(result), 0)

    def test_no_result_exist(self):
        """The course was held and sheets ordered, but no results exist."""
        self.lv[2].anzahl = 10
        self.lv[2].save()
        result = past_semester_orders(self.lv[3])
        self.assertEqual(len(result), 1)
        expeted_dict = {'veranstaltung': self.lv[2], 'anzahl_bestellung': 10, 'anzahl_ruecklauf': 0}
        self.assertDictEqual(expeted_dict, result[0])
class SemesterTest(TestCase):
    """Formatting helpers and uniqueness constraints on Semester."""

    def setUp(self):
        # 20115 encodes winter term 2011/12; 20120 encodes summer term 2012.
        self.ws = Semester.objects.create(semester=20115, fragebogen='test', sichtbarkeit='ADM')
        self.ss = Semester.objects.create(semester=20120, fragebogen='test', sichtbarkeit='ADM')

    def test_short(self):
        for sem, text in ((self.ws, 'WS 2011/2012'), (self.ss, 'SS 2012')):
            self.assertEqual(sem.short(), text)

    def test_long(self):
        for sem, text in ((self.ws, 'Wintersemester 2011/2012'), (self.ss, 'Sommersemester 2012')):
            self.assertEqual(sem.long(), text)

    def test_unicode(self):
        # str() uses the long form.
        for sem, text in ((self.ws, 'Wintersemester 2011/2012'), (self.ss, 'Sommersemester 2012')):
            self.assertEqual(str(sem), text)

    def test_current(self):
        self.assertEqual(Semester.current(), self.ss)

    def test_is_unique(self):
        # The semester number is a unique key.
        with self.assertRaises(IntegrityError):
            Semester.objects.create(semester=20115, fragebogen='foo', sichtbarkeit='ALL')
class FachgebietTest(TestCase):
    """Plain attribute checks on a stored Fachgebiet (department)."""

    def setUp(self):
        self.fg = Fachgebiet.objects.create(
            name="Software Technology Group", kuerzel="STG"
        )

    def test_name(self):
        self.assertEqual("Software Technology Group", self.fg.name)

    def test_kuerzel(self):
        self.assertEqual("STG", self.fg.kuerzel)
class FachgebietEmailTest(TestCase):
    """FachgebietEmail: suffix lookup and admin-driven person assignment."""

    def setUp(self):
        User.objects.create_superuser('supers', None, 'pw')
        self.fg = Fachgebiet.objects.create(name="Software Technology Group", kuerzel="STG")
        self.fge = FachgebietEmail.objects.create(fachgebiet=self.fg, email_suffix="stg.tu-darmstadt.de",
                                                  email_sekretaerin="sek@stg.tu-darmstadt.de")
        self.fg1 = Fachgebiet.objects.create(name="FG1", kuerzel="FG1")
        self.p = Person.objects.create(vorname="Je", nachname="Mand", email="je.mand@stg.tu-darmstadt.de")
        self.p1 = Person.objects.create(vorname="An", nachname="Derer", email="an.derer@fg1.de")

    def test_fachgebiet(self):
        self.assertEqual(self.fge.fachgebiet_id, self.fg.id)

    def test_email_suffix(self):
        self.assertEqual(self.fge.email_suffix, "stg.tu-darmstadt.de")

    def test_email_sekretaerin(self):
        self.assertEqual(self.fge.email_sekretaerin, "sek@stg.tu-darmstadt.de")

    def test_get_fachgebiet_from_email(self):
        # The e-mail suffix maps a person's address to their department.
        fg = FachgebietEmail.get_fachgebiet_from_email(self.p.email)
        self.assertEqual(self.fg, fg)
        # BUGFIX: the original line called the function immediately and
        # passed its *result* to assertRaises, which made the check pass
        # for the wrong reason.  Use the callable form instead.
        self.assertRaises(Exception, FachgebietEmail.get_fachgebiet_from_email, '')

    def test_admin_assign_person(self):
        # Saving a Fachgebiet with a new e-mail suffix in the admin should
        # (re)assign matching persons to that department.
        self.assertTrue(self.client.login(username='supers', password='pw'))
        update_url = reverse("admin:feedback_fachgebiet_change", args=(self.fg1.id,))
        data = {
            'name': self.fg1.name,
            'kuerzel': self.fg1.kuerzel,
            'fachgebiet-TOTAL_FORMS': 1,
            'fachgebiet-INITIAL_FORMS': 0,
            'fachgebiet-MIN_NUM_FORMS': 0,
            'fachgebiet-MAX_NUM_FORMS': 1000,
            'fachgebiet-0-fachgebiet': self.fg1.id,
            'fachgebiet-0-email_suffix': 'fg1.de',
            '_save': 'Sichern'
        }
        response = self.client.post(update_url, data, **{'REMOTE_USER': 'super'})
        self.assertEqual(response.status_code, 302)
        self.p1.refresh_from_db()
        self.assertEqual(self.p1.fachgebiet, self.fg1)
class PersonTest(TestCase):
    """Person model helpers: naming, lookup, lecturer relations, admin actions."""

    def setUp(self):
        # Admin user for the admin-action test.
        User.objects.create_superuser('supers', None, 'pw')
        # p1: lecturer on v1; p2: not a lecturer; p3: fully edited person
        # (email + gender); p4: lecturer on v2, to be replaced by p3.
        self.p1 = Person.objects.create(vorname='Brian', nachname='Cohen')
        self.p2 = Person.objects.create(vorname='Bud', nachname='Spencer', email='x@y.z')
        self.p3 = Person.objects.create(vorname='Test', nachname='Tester', email='a@b.c', geschlecht='m')
        self.p4 = Person.objects.create(vorname='Test Zweitname', nachname='Tester')
        self.s1, self.v1 = get_veranstaltung('v')
        self.v1.veranstalter.add(self.p1)
        self.s2 = Semester.objects.get_or_create(semester=20115, fragebogen='2009', sichtbarkeit='ADM')[0]
        default_params = {'semester': self.s2, 'grundstudium': False, 'evaluieren': True, 'lv_nr': '321' + 'v'}
        self.v2 = Veranstaltung.objects.create(typ='v', name='CMS', **default_params)
        self.v2.veranstalter.add(self.p4)
        # Department with an e-mail suffix matching fb_p1's address,
        # used by the admin assign-department action test.
        self.fachgebiet1 = Fachgebiet.objects.create(name="Fachgebiet1", kuerzel="FB1")
        FachgebietEmail.objects.create(fachgebiet=self.fachgebiet1, email_suffix="fb1.tud.de")
        self.fb_p1 = Person.objects.create(vorname="Max1", nachname="Mustername",
                                           email="max1mustermann@fb1.tud.de")

    def test_full_name(self):
        self.assertEqual(self.p1.full_name(), 'Brian Cohen')

    def test_unicode(self):
        # str() renders "last name, first name".
        self.assertEqual(str(self.p1), 'Cohen, Brian')

    def test_create_from_import_person(self):
        # An ImportPerson matching an existing Person reuses that record...
        ip = ImportPerson(vorname='Brian', nachname='Cohen')
        p = Person.create_from_import_person(ip)
        self.assertEqual(p, self.p1)
        # ...while a different name yields a new one.
        ip.vorname = 'Eric'
        p = Person.create_from_import_person(ip)
        self.assertNotEqual(p, self.p1)

    def test_persons_to_edit(self):
        to_edit_persons = Person.persons_to_edit(semester=self.s1)
        self.assertEqual(to_edit_persons.count(), 1)
        self.assertTrue(to_edit_persons.filter(vorname='Brian').exists())

    def test_all_edited_persons(self):
        edited_persons = Person.all_edited_persons()
        self.assertEqual(edited_persons.count(), 1)
        self.assertTrue(edited_persons.filter(email='a@b.c', geschlecht='m').exists())

    def test_persons_with_similar_names(self):
        similar_persons = Person.persons_with_similar_names('Test', 'Tester')
        self.assertEqual(similar_persons.count(), 1)
        self.assertTrue(similar_persons.filter(vorname='Test').exists())

    def test_veranstaltungen(self):
        veranstaltungen = Person.veranstaltungen(self.p1)
        veranstalter_name = veranstaltungen.filter(veranstalter=self.p1)[0].veranstalter.get().full_name()
        self.assertEqual(veranstaltungen.count(), 1)
        self.assertEqual(self.p1.full_name(), veranstalter_name)

    def test_replace_veranstalter(self):
        # when
        Person.replace_veranstalter(self.p4, self.p3)
        # then: p3 takes over p4's courses and p4's first name is kept as
        # an alternative first name on p3.
        self.assertFalse(Person.veranstaltungen(self.p4).exists())
        self.assertEqual(Person.veranstaltungen(self.p3).count(), 1)
        self.assertEqual(AlternativVorname.objects.get().vorname, self.p4.vorname)
        self.assertEqual(AlternativVorname.objects.get().person, self.p3)

    def test_is_veranstalter(self):
        is_veranstalter1 = Person.is_veranstalter(self.p1)
        is_veranstalter2 = Person.is_veranstalter(self.p2)
        self.assertTrue(is_veranstalter1)
        self.assertFalse(is_veranstalter2)

    def test_person_admin_assign_fachgebiet(self):
        # Run the admin bulk action that assigns departments by e-mail suffix.
        self.assertTrue(self.client.login(username='supers', password='pw'))
        update_url = reverse("admin:feedback_person_changelist")
        data = {'action': 'assign_fachgebiet_action',
                '_selected_action': [str(f.pk) for f in [self.fb_p1]]}
        # First POST renders the confirmation page.
        response = self.client.post(update_url, data, **{'REMOTE_USER': 'super'})
        self.assertEqual(response.status_code, 200)
        # Second POST applies the selection.
        data["apply"] = True
        data["selectedPerson"] = [self.fb_p1.id]
        data["fachgebiet_" + str(self.fb_p1.id)] = self.fachgebiet1.id
        self.assertEqual(self.fb_p1.fachgebiet, None)
        response = self.client.post(update_url, data, **{'REMOTE_USER': 'super'})
        self.assertEqual(response.status_code, 302)
        self.fb_p1.refresh_from_db()
        self.assertEqual(self.fb_p1.fachgebiet, self.fachgebiet1)
class BarcodeScanTest(TestCase):
    """Uniqueness rules for BarcodeAllowedState across barcode scanners."""

    def setUp(self):
        self.barcode_scanner = BarcodeScanner.objects.create(token="LRh73Ds22", description="description1")
        self.barcode_scanner2 = BarcodeScanner.objects.create(token="KHzz211d", description="description2")

    def test_equal_state_onscanners(self):
        # The same allowed state on two *different* scanners is legal.
        try:
            BarcodeAllowedState.objects.create(
                barcode_scanner=self.barcode_scanner,
                allow_state=Veranstaltung.STATUS_GEDRUCKT,
            )
            BarcodeAllowedState.objects.create(
                barcode_scanner=self.barcode_scanner2,
                allow_state=Veranstaltung.STATUS_GEDRUCKT,
            )
        except IntegrityError:
            self.fail()

    def test_same_state_onscanner(self):
        # Registering the same state twice on one scanner violates uniqueness.
        BarcodeAllowedState.objects.create(
            barcode_scanner=self.barcode_scanner,
            allow_state=Veranstaltung.STATUS_GEDRUCKT,
        )
        with self.assertRaises(IntegrityError):
            BarcodeAllowedState.objects.create(
                barcode_scanner=self.barcode_scanner,
                allow_state=Veranstaltung.STATUS_GEDRUCKT,
            )
class VeranstaltungTest(TransactionTestCase):
    """Tests for the Veranstaltung (course) model and its status workflow."""

    def setUp(self):
        # Two semesters so that per-semester uniqueness can be exercised.
        self.s = []
        self.s.append(Semester.objects.create(semester=20110, fragebogen='test', sichtbarkeit='ADM'))
        self.s.append(Semester.objects.create(semester=20115, fragebogen='test', sichtbarkeit='ADM'))
        self.default_params = {'semester': self.s[0], 'grundstudium': False, 'evaluieren': True}
        # One course per type; the last one starts in state GEDRUCKT.
        self.v = []
        self.v.append(Veranstaltung.objects.create(typ='v', name='Stoning I', **self.default_params))
        self.v.append(Veranstaltung.objects.create(typ='vu', name='Stoning II', **self.default_params))
        self.v.append(Veranstaltung.objects.create(typ='pr', name='Stoning III', **self.default_params))
        self.v.append(Veranstaltung.objects.create(typ='se', name='Stoning IV', **self.default_params))
        self.v.append(Veranstaltung.objects.create(typ='v',
                                                   name='Stoning V',
                                                   status=Veranstaltung.STATUS_GEDRUCKT,
                                                   **self.default_params))
        self.scanner = BarcodeScanner.objects.create(token="aa", description="description")
        User.objects.create_superuser('supers', None, 'pw')

    def test_get_evasys_typ(self):
        # Mapping of course type to the numeric EvaSys type code.
        self.assertEqual(self.v[0].get_evasys_typ(), 1)
        self.assertEqual(self.v[1].get_evasys_typ(), 9)
        self.assertEqual(self.v[2].get_evasys_typ(), 5)
        self.assertEqual(self.v[3].get_evasys_typ(), 2)

    def test_status(self):
        self.assertEqual(self.v[0].status, Veranstaltung.STATUS_ANGELEGT)
        self.assertEqual(self.v[4].status, Veranstaltung.STATUS_GEDRUCKT)

    def test_set_next_state(self):
        veranstaltung = self.v[4]
        veranstaltung.status = Veranstaltung.STATUS_BESTELLUNG_GEOEFFNET
        veranstaltung.set_next_state()
        self.assertEqual(veranstaltung.status, Veranstaltung.STATUS_BESTELLUNG_LIEGT_VOR)
        # A second transition from BESTELLUNG_LIEGT_VOR is a no-op.
        veranstaltung.set_next_state()
        self.assertEqual(veranstaltung.status, Veranstaltung.STATUS_BESTELLUNG_LIEGT_VOR)
        veranstaltung.status = Veranstaltung.STATUS_GEDRUCKT
        veranstaltung.set_next_state()
        self.assertEqual(veranstaltung.status, Veranstaltung.STATUS_VERSANDT)

    def test_log(self):
        # Logging without a scanner writes no entry ...
        self.v[0].log(None)
        self.assertEqual(Log.objects.count(), 0)
        # ... logging with one records the scanner and the interface.
        self.v[0].log(self.scanner)
        self.assertEqual(Log.objects.count(), 1)
        self.assertEqual(Log.objects.get(veranstaltung=self.v[0]).scanner, self.scanner)
        self.assertEqual(Log.objects.get(veranstaltung=self.v[0]).interface, Log.SCANNER)

    def test_admin_veranstaltung(self):
        # Drive the admin "change status" bulk action end to end.
        self.assertTrue(self.client.login(username='supers', password='pw'))
        update_url = reverse("admin:feedback_veranstaltung_changelist")
        data = {'action': 'status_aendern_action',
                '_selected_action': [str(f.pk) for f in [self.v[0]]]}
        # First POST (no "apply") shows the intermediate page.
        # NOTE(review): REMOTE_USER is 'super' while the superuser is
        # 'supers' — presumably harmless since client.login already
        # authenticated; verify intent.
        response = self.client.post(update_url, data, **{'REMOTE_USER': 'super'})
        self.assertEqual(response.status_code, 200)
        # Second POST with "apply" performs the change and redirects.
        data["apply"] = True
        data["status"] = Veranstaltung.STATUS_BOEGEN_GESCANNT
        response = self.client.post(update_url, data, **{'REMOTE_USER': 'super'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Log.objects.count(), 1)
        self.assertEqual(Log.objects.get(veranstaltung=self.v[0]).interface, Log.ADMIN)

    @staticmethod
    def change_status(v, new_status):
        # Helper: persist a status change on the given course.
        v.status = new_status
        v.save()

    def test_get_next_state(self):
        # Walk the full status state machine from ANGELEGT to the end.
        v = self.v[0]
        self.assertEqual(v.get_next_state(), Veranstaltung.STATUS_GEDRUCKT)
        self.change_status(v, Veranstaltung.STATUS_GEDRUCKT)
        self.assertEqual(v.get_next_state(), Veranstaltung.STATUS_VERSANDT)
        self.change_status(v, Veranstaltung.STATUS_VERSANDT)
        self.assertEqual(v.get_next_state(), Veranstaltung.STATUS_BOEGEN_EINGEGANGEN)
        self.change_status(v, Veranstaltung.STATUS_BOEGEN_EINGEGANGEN)
        self.assertEqual(v.get_next_state(), Veranstaltung.STATUS_BOEGEN_GESCANNT)
        self.change_status(v, Veranstaltung.STATUS_BOEGEN_GESCANNT)
        self.assertEqual(v.get_next_state(), Veranstaltung.STATUS_ERGEBNISSE_VERSANDT)
        self.change_status(v, Veranstaltung.STATUS_ERGEBNISSE_VERSANDT)
        # The final state has no successor.
        self.assertIsNone(v.get_next_state())

    def test_has_uebung(self):
        # Only the 'vu' type has an exercise component.
        self.assertTrue(self.v[1].has_uebung())
        self.assertFalse(self.v[0].has_uebung())

    def test_unicode(self):
        self.assertEqual(str(self.v[0]), 'Stoning I [v] (SS 2011)')

    def test_save(self):
        # Saving with an empty access token generates a fresh 16-char token.
        self.v[0].access_token = ''
        self.v[0].save()
        self.assertNotEqual(self.v[0].access_token, '')
        self.assertTrue(len(self.v[0].access_token) == 16)

    def test_unique(self):
        # A Veranstaltung must be unique over name, lv_nr and semester.
        with self.assertRaises(IntegrityError):
            Veranstaltung.objects.create(typ='v', name='Stoning I', **self.default_params)
        # Changing any single one of those attributes must still be allowed.
        try:
            # name
            Veranstaltung.objects.create(typ='v', name='Stoning XXIII', **self.default_params)
        except IntegrityError:
            self.fail()
        try:
            # lv_nr
            Veranstaltung.objects.create(typ='v', name='Stoning I', lv_nr='42', **self.default_params)
        except IntegrityError:
            self.fail()
        try:
            # semester
            params = self.default_params.copy()
            params['semester'] = self.s[1]
            Veranstaltung.objects.create(typ='v', name='Stoning I', **params)
        except IntegrityError:
            self.fail()

    def test_no_lang_set(self):
        """If no language is set, no questionnaire form is selected."""
        self.assertEqual(self.v[0].get_evasys_bogen(), '')

    def test_veranstalter_url(self):
        """The organizer URL must contain the course id and the token."""
        url = self.v[0].link_veranstalter()
        url_parts = url.split('&')
        self.assertEqual(len(url_parts), 2)
        ver_id = self.v[0].id
        self.assertEqual(url_parts[0],
                         'https://www.fachschaft.informatik.tu-darmstadt.de/veranstalter/login/?vid=%d' % ver_id)
        access_token = self.v[0].access_token
        self.assertEqual('token=' + access_token, url_parts[1])
        # FIXME: Is the below assert necessary? Since Python3 strings are all Unicode
        # not_saved_v = Veranstaltung()
        # self.assertEqual(type(not_saved_v.link_veranstalter()), UnicodeType)
class EinstellungTest(TestCase):
    """Tests for the Einstellung (settings) key/value store."""

    def setUp(self):
        self.a = Einstellung.objects.create(name='spam', wert='bacon')
        self.b = Einstellung.objects.create(name='sausage', wert='eggs')

    def test_get(self):
        # Lookup by name returns the stored value for both entries.
        for entry in (self.a, self.b):
            self.assertEqual(Einstellung.get(entry.name), entry.wert)

    def test_unicode(self):
        # String form is: name = "wert"
        for entry, expected in ((self.a, 'spam = "bacon"'),
                                (self.b, 'sausage = "eggs"')):
            self.assertEqual(str(entry), expected)

    def test_unique(self):
        # name is unique; a second 'spam' row must be rejected.
        with self.assertRaises(IntegrityError):
            Einstellung.objects.create(name='spam', wert='cheese')
class MailvorlageTest(TestCase):
    """Tests for the Mailvorlage (mail template) model."""

    def setUp(self):
        self.m = Mailvorlage.objects.create(subject='Nobody expects',
                                            body='the Spanish Inquisition')

    def test_unicode(self):
        # The subject alone is the string representation.
        self.assertEqual(str(self.m), 'Nobody expects')

    def test_unique(self):
        # subject is unique regardless of the body text.
        with self.assertRaises(IntegrityError):
            Mailvorlage.objects.create(subject='Nobody expects',
                                       body='the spammish repetition')
class ImportPersonTest(TestCase):
    """Tests for the ImportPerson model's name formatting."""

    def setUp(self):
        self.ip = ImportPerson.objects.create(vorname='Brian', nachname='Cohen')

    def test_full_name(self):
        # full_name() renders "<first> <last>".
        self.assertEqual(self.ip.full_name(), 'Brian Cohen')

    def test_unicode(self):
        # str() renders "<last>, <first>".
        self.assertEqual(str(self.ip), 'Cohen, Brian')
class ImportCategoryTest(TestCase):
    """Tests for the ImportCategory model."""

    def setUp(self):
        self.ic = ImportCategory.objects.create(name='Spam')

    def test_unicode(self):
        # The category name alone is the string representation.
        self.assertEqual(str(self.ic), 'Spam')
class ImportVeranstaltungTest(TestCase):
    """Tests for the ImportVeranstaltung model."""

    def setUp(self):
        self.c = ImportCategory.objects.create(name='Sketches')
        self.iv = ImportVeranstaltung.objects.create(
            typ='v', name='Dead Parrot', lv_nr='42',
            category=self.c, is_attended_course=True)

    def test_unicode(self):
        # String form is "<name> (<lv_nr>)".
        self.assertEqual(str(self.iv), 'Dead Parrot (42)')
class FragebogenTest(TestCase):
    """Tests for the Fragebogen2009 (questionnaire) model."""

    def setUp(self):
        self.s, self.v = get_veranstaltung('v')
        self.f = Fragebogen2009.objects.create(veranstaltung=self.v, v_gesamt=1)

    def test_unicode(self):
        # str() names the course with its type and semester.
        self.assertEqual(
            str(self.f),
            'Fragebogen zu "Stoning I" (Vorlesung, SS 2011)')
class ErgebnisTest(TestCase):
    """Tests for Ergebnis2009 result aggregation helpers."""

    def setUp(self):
        # Identical result fields for both courses; only the course
        # type ('vu' vs. 'v') differs.
        result_fields = {
            'anzahl': 5,
            'v_gesamt': 1, 'v_gesamt_count': 2,
            'ue_gesamt': 2, 'ue_gesamt_count': 3,
            'v_feedbackpreis': 3, 'v_feedbackpreis_count': 4,
        }
        self.s, self.vu = get_veranstaltung('vu')
        self.s, self.v = get_veranstaltung('v')
        self.eu = Ergebnis2009.objects.create(veranstaltung=self.vu, **result_fields)
        self.e = Ergebnis2009.objects.create(veranstaltung=self.v, **result_fields)
        # Expected [value, count] pairs as returned by values()/all_values().
        empty = [None, 0]
        self.parts_vl = [list(empty), [1, 2]] + [list(empty) for _ in range(3)]
        self.parts_ue = [[2, 3]] + [list(empty) for _ in range(3)]
        self.parts_ue_empty = [list(empty) for _ in range(4)]
        self.parts_hidden = [[3, 4], list(empty)]

    def test_values(self):
        # A pure lecture ('v') reports empty exercise parts.
        self.assertListEqual(list(self.eu.values()), self.parts_vl + self.parts_ue)
        self.assertListEqual(list(self.e.values()), self.parts_vl + self.parts_ue_empty)

    def test_all_values(self):
        # all_values() additionally exposes the hidden (feedback prize) parts.
        self.assertListEqual(self.eu.all_values(),
                             self.parts_vl + self.parts_ue + self.parts_hidden)
        self.assertListEqual(self.e.all_values(),
                             self.parts_vl + self.parts_ue_empty + self.parts_hidden)

    def test_unicode(self):
        self.assertEqual(str(self.e),
                         'Ergebnisse zu "Stoning I" (Vorlesung, Sommersemester 2011)')

    def test_unique(self):
        # Only one result set per course is allowed.
        with self.assertRaises(IntegrityError):
            Ergebnis2009.objects.create(veranstaltung=self.v)
class KommentarTest(TestCase):
    """Tests for the Kommentar (comment) model."""

    def setUp(self):
        self.s, self.v = get_veranstaltung('v')
        self.p = [
            Person.objects.create(vorname='Brian', nachname='Cohen'),
            Person.objects.create(vorname='The', nachname='Crowd'),
        ]
        self.k = Kommentar.objects.create(veranstaltung=self.v, autor=self.p[0],
                                          text='You are all individuals!')

    def test_semester(self):
        self.assertEqual(self.k.semester(), self.s)

    def test_typ(self):
        self.assertEqual(self.k.typ(), 'Vorlesung')

    def test_name(self):
        self.assertEqual(self.k.name(), 'Stoning I')

    def test_unicode(self):
        self.assertEqual(str(self.k),
                         'Kommentar zu "Stoning I" (Vorlesung, Sommersemester 2011)')

    def test_unique(self):
        # Each course can carry at most one comment, even by another author.
        with self.assertRaises(IntegrityError):
            Kommentar.objects.create(veranstaltung=self.v, autor=self.p[1],
                                     text="I'm not.")
class LogTest(TestCase):
    """Tests for the Log entries recorded on status changes."""

    @freeze_time("2016-12-06")
    def setUp(self):
        # Frozen clock makes the stored timestamp deterministic.
        self.time = now()
        self.s, self.v = get_veranstaltung('v')
        self.scanner = BarcodeScanner.objects.create(token="aa",
                                                     description="description")
        self.log = Log.objects.create(
            veranstaltung=self.v,
            timestamp=self.time,
            status=Veranstaltung.STATUS_GEDRUCKT,
            user=None,
            scanner=self.scanner,
            interface=Log.ADMIN,
        )

    def test_name(self):
        self.assertEqual(self.log.veranstaltung.pk, self.v.pk)

    def test_timestamp(self):
        self.assertEqual(self.log.timestamp, self.time)

    def test_status(self):
        self.assertEqual(self.log.status, Veranstaltung.STATUS_GEDRUCKT)

    def test_scanner_attr(self):
        self.assertEqual(self.log.scanner, self.scanner)

    def test_interface(self):
        self.assertEqual(self.log.interface, Log.ADMIN)
|
TheMangalex/pyfeedback
|
src/feedback/tests/test_models.py
|
Python
|
agpl-3.0
| 26,577
|
[
"Brian"
] |
53033f3c03ecc039cd657ba04ad2819904fa8426484231730ee11074e8b58cf3
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2beta1.services.agents import AgentsAsyncClient
from google.cloud.dialogflow_v2beta1.services.agents import AgentsClient
from google.cloud.dialogflow_v2beta1.services.agents import pagers
from google.cloud.dialogflow_v2beta1.services.agents import transports
from google.cloud.dialogflow_v2beta1.types import agent
from google.cloud.dialogflow_v2beta1.types import agent as gcd_agent
from google.cloud.dialogflow_v2beta1.types import validation_result
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Dummy mTLS callback returning placeholder (cert, key) bytes."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a stand-in endpoint when the client's default is localhost.

    Lets the endpoint-selection tests distinguish default from mTLS
    behavior even when tests run against a local server.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps googleapis hosts to their mTLS twins."""
    cases = [
        (None, None),
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        # Non-Google hosts must pass through unchanged.
        ("api.example.com", "api.example.com"),
    ]
    for endpoint, expected in cases:
        assert AgentsClient._get_default_mtls_endpoint(endpoint) == expected
@pytest.mark.parametrize("client_class", [AgentsClient, AgentsAsyncClient,])
def test_agents_client_from_service_account_info(client_class):
    """from_service_account_info builds a client using the factory's creds."""
    anon_creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = anon_creds
        client = client_class.from_service_account_info({"valid": True})
        assert isinstance(client, client_class)
        assert client.transport._credentials == anon_creds
        assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.AgentsGrpcTransport, "grpc"),
        (transports.AgentsGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_agents_client_service_account_always_use_jwt(transport_class, transport_name):
    """with_always_use_jwt_access is applied iff always_use_jwt_access=True."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        # With the flag off, the JWT wrapper must not be invoked.
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [AgentsClient, AgentsAsyncClient,])
def test_agents_client_from_service_account_file(client_class):
    """Both from_service_account_file and _json use the factory's creds."""
    anon_creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = anon_creds
        # The _json alias must behave exactly like _file.
        for ctor in (client_class.from_service_account_file,
                     client_class.from_service_account_json):
            client = ctor("dummy/file/path.json")
            assert client.transport._credentials == anon_creds
            assert isinstance(client, client_class)
        assert client.transport._host == "dialogflow.googleapis.com:443"
def test_agents_client_get_transport_class():
    """The default transport is gRPC; "grpc" resolves to it explicitly."""
    default_transport = AgentsClient.get_transport_class()
    assert default_transport in [
        transports.AgentsGrpcTransport,
    ]
    named_transport = AgentsClient.get_transport_class("grpc")
    assert named_transport == transports.AgentsGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (AgentsClient, transports.AgentsGrpcTransport, "grpc"),
        (AgentsAsyncClient, transports.AgentsGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
@mock.patch.object(
    AgentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AgentsClient)
)
@mock.patch.object(
    AgentsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AgentsAsyncClient)
)
def test_agents_client_client_options(client_class, transport_class, transport_name):
    """Client options and mTLS env vars select the transport host correctly."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(AgentsClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(AgentsClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (AgentsClient, transports.AgentsGrpcTransport, "grpc", "true"),
        (
            AgentsAsyncClient,
            transports.AgentsGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (AgentsClient, transports.AgentsGrpcTransport, "grpc", "false"),
        (
            AgentsAsyncClient,
            transports.AgentsGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    AgentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AgentsClient)
)
@mock.patch.object(
    AgentsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AgentsAsyncClient)
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_agents_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With MTLS_ENDPOINT=auto, host/cert selection follows cert availability."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    # NOTE(review): `client` here is still the instance from the
                    # previous with-block; the DEFAULT_* attributes appear to be
                    # class-level so this works, but it reads confusingly.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class", [AgentsClient, AgentsAsyncClient])
@mock.patch.object(
    AgentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AgentsClient)
)
@mock.patch.object(
    AgentsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AgentsAsyncClient)
)
def test_agents_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source honors both mTLS environment variables."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (AgentsClient, transports.AgentsGrpcTransport, "grpc"),
        (AgentsAsyncClient, transports.AgentsGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_agents_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """User-supplied OAuth scopes are forwarded unchanged to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (AgentsClient, transports.AgentsGrpcTransport, "grpc", grpc_helpers),
        (
            AgentsAsyncClient,
            transports.AgentsGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_agents_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file client option is passed through to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_agents_client_client_options_from_dict():
    """A plain dict of client options is accepted and forwarded to the transport."""
    target = (
        "google.cloud.dialogflow_v2beta1.services.agents.transports."
        "AgentsGrpcTransport.__init__"
    )
    with mock.patch(target) as patched_init:
        patched_init.return_value = None
        AgentsClient(client_options={"api_endpoint": "squid.clam.whelk"})
        patched_init.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (AgentsClient, transports.AgentsGrpcTransport, "grpc", grpc_helpers),
        (
            AgentsAsyncClient,
            transports.AgentsGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_agents_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from file are the ones used to create the channel."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        # The channel must be built with the file credentials, not ADC's.
        create_channel.assert_called_with(
            "dialogflow.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            scopes=None,
            default_host="dialogflow.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("request_type", [agent.GetAgentRequest, dict,])
def test_get_agent(request_type, transport: str = "grpc"):
    """get_agent forwards the request and unpacks the Agent response fields."""
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = agent.Agent(
            parent="parent_value",
            display_name="display_name_value",
            default_language_code="default_language_code_value",
            supported_language_codes=["supported_language_codes_value"],
            time_zone="time_zone_value",
            description="description_value",
            avatar_uri="avatar_uri_value",
            enable_logging=True,
            match_mode=agent.Agent.MatchMode.MATCH_MODE_HYBRID,
            classification_threshold=0.25520000000000004,
            api_version=agent.Agent.ApiVersion.API_VERSION_V1,
            tier=agent.Agent.Tier.TIER_STANDARD,
        )
        response = client.get_agent(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.GetAgentRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, agent.Agent)
    assert response.parent == "parent_value"
    assert response.display_name == "display_name_value"
    assert response.default_language_code == "default_language_code_value"
    assert response.supported_language_codes == ["supported_language_codes_value"]
    assert response.time_zone == "time_zone_value"
    assert response.description == "description_value"
    assert response.avatar_uri == "avatar_uri_value"
    assert response.enable_logging is True
    assert response.match_mode == agent.Agent.MatchMode.MATCH_MODE_HYBRID
    # Tolerant compare — the proto float field may not round-trip exactly.
    assert math.isclose(
        response.classification_threshold, 0.25520000000000004, rel_tol=1e-6
    )
    assert response.api_version == agent.Agent.ApiVersion.API_VERSION_V1
    assert response.tier == agent.Agent.Tier.TIER_STANDARD
def test_get_agent_empty_call():
    """Coverage failsafe: get_agent() with no request still hits the stub."""
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Fake out the underlying gRPC stub and invoke with no arguments at all.
    with mock.patch.object(type(client.transport.get_agent), "__call__") as stub:
        client.get_agent()
        stub.assert_called()
        # An empty default request must have been synthesized.
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == agent.GetAgentRequest()
@pytest.mark.asyncio
async def test_get_agent_async(
    transport: str = "grpc_asyncio", request_type=agent.GetAgentRequest
):
    """Verify the async get_agent() forwards the request and unpacks every Agent field."""
    client = AgentsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            agent.Agent(
                parent="parent_value",
                display_name="display_name_value",
                default_language_code="default_language_code_value",
                supported_language_codes=["supported_language_codes_value"],
                time_zone="time_zone_value",
                description="description_value",
                avatar_uri="avatar_uri_value",
                enable_logging=True,
                match_mode=agent.Agent.MatchMode.MATCH_MODE_HYBRID,
                classification_threshold=0.25520000000000004,
                api_version=agent.Agent.ApiVersion.API_VERSION_V1,
                tier=agent.Agent.Tier.TIER_STANDARD,
            )
        )
        response = await client.get_agent(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.GetAgentRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, agent.Agent)
    assert response.parent == "parent_value"
    assert response.display_name == "display_name_value"
    assert response.default_language_code == "default_language_code_value"
    assert response.supported_language_codes == ["supported_language_codes_value"]
    assert response.time_zone == "time_zone_value"
    assert response.description == "description_value"
    assert response.avatar_uri == "avatar_uri_value"
    assert response.enable_logging is True
    assert response.match_mode == agent.Agent.MatchMode.MATCH_MODE_HYBRID
    assert math.isclose(
        response.classification_threshold, 0.25520000000000004, rel_tol=1e-6
    )
    assert response.api_version == agent.Agent.ApiVersion.API_VERSION_V1
    assert response.tier == agent.Agent.Tier.TIER_STANDARD
@pytest.mark.asyncio
async def test_get_agent_async_from_dict():
    """Re-run the async get_agent test with a plain dict as the request type."""
    await test_get_agent_async(request_type=dict)
def test_get_agent_field_headers():
    """Verify get_agent() sends the routing header derived from request.parent."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.GetAgentRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_agent), "__call__") as call:
        call.return_value = agent.Agent()
        client.get_agent(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_agent_field_headers_async():
    """Verify the async get_agent() sends the routing header derived from request.parent."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.GetAgentRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_agent), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(agent.Agent())
        await client.get_agent(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_get_agent_flattened():
    """Verify get_agent() accepts the flattened ``parent`` kwarg and copies it into the request."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = agent.Agent()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_agent(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_get_agent_flattened_error():
    """Verify get_agent() raises ValueError when given both a request object and flattened fields."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_agent(
            agent.GetAgentRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_get_agent_flattened_async():
    """Verify the async get_agent() accepts the flattened ``parent`` kwarg and copies it into the request."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (The awaitable wrapper is required for the async surface; the former
        # plain ``agent.Agent()`` assignment here was dead code and is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(agent.Agent())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_agent(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_agent_flattened_error_async():
    """Verify the async get_agent() raises ValueError for request object plus flattened fields."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_agent(
            agent.GetAgentRequest(), parent="parent_value",
        )
@pytest.mark.parametrize("request_type", [gcd_agent.SetAgentRequest, dict,])
def test_set_agent(request_type, transport: str = "grpc"):
    """Verify set_agent() forwards the request and unpacks every Agent field."""
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcd_agent.Agent(
            parent="parent_value",
            display_name="display_name_value",
            default_language_code="default_language_code_value",
            supported_language_codes=["supported_language_codes_value"],
            time_zone="time_zone_value",
            description="description_value",
            avatar_uri="avatar_uri_value",
            enable_logging=True,
            match_mode=gcd_agent.Agent.MatchMode.MATCH_MODE_HYBRID,
            classification_threshold=0.25520000000000004,
            api_version=gcd_agent.Agent.ApiVersion.API_VERSION_V1,
            tier=gcd_agent.Agent.Tier.TIER_STANDARD,
        )
        response = client.set_agent(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcd_agent.SetAgentRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcd_agent.Agent)
    assert response.parent == "parent_value"
    assert response.display_name == "display_name_value"
    assert response.default_language_code == "default_language_code_value"
    assert response.supported_language_codes == ["supported_language_codes_value"]
    assert response.time_zone == "time_zone_value"
    assert response.description == "description_value"
    assert response.avatar_uri == "avatar_uri_value"
    assert response.enable_logging is True
    assert response.match_mode == gcd_agent.Agent.MatchMode.MATCH_MODE_HYBRID
    assert math.isclose(
        response.classification_threshold, 0.25520000000000004, rel_tol=1e-6
    )
    assert response.api_version == gcd_agent.Agent.ApiVersion.API_VERSION_V1
    assert response.tier == gcd_agent.Agent.Tier.TIER_STANDARD
def test_set_agent_empty_call():
    """Verify a no-argument set_agent() call sends a default SetAgentRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_agent), "__call__") as call:
        client.set_agent()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcd_agent.SetAgentRequest()
@pytest.mark.asyncio
async def test_set_agent_async(
    transport: str = "grpc_asyncio", request_type=gcd_agent.SetAgentRequest
):
    """Verify the async set_agent() forwards the request and unpacks every Agent field."""
    client = AgentsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcd_agent.Agent(
                parent="parent_value",
                display_name="display_name_value",
                default_language_code="default_language_code_value",
                supported_language_codes=["supported_language_codes_value"],
                time_zone="time_zone_value",
                description="description_value",
                avatar_uri="avatar_uri_value",
                enable_logging=True,
                match_mode=gcd_agent.Agent.MatchMode.MATCH_MODE_HYBRID,
                classification_threshold=0.25520000000000004,
                api_version=gcd_agent.Agent.ApiVersion.API_VERSION_V1,
                tier=gcd_agent.Agent.Tier.TIER_STANDARD,
            )
        )
        response = await client.set_agent(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcd_agent.SetAgentRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcd_agent.Agent)
    assert response.parent == "parent_value"
    assert response.display_name == "display_name_value"
    assert response.default_language_code == "default_language_code_value"
    assert response.supported_language_codes == ["supported_language_codes_value"]
    assert response.time_zone == "time_zone_value"
    assert response.description == "description_value"
    assert response.avatar_uri == "avatar_uri_value"
    assert response.enable_logging is True
    assert response.match_mode == gcd_agent.Agent.MatchMode.MATCH_MODE_HYBRID
    assert math.isclose(
        response.classification_threshold, 0.25520000000000004, rel_tol=1e-6
    )
    assert response.api_version == gcd_agent.Agent.ApiVersion.API_VERSION_V1
    assert response.tier == gcd_agent.Agent.Tier.TIER_STANDARD
@pytest.mark.asyncio
async def test_set_agent_async_from_dict():
    """Re-run the async set_agent test with a plain dict as the request type."""
    await test_set_agent_async(request_type=dict)
def test_set_agent_field_headers():
    """Verify set_agent() sends the routing header derived from request.agent.parent."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcd_agent.SetAgentRequest()
    request.agent.parent = "agent.parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_agent), "__call__") as call:
        call.return_value = gcd_agent.Agent()
        client.set_agent(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "agent.parent=agent.parent/value",) in kw[
        "metadata"
    ]
@pytest.mark.asyncio
async def test_set_agent_field_headers_async():
    """Verify the async set_agent() sends the routing header derived from request.agent.parent."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcd_agent.SetAgentRequest()
    request.agent.parent = "agent.parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_agent), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_agent.Agent())
        await client.set_agent(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "agent.parent=agent.parent/value",) in kw[
        "metadata"
    ]
def test_set_agent_flattened():
    """Verify set_agent() accepts the flattened ``agent`` kwarg and copies it into the request."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcd_agent.Agent()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.set_agent(agent=gcd_agent.Agent(parent="parent_value"),)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].agent
        mock_val = gcd_agent.Agent(parent="parent_value")
        assert arg == mock_val
def test_set_agent_flattened_error():
    """Verify set_agent() raises ValueError when given both a request object and flattened fields."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.set_agent(
            gcd_agent.SetAgentRequest(), agent=gcd_agent.Agent(parent="parent_value"),
        )
@pytest.mark.asyncio
async def test_set_agent_flattened_async():
    """Verify the async set_agent() accepts the flattened ``agent`` kwarg and copies it into the request."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (The awaitable wrapper is required for the async surface; the former
        # plain ``gcd_agent.Agent()`` assignment here was dead code and is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_agent.Agent())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.set_agent(agent=gcd_agent.Agent(parent="parent_value"),)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].agent
        mock_val = gcd_agent.Agent(parent="parent_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_set_agent_flattened_error_async():
    """Verify the async set_agent() raises ValueError for request object plus flattened fields."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.set_agent(
            gcd_agent.SetAgentRequest(), agent=gcd_agent.Agent(parent="parent_value"),
        )
@pytest.mark.parametrize("request_type", [agent.DeleteAgentRequest, dict,])
def test_delete_agent(request_type, transport: str = "grpc"):
    """Verify delete_agent() forwards the request and returns None."""
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_agent(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.DeleteAgentRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_agent_empty_call():
    """Verify a no-argument delete_agent() call sends a default DeleteAgentRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_agent), "__call__") as call:
        client.delete_agent()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.DeleteAgentRequest()
@pytest.mark.asyncio
async def test_delete_agent_async(
    transport: str = "grpc_asyncio", request_type=agent.DeleteAgentRequest
):
    """Verify the async delete_agent() forwards the request and returns None."""
    client = AgentsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_agent(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.DeleteAgentRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_agent_async_from_dict():
    """Re-run the async delete_agent test with a plain dict as the request type."""
    await test_delete_agent_async(request_type=dict)
def test_delete_agent_field_headers():
    """Verify delete_agent() sends the routing header derived from request.parent."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.DeleteAgentRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_agent), "__call__") as call:
        call.return_value = None
        client.delete_agent(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_agent_field_headers_async():
    """Verify the async delete_agent() sends the routing header derived from request.parent."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.DeleteAgentRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_agent), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_agent(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_delete_agent_flattened():
    """Verify delete_agent() accepts the flattened ``parent`` kwarg and copies it into the request."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_agent(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_delete_agent_flattened_error():
    """Verify delete_agent() raises ValueError when given both a request object and flattened fields."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_agent(
            agent.DeleteAgentRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_delete_agent_flattened_async():
    """Verify the async delete_agent() accepts the flattened ``parent`` kwarg and copies it into the request."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (The awaitable wrapper is required for the async surface; the former
        # plain ``None`` assignment here was dead code and is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_agent(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_agent_flattened_error_async():
    """Verify the async delete_agent() raises ValueError for request object plus flattened fields."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_agent(
            agent.DeleteAgentRequest(), parent="parent_value",
        )
@pytest.mark.parametrize("request_type", [agent.SearchAgentsRequest, dict,])
def test_search_agents(request_type, transport: str = "grpc"):
    """Verify search_agents() forwards the request and wraps the response in a pager."""
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_agents), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = agent.SearchAgentsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.search_agents(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.SearchAgentsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.SearchAgentsPager)
    assert response.next_page_token == "next_page_token_value"
def test_search_agents_empty_call():
    """Verify a no-argument search_agents() call sends a default SearchAgentsRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_agents), "__call__") as call:
        client.search_agents()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.SearchAgentsRequest()
@pytest.mark.asyncio
async def test_search_agents_async(
    transport: str = "grpc_asyncio", request_type=agent.SearchAgentsRequest
):
    """Verify the async search_agents() forwards the request and wraps the response in an async pager."""
    client = AgentsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_agents), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            agent.SearchAgentsResponse(next_page_token="next_page_token_value",)
        )
        response = await client.search_agents(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.SearchAgentsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.SearchAgentsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_search_agents_async_from_dict():
    """Re-run the async search_agents test with a plain dict as the request type."""
    await test_search_agents_async(request_type=dict)
def test_search_agents_field_headers():
    """Verify search_agents() sends the routing header derived from request.parent."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.SearchAgentsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_agents), "__call__") as call:
        call.return_value = agent.SearchAgentsResponse()
        client.search_agents(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_search_agents_field_headers_async():
    """Verify the async search_agents() sends the routing header derived from request.parent."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.SearchAgentsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_agents), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            agent.SearchAgentsResponse()
        )
        await client.search_agents(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_search_agents_flattened():
    """Verify search_agents() accepts the flattened ``parent`` kwarg and copies it into the request."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_agents), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = agent.SearchAgentsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.search_agents(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_search_agents_flattened_error():
    """Verify search_agents() raises ValueError when given both a request object and flattened fields."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.search_agents(
            agent.SearchAgentsRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_search_agents_flattened_async():
    """Verify the async search_agents() accepts the flattened ``parent`` kwarg and copies it into the request."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_agents), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (The awaitable wrapper is required for the async surface; the former
        # plain ``agent.SearchAgentsResponse()`` assignment here was dead code
        # and is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            agent.SearchAgentsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.search_agents(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_search_agents_flattened_error_async():
    """Verify the async search_agents() raises ValueError for request object plus flattened fields."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.search_agents(
            agent.SearchAgentsRequest(), parent="parent_value",
        )
def test_search_agents_pager(transport_name: str = "grpc"):
    """Verify the search_agents pager iterates all agents across multiple pages."""
    client = AgentsClient(
        # Instantiate the credentials; the original passed the class itself
        # (missing ``()``), inconsistent with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_agents), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            agent.SearchAgentsResponse(
                agents=[agent.Agent(), agent.Agent(), agent.Agent(),],
                next_page_token="abc",
            ),
            agent.SearchAgentsResponse(agents=[], next_page_token="def",),
            agent.SearchAgentsResponse(agents=[agent.Agent(),], next_page_token="ghi",),
            agent.SearchAgentsResponse(agents=[agent.Agent(), agent.Agent(),],),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.search_agents(request={})
        assert pager._metadata == metadata
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, agent.Agent) for i in results)
def test_search_agents_pages(transport_name: str = "grpc"):
    """Verify the search_agents pager exposes raw pages with their next_page_token values."""
    client = AgentsClient(
        # Instantiate the credentials; the original passed the class itself
        # (missing ``()``), inconsistent with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.search_agents), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            agent.SearchAgentsResponse(
                agents=[agent.Agent(), agent.Agent(), agent.Agent(),],
                next_page_token="abc",
            ),
            agent.SearchAgentsResponse(agents=[], next_page_token="def",),
            agent.SearchAgentsResponse(agents=[agent.Agent(),], next_page_token="ghi",),
            agent.SearchAgentsResponse(agents=[agent.Agent(), agent.Agent(),],),
            RuntimeError,
        )
        pages = list(client.search_agents(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_search_agents_async_pager():
    """Verify the async search_agents pager iterates all agents across multiple pages."""
    # Instantiate the credentials; the original passed the class itself
    # (missing ``()``), inconsistent with every other test in this file.
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_agents), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            agent.SearchAgentsResponse(
                agents=[agent.Agent(), agent.Agent(), agent.Agent(),],
                next_page_token="abc",
            ),
            agent.SearchAgentsResponse(agents=[], next_page_token="def",),
            agent.SearchAgentsResponse(agents=[agent.Agent(),], next_page_token="ghi",),
            agent.SearchAgentsResponse(agents=[agent.Agent(), agent.Agent(),],),
            RuntimeError,
        )
        async_pager = await client.search_agents(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, agent.Agent) for i in responses)
@pytest.mark.asyncio
async def test_search_agents_async_pages():
    """Verify the async search_agents pager exposes raw pages with their next_page_token values."""
    # Instantiate the credentials; the original passed the class itself
    # (missing ``()``), inconsistent with every other test in this file.
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_agents), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            agent.SearchAgentsResponse(
                agents=[agent.Agent(), agent.Agent(), agent.Agent(),],
                next_page_token="abc",
            ),
            agent.SearchAgentsResponse(agents=[], next_page_token="def",),
            agent.SearchAgentsResponse(agents=[agent.Agent(),], next_page_token="ghi",),
            agent.SearchAgentsResponse(agents=[agent.Agent(), agent.Agent(),],),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.search_agents(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [agent.TrainAgentRequest, dict,])
def test_train_agent(request_type, transport: str = "grpc"):
    """TrainAgent (sync): an empty request reaches the stub and returns an LRO future."""
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.train_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.train_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.TrainAgentRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_train_agent_empty_call():
    """TrainAgent: calling with no arguments sends a default TrainAgentRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.train_agent), "__call__") as call:
        client.train_agent()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.TrainAgentRequest()
@pytest.mark.asyncio
async def test_train_agent_async(
    transport: str = "grpc_asyncio", request_type=agent.TrainAgentRequest
):
    """TrainAgent (async): an empty request reaches the stub and returns an LRO future."""
    client = AgentsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.train_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.train_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.TrainAgentRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_train_agent_async_from_dict():
    """TrainAgent (async): a plain dict is accepted as the request."""
    await test_train_agent_async(request_type=dict)
def test_train_agent_field_headers():
    """TrainAgent (sync): `parent` is propagated as an x-goog-request-params header."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.TrainAgentRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.train_agent), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.train_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_train_agent_field_headers_async():
    """TrainAgent (async): `parent` is propagated as an x-goog-request-params header."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.TrainAgentRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.train_agent), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.train_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_train_agent_flattened():
    """TrainAgent (sync): a flattened `parent` kwarg populates the request proto."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.train_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.train_agent(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_train_agent_flattened_error():
    """TrainAgent (sync): request object plus flattened field raises ValueError."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.train_agent(
            agent.TrainAgentRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_train_agent_flattened_async():
    """TrainAgent (async): a flattened `parent` kwarg populates the request proto."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.train_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        # NOTE(fix): the previous plain-Operation assignment was dead code —
        # it was immediately overwritten by the awaitable fake below.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.train_agent(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_train_agent_flattened_error_async():
    """TrainAgent (async): request object plus flattened field raises ValueError."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.train_agent(
            agent.TrainAgentRequest(), parent="parent_value",
        )
@pytest.mark.parametrize("request_type", [agent.ExportAgentRequest, dict,])
def test_export_agent(request_type, transport: str = "grpc"):
    """ExportAgent (sync): an empty request reaches the stub and returns an LRO future."""
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.export_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.ExportAgentRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_export_agent_empty_call():
    """ExportAgent: calling with no arguments sends a default ExportAgentRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_agent), "__call__") as call:
        client.export_agent()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.ExportAgentRequest()
@pytest.mark.asyncio
async def test_export_agent_async(
    transport: str = "grpc_asyncio", request_type=agent.ExportAgentRequest
):
    """ExportAgent (async): an empty request reaches the stub and returns an LRO future."""
    client = AgentsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.export_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.ExportAgentRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_export_agent_async_from_dict():
    """ExportAgent (async): a plain dict is accepted as the request."""
    await test_export_agent_async(request_type=dict)
def test_export_agent_field_headers():
    """ExportAgent (sync): `parent` is propagated as an x-goog-request-params header."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.ExportAgentRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_agent), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.export_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_export_agent_field_headers_async():
    """ExportAgent (async): `parent` is propagated as an x-goog-request-params header."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.ExportAgentRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_agent), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.export_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_export_agent_flattened():
    """ExportAgent (sync): a flattened `parent` kwarg populates the request proto."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.export_agent(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_export_agent_flattened_error():
    """ExportAgent (sync): request object plus flattened field raises ValueError."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.export_agent(
            agent.ExportAgentRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_export_agent_flattened_async():
    """ExportAgent (async): a flattened `parent` kwarg populates the request proto."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        # NOTE(fix): the previous plain-Operation assignment was dead code —
        # it was immediately overwritten by the awaitable fake below.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.export_agent(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_export_agent_flattened_error_async():
    """ExportAgent (async): request object plus flattened field raises ValueError."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.export_agent(
            agent.ExportAgentRequest(), parent="parent_value",
        )
@pytest.mark.parametrize("request_type", [agent.ImportAgentRequest, dict,])
def test_import_agent(request_type, transport: str = "grpc"):
    """ImportAgent (sync): an empty request reaches the stub and returns an LRO future."""
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.import_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.import_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.ImportAgentRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_import_agent_empty_call():
    """ImportAgent: calling with no arguments sends a default ImportAgentRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.import_agent), "__call__") as call:
        client.import_agent()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.ImportAgentRequest()
@pytest.mark.asyncio
async def test_import_agent_async(
    transport: str = "grpc_asyncio", request_type=agent.ImportAgentRequest
):
    """ImportAgent (async): an empty request reaches the stub and returns an LRO future."""
    client = AgentsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.import_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.import_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.ImportAgentRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_import_agent_async_from_dict():
    """ImportAgent (async): a plain dict is accepted as the request."""
    await test_import_agent_async(request_type=dict)
def test_import_agent_field_headers():
    """ImportAgent (sync): `parent` is propagated as an x-goog-request-params header."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.ImportAgentRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.import_agent), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.import_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_import_agent_field_headers_async():
    """ImportAgent (async): `parent` is propagated as an x-goog-request-params header."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.ImportAgentRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.import_agent), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.import_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [agent.RestoreAgentRequest, dict,])
def test_restore_agent(request_type, transport: str = "grpc"):
    """RestoreAgent (sync): an empty request reaches the stub and returns an LRO future."""
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.restore_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.restore_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.RestoreAgentRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_restore_agent_empty_call():
    """RestoreAgent: calling with no arguments sends a default RestoreAgentRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.restore_agent), "__call__") as call:
        client.restore_agent()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.RestoreAgentRequest()
@pytest.mark.asyncio
async def test_restore_agent_async(
    transport: str = "grpc_asyncio", request_type=agent.RestoreAgentRequest
):
    """RestoreAgent (async): an empty request reaches the stub and returns an LRO future."""
    client = AgentsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.restore_agent), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.restore_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.RestoreAgentRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_restore_agent_async_from_dict():
    """RestoreAgent (async): a plain dict is accepted as the request."""
    await test_restore_agent_async(request_type=dict)
def test_restore_agent_field_headers():
    """RestoreAgent (sync): `parent` is propagated as an x-goog-request-params header."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.RestoreAgentRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.restore_agent), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.restore_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_restore_agent_field_headers_async():
    """RestoreAgent (async): `parent` is propagated as an x-goog-request-params header."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.RestoreAgentRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.restore_agent), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.restore_agent(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [agent.GetValidationResultRequest, dict,])
def test_get_validation_result(request_type, transport: str = "grpc"):
    """GetValidationResult (sync): an empty request returns a ValidationResult."""
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_validation_result), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = validation_result.ValidationResult()
        response = client.get_validation_result(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.GetValidationResultRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, validation_result.ValidationResult)
def test_get_validation_result_empty_call():
    """GetValidationResult: no-argument call sends a default GetValidationResultRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_validation_result), "__call__"
    ) as call:
        client.get_validation_result()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.GetValidationResultRequest()
@pytest.mark.asyncio
async def test_get_validation_result_async(
    transport: str = "grpc_asyncio", request_type=agent.GetValidationResultRequest
):
    """GetValidationResult (async): an empty request returns a ValidationResult."""
    client = AgentsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_validation_result), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            validation_result.ValidationResult()
        )
        response = await client.get_validation_result(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == agent.GetValidationResultRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, validation_result.ValidationResult)
@pytest.mark.asyncio
async def test_get_validation_result_async_from_dict():
    """GetValidationResult (async): a plain dict is accepted as the request."""
    await test_get_validation_result_async(request_type=dict)
def test_get_validation_result_field_headers():
    """GetValidationResult (sync): `parent` becomes an x-goog-request-params header."""
    client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.GetValidationResultRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_validation_result), "__call__"
    ) as call:
        call.return_value = validation_result.ValidationResult()
        client.get_validation_result(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_validation_result_field_headers_async():
    """GetValidationResult (async): `parent` becomes an x-goog-request-params header."""
    client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = agent.GetValidationResultRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_validation_result), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            validation_result.ValidationResult()
        )
        await client.get_validation_result(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_credentials_transport_error():
    """Mutually exclusive client options (credentials/transport/api_key/scopes) raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.AgentsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = AgentsClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )

    # It is an error to provide a credentials file and a transport instance.
    transport = transports.AgentsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = AgentsClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )

    # It is an error to provide an api_key and a transport instance.
    transport = transports.AgentsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = AgentsClient(client_options=options, transport=transport,)

    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = AgentsClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )

    # It is an error to provide scopes and a transport instance.
    transport = transports.AgentsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = AgentsClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client accepts — and keeps — a caller-supplied transport instance."""
    custom_transport = transports.AgentsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    agents_client = AgentsClient(transport=custom_transport)
    # The client must hold the exact object it was given, not a copy.
    assert agents_client.transport is custom_transport
def test_transport_get_channel():
    """Both gRPC transport flavors expose a usable `grpc_channel`."""
    for transport_cls in (
        transports.AgentsGrpcTransport,
        transports.AgentsGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # A freshly constructed transport lazily creates a truthy channel.
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [transports.AgentsGrpcTransport, transports.AgentsGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
    """Transports fall back to Application Default Credentials when none are given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """The synchronous client defaults to the gRPC transport."""
    default_client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(default_client.transport, transports.AgentsGrpcTransport)
def test_agents_base_transport_error():
    """Supplying both `credentials` and `credentials_file` is rejected."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.AgentsTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_agents_base_transport():
    """Every RPC method and the LRO client on the abstract base transport raise NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.dialogflow_v2beta1.services.agents.transports.AgentsTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.AgentsTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )

    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "get_agent",
        "set_agent",
        "delete_agent",
        "search_agents",
        "train_agent",
        "export_agent",
        "import_agent",
        "restore_agent",
        "get_validation_result",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())

    with pytest.raises(NotImplementedError):
        transport.close()

    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_agents_base_transport_with_credentials_file():
    """A credentials file is loaded with the expected default scopes and quota project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.dialogflow_v2beta1.services.agents.transports.AgentsTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.AgentsTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id="octopus",
        )
def test_agents_base_transport_with_adc():
    """With neither credentials nor a credentials file, ADC should be used."""
    with mock.patch.object(google.auth, "default", autospec=True) as mock_adc, mock.patch(
        "google.cloud.dialogflow_v2beta1.services.agents.transports.AgentsTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        mock_adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.AgentsTransport()
        # Application Default Credentials must be consulted exactly once.
        mock_adc.assert_called_once()
def test_agents_auth_adc():
    """The client should fall back to ADC with the service's default scopes."""
    with mock.patch.object(google.auth, "default", autospec=True) as mock_adc:
        mock_adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        AgentsClient()
        # ADC is queried once, carrying the Dialogflow default scopes and no
        # user-supplied scopes or quota project.
        mock_adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.AgentsGrpcTransport, transports.AgentsGrpcAsyncIOTransport],
)
def test_agents_transport_auth_adc(transport_class):
    """Transports should fall back to ADC, merging user and default scopes."""
    with mock.patch.object(google.auth, "default", autospec=True) as mock_adc:
        mock_adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # ADC is queried with the caller's scopes plus the service defaults.
        mock_adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.AgentsGrpcTransport, grpc_helpers),
        (transports.AgentsGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_agents_transport_create_channel(transport_class, grpc_helpers):
    """The transport should open its channel with ADC creds and default options."""
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as mock_default, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as mock_create_channel:
        adc_creds = ga_credentials.AnonymousCredentials()
        mock_default.return_value = (adc_creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # The channel targets the default host with ADC credentials, the
        # caller's scopes, the service default scopes, and unlimited gRPC
        # message sizes.
        mock_create_channel.assert_called_with(
            "dialogflow.googleapis.com:443",
            credentials=adc_creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            scopes=["1", "2"],
            default_host="dialogflow.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.AgentsGrpcTransport, transports.AgentsGrpcAsyncIOTransport],
)
def test_agents_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS wiring: an explicit ssl_channel_credentials object wins; otherwise
    the client_cert_source_for_mtls callback feeds grpc.ssl_channel_credentials."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        # The explicit SSL credentials object must be passed straight through
        # to create_channel unchanged.
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            # The cert/key pair produced by the callback must be exactly what
            # is handed to grpc.ssl_channel_credentials.
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_agents_host_no_port():
    """An api_endpoint without a port should default to :443."""
    options = client_options.ClientOptions(api_endpoint="dialogflow.googleapis.com")
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "dialogflow.googleapis.com:443"
def test_agents_host_with_port():
    """An api_endpoint carrying an explicit port should be kept verbatim."""
    options = client_options.ClientOptions(api_endpoint="dialogflow.googleapis.com:8000")
    client = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_agents_grpc_transport_channel():
    """A caller-provided gRPC channel must be used verbatim by the transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.AgentsGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare to None by identity (PEP 8 / flake8 E711), not `== None`.
    assert transport._ssl_channel_credentials is None
def test_agents_grpc_asyncio_transport_channel():
    """A caller-provided asyncio gRPC channel must be used verbatim."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.AgentsGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare to None by identity (PEP 8 / flake8 E711), not `== None`.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.AgentsGrpcTransport, transports.AgentsGrpcAsyncIOTransport],
)
def test_agents_transport_channel_mtls_with_client_cert_source(transport_class):
    """Deprecated api_mtls_endpoint/client_cert_source still build an mTLS channel."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # The deprecated keyword arguments must still work but emit a
            # DeprecationWarning; credentials fall back to ADC (patched here).
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            # The callback's cert/key pair must be fed to
            # grpc.ssl_channel_credentials (the harness callback yields these bytes).
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            # The channel is created against the mTLS endpoint with the mocked
            # SSL credentials and unlimited gRPC message sizes.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.AgentsGrpcTransport, transports.AgentsGrpcAsyncIOTransport],
)
def test_agents_transport_channel_mtls_with_adc(transport_class):
    """Deprecated api_mtls_endpoint with no cert source falls back to ADC SslCredentials."""
    mock_ssl_cred = mock.Mock()
    # Stub google.auth.transport.grpc.SslCredentials so its ssl_credentials
    # property yields our mock without touching real ADC mTLS material.
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # Passing the deprecated api_mtls_endpoint must warn...
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            # ...and the channel must target the mTLS endpoint with the
            # ADC-derived SSL credentials.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_agents_grpc_lro_client():
    """The grpc transport must expose a cached api-core operations client."""
    lro_transport = AgentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    ).transport
    # An api-core OperationsClient backs long-running operations.
    assert isinstance(lro_transport.operations_client, operations_v1.OperationsClient)
    # The property is memoized: repeated access yields the same object.
    assert lro_transport.operations_client is lro_transport.operations_client
def test_agents_grpc_lro_async_client():
    """The grpc_asyncio transport must expose a cached async operations client."""
    lro_transport = AgentsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    ).transport
    # An api-core OperationsAsyncClient backs long-running operations.
    assert isinstance(
        lro_transport.operations_client, operations_v1.OperationsAsyncClient
    )
    # The property is memoized: repeated access yields the same object.
    assert lro_transport.operations_client is lro_transport.operations_client
def test_agent_path():
    """agent_path should format projects/{project}/agent."""
    sample_project = "squid"
    expected = "projects/{project}/agent".format(project=sample_project)
    assert AgentsClient.agent_path(sample_project) == expected
def test_parse_agent_path():
    """parse_agent_path should invert agent_path."""
    expected = {"project": "clam"}
    path = AgentsClient.agent_path(**expected)
    # Round-trip: parsing the built path recovers the original components.
    assert AgentsClient.parse_agent_path(path) == expected
def test_common_billing_account_path():
    """common_billing_account_path should format billingAccounts/{billing_account}."""
    account = "whelk"
    expected = "billingAccounts/{billing_account}".format(billing_account=account)
    assert AgentsClient.common_billing_account_path(account) == expected
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path should invert common_billing_account_path."""
    expected = {"billing_account": "octopus"}
    path = AgentsClient.common_billing_account_path(**expected)
    # Round-trip: parsing the built path recovers the original components.
    assert AgentsClient.parse_common_billing_account_path(path) == expected
def test_common_folder_path():
    """common_folder_path should format folders/{folder}."""
    folder_id = "oyster"
    expected = "folders/{folder}".format(folder=folder_id)
    assert AgentsClient.common_folder_path(folder_id) == expected
def test_parse_common_folder_path():
    """parse_common_folder_path should invert common_folder_path."""
    expected = {"folder": "nudibranch"}
    path = AgentsClient.common_folder_path(**expected)
    # Round-trip: parsing the built path recovers the original components.
    assert AgentsClient.parse_common_folder_path(path) == expected
def test_common_organization_path():
    """common_organization_path should format organizations/{organization}."""
    org_id = "cuttlefish"
    expected = "organizations/{organization}".format(organization=org_id)
    assert AgentsClient.common_organization_path(org_id) == expected
def test_parse_common_organization_path():
    """parse_common_organization_path should invert common_organization_path."""
    expected = {"organization": "mussel"}
    path = AgentsClient.common_organization_path(**expected)
    # Round-trip: parsing the built path recovers the original components.
    assert AgentsClient.parse_common_organization_path(path) == expected
def test_common_project_path():
    """common_project_path should format projects/{project}."""
    project_id = "winkle"
    expected = "projects/{project}".format(project=project_id)
    assert AgentsClient.common_project_path(project_id) == expected
def test_parse_common_project_path():
    """parse_common_project_path should invert common_project_path."""
    expected = {"project": "nautilus"}
    path = AgentsClient.common_project_path(**expected)
    # Round-trip: parsing the built path recovers the original components.
    assert AgentsClient.parse_common_project_path(path) == expected
def test_common_location_path():
    """common_location_path should format projects/{project}/locations/{location}."""
    project_id = "scallop"
    location_id = "abalone"
    expected = "projects/{project}/locations/{location}".format(
        project=project_id, location=location_id
    )
    assert AgentsClient.common_location_path(project_id, location_id) == expected
def test_parse_common_location_path():
    """parse_common_location_path should invert common_location_path."""
    expected = {"project": "squid", "location": "clam"}
    path = AgentsClient.common_location_path(**expected)
    # Round-trip: parsing the built path recovers the original components.
    assert AgentsClient.parse_common_location_path(path) == expected
def test_client_with_default_client_info():
    """A custom client_info must reach _prep_wrapped_messages in both paths."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Path 1: constructing the client directly.
    with mock.patch.object(
        transports.AgentsTransport, "_prep_wrapped_messages"
    ) as prep:
        AgentsClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
    prep.assert_called_once_with(client_info)
    # Path 2: constructing the transport via get_transport_class().
    with mock.patch.object(
        transports.AgentsTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_cls = AgentsClient.get_transport_class()
        transport_cls(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
    prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Exiting the async context should close the grpc_asyncio channel once."""
    client = AgentsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    channel_type = type(client.transport.grpc_channel)
    with mock.patch.object(channel_type, "close") as mock_close:
        async with client:
            # Entering the context must not close the channel yet.
            mock_close.assert_not_called()
        mock_close.assert_called_once()
def test_transport_close():
    """Exiting the client context must close the underlying channel exactly once."""
    # Fix: the local dict was named `transports`, shadowing the imported
    # `transports` module used throughout this file; renamed to avoid the
    # fragile shadowing.
    transport_channels = {
        "grpc": "_grpc_channel",
    }
    for transport_name, close_name in transport_channels.items():
        client = AgentsClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                # Entering the context must not close the channel yet.
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Using the client as a context manager must close its transport on exit."""
    # Fix: the local list was named `transports`, shadowing the imported
    # `transports` module used throughout this file; renamed to avoid the
    # fragile shadowing.
    transport_names = [
        "grpc",
    ]
    for transport_name in transport_names:
        client = AgentsClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (AgentsClient, transports.AgentsGrpcTransport),
        (AgentsAsyncClient, transports.AgentsGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """client_options.api_key should be exchanged for API-key credentials."""
    # Patched with create=True so the test works even when the (private)
    # google.auth._default attribute is absent in the installed google-auth.
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            # The transport must be constructed with the API-key-derived
            # credentials and otherwise default settings.
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
|
googleapis/python-dialogflow
|
tests/unit/gapic/dialogflow_v2beta1/test_agents.py
|
Python
|
apache-2.0
| 117,955
|
[
"Octopus"
] |
b616ea875ea31649fedc2a395555b8dfdbe66f35a1bcba138e55b4277ed9d7a2
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.