text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
#!/usr/bin/python
# -*- coding: utf-8
# Copyright (C) 2010 - 2012, A. Murat Eren
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
import argparse
import Oligotyping.lib.oligotyping
version = Oligotyping.lib.oligotyping.__version__
def decomposer():
    """Build the command line parser for Minimum Entropy Decomposition (MED).

    Returns:
        argparse.ArgumentParser: a fully configured parser; the caller is
        expected to invoke ``parse_args()`` on the result.
    """
    parser = argparse.ArgumentParser(description='Minimum Entropy Decomposition (version: %s)' % version)
    parser.add_argument('alignment', metavar='FILEPATH',
                        help='Alignment file that contains all samples and sequences in FASTA format')
    parser.add_argument('-m', '--min-entropy', type=float, default=0.0965, metavar='FLOAT',
                        help='Minimum entropy for a component to have in order to be picked as a '
                             'discriminant. Default: %(default)f')
    parser.add_argument('-X', '--skip-m-normalization', action='store_true', default=False,
                        help='Skip the m normalization heuristics. Normalization heuristics normalize '
                             '--min-entropy parameter per node before decomposition depending on the node '
                             'size, and the most abundant sequence in the dataset. Please see the '
                             'documentation for a more comprehensive description and best practices. '
                             'Default: %(default)s')
    parser.add_argument('-d', '--number-of-discriminants', type=int, default=4, metavar='INTEGER',
                        help='Number of discriminant locations to be used for entropy decomposition. '
                             'Default: %(default)d')
    parser.add_argument('-A', '--min-actual-abundance', type=int, default=0, metavar='INTEGER',
                        help='Minimum number of reads in a node for decomposition to continue. Decomposition '
                             'will continue for any node that has more reads than this number as far as they '
                             'present an entropy that is larger than --min-entropy. This number should be '
                             'chosen carefully depending on the size of the sample. Although this parameter '
                             'is available to you for historical reasons, for noise filtering you are '
                             'encouraged to use the --min-substantive-abundance parameter instead.')
    parser.add_argument('-M', '--min-substantive-abundance', type=int, default=0, metavar='INTEGER',
                        help='Unlike "actual" abundance, "substantive" abundance is interested in the abundance '
                             'of the most abundant read in a node. If the abundance of the most abundant '
                             'unique sequence in a node is smaller than the number given with this parameter '
                             'the node will be eliminated and not included in downstream analyses. This is the '
                             'most appropriate, and in most cases the only, noise filtering parameter. If the '
                             'user does not set a value for minimum substantive abundance, the MED algorithm '
                             'will set one by default by dividing the number of reads in the input dataset '
                             'by 5,000.')
    parser.add_argument('-V', '--maximum-variation-allowed', type=int, default=None, metavar='INTEGER',
                        help='This parameter is used to remove "outliers" from nodes. If the similarity of a '
                             'read in a node to the representative sequence of the node is less than '
                             '--maximum-variation-allowed, it is identified as an outlier. If not set, this '
                             'value is computed depending on the average read length.')
    parser.add_argument('-t', '--sample-name-separator', type=str, default='_', metavar='CHAR',
                        help='Character that separates sample name from unique info in the defline. For '
                             'instance if the defline says >sample-1_GD7BRW402IVMZE, the separator should be '
                             'set to "_" (which is the default character).')
    parser.add_argument('-o', '--output-directory', help='Output directory', default=None)
    parser.add_argument('-p', '--project', default=None, type=str, metavar='STR',
                        help='When a project name is set, given name will be used in figures whenever possible.')
    parser.add_argument('-g', '--generate-frequency-curves', action='store_true', default=False,
                        help='When set, a figure with the frequency curve for unique reads and entropy '
                             'distribution will be generated for each node. Depending on the number of nodes, '
                             'this might be a time consuming step.')
    parser.add_argument('-S', '--skip-removing-outliers', action='store_true', default=False,
                        help='When set, outliers will not be removed from nodes.')
    parser.add_argument('-H', '--merge-homopolymer-splits', action='store_true', default=False,
                        help='When set, nodes that differ from each other by only one nucleotide that happens '
                             'to be observed as an insertion at the upstream or downstream of a homopolymer '
                             'region will be merged.')
    parser.add_argument('-R', '--relocate-outliers', action='store_true', default=False,
                        help='Outliers are identified in two places: (1) during the raw topology computation '
                             'and (2) during the refinement step where distant reads are removed from nodes. '
                             'This parameter, when set, makes the pipeline go through each read identified as '
                             'an outlier and try to find the best nodes for them. Please read the '
                             'documentation for details. This step might take a long time. '
                             'Default: %(default)s')
    parser.add_argument('-F', '--store-topology-dict', action='store_true', default=False,
                        help='When set, a topology dict with read ids will be generated. This may take a very '
                             'large disk space and computation time for large data sets')
    parser.add_argument('-K', '--keep-tmp', action='store_true', default=False,
                        help='When set, the directory with temporary BLAST results will not be deleted at the '
                             'end of the run. It may be necessary to debug the results')
    parser.add_argument('-T', '--no-threading', action='store_true', default=False,
                        help='When set, decomposer does not spawn multiple threads. Default behavior is '
                             'multi-threaded.')
    parser.add_argument('-N', '--number-of-threads', type=int, default=None, metavar='INTEGER',
                        help='Number of threads to use. It is a good idea to keep this number smaller than the '
                             'number of CPU cores available. If not set, this number will be set to 90%% of '
                             'available cores, or (available cores - 1) if 10%% of the cores is a number '
                             'smaller than 1')
    parser.add_argument('-E', '--sample-mapping', metavar='FILEPATH', default=None,
                        help='TAB delimited categorical mapping of samples to be used for post-analysis '
                             'visualizations. Refer to the tutorial for the file format')
    parser.add_argument('--gen-html', action='store_true', default=False,
                        help='When set, decomposer will generate a static HTML output to browse analysis results')
    parser.add_argument('--skip-gen-figures', action='store_true', default=False,
                        help='When set, decomposer will not attempt to generate figures post analysis')
    parser.add_argument('--skip-check-input-file', action='store_true', default=False,
                        help='When set, input FASTA will not be checked for potential errors')
    parser.add_argument('--skip-gexf-files', action='store_true', default=False,
                        help='When set, GEXF files for network and topology will not be generated')
    parser.add_argument('--quick', action='store_true', default=False,
                        help='When set, the pipeline will do only the essential steps, skipping anything '
                             'auxiliary, even if other parameters require otherwise. Please do not use it '
                             'other than for benchmarking or testing purposes')
    parser.add_argument('--version', action='store_true', default=False,
                        help='Print version and exit.')
    return parser
def oligotyping():
    """Build the command line parser for the oligotyping pipeline.

    Returns:
        argparse.ArgumentParser: a fully configured parser; the caller is
        expected to invoke ``parse_args()`` on the result.
    """
    parser = argparse.ArgumentParser(description='Oligotyping (version: %s)' % version)
    parser.add_argument('alignment', metavar='INPUT ALIGNMENT',
                        help='Alignment file that contains all samples and sequences in FASTA format')
    parser.add_argument('entropy', metavar='ENTROPY',
                        help='File that contains the columns and the entropy values computed previously')
    parser.add_argument('-o', '--output-directory', help='Output directory', default=None)
    parser.add_argument('-c', '--number-of-auto-components', type=int, default=None,
                        help='Number of components to use from the alignment to generate oligotypes. This is '
                             'a completely arbitrary value; the number of components should be determined '
                             'after a careful examination of the entropy figure.')
    parser.add_argument('--qual-scores-file', metavar='QUAL SCORES FILE',
                        help='FASTA formatted file that contains PHRED base call values '
                             'for each read in the alignment file')
    parser.add_argument('--qual-scores-dict', metavar='QUAL SCORES DICT',
                        help='Previously computed and serialized dictionary that contains '
                             'PHRED base call values for each read in the alignment file. If you '
                             'provide --qual-scores-file, that file will be used to recompute this '
                             'dictionary and the file you refer to with this parameter will '
                             'not be used')
    parser.add_argument('--qual-stats-dict', metavar='QUAL STATS DICT',
                        help='Previously computed and serialized dictionary that contains '
                             'PHRED base call quality score statistics for the alignment file. If '
                             'you provide --qual-scores-dict, it will be used to recompute this '
                             'dictionary and the file you refer to with this parameter will '
                             'actually not be used')
    parser.add_argument('-q', '--min-base-quality', type=int, default=15,
                        help='Minimum quality score for each base in locations of interest of a read to be '
                             'considered in an oligotype. When base quality score files are provided, this '
                             'value makes sure that low quality bases that are more likely to be the result '
                             'of random sequencing errors do not create artificial oligotypes. Any read that '
                             'has a lower quality score than the given value will simply be discarded. This '
                             'parameter is only in effect when --qual-scores-file or --qual-scores-dict '
                             'parameters are used. Default is %(default)d.')
    parser.add_argument('-C', '--selected-components', type=str, default=None,
                        help='Comma separated entropy components to be used during the oligotyping process.')
    parser.add_argument('-s', '--min-number-of-samples', type=int, default=1,
                        help='Minimum number of samples an oligotype is expected to appear in. This parameter '
                             'should be defined based on the number of samples included in the analysis. If '
                             'there are 10 samples, 3 might be a good choice; if there are 5 samples, 1 would '
                             'be a better one depending on the study. Default is %(default)d.')
    parser.add_argument('-a', '--min-percent-abundance', type=float, default=0.0,
                        help='Minimum percent abundance of an oligotype in at least one sample. Just like the '
                             '--min-number-of-samples parameter, this parameter too is to eliminate oligotypes '
                             'that are formed by sequencing errors occurred at the component of interest. The '
                             'value should be decided based on the average number of sequences every sample '
                             'has. Default is %(default)f.')
    parser.add_argument('-A', '--min-actual-abundance', type=int, default=0,
                        help='Minimum total abundance of an oligotype in all datasets. If the total abundance '
                             'of an oligotype is smaller than the number given with this parameter, the '
                             'oligotype will be eliminated and not included in downstream analyses. Default '
                             'is %(default)d.')
    parser.add_argument('-M', '--min-substantive-abundance', type=int, default=0,
                        help='Unlike "actual" abundance, "substantive" abundance is interested in the abundance '
                             'of the most abundant read in an oligotype. If the abundance of the most abundant '
                             'unique sequence in an oligotype is smaller than the number given with this '
                             'parameter the oligotype will be eliminated and not included in downstream '
                             'analyses. Default is %(default)d.')
    parser.add_argument('-t', '--sample-name-separator', type=str, default='_',
                        help='Character that separates sample name from unique info in the defline. For '
                             'instance if the defline says >sample-1_GD7BRW402IVMZE, the separator should be '
                             'set to "_" (which is the default character).')
    parser.add_argument('-l', '--limit-representative-sequences', type=int, default=None,
                        help='At the end of the oligotyping, sequences that are represented by the same '
                             'oligotype are uniqued and stored in separate files. The number of sequences '
                             'to keep from the frequency ordered list can be defined with this parameter '
                             '(e.g. -l 10 would make it possible that only the first 10 sequences are '
                             'stored). By default everything is stored, but when the sample size is too big '
                             'this could take up disk space.')
    parser.add_argument('--limit-oligotypes-to', type=str, default=None,
                        help='Comma separated list of oligotypes to be taken into account during the analysis. '
                             'All other oligotypes will be discarded if a list of oligotypes is specified '
                             'with this parameter.')
    parser.add_argument('-e', '--exclude-oligotypes', type=str, default=None,
                        help='Comma separated list of oligotypes to be excluded from the analysis.')
    parser.add_argument('--quick', action='store_true', default=False,
                        help='Some relatively insignificant parts of the analysis may take a lot of time, such '
                             'as generating figures for representative sequences. When this parameter is set, '
                             'all trivial steps would be skipped to give results as soon as possible.')
    parser.add_argument('--no-figures', action='store_true', default=False,
                        help='When set, no figures will be generated or displayed.')
    parser.add_argument('--blast-ref-db', default=None, type=str,
                        help='When set, BLAST search will be done locally against the ref db (local BLAST '
                             'search requires NCBI+ tools)')
    parser.add_argument('--colors-list-file', default=None, type=str,
                        help='Optional file that contains HTML color codes in each line to color oligotypes. '
                             'The number of colors in the file has to be equal to or greater than the number '
                             'of abundant oligotypes, for which colors are going to be used.')
    parser.add_argument('--skip-blast-search', action='store_true', default=False,
                        help='When set, BLAST search step will not be performed.')
    parser.add_argument('--no-display', action='store_true', default=False,
                        help='When set, no figures will be shown.')
    parser.add_argument('--gen-html', action='store_true', default=False,
                        help='Generate static HTML output to browse analysis results.')
    parser.add_argument('--generate-sets', action='store_true', default=False,
                        help='Agglomerate oligotypes into oligotype sets when their frequency patterns across '
                             'samples are similar. Oligotype sets simply put oligotypes into the same set if '
                             'they co-occur in samples consistently.')
    parser.add_argument('-K', '--keep-tmp', action='store_true', default=False,
                        help='When set, the directory with temporary results will not be deleted at the end '
                             'of the run. It may be necessary to debug the results')
    parser.add_argument('-S', '--cosine-similarity-threshold', default=0.1, type=float, metavar='COS_SIM_TR',
                        help='This value is used to agglomerate oligotypes into higher order groups. The '
                             'higher the threshold is, the more oligotypes will be pulled together. Cosine '
                             'similarity would return 0 for two perfectly similar vectors. Default is '
                             '%(default)f.')
    parser.add_argument('-E', '--sample-mapping', metavar='FILEPATH', default=None,
                        help='TAB delimited categorical mapping of samples to be used for post-analysis '
                             'visualizations. Refer to the tutorial for the file format')
    parser.add_argument('--project', default=None, type=str,
                        help='When a project name is set, given name will be used in figures whenever possible.')
    parser.add_argument('--skip-check-input-file', action='store_true', default=False,
                        help='When set, input FASTA will not be checked for potential errors')
    parser.add_argument('--skip-basic-analyses', action='store_true', default=False,
                        help='When set, basic analyses, such as basic NMDS plots and clustering, will be '
                             'skipped')
    parser.add_argument('--skip-gexf-network-file', action='store_true', default=False,
                        help='When set, GEXF network file will not be generated')
    parser.add_argument('-T', '--no-threading', action='store_true', default=False,
                        help='When set, oligotyping will not spawn multiple threads. Default behavior is '
                             'multi-threaded whenever possible.')
    parser.add_argument('-N', '--number-of-threads', type=int, default=None, metavar='INTEGER',
                        help='Number of threads to use. It is a good idea to keep this number smaller than the '
                             'number of CPU cores available. If not set, this number will be set to 90%% of '
                             'available cores, or (available cores - 1) if 10%% of the cores is a number '
                             'smaller than 1')
    parser.add_argument('--version', action='store_true', default=False,
                        help='Print version and exit.')
    return parser
def entropy():
    """Build the command line parser for the entropy analysis tool.

    Returns:
        argparse.ArgumentParser: a fully configured parser; the caller is
        expected to invoke ``parse_args()`` on the result.
    """
    parser = argparse.ArgumentParser(description='Entropy Analysis (version: %s)' % version)
    parser.add_argument('alignment', metavar='ALIGNMENT',
                        help='Alignment file that contains all samples and sequences in FASTA format')
    parser.add_argument('--qual-scores-file', metavar='QUAL SCORES FILE',
                        help='FASTA formatted file that contains PHRED base call values '
                             'for each read in the alignment file')
    parser.add_argument('--qual-scores-dict', metavar='QUAL SCORES DICT',
                        help='Previously computed and serialized dictionary that contains '
                             'PHRED base call values for each read in the alignment file. If you '
                             'provide --qual-scores-file, that file will be used to recompute this '
                             'dictionary and the file you refer to with this parameter will '
                             'not be used')
    parser.add_argument('--qual-stats-dict', metavar='QUAL STATS DICT',
                        help='Previously computed and serialized dictionary that contains '
                             'PHRED base call quality score statistics for the alignment file. If '
                             'you provide --qual-scores-dict, it will be used to recompute this '
                             'dictionary and the file you refer to with this parameter will '
                             'actually not be used')
    parser.add_argument('--uniqued', action='store_true', default=False,
                        help='When set, entropy computation will assume that the reads '
                             'in the FASTA file are unique. Frequency information of unique reads '
                             'must be stored in the deflines. Every defline in the FASTA file '
                             'must present the frequency information in this format: '
                             '"freq:NUMBER", e.g. ">Read_ID|X|Y|freq:42", or ">Read_ID|freq:42|X|Y"')
    parser.add_argument('--weighted', action='store_true', default=False,
                        help='When set, entropy computation per column will use '
                             'mean quality score for each column.')
    parser.add_argument('--amino-acid-sequences', action='store_true', default=False,
                        help='If sequences are composed of amino acids, instead of '
                             'nucleotides.')
    parser.add_argument('--quick', action='store_true', default=False,
                        help='When set, entropy values will be shown as fast as '
                             'possible (some visualization steps will be skipped).')
    parser.add_argument('--no-display', action='store_true', default=False,
                        help='When set, no figures will be shown.')
    parser.add_argument('--version', action='store_true', default=False,
                        help='Print version and exit.')
    return parser
|
jooolia/oligotyping
|
Oligotyping/utils/parsers.py
|
Python
|
gpl-2.0
| 23,979
|
[
"BLAST"
] |
588ae9dd47044e38044d26eb14439604b8ef53be36b4be76c167e078e2b48a2a
|
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Chain class, used in Structure objects."""
from Bio.PDB.Entity import Entity
__docformat__ = "restructuredtext en"
class Chain(Entity):
    """Chain container: holds the Residue objects belonging to one chain.

    A residue id is the tuple (hetero flag, sequence identifier, insertion
    code); plain ints are accepted wherever an id is expected and are
    expanded to (" ", id, " ").
    """

    def __init__(self, id):
        # Entity level code "C" identifies this entity as a chain.
        self.level = "C"
        Entity.__init__(self, id)

    # Private methods

    def _sort(self, r1, r2):
        """Comparison function ordering residues within a chain.

        Protein and nucleic acid residues come first, hetatm residues
        next, and waters last (decided by the first character of the
        hetero field). Within each group residues are ordered by resseq
        (sequence identifier), and ties are broken by insertion code.

        Arguments:
         - r1, r2 - Residue objects
        """
        het1, seq1, icode1 = r1.id
        het2, seq2, icode2 = r2.id
        if het1 != het2:
            return cmp(het1[0], het2[0])
        if seq1 != seq2:
            return cmp(seq1, seq2)
        return cmp(icode1, icode2)

    def _translate_id(self, id):
        """Expand an int residue id to the full (" ", id, " ") tuple.

        Since for most residues the hetero flag and the insertion code are
        blank (i.e. " "), callers may index a residue with just the
        sequence identifier; tuples are passed through unchanged.

        Arguments:
         - id - int, residue resseq
        """
        return (' ', id, ' ') if isinstance(id, int) else id

    # Special methods

    def __getitem__(self, id):
        """Return the residue with the given id (int or full tuple)."""
        return Entity.__getitem__(self, self._translate_id(id))

    def __contains__(self, id):
        """True if a residue with the given id is present in this chain."""
        return Entity.__contains__(self, self._translate_id(id))

    def __delitem__(self, id):
        """Remove the residue with the given id (int or full tuple)."""
        return Entity.__delitem__(self, self._translate_id(id))

    def __repr__(self):
        return "<Chain id=%s>" % self.get_id()

    # Public methods

    def get_unpacked_list(self):
        """Return a list of undisordered residues.

        DisorderedResidue objects (disorder level 2) hide several residues;
        this method expands them into their constituent simple Residue
        objects.
        """
        unpacked = []
        for residue in self.get_list():
            if residue.is_disordered() == 2:
                unpacked.extend(residue.disordered_get_list())
            else:
                unpacked.append(residue)
        return unpacked

    def has_id(self, id):
        """Return 1 if a residue with the given id is present.

        Arguments:
         - id - (string, int, string) or int
        """
        return Entity.has_id(self, self._translate_id(id))

    # Public

    def get_residues(self):
        """Iterate over the residues of this chain."""
        for residue in self:
            yield residue

    def get_atoms(self):
        """Iterate over all atoms of all residues of this chain."""
        for residue in self:
            for atom in residue:
                yield atom
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/PDB/Chain.py
|
Python
|
gpl-2.0
| 4,035
|
[
"Biopython"
] |
cf09026171bcadca34bb9208851d038f141840027aab737944288f6ab750c9dd
|
# Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
""" Methods for common attributes. """
import six
from django.utils.encoding import python_2_unicode_compatible
import tldap
import tldap.methods.models
import tldap.ldap_passwd as ldap_passwd
import datetime
@python_2_unicode_compatible
class personMixin(object):
    """Mixin providing person-level behaviour for LDAP account objects."""

    @classmethod
    def __str__(cls, self):
        return six.u("P:%s") % (self.displayName or self.cn)

    @classmethod
    def check_password(cls, self, password):
        # Verify the password by binding against the LDAP server the
        # object came from.
        connection = tldap.connections[self._alias]
        return connection.check_password(self.dn, password)

    @classmethod
    def pre_save(cls, self):
        # Keep displayName and cn in sync with the given/surname pair.
        full_name = '%s %s' % (self.givenName, self.sn)
        self.displayName = full_name
        self.cn = full_name

    @classmethod
    def change_password(cls, self, password):
        self.userPassword = ldap_passwd.encode_password(password)
@python_2_unicode_compatible
class accountMixin(object):
    """Mixin implementing posixAccount behaviour (uidNumber allocation,
    home directory defaults, shell locking)."""

    @classmethod
    def set_free_uidNumber(cls, self):
        """Allocate the next unused uidNumber via the Counters model."""
        using = self._alias
        model = self.__class__
        settings = self._settings
        scheme = settings.get('NUMBER_SCHEME', using)
        first = settings.get('UID_FIRST', 10000)

        def _is_free(candidate):
            # A candidate is acceptable when no existing entry uses it.
            return len(model.objects.using(using, settings)
                       .filter(uidNumber=candidate)) == 0

        self.uidNumber = tldap.methods.models.Counters.get_and_increment(
            scheme, "uidNumber", first, _is_free)

    @classmethod
    def __str__(cls, self):
        return six.u("%s") % (self.displayName or self.cn)

    @classmethod
    def setup_from_master(cls, self, master):
        self.uidNumber = master.uidNumber

    @classmethod
    def pre_add(cls, self):
        # Fill in sensible defaults for anything the caller left unset.
        if self.loginShell is None:
            self.loginShell = '/bin/bash'
        if self.uidNumber is None:
            cls.set_free_uidNumber(self)
        if self.unixHomeDirectory is None and self.uid is not None:
            self.unixHomeDirectory = '/home/%s' % self.uid

    @classmethod
    def pre_save(cls, self):
        self.gecos = '%s %s' % (self.givenName, self.sn)

    @classmethod
    def pre_delete(cls, self):
        self.manager_of.clear()

    @classmethod
    def lock(cls, self):
        # Locking prefixes the shell with /locked; idempotent.
        shell = self.loginShell
        if shell is not None and not shell.startswith("/locked"):
            self.loginShell = '/locked' + shell

    @classmethod
    def unlock(cls, self):
        # Strip the 7-character "/locked" prefix if present.
        shell = self.loginShell
        if shell is not None and shell.startswith("/locked"):
            self.loginShell = shell[7:]
class shadowMixin(object):
    """Mixin maintaining shadowAccount bookkeeping attributes."""

    @classmethod
    def change_password(cls, self, password):
        # Only record *when* the password changed; the password value
        # itself is handled by the other mixins.
        today = datetime.datetime.now().date()
        self.shadowLastChange = today
@python_2_unicode_compatible
class groupMixin(object):
    """Mixin implementing posixGroup behaviour (gidNumber allocation,
    description defaults)."""
    # Note standard posixGroup objectClass has no displayName attribute

    @classmethod
    def set_free_gidNumber(cls, self):
        """Allocate the next unused gidNumber via the Counters model."""
        using = self._alias
        model = self.__class__
        settings = self._settings
        scheme = settings.get('NUMBER_SCHEME', using)
        first = settings.get('GID_FIRST', 10000)

        def _is_free(candidate):
            # A candidate is acceptable when no existing group uses it.
            return len(model.objects.using(using, settings)
                       .filter(gidNumber=candidate)) == 0

        self.gidNumber = tldap.methods.models.Counters.get_and_increment(
            scheme, "gidNumber", first, _is_free)

    @classmethod
    def __str__(cls, self):
        return six.u("%s") % self.cn

    @classmethod
    def setup_from_master(cls, self, master):
        self.gidNumber = master.gidNumber

    @classmethod
    def pre_add(cls, self):
        if self.gidNumber is None:
            cls.set_free_gidNumber(self)

    @classmethod
    def pre_save(cls, self):
        # Fall back to the common name when no description was provided.
        if self.description is None:
            self.description = self.cn
|
brianmay/python-tldap-debian
|
tldap/methods/common.py
|
Python
|
gpl-3.0
| 4,413
|
[
"Brian"
] |
88a194d37a7716f0e1c6038087c2dc35906050ea1d7b5b36f9d854e4b38896c4
|
import time, math, unicodedata
from zope.interface import implements
from twisted.internet import defer
from foolscap.api import fireEventually
import simplejson
from allmydata.mutable.common import NotWriteableError
from allmydata.mutable.filenode import MutableFileNode
from allmydata.unknown import UnknownNode, strip_prefix_for_ro
from allmydata.interfaces import IFilesystemNode, IDirectoryNode, IFileNode, \
IImmutableFileNode, IMutableFileNode, \
ExistingChildError, NoSuchChildError, ICheckable, IDeepCheckable, \
MustBeDeepImmutableError, CapConstraintError, ChildOfWrongTypeError
from allmydata.check_results import DeepCheckResults, \
DeepCheckAndRepairResults
from allmydata.monitor import Monitor
from allmydata.util import hashutil, mathutil, base32, log
from allmydata.util.encodingutil import quote_output
from allmydata.util.assertutil import precondition
from allmydata.util.netstring import netstring, split_netstring
from allmydata.util.consumer import download_to_data
from allmydata.uri import LiteralFileURI, from_string, wrap_dirnode_cap
from pycryptopp.cipher.aes import AES
from allmydata.util.dictutil import AuxValueDict
def update_metadata(metadata, new_metadata, now):
    """Updates 'metadata' in-place with the information in 'new_metadata'.

    Timestamps in the 'tahoe' sub-dict are set according to the time 'now':
    'linkmotime' is always refreshed, and 'linkcrtime' is set on first use
    (falling back to a legacy top-level 'ctime' if one was present).
    """
    if metadata is None:
        metadata = {}

    # Remember the pre-1.4.0 'ctime' value (if any) before it may be
    # discarded by the overwrite below.
    old_ctime = metadata.get('ctime')

    if new_metadata is not None:
        # Replace everything with the caller's metadata, except the
        # system-managed 'tahoe' sub-dict, which is always preserved.
        newmd = new_metadata.copy()
        newmd.pop('tahoe', None)
        if 'tahoe' in metadata:
            newmd['tahoe'] = metadata['tahoe']
        metadata = newmd

    # Refresh timestamps.
    sysmd = metadata.get('tahoe', {})
    if 'linkcrtime' not in sysmd:
        # In Tahoe < 1.4.0 the word 'ctime' meant what Tahoe >= 1.4.0
        # calls 'linkcrtime'. That legacy field is only honoured when
        # 'tahoe:linkcrtime' is absent.
        sysmd['linkcrtime'] = old_ctime if old_ctime is not None else now
    sysmd['linkmotime'] = now
    metadata['tahoe'] = sysmd

    return metadata
# 'x' at the end of a variable name indicates that it holds a Unicode string that may not
# be NFC-normalized.
def normalize(namex):
    """Return the NFC-normalized form of the possibly-unnormalized *namex*."""
    return unicodedata.normalize('NFC', namex)
# TODO: {Deleter,MetadataSetter,Adder}.modify all start by unpacking the
# contents and end by repacking them. It might be better to apply them to
# the unpacked contents.
class Deleter:
    """Modifier removing one named child from a dirnode's packed contents."""

    def __init__(self, node, namex, must_exist=True, must_be_directory=False, must_be_file=False):
        self.node = node
        # Child names may arrive un-normalized; store the NFC form.
        self.name = normalize(namex)
        self.must_exist = must_exist
        self.must_be_directory = must_be_directory
        self.must_be_file = must_be_file

    def modify(self, old_contents, servermap, first_time):
        children = self.node._unpack_contents(old_contents)
        name = self.name
        if name not in children:
            if first_time and self.must_exist:
                raise NoSuchChildError(name)
            self.old_child = None
            return None

        self.old_child, _metadata = children[name]
        # Unknown children can be removed regardless of must_be_directory
        # or must_be_file.
        if self.must_be_directory and IFileNode.providedBy(self.old_child):
            raise ChildOfWrongTypeError("delete required a directory, not a file")
        if self.must_be_file and IDirectoryNode.providedBy(self.old_child):
            raise ChildOfWrongTypeError("delete required a file, not a directory")

        del children[name]
        return self.node._pack_contents(children)
class MetadataSetter:
    """Modifier updating the metadata of one named child in a dirnode."""

    def __init__(self, node, namex, metadata, create_readonly_node=None):
        self.node = node
        # Child names may arrive un-normalized; store the NFC form.
        self.name = normalize(namex)
        self.metadata = metadata
        self.create_readonly_node = create_readonly_node

    def modify(self, old_contents, servermap, first_time):
        children = self.node._unpack_contents(old_contents)
        name = self.name
        if name not in children:
            raise NoSuchChildError(name)

        child, old_metadata = children[name]
        merged = update_metadata(old_metadata.copy(), self.metadata, time.time())
        # Honour a 'no-write' flag by downgrading the child to read-only.
        if self.create_readonly_node and merged.get('no-write', False):
            child = self.create_readonly_node(child, name)
        children[name] = (child, merged)

        return self.node._pack_contents(children)
class Adder:
    """Modifier adding (or overwriting) child entries in a dirnode."""

    def __init__(self, node, entries=None, overwrite=True, create_readonly_node=None):
        self.node = node
        if entries is None:
            entries = {}
        precondition(isinstance(entries, dict), entries)
        # keys of 'entries' may not be normalized.
        self.entries = entries
        self.overwrite = overwrite
        self.create_readonly_node = create_readonly_node

    def set_node(self, namex, node, metadata):
        precondition(IFilesystemNode.providedBy(node), node)
        self.entries[namex] = (node, metadata)

    def modify(self, old_contents, servermap, first_time):
        children = self.node._unpack_contents(old_contents)
        now = time.time()
        for (namex, (child, new_metadata)) in self.entries.iteritems():
            name = normalize(namex)
            precondition(IFilesystemNode.providedBy(child), child)

            # Strictly speaking this is redundant because we would raise
            # the error again in _pack_normalized_children.
            child.raise_error()

            metadata = None
            if name in children:
                if not self.overwrite:
                    raise ExistingChildError("child %s already exists" % quote_output(name, encoding='utf-8'))
                # 'only-files' overwrite mode refuses to clobber directories.
                if self.overwrite == "only-files" and IDirectoryNode.providedBy(children[name][0]):
                    raise ExistingChildError("child %s already exists" % quote_output(name, encoding='utf-8'))
                metadata = children[name][1].copy()

            metadata = update_metadata(metadata, new_metadata, now)
            if self.create_readonly_node and metadata.get('no-write', False):
                child = self.create_readonly_node(child, name)
            children[name] = (child, metadata)

        return self.node._pack_contents(children)
def _encrypt_rw_uri(writekey, rw_uri):
    """Superencrypt a child's write-cap with the parent directory's writekey.

    Result layout: 16-byte salt || AES-CTR ciphertext || 32-byte MAC.
    """
    precondition(isinstance(rw_uri, str), rw_uri)
    precondition(isinstance(writekey, str), writekey)

    salt = hashutil.mutable_rwcap_salt_hash(rw_uri)
    key = hashutil.mutable_rwcap_key_hash(salt, writekey)
    crypttext = AES(key).process(rw_uri)
    mac = hashutil.hmac(key, salt + crypttext)
    assert len(mac) == 32
    return salt + crypttext + mac
# The MAC produced by _encrypt_rw_uri above is not checked by readers in
# Tahoe >= 1.3.0, but we still produce it for the sake of older readers.
def pack_children(childrenx, writekey, deep_immutable=False):
    """NFC-normalize every child name, then serialize the children dict.

    Every child must carry a metadata dict ({} rather than None);
    violations raise via precondition().
    """
    normalized = {}
    for (namex, (node, metadata)) in childrenx.iteritems():
        precondition(isinstance(metadata, dict),
                     "directory creation requires metadata to be a dict, not None", metadata)
        normalized[normalize(namex)] = (node, metadata)
    return _pack_normalized_children(normalized, writekey=writekey,
                                     deep_immutable=deep_immutable)
# Pre-computed netstring of the empty string: used as the writecap slot
# when no writekey is supplied (read-only / immutable packing).
ZERO_LEN_NETSTR=netstring('')
def _pack_normalized_children(children, writekey, deep_immutable=False):
    """Take a dict that maps:
         children[unicode_nfc_name] = (IFileSystemNode, metadata_dict)
    and pack it into a single string, for use as the contents of the backing
    file. This is the same format as is returned by _unpack_contents. I also
    accept an AuxValueDict, in which case I'll use the auxilliary cached data
    as the pre-packed entry, which is faster than re-packing everything each
    time.

    If writekey is provided then I will superencrypt the child's writecap with
    writekey.

    If deep_immutable is True, I will require that all my children are deeply
    immutable, and will raise a MustBeDeepImmutableError if not.
    """
    precondition((writekey is None) or isinstance(writekey, str), writekey)

    has_aux = isinstance(children, AuxValueDict)
    entries = []
    # Iterate in sorted-name order so serialization is deterministic.
    for name in sorted(children.keys()):
        assert isinstance(name, unicode)
        entry = None
        (child, metadata) = children[name]
        child.raise_error()
        if deep_immutable and not child.is_allowed_in_immutable_directory():
            raise MustBeDeepImmutableError("child %s is not allowed in an immutable directory" %
                                           quote_output(name, encoding='utf-8'), name)
        if has_aux:
            # Reuse the cached pre-packed entry when the caller gave us one.
            entry = children.get_aux(name)
        if not entry:
            assert IFilesystemNode.providedBy(child), (name,child)
            assert isinstance(metadata, dict)
            rw_uri = child.get_write_uri()
            if rw_uri is None:
                rw_uri = ""
            assert isinstance(rw_uri, str), rw_uri

            # should be prevented by MustBeDeepImmutableError check above
            assert not (rw_uri and deep_immutable)

            ro_uri = child.get_readonly_uri()
            if ro_uri is None:
                ro_uri = ""
            assert isinstance(ro_uri, str), ro_uri
            if writekey is not None:
                writecap = netstring(_encrypt_rw_uri(writekey, rw_uri))
            else:
                writecap = ZERO_LEN_NETSTR
            # Entry layout: name, ro_uri, (encrypted) writecap, JSON metadata,
            # each wrapped in a netstring.
            entry = "".join([netstring(name.encode("utf-8")),
                             netstring(strip_prefix_for_ro(ro_uri, deep_immutable)),
                             writecap,
                             netstring(simplejson.dumps(metadata))])
        entries.append(netstring(entry))
    return "".join(entries)
class DirectoryNode:
    """A Tahoe directory: wraps a backing filenode (mutable or immutable)
    whose contents are the serialized children table.

    All child-name parameters spelled 'namex' are possibly-unnormalized
    unicode names; they are NFC-normalized on the way in (see the note in
    _unpack_contents)."""
    implements(IDirectoryNode, ICheckable, IDeepCheckable)
    filenode_class = MutableFileNode

    def __init__(self, filenode, nodemaker, uploader):
        assert IFileNode.providedBy(filenode), filenode
        assert not IDirectoryNode.providedBy(filenode), filenode
        self._node = filenode
        filenode_cap = filenode.get_cap()
        self._uri = wrap_dirnode_cap(filenode_cap)
        self._nodemaker = nodemaker
        self._uploader = uploader

    def __repr__(self):
        return "<%s %s-%s %s>" % (self.__class__.__name__,
                                  self.is_readonly() and "RO" or "RW",
                                  self.is_mutable() and "MUT" or "IMM",
                                  hasattr(self, '_uri') and self._uri.abbrev())

    def get_size(self):
        """Return the size of our backing mutable file, in bytes, if we've
        fetched it. Otherwise return None. This returns synchronously."""
        return self._node.get_size()

    def get_current_size(self):
        """Calculate the size of our backing mutable file, in bytes. Returns
        a Deferred that fires with the result."""
        return self._node.get_current_size()

    def _read(self):
        # Fetch the backing file and unpack it into a children dict.
        if self._node.is_mutable():
            # use the IMutableFileNode API.
            d = self._node.download_best_version()
        else:
            d = download_to_data(self._node)
        d.addCallback(self._unpack_contents)
        return d

    def _decrypt_rwcapdata(self, encwrcap):
        # Inverse of _encrypt_rw_uri: 16-byte salt || ciphertext || 32-byte
        # MAC (the MAC is not verified here; see the note near
        # _encrypt_rw_uri).
        salt = encwrcap[:16]
        crypttext = encwrcap[16:-32]
        key = hashutil.mutable_rwcap_key_hash(salt, self._node.get_writekey())
        cryptor = AES(key)
        plaintext = cryptor.process(crypttext)
        return plaintext

    def _create_and_validate_node(self, rw_uri, ro_uri, name):
        # name is just for error reporting
        node = self._nodemaker.create_from_cap(rw_uri, ro_uri,
                                               deep_immutable=not self.is_mutable(),
                                               name=name)
        node.raise_error()
        return node

    def _create_readonly_node(self, node, name):
        # name is just for error reporting
        if not node.is_unknown() and node.is_readonly():
            return node
        return self._create_and_validate_node(None, node.get_readonly_uri(), name=name)

    def _unpack_contents(self, data):
        # the directory is serialized as a list of netstrings, one per child.
        # Each child is serialized as a list of four netstrings: (name, ro_uri,
        # rwcapdata, metadata), in which the name, ro_uri, metadata are in
        # cleartext. The 'name' is UTF-8 encoded, and should be normalized to NFC.
        # The rwcapdata is formatted as:
        #  pack("16ss32s", iv, AES(H(writekey+iv), plaintext_rw_uri), mac)
        assert isinstance(data, str), (repr(data), type(data))
        # an empty directory is serialized as an empty string
        if data == "":
            return AuxValueDict()
        writeable = not self.is_readonly()
        mutable = self.is_mutable()
        children = AuxValueDict()
        position = 0
        while position < len(data):
            entries, position = split_netstring(data, 1, position)
            entry = entries[0]
            (namex_utf8, ro_uri, rwcapdata, metadata_s), subpos = split_netstring(entry, 4)
            if not mutable and len(rwcapdata) > 0:
                raise ValueError("the rwcapdata field of a dirnode in an immutable directory was not empty")
            # A name containing characters that are unassigned in one version of Unicode might
            # not be normalized wrt a later version. See the note in section 'Normalization Stability'
            # at <http://unicode.org/policies/stability_policy.html>.
            # Therefore we normalize names going both in and out of directories.
            name = normalize(namex_utf8.decode("utf-8"))
            rw_uri = ""
            if writeable:
                rw_uri = self._decrypt_rwcapdata(rwcapdata)
            # Since the encryption uses CTR mode, it currently leaks the length of the
            # plaintext rw_uri -- and therefore whether it is present, i.e. whether the
            # dirnode is writeable (ticket #925). By stripping trailing spaces in
            # Tahoe >= 1.6.0, we may make it easier for future versions to plug this leak.
            # ro_uri is treated in the same way for consistency.
            # rw_uri and ro_uri will be either None or a non-empty string.
            rw_uri = rw_uri.rstrip(' ') or None
            ro_uri = ro_uri.rstrip(' ') or None
            try:
                child = self._create_and_validate_node(rw_uri, ro_uri, name)
                if mutable or child.is_allowed_in_immutable_directory():
                    metadata = simplejson.loads(metadata_s)
                    assert isinstance(metadata, dict)
                    children[name] = (child, metadata)
                    # cache the raw entry so re-packing can skip serialization
                    children.set_with_aux(name, (child, metadata), auxilliary=entry)
                else:
                    log.msg(format="mutable cap for child %(name)s unpacked from an immutable directory",
                            name=quote_output(name, encoding='utf-8'),
                            facility="tahoe.webish", level=log.UNUSUAL)
            except CapConstraintError, e:
                # skip the offending child but keep unpacking the rest
                log.msg(format="unmet constraint on cap for child %(name)s unpacked from a directory:\n"
                               "%(message)s", message=e.args[0], name=quote_output(name, encoding='utf-8'),
                        facility="tahoe.webish", level=log.UNUSUAL)
        return children

    def _pack_contents(self, children):
        # expects children in the same format as _unpack_contents returns
        return _pack_normalized_children(children, self._node.get_writekey())

    def is_readonly(self):
        return self._node.is_readonly()

    def is_mutable(self):
        return self._node.is_mutable()

    def is_unknown(self):
        return False

    def is_allowed_in_immutable_directory(self):
        return not self._node.is_mutable()

    def raise_error(self):
        pass

    def get_uri(self):
        return self._uri.to_string()

    def get_write_uri(self):
        if self.is_readonly():
            return None
        return self._uri.to_string()

    def get_readonly_uri(self):
        return self._uri.get_readonly().to_string()

    def get_cap(self):
        return self._uri

    def get_readcap(self):
        return self._uri.get_readonly()

    def get_verify_cap(self):
        return self._uri.get_verify_cap()

    def get_repair_cap(self):
        if self._node.is_readonly():
            return None # readonly (mutable) dirnodes are not yet repairable
        return self._uri

    def get_storage_index(self):
        return self._uri.get_storage_index()

    def check(self, monitor, verify=False, add_lease=False):
        """Perform a file check. See IChecker.check for details."""
        return self._node.check(monitor, verify, add_lease)

    def check_and_repair(self, monitor, verify=False, add_lease=False):
        return self._node.check_and_repair(monitor, verify, add_lease)

    def list(self):
        """I return a Deferred that fires with a dictionary mapping child
        name to a tuple of (IFilesystemNode, metadata)."""
        return self._read()

    def has_child(self, namex):
        """I return a Deferred that fires with a boolean, True if there
        exists a child of the given name, False if not."""
        name = normalize(namex)
        d = self._read()
        d.addCallback(lambda children: children.has_key(name))
        return d

    def _get(self, children, name):
        # callback helper: extract the node for 'name' or raise
        child = children.get(name)
        if child is None:
            raise NoSuchChildError(name)
        return child[0]

    def _get_with_metadata(self, children, name):
        # callback helper: extract the (node, metadata) pair or raise
        child = children.get(name)
        if child is None:
            raise NoSuchChildError(name)
        return child

    def get(self, namex):
        """I return a Deferred that fires with the named child node,
        which is an IFilesystemNode."""
        name = normalize(namex)
        d = self._read()
        d.addCallback(self._get, name)
        return d

    def get_child_and_metadata(self, namex):
        """I return a Deferred that fires with the (node, metadata) pair for
        the named child. The node is an IFilesystemNode, and the metadata
        is a dictionary."""
        name = normalize(namex)
        d = self._read()
        d.addCallback(self._get_with_metadata, name)
        return d

    def get_metadata_for(self, namex):
        name = normalize(namex)
        d = self._read()
        d.addCallback(lambda children: children[name][1])
        return d

    def set_metadata_for(self, namex, metadata):
        name = normalize(namex)
        if self.is_readonly():
            return defer.fail(NotWriteableError())
        assert isinstance(metadata, dict)
        s = MetadataSetter(self, name, metadata,
                           create_readonly_node=self._create_readonly_node)
        d = self._node.modify(s.modify)
        d.addCallback(lambda res: self)
        return d

    def get_child_at_path(self, pathx):
        """Transform a child path into an IFilesystemNode.

        I perform a recursive series of 'get' operations to find the named
        descendant node. I return a Deferred that fires with the node, or
        errbacks with IndexError if the node could not be found.

        The path can be either a single string (slash-separated) or a list of
        path-name elements.
        """
        d = self.get_child_and_metadata_at_path(pathx)
        d.addCallback(lambda (node, metadata): node)
        return d

    def get_child_and_metadata_at_path(self, pathx):
        """Transform a child path into an IFilesystemNode and
        a metadata dictionary from the last edge that was traversed.
        """
        if not pathx:
            return defer.succeed((self, {}))
        if isinstance(pathx, (list, tuple)):
            pass
        else:
            pathx = pathx.split("/")
        for p in pathx:
            assert isinstance(p, unicode), p
        childnamex = pathx[0]
        remaining_pathx = pathx[1:]
        if remaining_pathx:
            # recurse into the child directory for the rest of the path
            d = self.get(childnamex)
            d.addCallback(lambda node:
                          node.get_child_and_metadata_at_path(remaining_pathx))
            return d
        d = self.get_child_and_metadata(childnamex)
        return d

    def set_uri(self, namex, writecap, readcap, metadata=None, overwrite=True):
        precondition(isinstance(writecap, (str,type(None))), writecap)
        precondition(isinstance(readcap, (str,type(None))), readcap)

        # We now allow packing unknown nodes, provided they are valid
        # for this type of directory.
        child_node = self._create_and_validate_node(writecap, readcap, namex)
        d = self.set_node(namex, child_node, metadata, overwrite)
        d.addCallback(lambda res: child_node)
        return d

    def set_children(self, entries, overwrite=True):
        # this takes URIs
        a = Adder(self, overwrite=overwrite,
                  create_readonly_node=self._create_readonly_node)
        for (namex, e) in entries.iteritems():
            assert isinstance(namex, unicode), namex
            # entries may be 2-tuples (writecap, readcap) or 3-tuples with
            # metadata appended
            if len(e) == 2:
                writecap, readcap = e
                metadata = None
            else:
                assert len(e) == 3
                writecap, readcap, metadata = e
            precondition(isinstance(writecap, (str,type(None))), writecap)
            precondition(isinstance(readcap, (str,type(None))), readcap)

            # We now allow packing unknown nodes, provided they are valid
            # for this type of directory.
            child_node = self._create_and_validate_node(writecap, readcap, namex)
            a.set_node(namex, child_node, metadata)
        d = self._node.modify(a.modify)
        d.addCallback(lambda ign: self)
        return d

    def set_node(self, namex, child, metadata=None, overwrite=True):
        """I add a child at the specific name. I return a Deferred that fires
        when the operation finishes. This Deferred will fire with the child
        node that was just added. I will replace any existing child of the
        same name.

        If this directory node is read-only, the Deferred will errback with a
        NotWriteableError."""
        precondition(IFilesystemNode.providedBy(child), child)
        if self.is_readonly():
            return defer.fail(NotWriteableError())
        assert IFilesystemNode.providedBy(child), child
        a = Adder(self, overwrite=overwrite,
                  create_readonly_node=self._create_readonly_node)
        a.set_node(namex, child, metadata)
        d = self._node.modify(a.modify)
        d.addCallback(lambda res: child)
        return d

    def set_nodes(self, entries, overwrite=True):
        precondition(isinstance(entries, dict), entries)
        if self.is_readonly():
            return defer.fail(NotWriteableError())
        a = Adder(self, entries, overwrite=overwrite,
                  create_readonly_node=self._create_readonly_node)
        d = self._node.modify(a.modify)
        d.addCallback(lambda res: self)
        return d

    def add_file(self, namex, uploadable, metadata=None, overwrite=True):
        """I upload a file (using the given IUploadable), then attach the
        resulting FileNode to the directory at the given name. I return a
        Deferred that fires (with the IFileNode of the uploaded file) when
        the operation completes."""
        name = normalize(namex)
        if self.is_readonly():
            return defer.fail(NotWriteableError())
        d = self._uploader.upload(uploadable)
        d.addCallback(lambda results:
                      self._create_and_validate_node(results.uri, None, name))
        d.addCallback(lambda node:
                      self.set_node(name, node, metadata, overwrite))
        return d

    def delete(self, namex, must_exist=True, must_be_directory=False, must_be_file=False):
        """I remove the child at the specific name. I return a Deferred that
        fires (with the node just removed) when the operation finishes."""
        if self.is_readonly():
            return defer.fail(NotWriteableError())
        deleter = Deleter(self, namex, must_exist=must_exist,
                          must_be_directory=must_be_directory, must_be_file=must_be_file)
        d = self._node.modify(deleter.modify)
        d.addCallback(lambda res: deleter.old_child)
        return d

    def create_subdirectory(self, namex, initial_children={}, overwrite=True,
                            mutable=True, metadata=None):
        # Create a (mutable or immutable) child directory, then link it in
        # under 'namex'. Fires with the new child dirnode.
        name = normalize(namex)
        if self.is_readonly():
            return defer.fail(NotWriteableError())
        if mutable:
            d = self._nodemaker.create_new_mutable_directory(initial_children)
        else:
            d = self._nodemaker.create_immutable_directory(initial_children)
        def _created(child):
            entries = {name: (child, metadata)}
            a = Adder(self, entries, overwrite=overwrite,
                      create_readonly_node=self._create_readonly_node)
            d = self._node.modify(a.modify)
            d.addCallback(lambda res: child)
            return d
        d.addCallback(_created)
        return d

    def move_child_to(self, current_child_namex, new_parent,
                      new_child_namex=None, overwrite=True):
        """I take one of my children and move them to a new parent. The child
        is referenced by name. On the new parent, the child will live under
        'new_child_name', which defaults to 'current_child_name'. I return a
        Deferred that fires when the operation finishes."""
        if self.is_readonly() or new_parent.is_readonly():
            return defer.fail(NotWriteableError())

        current_child_name = normalize(current_child_namex)
        if new_child_namex is None:
            new_child_namex = current_child_name
        d = self.get(current_child_name)
        def sn(child):
            return new_parent.set_node(new_child_namex, child,
                                       overwrite=overwrite)
        d.addCallback(sn)
        # link into the new parent first, then unlink from this one
        d.addCallback(lambda child: self.delete(current_child_name))
        return d

    def deep_traverse(self, walker):
        """Perform a recursive walk, using this dirnode as a root, notifying
        the 'walker' instance of everything I encounter.

        I call walker.enter_directory(parent, children) once for each dirnode
        I visit, immediately after retrieving the list of children. I pass in
        the parent dirnode and the dict of childname->(childnode,metadata).
        This function should *not* traverse the children: I will do that.
        enter_directory() is most useful for the deep-stats number that
        counts how large a directory is.

        I call walker.add_node(node, path) for each node (both files and
        directories) I can reach. Most work should be done here.

        I avoid loops by keeping track of verifier-caps and refusing to call
        walker.add_node() or traverse a node that I've seen before. This
        means that any file or directory will only be given to the walker
        once. If files or directories are referenced multiple times by a
        directory structure, this may appear to under-count or miss some of
        them.

        I return a Monitor which can be used to wait for the operation to
        finish, learn about its progress, or cancel the operation.
        """
        # this is just a tree-walker, except that following each edge
        # requires a Deferred. We used to use a ConcurrencyLimiter to limit
        # fanout to 10 simultaneous operations, but the memory load of the
        # queued operations was excessive (in one case, with 330k dirnodes,
        # it caused the process to run into the 3.0GB-ish per-process 32bit
        # linux memory limit, and crashed). So we use a single big Deferred
        # chain, and do a strict depth-first traversal, one node at a time.
        # This can be slower, because we aren't pipelining directory reads,
        # but it brought the memory footprint down by roughly 50%.
        monitor = Monitor()
        walker.set_monitor(monitor)
        found = set([self.get_verify_cap()])
        d = self._deep_traverse_dirnode(self, [], walker, monitor, found)
        d.addCallback(lambda ignored: walker.finish())
        d.addBoth(monitor.finish)
        d.addErrback(lambda f: None)
        return monitor

    def _deep_traverse_dirnode(self, node, path, walker, monitor, found):
        # process this directory, then walk its children
        monitor.raise_if_cancelled()
        d = defer.maybeDeferred(walker.add_node, node, path)
        d.addCallback(lambda ignored: node.list())
        d.addCallback(self._deep_traverse_dirnode_children, node, path,
                      walker, monitor, found)
        return d

    def _deep_traverse_dirnode_children(self, children, parent, path,
                                        walker, monitor, found):
        monitor.raise_if_cancelled()
        d = defer.maybeDeferred(walker.enter_directory, parent, children)
        # we process file-like children first, so we can drop their FileNode
        # objects as quickly as possible. Tests suggest that a FileNode (held
        # in the client's nodecache) consumes about 2440 bytes. dirnodes (not
        # in the nodecache) seem to consume about 2000 bytes.
        dirkids = []
        filekids = []
        for name, (child, metadata) in sorted(children.iteritems()):
            childpath = path + [name]
            if isinstance(child, UnknownNode):
                walker.add_node(child, childpath)
                continue
            verifier = child.get_verify_cap()
            # allow LIT files (for which verifier==None) to be processed
            if (verifier is not None) and (verifier in found):
                continue
            found.add(verifier)
            if IDirectoryNode.providedBy(child):
                dirkids.append( (child, childpath) )
            else:
                filekids.append( (child, childpath) )
        for i, (child, childpath) in enumerate(filekids):
            d.addCallback(lambda ignored, child=child, childpath=childpath:
                          walker.add_node(child, childpath))

            # to work around the Deferred tail-recursion problem
            # (specifically the defer.succeed flavor) requires us to avoid
            # doing more than 158 LIT files in a row. We insert a turn break
            # once every 100 files (LIT or CHK) to preserve some stack space
            # for other code. This is a different expression of the same
            # Twisted problem as in #237.
            if i % 100 == 99:
                d.addCallback(lambda ignored: fireEventually())
        for (child, childpath) in dirkids:
            d.addCallback(lambda ignored, child=child, childpath=childpath:
                          self._deep_traverse_dirnode(child, childpath,
                                                      walker, monitor,
                                                      found))
        return d

    def build_manifest(self):
        """Return a Monitor, with a ['status'] that will be a list of (path,
        cap) tuples, for all nodes (directories and files) reachable from
        this one."""
        walker = ManifestWalker(self)
        return self.deep_traverse(walker)

    def start_deep_stats(self):
        # Since deep_traverse tracks verifier caps, we avoid double-counting
        # children for which we've got both a write-cap and a read-cap
        return self.deep_traverse(DeepStats(self))

    def start_deep_check(self, verify=False, add_lease=False):
        return self.deep_traverse(DeepChecker(self, verify, repair=False, add_lease=add_lease))

    def start_deep_check_and_repair(self, verify=False, add_lease=False):
        return self.deep_traverse(DeepChecker(self, verify, repair=True, add_lease=add_lease))
class DeepStats:
    """Deep-traversal walker that accumulates per-node counters, sizes,
    and a file-size histogram. Used directly by start_deep_stats() and as
    a base/helper for the other walkers."""

    def __init__(self, origin):
        self.origin = origin
        self.stats = {}
        for k in ["count-immutable-files",
                  "count-mutable-files",
                  "count-literal-files",
                  "count-files",
                  "count-directories",
                  "count-unknown",
                  "size-immutable-files",
                  #"size-mutable-files",
                  "size-literal-files",
                  "size-directories",
                  "largest-directory",
                  "largest-directory-children",
                  "largest-immutable-file",
                  #"largest-mutable-file",
                  ]:
            self.stats[k] = 0
        self.histograms = {}
        for k in ["size-files-histogram"]:
            self.histograms[k] = {} # maps (min,max) to count
        # histogram buckets grow lazily in which_bucket(); two per decade
        self.buckets = [ (0,0), (1,3)]
        self.root = math.sqrt(10)

    def set_monitor(self, monitor):
        self.monitor = monitor
        monitor.origin_si = self.origin.get_storage_index()
        monitor.set_status(self.get_results())

    def add_node(self, node, childpath):
        # Classify the node and bump the matching counters.
        if isinstance(node, UnknownNode):
            self.add("count-unknown")
        elif IDirectoryNode.providedBy(node):
            self.add("count-directories")
        elif IMutableFileNode.providedBy(node):
            self.add("count-files")
            self.add("count-mutable-files")
            # TODO: update the servermap, compute a size, add it to
            # size-mutable-files, max it into "largest-mutable-file"
        elif IImmutableFileNode.providedBy(node): # CHK and LIT
            self.add("count-files")
            size = node.get_size()
            self.histogram("size-files-histogram", size)
            theuri = from_string(node.get_uri())
            if isinstance(theuri, LiteralFileURI):
                self.add("count-literal-files")
                self.add("size-literal-files", size)
            else:
                self.add("count-immutable-files")
                self.add("size-immutable-files", size)
                self.max("largest-immutable-file", size)

    def enter_directory(self, parent, children):
        # get_size() may return None when the backing file has not been
        # fetched yet; skip the size counters in that case.
        dirsize_bytes = parent.get_size()
        if dirsize_bytes is not None:
            self.add("size-directories", dirsize_bytes)
            self.max("largest-directory", dirsize_bytes)
        dirsize_children = len(children)
        self.max("largest-directory-children", dirsize_children)

    def add(self, key, value=1):
        self.stats[key] += value

    def max(self, key, value):
        self.stats[key] = max(self.stats[key], value)

    def which_bucket(self, size):
        # return (min,max) such that min <= size <= max
        # values are from the set (0,0), (1,3), (4,10), (11,31), (32,100),
        # (101,316), (317, 1000), etc: two per decade
        assert size >= 0
        i = 0
        while True:
            if i >= len(self.buckets):
                # extend the list
                new_lower = self.buckets[i-1][1]+1
                new_upper = int(mathutil.next_power_of_k(new_lower, self.root))
                self.buckets.append( (new_lower, new_upper) )
            maybe = self.buckets[i]
            if maybe[0] <= size <= maybe[1]:
                return maybe
            i += 1

    def histogram(self, key, size):
        bucket = self.which_bucket(size)
        h = self.histograms[key]
        if bucket not in h:
            h[bucket] = 0
        h[bucket] += 1

    def get_results(self):
        # Flatten each histogram into a sorted list of (min, max, count).
        stats = self.stats.copy()
        for key in self.histograms:
            h = self.histograms[key]
            out = [ (bucket[0], bucket[1], h[bucket]) for bucket in h ]
            out.sort()
            stats[key] = out
        return stats

    def finish(self):
        return self.get_results()
class ManifestWalker(DeepStats):
    """Deep-traversal walker that records a (path, cap) manifest for
    every reachable node, plus the sets of verify-caps and storage-index
    strings, on top of the usual DeepStats counters."""

    def __init__(self, origin):
        DeepStats.__init__(self, origin)
        self.manifest = []
        self.storage_index_strings = set()
        self.verifycaps = set()

    def add_node(self, node, path):
        self.manifest.append((tuple(path), node.get_uri()))
        si = node.get_storage_index()
        if si:
            self.storage_index_strings.add(base32.b2a(si))
        vc = node.get_verify_cap()
        if vc:
            self.verifycaps.add(vc.to_string())
        # fold the node into the DeepStats counters as well
        return DeepStats.add_node(self, node, path)

    def get_results(self):
        return {"manifest": self.manifest,
                "verifycaps": self.verifycaps,
                "storage-index": self.storage_index_strings,
                "stats": DeepStats.get_results(self),
                }
class DeepChecker:
    """Deep-traversal walker that checks (and optionally repairs) every
    reachable node, accumulating check results plus DeepStats."""

    def __init__(self, root, verify, repair, add_lease):
        root_si = root.get_storage_index()
        # LIT roots have no storage index; log an empty string for them
        root_si_base32 = base32.b2a(root_si) if root_si else ""
        self._lp = log.msg(format="deep-check starting (%(si)s),"
                           " verify=%(verify)s, repair=%(repair)s",
                           si=root_si_base32, verify=verify, repair=repair)
        self._verify = verify
        self._repair = repair
        self._add_lease = add_lease
        results_class = DeepCheckAndRepairResults if repair else DeepCheckResults
        self._results = results_class(root_si)
        self._stats = DeepStats(root)

    def set_monitor(self, monitor):
        self.monitor = monitor
        monitor.set_status(self._results)

    def add_node(self, node, childpath):
        # run either check-and-repair or a plain check, record the result,
        # then fold the node into the stats
        if self._repair:
            d = node.check_and_repair(self.monitor, self._verify, self._add_lease)
            record = self._results.add_check_and_repair
        else:
            d = node.check(self.monitor, self._verify, self._add_lease)
            record = self._results.add_check
        d.addCallback(record, childpath)
        d.addCallback(lambda ignored: self._stats.add_node(node, childpath))
        return d

    def enter_directory(self, parent, children):
        return self._stats.enter_directory(parent, children)

    def finish(self):
        log.msg("deep-check done", parent=self._lp)
        self._results.update_stats(self._stats.get_results())
        return self._results
# use client.create_dirnode() to make one of these
|
drewp/tahoe-lafs
|
src/allmydata/dirnode.py
|
Python
|
gpl-2.0
| 38,853
|
[
"VisIt"
] |
911d1fb4cb475b029772b9c80da232cb75b9f94a8ed82ecd2b021180d137c3e6
|
# Generated from MySQLParser.g4 by ANTLR 4.5.3
from antlr4 import *
class TableInfo:
    """Module-level registry filled in while visiting a parse tree.

    The class attributes are shared (class-level) state; call reset()
    before visiting a new statement to avoid leaking results between
    parses.
    """
    # maps table name -> alias ('' when the table has no alias)
    table_name_to_alias = {}
    # maps alias -> table name
    table_alias_to_name = {}
    # raw text of every element seen inside a JOIN ... ON expression
    on_conditions = []

    @staticmethod
    def reset():
        """Clear all accumulated state."""
        TableInfo.table_name_to_alias = {}
        TableInfo.table_alias_to_name = {}
        TableInfo.on_conditions = []


def get_table_alias_to_name():
    """Return the alias -> table-name mapping collected so far.

    Bug fix: this previously ended with a bare ``return`` and therefore
    always returned None, discarding the collected mapping.
    """
    return TableInfo.table_alias_to_name
# This class defines a complete generic visitor for a parse tree produced by MySQLParser.
class MySQLParserVisitor(ParseTreeVisitor):
# Visit a parse tree produced by MySQLParser#stat.
def visitStat(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#schema_name.
def visitSchema_name(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#select_clause.
def visitSelect_clause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#table_name.
def visitTable_name(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#table_alias.
def visitTable_alias(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#column_name.
def visitColumn_name(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#column_name_alias.
def visitColumn_name_alias(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#index_name.
def visitIndex_name(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#column_list.
def visitColumn_list(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#column_list_clause.
def visitColumn_list_clause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#from_clause.
def visitFrom_clause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#select_key.
def visitSelect_key(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#where_clause.
def visitWhere_clause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#expression.
def visitExpression(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#element.
def visitElement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#right_element.
def visitRight_element(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#left_element.
def visitLeft_element(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#target_element.
def visitTarget_element(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#relational_op.
def visitRelational_op(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#expr_op.
def visitExpr_op(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#between_op.
def visitBetween_op(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#is_or_is_not.
def visitIs_or_is_not(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#simple_expression.
def visitSimple_expression(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#table_references.
def visitTable_references(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#table_reference.
def visitTable_reference(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#table_factor1.
def visitTable_factor1(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#table_factor2.
def visitTable_factor2(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#table_factor3.
def visitTable_factor3(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#table_factor4.
def visitTable_factor4(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#tableAtomBasic.
def visitTableAtomBasic(self, ctx):
table_name, table_alias = '', ''
if ctx.table_name():
table_name = ctx.table_name().getText()
if ctx.table_alias():
table_alias = ctx.table_alias().getText()
if table_name:
TableInfo.table_name_to_alias[table_name] = table_alias
if table_alias:
TableInfo.table_alias_to_name[table_alias] = table_name
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#tableAtomSubquery.
def visitTableAtomSubquery(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#tableAtomParen.
def visitTableAtomParen(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#tableAtomComplex.
def visitTableAtomComplex(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#join_clause.
def visitJoin_clause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#joinOn.
def visitJoinOn(self, ctx):
if ctx.expression():
for sub0 in ctx.expression():
for sub1 in sub0.children:
TableInfo.on_conditions.append(sub1.getText())
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#joinColumns.
def visitJoinColumns(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#index_hint_list.
def visitIndex_hint_list(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#index_options.
def visitIndex_options(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#index_hint.
def visitIndex_hint(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#index_list.
def visitIndex_list(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#partition_clause.
def visitPartition_clause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#partition_names.
def visitPartition_names(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#partition_name.
def visitPartition_name(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#subquery_alias.
def visitSubquery_alias(self, ctx):
    """Visit a parse tree produced by MySQLParser#subquery_alias (default traversal)."""
    return self.visitChildren(ctx)
# Visit a parse tree produced by MySQLParser#subquery.
def visitSubquery(self, ctx):
    """Visit a parse tree produced by MySQLParser#subquery (default traversal)."""
    return self.visitChildren(ctx)
|
StefanLim0/mysql-er
|
sqlparser/antlr/MySQLParserVisitor.py
|
Python
|
mit
| 7,488
|
[
"VisIt"
] |
94eb41ec8a3609b7281a8318ddd492d6eece35015f61980e10a69d2867505ef0
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""Tableview"""
import logging
logger = logging.getLogger( 'camelot.view.controls.tableview' )
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QSizePolicy
from camelot.admin.action.list_action import ListActionGuiContext
from camelot.core.utils import variant_to_pyobject, ugettext as _
from camelot.view.proxy.queryproxy import QueryTableProxy
from camelot.view.controls.view import AbstractView
from camelot.view.controls.user_translatable_label import UserTranslatableLabel
from camelot.view.model_thread import post
from camelot.view.model_thread import object_thread
from camelot.view.model_thread import model_function
from camelot.view import register
from search import SimpleSearchControl
class ColumnGroupsWidget( QtGui.QTabBar ):
    """A tabbar the user can use to select a group of columns within an
    item view.

    :param table: a :class:`camelot.admin.table.Table` object, describing the
        column groups.
    :param table_widget: a :class:`QtGui.QTableView` widget of which columns will
        be hidden and shown depending on the selected tab.
    :param parent: a :class:`QtGui.QWidget`
    """

    def __init__( self, table, table_widget, parent = None ):
        from camelot.admin.table import ColumnGroup
        super( ColumnGroupsWidget, self ).__init__( parent )
        assert object_thread( self )
        self.setShape( QtGui.QTabBar.RoundedSouth )
        # maps each tab index to a (first, last) pair of column indices,
        # last being exclusive, of the columns belonging to that group
        self.groups = dict()
        self.table_widget = table_widget
        column_index = 0
        tab_index = 0
        for column in table.columns:
            if isinstance( column, ColumnGroup ):
                self.addTab( unicode( column.verbose_name ) )
                previous_column_index = column_index
                column_index = column_index + len( column.get_fields() )
                self.groups[ tab_index ] = ( previous_column_index,
                                             column_index )
                tab_index += 1
            else:
                # a plain column outside any group occupies one column slot
                column_index += 1
        self.currentChanged.connect( self._current_index_changed )

    @QtCore.pyqtSlot( QtCore.QModelIndex, int, int )
    def columns_changed( self, index, first_column, last_column ):
        """Slot for the model's column insert/remove signals: re-apply the
        visibility of the currently selected column group."""
        assert object_thread( self )
        self._current_index_changed( self.currentIndex() )

    @QtCore.pyqtSlot()
    def model_reset( self ):
        """Slot for model reset/layout-change signals: a reset makes all
        columns visible again, so re-apply the current group's visibility."""
        assert object_thread( self )
        self._current_index_changed( self.currentIndex() )

    @QtCore.pyqtSlot( int )
    def _current_index_changed( self, current_index ):
        # show only the columns of the selected group, hide all others
        assert object_thread( self )
        for tab_index, (first_column, last_column) in self.groups.items():
            for column_index in range( first_column, last_column ):
                self.table_widget.setColumnHidden( column_index,
                                                   tab_index != current_index )
class TableWidget( QtGui.QTableView ):
    """A widget displaying a table, to be used within a TableView. But it does
    not rely on the model being Camelot specific, or a Collection Proxy.

    .. attribute:: margin

        margin, specified as a number of pixels, used to calculate the height of a row
        in the table, the minimum row height will allow for this number of pixels below
        and above the text.

    :param lines_per_row: the number of lines of text that should be viewable in a single row.
    """

    margin = 5

    # emitted when the user presses Enter/Return while the table has focus
    keyboard_selection_signal = QtCore.pyqtSignal()

    def __init__( self, lines_per_row = 1, parent = None ):
        QtGui.QTableView.__init__( self, parent )
        logger.debug( 'create TableWidget' )
        assert object_thread( self )
        # maps logical column index to its new width; flushed to the model
        # in the next timerEvent
        self._columns_changed = dict()
        self.setSelectionBehavior( QtGui.QAbstractItemView.SelectRows )
        self.setEditTriggers( QtGui.QAbstractItemView.SelectedClicked |
                              QtGui.QAbstractItemView.DoubleClicked |
                              QtGui.QAbstractItemView.CurrentChanged )
        self.setSizePolicy( QSizePolicy.Expanding, QSizePolicy.Expanding )
        self.horizontalHeader().setClickable( True )
        self._header_font_required = QtGui.QApplication.font()
        self._header_font_required.setBold( True )
        # minimum row height fits lines_per_row text lines plus the margins
        line_height = QtGui.QFontMetrics(QtGui.QApplication.font()).lineSpacing()
        self._minimal_row_height = line_height * lines_per_row + 2*self.margin
        self.verticalHeader().setDefaultSectionSize( self._minimal_row_height )
        self.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
        self.setVerticalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
        self.horizontalHeader().sectionClicked.connect(
            self.horizontal_section_clicked )
        self.horizontalHeader().sectionResized.connect( self._save_section_width )

    def timerEvent( self, event ):
        """On timer event, save changed column widths to the model
        """
        assert object_thread( self )
        for logical_index, new_width in self._columns_changed.items():
            if self.horizontalHeader().isSectionHidden( logical_index ):
                # don't save the width of a hidden section, since this will
                # result in setting the width to 0
                continue
            old_size = variant_to_pyobject( self.model().headerData( logical_index,
                                                                     Qt.Horizontal,
                                                                     Qt.SizeHintRole ) )
            # when the size is different from the one from the model, the
            # user changed it
            if old_size.width() != new_width:
                new_size = QtCore.QSize( new_width, old_size.height() )
                self.model().setHeaderData( logical_index,
                                            Qt.Horizontal,
                                            new_size,
                                            Qt.SizeHintRole )
        self._columns_changed = dict()
        super( TableWidget, self ).timerEvent( event )

    @QtCore.pyqtSlot(int, int, int)
    def _save_section_width(self, logical_index, _old_size, new_width ):
        # instead of storing the width immediately, a timer is started to store
        # the width when all event processing is done. because at this time
        # we cannot yet determine if the section at logical_index is hidden
        # or not
        #
        # there is no need to start the timer, since this is done by the
        # QAbstractItemView itself for doing the layout, here we only store
        # which column needs to be saved.
        assert object_thread( self )
        self._columns_changed[ logical_index ] = new_width

    @QtCore.pyqtSlot( int )
    def horizontal_section_clicked( self, logical_index ):
        """Update the sorting of the model and the header"""
        assert object_thread( self )
        header = self.horizontalHeader()
        order = Qt.AscendingOrder
        if not header.isSortIndicatorShown():
            header.setSortIndicatorShown( True )
        elif header.sortIndicatorSection()==logical_index:
            # apparently, the sort order on the header is already switched
            # when the section was clicked, so there is no need to reverse it
            order = header.sortIndicatorOrder()
        header.setSortIndicator( logical_index, order )
        self.model().sort( logical_index, order )

    def close_editor(self):
        """Close the active editor, this method is used to prevent assertion
        failures in QT when an editor is still open in the view for a cell
        that no longer exists in the model

        those assertion failures only exist in QT debug builds.
        """
        assert object_thread( self )
        current_index = self.currentIndex()
        if not current_index:
            return
        self.closePersistentEditor( current_index )

    def setModel( self, model ):
        """Replace the model of the view, closing any open editor first.

        :param model: the new item model for this view
        """
        assert object_thread( self )
        #
        # An editor might be open that is no longer available for the new
        # model. Not closing this editor, results in assertion failures
        # in qt, resulting in segfaults in the debug build.
        #
        self.close_editor()
        #
        # Editor, closed. it should be safe to change the model
        #
        QtGui.QTableView.setModel( self, model )
        register.register( model, self )
        self.selectionModel().currentChanged.connect( self.activated )

    @QtCore.pyqtSlot(QtCore.QModelIndex, QtCore.QModelIndex)
    def activated( self, selectedIndex, previousSelectedIndex ):
        # grow the newly selected row to its delegate's size hint and shrink
        # the previously selected row back to the minimal height
        assert object_thread( self )
        option = QtGui.QStyleOptionViewItem()
        new_size = self.itemDelegate( selectedIndex ).sizeHint( option,
                                                                selectedIndex )
        row = selectedIndex.row()
        if previousSelectedIndex.row() >= 0:
            previous_row = previousSelectedIndex.row()
            self.setRowHeight( previous_row, self._minimal_row_height )
        self.setRowHeight( row, max( new_size.height(),
                                     self._minimal_row_height ) )

    def keyPressEvent(self, e):
        # Enter/Return triggers the keyboard selection signal instead of the
        # default QTableView handling
        assert object_thread( self )
        if self.hasFocus() and e.key() in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
            self.keyboard_selection_signal.emit()
        else:
            super(TableWidget, self).keyPressEvent(e)
class AdminTableWidget( QtGui.QWidget ):
    """A table widget that inspects the admin class and changes the behavior
    of the table as specified in the admin class"""

    def __init__(self, admin, parent=None):
        super( AdminTableWidget, self ).__init__( parent )
        assert object_thread( self )
        self._admin = admin
        table_widget = TableWidget( parent = self,
                                    lines_per_row = admin.lines_per_row )
        table_widget.setObjectName( 'table_widget' )
        column_groups = ColumnGroupsWidget( admin.get_table(), table_widget )
        column_groups.setObjectName( 'column_groups' )
        layout = QtGui.QVBoxLayout()
        layout.setSpacing( 0 )
        layout.setContentsMargins( 0, 0, 0, 0 )
        layout.addWidget( table_widget )
        layout.addWidget( column_groups )
        self.setLayout( layout )
        if admin.drop_action != None:
            # the admin defines a drop action, so enable drag and drop
            table_widget.setDragDropMode( QtGui.QAbstractItemView.DragDrop )
            table_widget.setDropIndicatorShown( True )

    def __getattr__( self, name ):
        # delegate unknown attribute access to the wrapped TableWidget
        # NOTE(review): returns None (instead of raising AttributeError)
        # when the child widget no longer exists
        table_widget = self.findChild( QtGui.QWidget, 'table_widget' )
        if table_widget != None:
            return getattr( table_widget, name )

    def setModel( self, model ):
        """Set the model on the wrapped TableWidget and wire the column-group
        tabbar to the model's change signals.

        :param model: the new item model
        """
        assert object_thread( self )
        table_widget = self.findChild( QtGui.QWidget, 'table_widget' )
        column_groups = self.findChild( QtGui.QWidget, 'column_groups' )
        if table_widget != None:
            model.columnsInserted.connect( column_groups.columns_changed )
            model.columnsRemoved.connect( column_groups.columns_changed )
            model.layoutChanged.connect( column_groups.model_reset )
            model.modelReset.connect( column_groups.model_reset )
            table_widget.setModel( model )
            column_groups.model_reset()
class RowsWidget( QtGui.QLabel ):
    """Widget that is part of the header widget, displaying the number of rows
    in the table view"""

    # shared label font, resolved once at class definition time
    _number_of_rows_font = QtGui.QApplication.font()

    def __init__( self, parent ):
        QtGui.QLabel.__init__( self, parent )
        assert object_thread( self )
        self.setFont( self._number_of_rows_font )

    def setNumberOfRows( self, rows ):
        """Display *rows* as the current row count.

        :param rows: integer number of rows in the table
        """
        assert object_thread( self )
        self.setText( _('(%i rows)')%rows )
class HeaderWidget( QtGui.QWidget ):
    """HeaderWidget for a tableview, containing the title, the search widget,
    and the number of rows in the table"""

    # widget classes used to build the header; overridable by subclasses
    search_widget = SimpleSearchControl
    rows_widget = RowsWidget

    # emitted whenever one of the expanded filter operators changed
    filters_changed_signal = QtCore.pyqtSignal()

    _title_font = QtGui.QApplication.font()
    _title_font.setBold( True )

    def __init__( self, parent, admin ):
        QtGui.QWidget.__init__( self, parent )
        assert object_thread( self )
        self._admin = admin
        layout = QtGui.QVBoxLayout()
        widget_layout = QtGui.QHBoxLayout()
        search = self.search_widget( self )
        search.expand_search_options_signal.connect(
            self.expand_search_options )
        title = UserTranslatableLabel( admin.get_verbose_name_plural(),
                                       self )
        title.setFont( self._title_font )
        widget_layout.addWidget( title )
        widget_layout.addWidget( search )
        if self.rows_widget:
            self.number_of_rows = self.rows_widget( self )
            widget_layout.addWidget( self.number_of_rows )
        else:
            self.number_of_rows = None
        layout.addLayout( widget_layout, 0 )
        # the expanded search options are built lazily, on first request
        self._expanded_filters_created = False
        self._expanded_search = QtGui.QWidget()
        self._expanded_search.hide()
        layout.addWidget( self._expanded_search, 1 )
        self.setLayout( layout )
        self.setSizePolicy( QSizePolicy.Minimum, QSizePolicy.Fixed )
        self.setNumberOfRows( 0 )
        self.search = search

    def _fill_expanded_search_options(self, columns):
        """Given the columns in the table view, present the user
        with more options to filter rows in the table

        :param columns: a list of tuples with field names and attributes
        """
        assert object_thread( self )
        from camelot.view.controls.filter_operator import FilterOperator
        from camelot.view.flowlayout import FlowLayout
        layout = FlowLayout()
        layout.setSpacing( 2 )
        layout.setContentsMargins( 0, 0, 0, 0 )
        for i, (field, attributes) in enumerate(columns):
            # only fields that declare filter operators get a filter box
            if 'operators' in attributes and attributes['operators']:
                box = QtGui.QGroupBox()
                box_layout = QtGui.QVBoxLayout()
                box_layout.setContentsMargins( 1, 1, 1, 1 )
                widget = FilterOperator( self._admin.entity,
                                         field, attributes,
                                         box )
                box_layout.addWidget( widget )
                box.setLayout( box_layout )
                widget.filter_changed_signal.connect( self._filter_changed )
                layout.addWidget( box )
        #layout.addStretch()
        self._expanded_search.setLayout( layout )
        self._expanded_filters_created = True

    def _filter_changed(self):
        # forward the change of a single operator to listeners on the header
        assert object_thread( self )
        self.filters_changed_signal.emit()

    def decorate_query(self, query):
        """Apply expanded filters on the query"""
        if self._expanded_filters_created:
            for i in range(self._expanded_search.layout().count()):
                box = self._expanded_search.layout().itemAt(i).widget()
                if box:
                    widget = box.layout().itemAt(0).widget()
                    if widget:
                        query = widget.decorate_query(query)
        return query

    @QtCore.pyqtSlot()
    def expand_search_options(self):
        """Toggle the visibility of the expanded search options, creating
        them in the model thread on first use."""
        assert object_thread( self )
        if self._expanded_search.isHidden():
            if not self._expanded_filters_created:
                post( self._admin.get_expanded_search_fields,
                      self._fill_expanded_search_options )
            self._expanded_search.show()
        else:
            self._expanded_search.hide()

    def setNumberOfRows( self, rows ):
        """Update the row-count label, when the header has one.

        :param rows: integer number of rows in the table
        """
        assert object_thread( self )
        if self.number_of_rows:
            self.number_of_rows.setNumberOfRows( rows )
class TableView( AbstractView ):
    """
    :param gui_context: a :class:`camelot.admin.action.application_action.ApplicationActionGuiContext`
        object.
    :param admin: an :class:`camelot.admin.entity_admin.EntityAdmin` object
    :param search_text: a predefined search text to put in the search widget
    :param parent: a :class:`QtGui.QWidget` object

    A generic tableview widget that puts together some other widgets. The behaviour of this class and
    the resulting interface can be tuned by specifying specific class attributes which define the underlying
    widgets used ::

        class MovieRentalTableView(TableView):
            title_format = 'Grand overview of recent movie rentals'

    The attributes that can be specified are :

    .. attribute:: header_widget

        The widget class to be used as a header in the table view::

            header_widget = HeaderWidget

    .. attribute:: table_widget

        The widget class used to display a table within the table view ::

            table_widget = TableWidget

    .. attribute:: title_format

        A string used to format the title of the view ::

            title_format = '%(verbose_name_plural)s'

    .. attribute:: table_model

        A class implementing QAbstractTableModel that will be used as a model for the table view ::

            table_model = QueryTableProxy

    - emits the row_selected signal when a row has been selected
    """

    header_widget = HeaderWidget
    AdminTableWidget = AdminTableWidget

    #
    # The proxy class to use
    #
    table_model = QueryTableProxy

    #
    # Format to use as the window title
    #
    title_format = '%(verbose_name_plural)s'

    row_selected_signal = QtCore.pyqtSignal(int)

    def __init__( self,
                  gui_context,
                  admin,
                  search_text = None,
                  parent = None ):
        super(TableView, self).__init__( parent )
        assert object_thread( self )
        self.admin = admin
        self.application_gui_context = gui_context
        self.gui_context = gui_context
        # title is computed in the model thread, then set in the gui thread
        post( self.get_title, self.change_title )
        widget_layout = QtGui.QVBoxLayout()
        if self.header_widget:
            self.header = self.header_widget( self, admin )
            widget_layout.addWidget( self.header )
            self.header.search.search_signal.connect( self.startSearch )
            self.header.search.cancel_signal.connect( self.cancelSearch )
            self.header.search.on_arrow_down_signal.connect(self.focusTable)
            if search_text:
                self.header.search.search( search_text )
        else:
            self.header = None
        widget_layout.setSpacing( 0 )
        widget_layout.setContentsMargins(0, 0, 0, 0)
        splitter = QtGui.QSplitter( self )
        splitter.setObjectName('splitter')
        widget_layout.addWidget( splitter )
        table_widget = QtGui.QWidget( self )
        filters_widget = QtGui.QWidget( self )
        self.table_layout = QtGui.QVBoxLayout()
        self.table_layout.setSpacing( 0 )
        self.table_layout.setContentsMargins(0, 0, 0, 0)
        self.table = None
        self.filters_layout = QtGui.QVBoxLayout()
        self.filters_layout.setSpacing( 0 )
        self.filters_layout.setContentsMargins(0, 0, 0, 0)
        self.actions = None
        table_widget.setLayout( self.table_layout )
        filters_widget.setLayout( self.filters_layout )
        #filters_widget.hide()
        self.set_admin( admin )
        splitter.addWidget( table_widget )
        splitter.addWidget( filters_widget )
        self.setLayout( widget_layout )
        # identity filter until the user actually types a search term
        self.search_filter = lambda q: q
        shortcut = QtGui.QShortcut(QtGui.QKeySequence(QtGui.QKeySequence.Find), self)
        shortcut.activated.connect( self.activate_search )
        if self.header_widget:
            self.header.filters_changed_signal.connect( self.rebuild_query )
        # give the table widget focus to prevent the header and its search control to
        # receive default focus, as this would prevent the displaying of 'Search...' in the
        # search control, but this conflicts with the MDI, resulting in the window not
        # being active and the menus not to work properly
        #table_widget.setFocus( QtCore.Qt.OtherFocusReason )
        #self.setFocusProxy(table_widget)
        #self.setFocus( QtCore.Qt.OtherFocusReason )
        post( self.admin.get_subclass_tree, self.setSubclassTree )

    @QtCore.pyqtSlot()
    def activate_search(self):
        # triggered by the Ctrl-F (Find) shortcut
        assert object_thread( self )
        self.header.search.setFocus(QtCore.Qt.ShortcutFocusReason)

    @model_function
    def get_title( self ):
        """:return: the formatted window title (runs in the model thread)"""
        return self.title_format % {'verbose_name_plural':self.admin.get_verbose_name_plural()}

    @QtCore.pyqtSlot(object)
    def setSubclassTree( self, subclasses ):
        """Insert a subclass selection tree left of the table when the admin
        has subclasses."""
        assert object_thread( self )
        if len( subclasses ) > 0:
            from inheritance import SubclassTree
            splitter = self.findChild(QtGui.QWidget, 'splitter' )
            class_tree = SubclassTree( self.admin, splitter )
            splitter.insertWidget( 0, class_tree )
            class_tree.subclass_clicked_signal.connect( self.set_admin )

    @QtCore.pyqtSlot(int)
    def sectionClicked( self, section ):
        """emits a row_selected signal"""
        assert object_thread( self )
        #
        # close the table editor before opening a form or such
        #
        # Qt seems to crash in certain cases when the editor is open and the
        # underlying model is changed
        #
        if self.table:
            self.table.close_editor()
        self.admin.list_action.gui_run( self.gui_context )

    def create_table_model( self, admin ):
        """Create a table model for the given admin interface"""
        return self.table_model( admin,
                                 None,
                                 admin.get_columns )

    def get_admin(self):
        """:return: the current admin object of this view"""
        return self.admin

    def get_model(self):
        """:return: the item model of the current table widget"""
        return self.table.model()

    @QtCore.pyqtSlot( object )
    def set_admin( self, admin ):
        """Switch to a different subclass, where admin is the admin object of the
        subclass"""
        assert object_thread( self )
        logger.debug('set_admin called')
        self.admin = admin
        if self.table:
            # tear down the previous table and model before replacing them
            self.table.model().layoutChanged.disconnect( self.tableLayoutChanged )
            self.table_layout.removeWidget(self.table)
            self.table.deleteLater()
            self.table.model().deleteLater()
        splitter = self.findChild( QtGui.QWidget, 'splitter' )
        self.table = self.AdminTableWidget( self.admin, splitter )
        self.table.setObjectName('AdminTableWidget')
        new_model = self.create_table_model( admin )
        self.table.setModel( new_model )
        self.table.verticalHeader().sectionClicked.connect( self.sectionClicked )
        self.table.keyboard_selection_signal.connect(self.on_keyboard_selection_signal)
        self.table.model().layoutChanged.connect( self.tableLayoutChanged )
        self.tableLayoutChanged()
        self.table_layout.insertWidget( 1, self.table )
        # narrow the application context down to a list action context
        self.gui_context = self.application_gui_context.copy( ListActionGuiContext )
        self.gui_context.view = self
        self.gui_context.admin = self.admin
        self.gui_context.item_view = self.table

        def get_filters_and_actions():
            # runs in the model thread
            return ( admin.get_filters(), admin.get_list_actions() )

        post( get_filters_and_actions, self.set_filters_and_actions )

    @QtCore.pyqtSlot()
    def on_keyboard_selection_signal(self):
        # Enter/Return in the table behaves like clicking the row header
        assert object_thread( self )
        self.sectionClicked( self.table.currentIndex().row() )

    @QtCore.pyqtSlot()
    def tableLayoutChanged( self ):
        """Update row count, item delegate and column widths after the model
        changed its layout."""
        assert object_thread( self )
        logger.debug('tableLayoutChanged')
        model = self.table.model()
        if self.header:
            self.header.setNumberOfRows( model.rowCount() )
        item_delegate = model.getItemDelegate()
        if item_delegate:
            self.table.setItemDelegate( item_delegate )
        for i in range( model.columnCount() ):
            self.table.setColumnWidth( i, model.headerData( i, Qt.Horizontal, Qt.SizeHintRole ).toSize().width() )

    def closeEvent( self, event ):
        """reimplements close event"""
        assert object_thread( self )
        logger.debug( 'tableview closed' )
        event.accept()

    def selectTableRow( self, row ):
        """selects the specified row"""
        assert object_thread( self )
        self.table.selectRow( row )

    def getColumns( self ):
        """return the columns to be displayed in the table view"""
        assert object_thread( self )
        return self.admin.get_columns()

    def getTitle( self ):
        """return the name of the entity managed by the admin attribute"""
        return self.admin.get_verbose_name()

    @QtCore.pyqtSlot(object)
    def _set_query(self, query_getter):
        # apply a freshly built query getter to the model, when it is a
        # query-backed proxy
        assert object_thread( self )
        if isinstance(self.table.model(), QueryTableProxy):
            self.table.model().setQuery(query_getter)
        self.table.clearSelection()

    @QtCore.pyqtSlot()
    def refresh(self):
        """Refresh the whole view"""
        assert object_thread( self )
        post( self.get_admin, self.set_admin )

    @QtCore.pyqtSlot()
    def rebuild_query( self ):
        """resets the table model query"""
        from filterlist import FilterList

        def rebuild_query():
            # runs in the model thread; successively decorate the base query
            # with header filters, list filters and the search filter
            query = self.admin.get_query()
            # a table view is not required to have a header
            if self.header:
                query = self.header.decorate_query(query)
            filters = self.findChild(FilterList, 'filters')
            if filters:
                query = filters.decorate_query( query )
            if self.search_filter:
                query = self.search_filter( query )
            query_getter = lambda:query
            return query_getter

        post( rebuild_query, self._set_query )

    @QtCore.pyqtSlot(str)
    def startSearch( self, text ):
        """rebuilds query based on filtering text"""
        assert object_thread( self )
        from camelot.view.search import create_entity_search_query_decorator
        logger.debug( 'search %s' % text )
        self.search_filter = create_entity_search_query_decorator( self.admin, unicode(text) )
        self.rebuild_query()

    @QtCore.pyqtSlot()
    def cancelSearch( self ):
        """resets search filtering to default"""
        assert object_thread( self )
        logger.debug( 'cancel search' )
        self.search_filter = lambda q: q
        self.rebuild_query()

    @QtCore.pyqtSlot(object)
    def set_filters_and_actions( self, filters_and_actions ):
        """sets filters for the tableview"""
        assert object_thread( self )
        filters, actions = filters_and_actions
        from camelot.view.controls.filterlist import FilterList
        from camelot.view.controls.actionsbox import ActionsBox
        logger.debug( 'setting filters for tableview' )
        filters_widget = self.findChild(FilterList, 'filters')
        actions_widget = self.findChild(ActionsBox, 'actions')
        # remove the previous filter and action widgets before rebuilding
        if filters_widget:
            filters_widget.filters_changed_signal.disconnect( self.rebuild_query )
            self.filters_layout.removeWidget(filters_widget)
            filters_widget.deleteLater()
        if actions_widget:
            self.filters_layout.removeWidget(actions_widget)
            actions_widget.deleteLater()
        if filters:
            splitter = self.findChild( QtGui.QWidget, 'splitter' )
            filters_widget = FilterList( filters, parent=splitter )
            filters_widget.setObjectName('filters')
            self.filters_layout.addWidget( filters_widget )
            filters_widget.filters_changed_signal.connect( self.rebuild_query )
        #
        # filters might have default values, so we can only build the queries now
        #
        self.rebuild_query()
        if actions:
            actions_widget = ActionsBox( parent = self,
                                         gui_context = self.gui_context )
            actions_widget.setObjectName( 'actions' )
            actions_widget.set_actions( actions )
            self.filters_layout.addWidget( actions_widget )

    @QtCore.pyqtSlot()
    def focusTable(self):
        # arrow-down in the search box moves focus to the first table row
        assert object_thread( self )
        if self.table and self.table.model().rowCount() > 0:
            self.table.setFocus()
            self.table.selectRow(0)
|
jeroendierckx/Camelot
|
camelot/view/controls/tableview.py
|
Python
|
gpl-2.0
| 29,601
|
[
"VisIt"
] |
f18d11d970ac4baf218998bafef9cf9a9a89bcae0d44cb02dfb1d8d0ad592598
|
#============================================================================
# BALL - Example for a energy evaluations as it was used in Althaus
# et al. "A branch and cut algorithm for the optimal solution of the
# side-chain placement problem", 2000
#
# This example reads a PDB file and calculates a bonded energy using a force
# field and a non bonded energy (electrostatics only) by solving the Poisson-
# Boltzmann equation.
#============================================================================
#
# Lara Schneider 2010-04-22
#
import sys
from BALL import *

# the example operates on the first system currently loaded in BALLView
s = getSystems()[0]

# normalize the names and build all bonds
print "normalizing names and building bonds..."
db = FragmentDB("")
s.apply(db.normalize_names)
s.apply(db.build_bonds)

# create an AMBER force field without non-bonded interactions
FF = AmberFF(s);

# calculate the total energy
total_energy = FF.updateEnergy();
print FF.getResults()
print " total energy using force field evaluation: ", total_energy, " kJ/mol"

print "removing non bonded energy terms ..."
FF.removeComponent("Amber NonBonded")

# calculate the internal energy (neglecting internal VdW interactions)
internal_energy = FF.updateEnergy()
print FF.getResults()
print " internal energy: ", internal_energy, " kJ/mol"

# assign atom radii
radius_processor = AssignRadiusProcessor("radii/PARSE.siz")
s.apply(radius_processor)

# calculate the electrostatic part of the solvation energy
print "calculating electrostatic energy terms with FD-Poisson-Boltzmann ..."
fdpb = FDPB(s)
fdpb.solve();
print "... using dielectric constant in medium: ", fdpb.options[FDPB.Option.SOLVENT_DC].toFloat()
solvent_energy = fdpb.getEnergy()

# repeat the calculation in vacuum (dielectric constant 1.0); the difference
# between both runs is the electrostatic solvation free energy
fdpb.options[FDPB.Option.SOLVENT_DC] = 1.0
print "... using dielectric constant in vacuum: ", fdpb.options[FDPB.Option.SOLVENT_DC].toFloat()
fdpb.setup(s)
fdpb.solve()
vacuum_energy = fdpb.getEnergy()

print "\n electrostatic solvation free energy: ", solvent_energy - vacuum_energy
print " total energy using a combination of force field and FDPB evaluation: ", internal_energy - vacuum_energy + solvent_energy, " kJ/mol"

# that's it
print "done."
|
tkemmer/ball
|
doc/examples/PYTHON/BALLView/energy_BV.py
|
Python
|
lgpl-2.1
| 2,138
|
[
"Amber"
] |
80a1dfb59367dd3d66986054647ce95753081edc758523d11140fa6bb18d95af
|
# -*- coding: utf-8 -*-
"""Package declaration for heroku_tools/settings.
Contains the CLI application settings (as distinct from the Heroku
application settings, which are in the config module.)
The settings themselves are loaded at the end of this module, and
are therefore available by importing settings.settings (I know, but
that's namespacing for you).
>>> from heroku_tools.settings import settings
"""
import os
import click
import yaml
# current working directory: the default location for both the application
# configuration files and the git working directory
CWD = os.getcwd()

# hard-coded fallbacks, used when neither the config file nor the
# environment provides a value
DEFAULT_SETTINGS = {
    'app_conf_dir': CWD,
    'git_work_dir': CWD,
    'commands': {
        'migrate': 'python manage.py migrate',
        'collectstatic': 'python manage.py collectstatic',
    },
    'matches': {
        'migrations': '/migrations/',
        'staticfiles': '/static/',
    }
}

# settings read from the environment once, at import time; values are None
# when the corresponding variable is unset.  NB: the keys are the canonical
# setting names ('app_conf_dir', ...), not the environment variable names.
ENVIRON_SETTINGS = {
    'app_conf_dir': os.getenv('HEROKU_TOOLS_CONF_DIR'),
    'git_work_dir': os.getenv('HEROKU_TOOLS_WORK_DIR'),
    'editor': os.getenv('EDITOR') or os.getenv('VISUAL'),
    'heroku_api_token': os.getenv('HEROKU_TOOLS_API_TOKEN'),
    'commands': {
        'migrate': os.getenv('HEROKU_TOOLS_MIGRATE_CMD'),
        'collectstatic': os.getenv('HEROKU_TOOLS_STATIC_CMD'),
    },
    'matches': {
        'migrations': os.getenv('HEROKU_TOOLS_MATCH_MIGRATIONS'),
        'staticfiles': os.getenv('HEROKU_TOOLS_MATCH_STATICFILES'),
    }
}
def get_settings(filename):
    """Load configuration for heroku-tools itself.

    Configuration settings are read in in the following order:

    - local .herokutoolsconf YAML file in current working directory
    - environment variables
    - defaults (set in this module)

    This method is called within this module, making all settings available
    to the rest of the application as heroku_tools.conf.settings.

    :param filename: path of the YAML settings file; ``None`` or ``''``
        applies environment/default settings only.
    :return: dict with the resolved settings.
    """
    # BUGFIX: ENVIRON_SETTINGS is keyed on the canonical setting names
    # ('app_conf_dir', 'heroku_api_token', ...) - the environment variable
    # names were already resolved when the module was imported.  The previous
    # lookups used the env-var names as keys and therefore always returned
    # None, silently ignoring every environment override.
    settings = {
        'app_conf_dir': (
            ENVIRON_SETTINGS.get('app_conf_dir') or
            DEFAULT_SETTINGS.get('app_conf_dir')
        ),
        'git_work_dir': (
            ENVIRON_SETTINGS.get('git_work_dir') or
            DEFAULT_SETTINGS.get('git_work_dir')
        ),
        'editor': ENVIRON_SETTINGS.get('editor'),
        'heroku_api_token': ENVIRON_SETTINGS.get('heroku_api_token'),
        'commands': {
            'migrate': (
                ENVIRON_SETTINGS['commands']['migrate'] or
                DEFAULT_SETTINGS['commands']['migrate']
            ),
            'collectstatic': (
                ENVIRON_SETTINGS['commands']['collectstatic'] or
                DEFAULT_SETTINGS['commands']['collectstatic']
            ),
        },
        'matches': {
            'migrations': (
                ENVIRON_SETTINGS['matches']['migrations'] or
                DEFAULT_SETTINGS['matches']['migrations']
            ),
            'staticfiles': (
                ENVIRON_SETTINGS['matches']['staticfiles'] or
                DEFAULT_SETTINGS['matches']['staticfiles']
            ),
        }
    }
    if filename in (None, ''):
        click.echo(u"No config specified, default settings will be applied.")
        return settings
    if os.path.exists(filename):
        click.echo(u"Applying settings from %s" % filename)
        try:
            with open(filename, 'r') as settings_file:
                # safe_load: a settings file needs no arbitrary python tags,
                # and plain yaml.load() is unsafe on untrusted input;
                # an empty file parses to None, hence the `or {}` guard
                local = yaml.safe_load(settings_file) or {}
            settings.update(local.get('settings', {}))
            settings['commands'].update(local.get('commands', {}))
            settings['matches'].update(local.get('matches', {}))
            return settings
        except IOError as ex:
            # if we can't read the file just blast through with the defaults.
            click.echo(".herokutoolsconfig file could not be read: %s" % ex)
            return settings
    else:
        click.echo(u"Config does not exist - %s" % filename)
        click.echo(u"Default settings will be applied.")
        return settings
# the default settings can be overridden by a local '.herokutoolsconf' files
# resolved once at import time; the rest of the application imports this dict
SETTINGS = get_settings(os.path.join(os.getcwd(), '.herokutoolsconf'))
|
lowks/heroku-tools
|
heroku_tools/settings/__init__.py
|
Python
|
mit
| 4,047
|
[
"BLAST"
] |
8087959f5e801141ac11a5cab01822ee33211a3e9b0d5aff68435fcff175d66f
|
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import future.utils
from functools import reduce
from past.utils import old_div
import numpy as np
import moldesign as mdt
from .. import units as u
from .. import compute, orbitals, utils
from ..compute import packages
from ..interfaces.pyscf_interface import mol_to_pyscf, StatusLogger, SPHERICAL_NAMES
from .base import QMBase
from ..molecules import AtomicProperties
if future.utils.PY2:
from cStringIO import StringIO
else:
from io import StringIO
class LazyClassMap(object):
    """ For lazily importing classes from modules (when there's a lot of import overhead)

    Class names should be stored as their *absolute import strings* so that they can be
    imported only when needed

    Example:
        >>> myclasses = LazyClassMap({'od': 'collections.OrderedDict'})
        >>> myclasses['od']()
        OrderedDict()
    """
    def __init__(self, mapping):
        #: dict mapping a short key to a dotted "module.path.ClassName" string
        self.mapping = mapping

    def __getitem__(self, key):
        # import the module only at lookup time, then resolve the class on it
        import importlib
        modname, _, clsname = self.mapping[key].rpartition('.')
        module = importlib.import_module(modname)
        return getattr(module, clsname)

    def __contains__(self, item):
        return item in self.mapping

    def __iter__(self):
        return iter(self.mapping)
# PySCF metadata constants

# Theory name -> absolute import path of the PySCF kernel class (imported lazily)
THEORIES = LazyClassMap({'hf': 'pyscf.scf.RHF', 'rhf': 'pyscf.scf.RHF',
                         'uhf': 'pyscf.scf.UHF',
                         'mcscf': 'pyscf.mcscf.CASSCF', 'casscf': 'pyscf.mcscf.CASSCF',
                         'casci': 'pyscf.mcscf.CASCI',
                         'mp2': 'pyscf.mp.MP2',
                         'dft': 'pyscf.dft.RKS', 'rks': 'pyscf.dft.RKS', 'ks': 'pyscf.dft.RKS'})

# Post-HF theories that are built on top of a converged SCF reference kernel
NEEDS_REFERENCE = set('mcscf casscf casci mp2'.split())
# Theories that require an exchange-correlation functional to be assigned
NEEDS_FUNCTIONAL = set('dft rks ks uks'.split())
# Theories that are converged through the SCF fallback machinery (_converge)
IS_SCF = set('rhf uhf hf dft rks ks'.split())
# Theory name -> import path of its analytical gradient class (for forces)
FORCE_CALCULATORS = LazyClassMap({'rhf': 'pyscf.grad.RHF', 'hf': 'pyscf.grad.RHF',
                                  'rks': 'pyscf.grad.RKS', 'ks': 'pyscf.grad.RKS'})
@utils.exports
class PySCFPotential(QMBase):
    """Quantum-chemistry energy model backed by PySCF (HF, DFT, MP2, CASCI/CASSCF)."""

    # Properties computed for every calculation unless the caller requests otherwise
    DEFAULT_PROPERTIES = ['potential_energy',
                          'wfn',
                          'mulliken']
    # Everything this model knows how to compute on request
    ALL_PROPERTIES = DEFAULT_PROPERTIES + ['forces',
                                           'nuclear_forces',
                                           'electronic_forces']
    # Parameter values that are officially supported (others may still work)
    PARAM_SUPPORT = {'theory': ['rhf', 'rks', 'mp2'],
                     'functional': ['b3lyp', 'blyp', 'pbe0', 'x3lyp', 'MPW3LYP5']}
    # PySCF gradients are returned in atomic units
    FORCE_UNITS = u.hartree / u.bohr
    # package handle used by the remote-execution decorator on calculate()
    _PKG = packages.pyscf
@mdt.utils.kwargs_from(QMBase)
def __init__(self, **kwargs):
    """Set up a PySCF-backed QM model; keyword parameters are inherited from QMBase."""
    super().__init__(**kwargs)
    self.pyscfmol = None   # pyscf Mole object, built lazily in prep()
    self.reference = None  # converged reference kernel (post-HF theories only)
    self.kernel = None     # converged kernel for the requested theory
    self.logs = StringIO()  # buffer that captures PySCF's text output
    self.logger = self._get_logger('PySCF interface')
@packages.pyscf.runsremotely(is_imethod=True, persist_refs=True)
def calculate(self, requests=None):
    """Run the configured PySCF calculation and return molecular properties.

    Args:
        requests (List[str]): names of the properties to compute
            (defaults to ``self.DEFAULT_PROPERTIES``); forces are only
            computed if 'forces' is present

    Returns:
        dict: mapping from property names to computed values

    Raises:
        ValueError: if forces are requested for a theory with no analytical
            gradient implementation
    """
    self.logger = self._get_logger('PySCF calc')

    # BUGFIX: requests defaults to None, which previously crashed on the
    # membership test below
    if requests is None:
        requests = self.DEFAULT_PROPERTIES

    do_forces = 'forces' in requests
    if do_forces and self.params.theory not in FORCE_CALCULATORS:
        # BUGFIX: implicit string concatenation previously turned the whole
        # message into the join *separator*; the '+' was missing
        raise ValueError('Forces are only available for the following theories: '
                         + ','.join(FORCE_CALCULATORS))
    if do_forces:
        force_calculator = FORCE_CALCULATORS[self.params.theory]

    self.prep(force=True)  # rebuild every time

    # Set up initial guess
    if self.params.wfn_guess == 'stored':
        dm0 = self.params.initial_guess.density_matrix_ao
    else:
        dm0 = None

    # Compute reference WFN (if needed by a post-HF theory)
    refobj = self.pyscfmol
    if self.params.theory in NEEDS_REFERENCE:
        reference = self._build_theory(self.params.get('reference', 'rhf'),
                                       refobj)
        kernel, failures = self._converge(reference, dm0=dm0)
        refobj = self.reference = kernel
    else:
        self.reference = None

    # Compute WFN
    theory = self._build_theory(self.params['theory'],
                                refobj)
    if self.params['theory'] not in IS_SCF:
        theory.kernel()
        self.kernel = theory
    else:
        self.kernel, failures = self._converge(theory, dm0=dm0)

    # Compute forces (if requested)
    if do_forces:
        grad = force_calculator(self.kernel)
    else:
        grad = None

    props = self._get_properties(self.reference, self.kernel, grad)

    # Optionally store this wavefunction as the starting guess for next time
    if self.params.store_orb_guesses:
        self.params.wfn_guess = 'stored'
        self.params.initial_guess = props['wfn']

    return props
def _get_properties(self, ref, kernel, grad):
    """ Analyze calculation results and return molecular properties

    Args:
        ref (pyscf.Kernel): Reference kernel (can be None)
        kernel (pyscf.Kernel): Theory kernel
        grad (pyscf.Gradient): Gradient calculation

    Returns:
        dict: Molecular property names and values
    """
    result = {}

    # --- energies ---
    if self.reference is not None:
        result['reference_energy'] = (ref.e_tot*u.hartree).defunits()
        # TODO: check sign on correlation energy. Is this true for anything besides MP2?
        if hasattr(kernel, 'e_corr'):
            result['correlation_energy'] = (kernel.e_corr *u.hartree).defunits()
            result['potential_energy'] = result['correlation_energy'] +\
                                         result['reference_energy']
        else:
            result['potential_energy'] = (kernel.e_tot*u.hartree).defunits()
        # with a post-HF theory, the orbitals come from the reference SCF
        orb_calc = ref
    else:
        result['potential_energy'] = (kernel.e_tot*u.hartree).defunits()
        orb_calc = kernel

    # --- forces (only present when a gradient calculation was run) ---
    if grad is not None:
        f_e = -1.0 * grad.grad_elec() * self.FORCE_UNITS
        f_n = -1.0 * grad.grad_nuc() * self.FORCE_UNITS
        result['electronic_forces'] = f_e.defunits()
        result['nuclear_forces'] = f_n.defunits()
        result['forces'] = result['electronic_forces'] + result['nuclear_forces']

    # --- multi-configurational (CAS) state analysis ---
    # for casscf/mcscf, build a CASCI object on the converged reference to
    # resolve the individual states; for casci the kernel is used directly
    if self.params.theory in ('casscf', 'mcscf'):
        from pyscf.mcscf import CASCI
        casobj = CASCI(ref,
                       self.params.active_orbitals,
                       self.params.active_electrons)
    elif self.params.theory == 'casci':
        casobj = kernel

    if self.params.theory in ('casscf', 'mcscf', 'casci'):
        orb_calc = kernel  # get the orbs directly from the post-HF theory
        casobj.fcisolver.nroots = self.params.get('num_states',
                                                  self.params.state_average)
        casresult = casobj.kernel()
        result['state_energies'] = (casresult[0] * u.hartree).defunits()
        result['ci_vectors'] = list(map(self._parse_fci_vector, casresult[2]))

        # potential_energy is the energy of the molecule's assigned state
        result['state_averaged_energy'] = result['potential_energy']
        result['potential_energy'] = result['state_energies'][self.mol.electronic_state_index]

        # TODO: add 'reference wavefunction' to result
        ao_obj = ref
        dips, tdips = _get_multiconf_dipoles(self.pyscfmol, casobj, len(casobj.ci))
        result['state_dipole_moments'] = dips
        result['transition_dipole_moments'] = tdips
        result['dipole_moment'] = dips[0]

        # oscillator strengths from excitation energies and transition dipoles
        # TODO: this is general, put it somewhere else
        oscs = {}
        nstates = len(result['state_energies'])
        for i in range(nstates):
            for j in range(i+1, nstates):
                excitation_energy = result['state_energies'][j]-result['state_energies'][i]
                tdip = result['transition_dipole_moments'][i, j].norm()
                oscs[i, j] = (2.0*tdip ** 2*u.m_e*excitation_energy/
                              (3.0*u.q_e ** 2*u.hbar ** 2)).to(u.ureg.dimensionless).magnitude
                # downward transition carries the opposite sign
                oscs[j, i] = -oscs[i, j]
        result['oscillator_strengths'] = oscs
    else:
        ao_obj = orb_calc

    # --- AO-basis matrices and population analysis ---
    ao_matrices = self._get_ao_matrices(ao_obj)
    scf_matrices = self._get_scf_matrices(orb_calc, ao_matrices)
    if hasattr(orb_calc, 'mulliken_pop'):
        ao_pop, atom_pop = orb_calc.mulliken_pop(verbose=-1)
        result['mulliken'] = AtomicProperties({a: p * u.q_e
                                               for a, p in zip(self.mol.atoms, atom_pop)})
    if hasattr(orb_calc, 'dip_moment'):
        result['dipole_moment'] = orb_calc.dip_moment() * u.debye

    # Build the electronic state object
    basis = orbitals.basis.BasisSet(self.mol,
                                    orbitals=self._get_ao_basis_functions(),
                                    h1e=ao_matrices.h1e.defunits(),
                                    overlaps=scf_matrices.pop('sao'),
                                    name=self.params.basis)
    el_state = orbitals.wfn.ElectronicWfn(self.mol,
                                          self.pyscfmol.nelectron,
                                          aobasis=basis,
                                          fock_ao=scf_matrices['fock_ao'],
                                          density_matrix_ao=scf_matrices['density_matrix_ao'],
                                          description=self.theoryname)

    # Build and store the canonical orbitals
    cmos = []
    for iorb, (coeffs, energy) in enumerate(zip(orb_calc.mo_coeff.T,
                                                orb_calc.mo_energy * u.hartree)):
        cmos.append(orbitals.Orbital(coeffs, wfn=el_state))
    if hasattr(orb_calc, 'get_occ'):
        for orb, occ in zip(cmos, orb_calc.get_occ()):
            orb.occupation = occ
    el_state.add_orbitals(cmos, orbtype='canonical')

    # Return the result
    result['wfn'] = el_state
    return result
def prep(self, force=False):
    """Validate required parameters and (re)build the PySCF molecule object.

    Args:
        force (bool): rebuild the PySCF molecule even if already prepped
    """
    # TODO: spin, isotopic mass, symmetry
    for required in ('basis', 'theory'):
        if self.params.get(required, None) is None:
            raise ValueError('Parameter "%s" is required' % required)

    if not (self._prepped and not force):
        self.pyscfmol = self._build_mol()
        self._prepped = True
def _build_mol(self):
    """TODO: where does charge go? Model or molecule?"""
    built = mol_to_pyscf(self.mol,
                         self.params.basis,
                         symmetry=self.params.get('symmetry', None),
                         charge=self.get_formal_charge())
    # route PySCF's text output into this model's log buffer
    built.stdout = self.logs
    return built
def _converge(self, method, dm0=None):
    """
    Automatically try a bunch of fallback methods for convergence

    Tries, in order: the caller-supplied guess, a fresh atomic/minao guess,
    then a level-shifted SCF whose converged density seeds a final unshifted
    attempt.

    Args:
        method: unconverged PySCF SCF kernel object
        dm0: initial-guess density matrix (optional)

    Returns:
        Tuple: (converged kernel, list of failed attempts)

    Raises:
        mdt.QMConvergenceError: if every fallback fails

    see also https://www.molpro.net/info/2015.1/doc/manual/node176.html#sec:difficulthf
    """
    # TODO: make this user configurable
    # TODO: generalize outside of pyscf

    energy = method.kernel(dm0=dm0)
    failed = []

    # stop here if it converged
    if method.converged:
        return method, failed

    # fallback 1: don't use previous density matrix OR change initial_guess
    failed.append(method)
    if dm0 is not None:
        method.init_guess = 'atom'
    else:
        method.init_guess = 'minao'
    self.logger.handled('SCF failed to converge. Retrying with initial guess %s' % method.init_guess)
    energy = method.kernel()
    if method.converged:
        return method, failed

    # fallback 2: level shift, slower convergence
    # this probably won't converge, but is intended to get us in the right basin for the next step
    # TODO: should dynamically adjust level shift instead of hardcoded cycles
    self.logger.handled('SCF failed to converge. Performing %d iterations with level shift of -0.5 hartree'
                        % (old_div(method.max_cycle, 2)))
    failed.append(method)
    method.init_guess = 'minao'
    method.level_shift = -0.5
    # BUGFIX: use floor division. Plain "/=" is true division here (this
    # module does `from __future__ import division`), which silently turned
    # max_cycle into a float.
    method.max_cycle //= 2
    energy = method.kernel()
    if method.converged:
        return method, failed

    # fallback 2 cont.: remove level shift and try to converge
    self.logger.handled('Removing level shift and continuing')
    level_shift_dm = method.make_rdm1()
    method.level_shift = 0.0
    method.max_cycle *= 2
    energy = method.kernel(dm0=level_shift_dm)
    if method.converged:
        return method, failed

    raise mdt.QMConvergenceError(method)
def _build_theory(self, name, refobj):
    """Instantiate the (unconverged) PySCF kernel for theory ``name``.

    Args:
        name (str): theory name (a key into THEORIES)
        refobj: the pyscf Mole (SCF theories) or a converged reference
            kernel (post-HF theories)

    Returns:
        the PySCF kernel object, configured but not yet run
    """
    if name in ('mcscf', 'casci', 'casscf'):
        # CAS theories need the active-space specification at construction
        theory = THEORIES[name](refobj,
                                self.params.active_orbitals,
                                self.params.active_electrons)
        # state-averaged CASSCF: equal weights for each averaged state
        if name != 'casci' and self.params.state_average > 1:
            theory = theory.state_average_([old_div(1.0,self.params.state_average)
                                            for i in range(self.params.state_average)])
    else:
        theory = THEORIES[name](refobj)

    # stream per-cycle progress (cycle number, total energy) into our logger
    theory.callback = StatusLogger('%s procedure:' % self.theoryname,
                                   ['cycle', 'e_tot'],
                                   self.logger)
    if 'scf_cycles' in self.params:
        theory.max_cycle = self.params.scf_cycles
    if 'functional' in self.params:
        self._assign_functional(theory, name,
                                self.params.get('functional', None))
    return theory
# (alpha_bit, beta_bit) -> occupation label used in CI configuration strings:
# '0' empty, 'a' alpha only, 'b' beta only, '2' doubly occupied
_OCCMAP = {('0', '0'): '0',
           ('1', '0'): 'a',
           ('0', '1'): 'b',
           ('1', '1'): '2'}
@property
def theoryname(self):
    """str: human-readable theory/basis label, e.g. ``'RKS(B3LYP)/<basis>'``."""
    params = self.params
    theory = params.theory

    if theory == 'rks':
        label = 'RKS(%s)' % params.functional.upper()
    elif theory in ('casscf', 'casci'):
        label = '%s(%d,%d)' % (theory.upper(),
                               params.active_orbitals,
                               params.active_electrons)
        if theory == 'casscf' and params.state_average > 1:
            label += ' SA-%d' % params.state_average
    else:
        label = theory.upper()

    return '%s/%s' % (label, params.basis)
def _parse_fci_vector(self, ci_vecmat):
    """ Translate the PySCF FCI matrix into a dictionary of configurations and weights

    Args:
        ci_vecmat (np.ndarray): ci vector from a PySCF FCI calculation

    Returns:
        Mapping[str, float]: dictionary of configuration weights (normalized) organized by
            configuration label. Configurations labeled by their active space orbital
            occupations: 0 (unoccupied), a (alpha electron only), b (beta electron only), or '2'
            (doubly occupied)

    Example:
        >>> import numpy as np
        >>> model = PySCFPotential(active_orbitals=2, active_electrons=2)
        >>> model._parse_fci_vector(np.array([[1.0, 2.0],[3.0, 4.0]]))
        {'20': 1.0,
         'ba': 2.0,
         'ab': 3.0,
         '02': 4.0}
    """
    from pyscf.fci import cistring

    # determinant bitstrings for the active space; old_div keeps integer
    # division semantics (assumes equal alpha/beta counts - TODO confirm
    # behavior for odd electron counts)
    conf_bin = cistring.gen_strings4orblist(list(range(self.params.active_orbitals)),
                                            old_div(self.params.active_electrons,2))
    civecs = {}
    for i, ca in enumerate(conf_bin):  # rows index alpha determinants
        for j, cb in enumerate(conf_bin):  # columns index beta determinants
            # occupation bitstrings, zero-padded to the active-space size
            astring = bin(ca)[2:].zfill(self.params.active_orbitals)
            bstring = bin(cb)[2:].zfill(self.params.active_orbitals)
            # reversed so the label reads lowest orbital first
            s = ''.join(reversed([self._OCCMAP[a, b] for a, b in zip(astring, bstring)]))
            civecs[s] = ci_vecmat[i, j]
    return civecs
@staticmethod
def _assign_functional(kernel, theory, fname):
    """Set the exchange-correlation functional on a DFT kernel.

    No-op for theories that don't take a functional; raises if a DFT theory
    was requested without one.
    """
    if theory not in NEEDS_FUNCTIONAL:
        return
    if fname is None:
        raise ValueError('No functional specified for reference theory "%s"' % theory)
    kernel.xc = fname
def _get_ao_basis_functions(self):
    """ Convert pyscf basis functions into a list of atomic basis functions

    Notes:
        PySCF stores *shells* instead of a flat list, so we need to do a little hacky
        guesswork to do this conversion. We include consistentcy checks with the annotated
        list of basis functions stored from ``mole.cart_labels()``

        As of PySCF v1.0, only cartesian orbitals appear to be supported, and that's all
        supported here right now

    Returns:
        List[moldesign.Gaussians.AtomicBasisFunction]
    """
    # NOTE(review): despite the docstring's mention of cartesian orbitals, this
    # implementation walks *spherical* labels and builds SphericalGaussian
    # primitives - confirm which convention is actually intended
    bfs = []
    pmol = self.pyscfmol
    orblabels = iter(pmol.spheric_labels())

    for ishell in range(pmol.nbas):  # loop over shells (n,l)
        atom = self.mol.atoms[pmol.bas_atom(ishell)]
        angular = pmol.bas_angular(ishell)
        num_momentum_states = angular*2 + 1
        exps = pmol.bas_exp(ishell) / (u.bohr**2)
        num_contractions = pmol.bas_nctr(ishell)
        coeffs = pmol.bas_ctr_coeff(ishell)

        for ictr in range(num_contractions):  # loop over contractions in shell
            for ibas in range(num_momentum_states):  # loop over angular states in shell
                label = next(orblabels)
                sphere_label = label[3]
                l, m = SPHERICAL_NAMES[sphere_label]
                # consistency check against pyscf's own annotated labels
                assert l == angular

                # Note: this is for metadata only, should not be used in any calculations
                n = int(''.join(x for x in label[2] if x.isdigit()))

                primitives = [orbitals.SphericalGaussian(atom.position.copy(),
                                                         exp, l, m,
                                                         coeff=coeff[ictr],
                                                         normalized=True)
                              for exp, coeff in zip(exps, coeffs)]
                bfs.append(orbitals.AtomicBasisFunction(atom,
                                                        n=n, l=angular, m=m,
                                                        primitives=primitives))
    return bfs
def _get_basis_name(self):
    """Translate the basis set name into a spec that pyscf recognizes.

    Currently a straight pass-through of ``self.params.basis``.
    """
    # TODO: actually implement this
    return self.params.basis
@staticmethod
def _get_ao_matrices(mf):
    """Return the core Hamiltonian (with units) and AO overlap matrix."""
    core_hamiltonian = mf.get_hcore() * u.hartree
    overlap = mf.get_ovlp()
    return utils.DotDict(h1e=core_hamiltonian, sao=overlap)
def _get_scf_matrices(self, mf, ao_mats):
    """Assemble the AO-basis SCF matrices (density, effective potential, Fock)."""
    density = mf.make_rdm1()
    effective_potential = mf.get_veff(dm=density) * u.hartree
    matrices = dict(density_matrix_ao=density,
                    h2e=effective_potential,
                    fock_ao=ao_mats.h1e + effective_potential)
    matrices.update(ao_mats)
    return matrices
def _get_logger(self, logname):
    """Create a named Logger instance.

    The import is deliberately local: importing Logger at module level can
    cause pickling problems.
    """
    from ..helpers import Logger
    return Logger(logname)
def _get_multiconf_dipoles(basis, mcstate, nstates):
    """ Compute dipoles and transition dipoles. Adapted from PySCF examples

    Note:
        Dipole moments are computed using the center of the nuclear charge as the origin. Dipole
        moments will need to be annotated or translated appropriately for charges systems.

    Args:
        basis: the pyscf Mole object (provides atoms and AO integrals)
        mcstate: converged multiconfigurational (CASCI/CASSCF) kernel
        nstates (int): number of electronic states to evaluate

    Returns:
        List[u.Vector[dipole]]: Dipole moments for each state
        Mapping[Tuple[int, int], u.Vector[dipole]]: mapping from pairs of state ids to transition
            dipole moments

    References:
        https://github.com/sunqm/pyscf/blob/e4d824853c49b7c19eb35cd6f9fe6ea675de932d/examples/1-advanced/030-transition_dipole.py
    """
    # nuclear contribution, relative to the center of nuclear charge
    nuc_charges = [basis.atom_charge(i) for i in range(basis.natm)]
    nuc_coords = [basis.atom_coord(i) for i in range(basis.natm)]
    nuc_charge_center = np.einsum('z,zx->x', nuc_charges, nuc_coords)/sum(nuc_charges)
    # NOTE: mutates the pyscf Mole's common integral origin
    basis.set_common_orig_(nuc_charge_center)
    nuc_dip = np.einsum('i,ix->x', nuc_charges, nuc_coords-nuc_charge_center) * u.a0 * u.q_e

    # electronic contribution: AO position integrals contracted with the
    # (transition) 1-particle density matrix projected into the AO basis
    dip_ints = basis.intor('cint1e_r_sph', comp=3)
    orbcas = mcstate.mo_coeff[:, mcstate.ncore:mcstate.ncore+mcstate.ncas]

    dipoles, transitions = [], {}
    for istate in range(nstates):
        for fstate in range(istate, nstates):
            t_dm1 = mcstate.fcisolver.trans_rdm1(mcstate.ci[istate], mcstate.ci[fstate],
                                                 mcstate.ncas, mcstate.nelecas)
            t_dm1_ao = reduce(np.dot, (orbcas, t_dm1, orbcas.T))
            moment = np.einsum('xij,ji->x', dip_ints, t_dm1_ao) * u.a0 * u.q_e
            if istate == fstate:
                dipoles.append(moment)
            else:
                # transition dipoles are stored symmetrically
                transitions[istate, fstate] = transitions[fstate, istate] = moment

    # total state dipole = nuclear part minus electronic part
    for idip, d in enumerate(dipoles):
        dipoles[idip] = nuc_dip - d

    return dipoles, transitions
|
Autodesk/molecular-design-toolkit
|
moldesign/models/pyscf.py
|
Python
|
apache-2.0
| 22,179
|
[
"Molpro",
"PySCF"
] |
86e2500b4f5475a00f33bb1ab12f5d1e1a759d4be365f30dd92bd308406a56d4
|
# Autodetecting setup.py script for building the Python extensions
#
__version__ = "$Revision: 70850 $"
import sys, os, imp, re, optparse
from glob import glob
from platform import machine as platform_machine
from distutils import log
from distutils import sysconfig
from distutils import text_file
from distutils.errors import *
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
# This global variable is used to hold the list of modules to be disabled.
disabled_module_list = []

# Root of the Android source tree; required for all cross-compile paths below.
ANDROID_SRC = os.environ.get('ANDROID_SRC')
if ANDROID_SRC is None:
    # Python 2 print statement - this setup script targets Python 2
    print 'Android source directory not set.'
    sys.exit(1)
def add_dir_to_list(dirlist, dir):
    """Add the directory 'dir' to the list 'dirlist' (at the front) if
    1) 'dir' is not already in 'dirlist'
    2) 'dir' actually exists, and is a directory."""
    if dir is None:
        return
    if dir in dirlist or not os.path.isdir(dir):
        return
    dirlist.insert(0, dir)
def find_file(filename, std_dirs, paths):
    """Searches for the directory where a given file is located,
    and returns a possibly-empty list of additional directories, or None
    if the file couldn't be found at all.

    'filename' is the name of a file, such as readline.h or libcrypto.a.
    'std_dirs' is the list of standard system directories; if the
        file is found in one of them, no additional directives are needed.
    'paths' is a list of additional locations to check; if the file is
        found in one of them, the resulting list will contain the directory.
    """
    # Found in a standard location: no extra directives required
    for candidate in std_dirs:
        if os.path.exists(os.path.join(candidate, filename)):
            return []

    # Found in an additional location: report that directory
    for candidate in paths:
        if os.path.exists(os.path.join(candidate, filename)):
            return [candidate]

    # Not found anywhere
    return None
def find_library_file(compiler, libname, std_dirs, paths):
    """Locate 'libname' using the compiler's search machinery.

    Returns [] if the library lives in a standard directory, [dir] if it was
    found in one of the additional 'paths', and None if it wasn't found.
    """
    result = compiler.find_library_file(std_dirs + paths, libname)
    if result is None:
        return None

    found_in = os.path.dirname(result)

    # Standard directory? No extra -L directive needed.
    for candidate in std_dirs:
        # Compare without any trailing path separator
        if candidate.rstrip(os.sep) == found_in:
            return [ ]

    # Otherwise it must be one of the additional directories.
    for candidate in paths:
        stripped = candidate.rstrip(os.sep)
        if stripped == found_in:
            return [stripped]

    assert False, "Internal error: Path not found in std_dirs or paths"
def module_enabled(extlist, modname):
    """Returns whether the module 'modname' is present in the list
    of extensions 'extlist'."""
    # any() short-circuits and returns a real bool; the original built a
    # filtered list and returned its length, relying on truthiness
    return any(ext.name == modname for ext in extlist)
def find_module_file(module, dirlist):
    """Find a module in a set of possible folders. If it is not found
    return the unadorned filename"""
    # renamed local from 'list' to avoid shadowing the builtin
    dirs = find_file(module, [], dirlist)
    if not dirs:
        return module
    if len(dirs) > 1:
        log.info("WARNING: multiple copies of %s found"%module)
    return os.path.join(dirs[0], module)
class PyBuildExt(build_ext):
    # distutils build_ext subclass that autodetects buildable modules for an
    # Android cross-compile and records extensions that fail to build.

    def __init__(self, dist):
        build_ext.__init__(self, dist)
        self.failed = []  # names of extensions whose build (or import check) failed
def build_extensions(self):
    """Autodetect buildable modules, filter disabled/static/Setup-listed ones,
    fix up source paths, delegate to distutils, and report what failed."""

    # Detect which modules should be compiled
    missing = self.detect_modules()

    # Remove modules that are present on the disabled list
    extensions = [ext for ext in self.extensions
                  if ext.name not in disabled_module_list]
    # move ctypes to the end, it depends on other modules
    ext_map = dict((ext.name, i) for i, ext in enumerate(extensions))
    if "_ctypes" in ext_map:
        ctypes = extensions.pop(ext_map["_ctypes"])
        extensions.append(ctypes)
    self.extensions = extensions

    # Fix up the autodetected modules, prefixing all the source files
    # with Modules/ and adding Python's include directory to the path.
    (srcdir,) = sysconfig.get_config_vars('srcdir')
    if not srcdir:
        # Maybe running on Windows but not using CYGWIN?
        raise ValueError("No source directory; cannot proceed.")

    # Figure out the location of the source code for extension modules
    # (This logic is copied in distutils.test.test_sysconfig,
    # so building in a separate directory does not break test_distutils.)
    moddir = os.path.join(os.getcwd(), srcdir, 'Modules')
    moddir = os.path.normpath(moddir)
    srcdir, tail = os.path.split(moddir)
    srcdir = os.path.normpath(srcdir)
    moddir = os.path.normpath(moddir)

    moddirlist = [moddir]
    incdirlist = ['./Include']

    # Platform-dependent module source and include directories
    platform = self.get_platform()
    if platform in ('darwin', 'mac') and ("--disable-toolbox-glue" not in
            sysconfig.get_config_var("CONFIG_ARGS")):
        # Mac OS X also includes some mac-specific modules
        macmoddir = os.path.join(os.getcwd(), srcdir, 'Mac/Modules')
        moddirlist.append(macmoddir)
        incdirlist.append('./Mac/Include')

    alldirlist = moddirlist + incdirlist

    # Fix up the paths for scripts, too
    self.distribution.scripts = [os.path.join(srcdir, filename)
                                 for filename in self.distribution.scripts]

    # Python header files
    headers = glob("Include/*.h") + ["pyconfig.h"]

    for ext in self.extensions[:]:
        ext.sources = [ find_module_file(filename, moddirlist)
                        for filename in ext.sources ]
        if ext.depends is not None:
            ext.depends = [find_module_file(filename, alldirlist)
                           for filename in ext.depends]
        else:
            ext.depends = []
        # re-compile extensions if a header file has been changed
        ext.depends.extend(headers)

        ext.include_dirs.append( '.' ) # to get config.h
        for incdir in incdirlist:
            ext.include_dirs.append( os.path.join(srcdir, incdir) )

        # If a module has already been built statically,
        # don't build it here
        if ext.name in sys.builtin_module_names:
            self.extensions.remove(ext)

    if platform != 'mac':
        # Parse Modules/Setup and Modules/Setup.local to figure out which
        # modules are turned on in the file.
        remove_modules = []
        for filename in ('Modules/Setup', 'Modules/Setup.local'):
            input = text_file.TextFile(filename, join_lines=1)
            while 1:
                line = input.readline()
                if not line: break
                line = line.split()
                remove_modules.append(line[0])
            input.close()

        for ext in self.extensions[:]:
            if ext.name in remove_modules:
                self.extensions.remove(ext)

    # When you run "make CC=altcc" or something similar, you really want
    # those environment variables passed into the setup.py phase. Here's
    # a small set of useful ones.
    compiler = os.environ.get('CC')
    args = {}
    # unfortunately, distutils doesn't let us provide separate C and C++
    # compilers
    if compiler is not None:
        (ccshared,cflags) = sysconfig.get_config_vars('CCSHARED','CFLAGS')
        args['compiler_so'] = compiler + ' ' + ccshared + ' ' + cflags
    self.compiler.set_executables(**args)

    build_ext.build_extensions(self)

    # Column width for the three-column failure report below
    longest = max([len(e.name) for e in self.extensions])
    if self.failed:
        longest = max(longest, max([len(name) for name in self.failed]))

    def print_three_column(lst):
        # Print module names in three sorted columns (Python 2 print syntax)
        lst.sort(key=str.lower)
        # guarantee zip() doesn't drop anything
        while len(lst) % 3:
            lst.append("")
        for e, f, g in zip(lst[::3], lst[1::3], lst[2::3]):
            print "%-*s %-*s %-*s" % (longest, e, longest, f,
                                      longest, g)

    if missing:
        print
        print "Failed to find the necessary bits to build these modules:"
        print_three_column(missing)
        print ("To find the necessary bits, look in setup.py in"
               " detect_modules() for the module's name.")
        print

    if self.failed:
        failed = self.failed[:]
        print
        print "Failed to build these modules:"
        print_three_column(failed)
        print
def build_extension(self, ext):
    """Build one extension, recording failures in self.failed instead of
    aborting, and (where safe) sanity-check the result by importing it."""

    if ext.name == '_ctypes':
        if not self.configure_ctypes(ext):
            return

    try:
        build_ext.build_extension(self, ext)
    except (CCompilerError, DistutilsError), why:
        self.announce('WARNING: building of extension "%s" failed: %s' %
                      (ext.name, sys.exc_info()[1]))
        self.failed.append(ext.name)
        return

    # Workaround for Mac OS X: The Carbon-based modules cannot be
    # reliably imported into a command-line Python
    if 'Carbon' in ext.extra_link_args:
        self.announce(
            'WARNING: skipping import check for Carbon-based "%s"' %
            ext.name)
        return

    if self.get_platform() == 'darwin' and (
            sys.maxint > 2**32 and '-arch' in ext.extra_link_args):
        # Don't bother doing an import check when an extension was
        # build with an explicit '-arch' flag on OSX. That's currently
        # only used to build 32-bit only extensions in a 4-way
        # universal build and loading 32-bit code into a 64-bit
        # process will fail.
        self.announce(
            'WARNING: skipping import check for "%s"' %
            ext.name)
        return

    # Workaround for Cygwin: Cygwin currently has fork issues when many
    # modules have been imported
    if self.get_platform() == 'cygwin':
        self.announce('WARNING: skipping import check for Cygwin-based "%s"'
                      % ext.name)
        return

    return # Workaround for cross compilation.

    # NOTE(review): everything below is unreachable because of the
    # cross-compilation workaround return above.
    ext_filename = os.path.join(
        self.build_lib,
        self.get_ext_filename(self.get_ext_fullname(ext.name)))
    try:
        imp.load_dynamic(ext.name, ext_filename)
    except ImportError, why:
        self.failed.append(ext.name)
        self.announce('*** WARNING: renaming "%s" since importing it'
                      ' failed: %s' % (ext.name, why), level=3)
        assert not self.inplace
        basename, tail = os.path.splitext(ext_filename)
        newname = basename + "_failed" + tail
        if os.path.exists(newname):
            os.remove(newname)
        os.rename(ext_filename, newname)

        # XXX -- This relies on a Vile HACK in
        # distutils.command.build_ext.build_extension().  The
        # _built_objects attribute is stored there strictly for
        # use here.
        # If there is a failure, _built_objects may not be there,
        # so catch the AttributeError and move on.
        try:
            for filename in self._built_objects:
                os.remove(filename)
        except AttributeError:
            self.announce('unable to remove files (ignored)')
    except:
        exc_type, why, tb = sys.exc_info()
        self.announce('*** WARNING: importing extension "%s" '
                      'failed with %s: %s' % (ext.name, exc_type, why),
                      level=3)
        self.failed.append(ext.name)
def get_platform(self):
    """Normalize sys.platform to a short platform-family name."""
    for family in ('cygwin', 'beos', 'darwin', 'atheos', 'osf1'):
        if sys.platform.startswith(family):
            return family
    return sys.platform
def detect_modules(self):
# Ensure that android libs are always used.
lib_dirs = [
'/out/target/product/generic/obj/lib',
'/out/target/product/generic/system/lib',
]
for lib_dir in lib_dirs:
add_dir_to_list(self.compiler.library_dirs, ANDROID_SRC + lib_dir)
# Ensure that android includes are always used.
include_dirs = [
'/system/core/include',
'/hardware/libhardware/include',
'/hardware/ril/include',
'/dalvik/libnativehelper/include',
'/frameworks/base/include',
'/external/skia/include',
'/external/zlib',
'/external/openssl/include',
'/external/openssl/crypto',
'/out/target/product/generic/obj/include',
'/bionic/libc/arch-arm/include',
'/bionic/libc/include',
'/bionic/libstdc++/include',
'/bionic/libc/kernel/common',
'/bionic/libc/kernel/arch-arm',
'/bionic/libm/include',
'/bionic/libm/include/arch/arm',
'/bionic/libthread_db/include',
'/bionic/libm/arm',
'/bionic/libm',
'/kernel/include',
'/out/target/product/generic/obj/SHARED_LIBRARIES/libm_intermediates',
]
for include_dir in include_dirs:
add_dir_to_list(self.compiler.include_dirs, ANDROID_SRC + include_dir)
# Add paths specified in the environment variables LDFLAGS and
# CPPFLAGS for header and library files.
# We must get the values from the Makefile and not the environment
# directly since an inconsistently reproducible issue comes up where
# the environment variable is not set even though the value were passed
# into configure and stored in the Makefile (issue found on OS X 10.3).
for env_var, arg_name, dir_list in (
('LDFLAGS', '-R', self.compiler.runtime_library_dirs),
('LDFLAGS', '-L', self.compiler.library_dirs),
('CPPFLAGS', '-I', self.compiler.include_dirs)):
env_val = sysconfig.get_config_var(env_var)
if env_val:
# To prevent optparse from raising an exception about any
# options in env_val that it doesn't know about we strip out
# all double dashes and any dashes followed by a character
# that is not for the option we are dealing with.
#
# Please note that order of the regex is important! We must
# strip out double-dashes first so that we don't end up with
# substituting "--Long" to "-Long" and thus lead to "ong" being
# used for a library directory.
env_val = re.sub(r'(^|\s+)-(-|(?!%s))' % arg_name[1],
' ', env_val)
parser = optparse.OptionParser()
# Make sure that allowing args interspersed with options is
# allowed
parser.allow_interspersed_args = True
parser.error = lambda msg: None
parser.add_option(arg_name, dest="dirs", action="append")
options = parser.parse_args(env_val.split())[0]
if options.dirs:
for directory in reversed(options.dirs):
add_dir_to_list(dir_list, directory)
try:
have_unicode = unicode
except NameError:
have_unicode = 0
# lib_dirs and inc_dirs are used to search for files;
# if a file is found in one of those directories, it can
# be assumed that no additional -I,-L directives are needed.
lib_dirs = self.compiler.library_dirs
inc_dirs = self.compiler.include_dirs
exts = []
missing = []
config_h = sysconfig.get_config_h_filename()
config_h_vars = sysconfig.parse_config_h(open(config_h))
platform = self.get_platform()
(srcdir,) = sysconfig.get_config_vars('srcdir')
# Check for AtheOS which has libraries in non-standard locations
if platform == 'atheos':
lib_dirs += ['/system/libs', '/atheos/autolnk/lib']
lib_dirs += os.getenv('LIBRARY_PATH', '').split(os.pathsep)
inc_dirs += ['/system/include', '/atheos/autolnk/include']
inc_dirs += os.getenv('C_INCLUDE_PATH', '').split(os.pathsep)
# OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb)
if platform in ['osf1', 'unixware7', 'openunix8']:
lib_dirs += ['/usr/ccs/lib']
if platform == 'darwin':
# This should work on any unixy platform ;-)
# If the user has bothered specifying additional -I and -L flags
# in OPT and LDFLAGS we might as well use them here.
# NOTE: using shlex.split would technically be more correct, but
# also gives a bootstrap problem. Let's hope nobody uses directories
# with whitespace in the name to store libraries.
cflags, ldflags = sysconfig.get_config_vars(
'CFLAGS', 'LDFLAGS')
for item in cflags.split():
if item.startswith('-I'):
inc_dirs.append(item[2:])
for item in ldflags.split():
if item.startswith('-L'):
lib_dirs.append(item[2:])
# Check for MacOS X, which doesn't need libm.a at all
math_libs = ['m']
if platform in ['darwin', 'beos', 'mac']:
math_libs = []
# XXX Omitted modules: gl, pure, dl, SGI-specific modules
#
# The following modules are all pretty straightforward, and compile
# on pretty much any POSIXish platform.
#
# Some modules that are normally always on:
exts.append( Extension('_weakref', ['_weakref.c']) )
# array objects
exts.append( Extension('array', ['arraymodule.c']) )
# complex math library functions
exts.append( Extension('cmath', ['cmathmodule.c'],
libraries=math_libs) )
# math library functions, e.g. sin()
exts.append( Extension('math', ['mathmodule.c'],
libraries=math_libs) )
# fast string operations implemented in C
exts.append( Extension('strop', ['stropmodule.c']) )
# time operations and variables
exts.append( Extension('time', ['timemodule.c'],
libraries=math_libs) )
exts.append( Extension('datetime', ['datetimemodule.c', 'timemodule.c'],
libraries=math_libs) )
# fast iterator tools implemented in C
exts.append( Extension("itertools", ["itertoolsmodule.c"]) )
# code that will be builtins in the future, but conflict with the
# current builtins
exts.append( Extension('future_builtins', ['future_builtins.c']) )
# random number generator implemented in C
exts.append( Extension("_random", ["_randommodule.c"]) )
# high-performance collections
exts.append( Extension("_collections", ["_collectionsmodule.c"]) )
# bisect
exts.append( Extension("_bisect", ["_bisectmodule.c"]) )
# heapq
exts.append( Extension("_heapq", ["_heapqmodule.c"]) )
# operator.add() and similar goodies
exts.append( Extension('operator', ['operator.c']) )
# Python 3.0 _fileio module
exts.append( Extension("_fileio", ["_fileio.c"]) )
# Python 3.0 _bytesio module
exts.append( Extension("_bytesio", ["_bytesio.c"]) )
# _functools
exts.append( Extension("_functools", ["_functoolsmodule.c"]) )
# _json speedups
exts.append( Extension("_json", ["_json.c"]) )
# Python C API test module
exts.append( Extension('_testcapi', ['_testcapimodule.c'],
depends=['testcapi_long.h']) )
# profilers (_lsprof is for cProfile.py)
exts.append( Extension('_hotshot', ['_hotshot.c']) )
exts.append( Extension('_lsprof', ['_lsprof.c', 'rotatingtree.c']) )
# static Unicode character database
if have_unicode:
exts.append( Extension('unicodedata', ['unicodedata.c']) )
else:
missing.append('unicodedata')
# access to ISO C locale support
data = open('pyconfig.h').read()
m = re.search(r"#s*define\s+WITH_LIBINTL\s+1\s*", data)
if m is not None:
locale_libs = ['intl']
else:
locale_libs = []
if platform == 'darwin':
locale_extra_link_args = ['-framework', 'CoreFoundation']
else:
locale_extra_link_args = []
exts.append( Extension('_locale', ['_localemodule.c'],
libraries=locale_libs,
extra_link_args=locale_extra_link_args) )
# Modules with some UNIX dependencies -- on by default:
# (If you have a really backward UNIX, select and socket may not be
# supported...)
# fcntl(2) and ioctl(2)
exts.append( Extension('fcntl', ['fcntlmodule.c']) )
if platform not in ['mac']:
# pwd(3)
exts.append( Extension('pwd', ['pwdmodule.c']) )
# grp(3)
exts.append( Extension('grp', ['grpmodule.c']) )
# spwd, shadow passwords
if (config_h_vars.get('HAVE_GETSPNAM', False) or
config_h_vars.get('HAVE_GETSPENT', False)):
exts.append( Extension('spwd', ['spwdmodule.c']) )
else:
missing.append('spwd')
else:
missing.extend(['pwd', 'grp', 'spwd'])
# select(2); not on ancient System V
exts.append( Extension('select', ['selectmodule.c']) )
# Fred Drake's interface to the Python parser
exts.append( Extension('parser', ['parsermodule.c']) )
# cStringIO and cPickle
exts.append( Extension('cStringIO', ['cStringIO.c']) )
exts.append( Extension('cPickle', ['cPickle.c']) )
# Memory-mapped files (also works on Win32).
if platform not in ['atheos', 'mac']:
exts.append( Extension('mmap', ['mmapmodule.c']) )
else:
missing.append('mmap')
# Lance Ellinghaus's syslog module
if platform not in ['mac']:
# syslog daemon interface
exts.append( Extension('syslog', ['syslogmodule.c']) )
else:
missing.append('syslog')
# George Neville-Neil's timing module:
# Deprecated in PEP 4 http://www.python.org/peps/pep-0004.html
# http://mail.python.org/pipermail/python-dev/2006-January/060023.html
#exts.append( Extension('timing', ['timingmodule.c']) )
#
# Here ends the simple stuff. From here on, modules need certain
# libraries, are platform-specific, or present other surprises.
#
# Multimedia modules
# These don't work for 64-bit platforms!!!
# These represent audio samples or images as strings:
# Operations on audio samples
# According to #993173, this one should actually work fine on
# 64-bit platforms.
exts.append( Extension('audioop', ['audioop.c']) )
# Disabled on 64-bit platforms
if True: #sys.maxint != 9223372036854775807L:
# Operations on images
exts.append( Extension('imageop', ['imageop.c']) )
else:
missing.extend(['imageop'])
# readline
do_readline = self.compiler.find_library_file(lib_dirs, 'readline')
if platform == 'darwin': # and os.uname()[2] < '9.':
# MacOSX 10.4 has a broken readline. Don't try to build
# the readline module unless the user has installed a fixed
# readline package
# FIXME: The readline emulation on 10.5 is better, but the
# readline module doesn't compile out of the box.
if find_file('readline/rlconf.h', inc_dirs, []) is None:
do_readline = False
if do_readline:
if sys.platform == 'darwin':
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
                # for dynamic libraries on the entire path.
                # This way a statically linked custom readline gets picked up
# before the (broken) dynamic library in /usr/lib.
readline_extra_link_args = ('-Wl,-search_paths_first',)
else:
readline_extra_link_args = ()
readline_libs = ['readline']
if self.compiler.find_library_file(lib_dirs,
'ncursesw'):
readline_libs.append('ncursesw')
elif self.compiler.find_library_file(lib_dirs,
'ncurses'):
readline_libs.append('ncurses')
elif self.compiler.find_library_file(lib_dirs, 'curses'):
readline_libs.append('curses')
elif self.compiler.find_library_file(lib_dirs +
['/usr/lib/termcap'],
'termcap'):
readline_libs.append('termcap')
exts.append( Extension('readline', ['readline.c'],
library_dirs=['/usr/lib/termcap'],
extra_link_args=readline_extra_link_args,
libraries=readline_libs) )
else:
missing.append('readline')
if platform not in ['mac']:
# crypt module.
if self.compiler.find_library_file(lib_dirs, 'crypt'):
libs = ['crypt']
else:
libs = []
exts.append( Extension('crypt', ['cryptmodule.c'], libraries=libs) )
else:
missing.append('crypt')
# CSV files
exts.append( Extension('_csv', ['_csv.c']) )
# socket(2)
exts.append( Extension('_socket', ['socketmodule.c'],
depends = ['socketmodule.h']) )
# Detect SSL support for the socket module (via _ssl)
search_for_ssl_incs_in = [ANDROID_SRC + '/external/openssl/ssl/include']
ssl_incs = find_file('openssl/ssl.h', inc_dirs, search_for_ssl_incs_in)
if ssl_incs is not None:
krb5_h = find_file('krb5.h', inc_dirs,
['/usr/kerberos/include'])
if krb5_h:
ssl_incs += krb5_h
ssl_libs = find_library_file(self.compiler, 'ssl',lib_dirs,
['/usr/local/ssl/lib',
'/usr/contrib/ssl/lib/'
] )
if (ssl_incs is not None and
ssl_libs is not None):
exts.append( Extension('_ssl', ['_ssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto'],
depends = ['socketmodule.h']), )
else:
missing.append('_ssl')
# find out which version of OpenSSL we have
openssl_ver = 0
openssl_ver_re = re.compile(
'^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
for ssl_inc_dir in inc_dirs + search_for_ssl_incs_in:
name = os.path.join(ssl_inc_dir, 'openssl', 'opensslv.h')
if os.path.isfile(name):
try:
incfile = open(name, 'r')
for line in incfile:
m = openssl_ver_re.match(line)
if m:
openssl_ver = eval(m.group(1))
break
except IOError:
pass
# first version found is what we'll use (as the compiler should)
if openssl_ver:
break
#print 'openssl_ver = 0x%08x' % openssl_ver
if (ssl_incs is not None and
ssl_libs is not None and
openssl_ver >= 0x00907000):
# The _hashlib module wraps optimized implementations
# of hash functions from the OpenSSL library.
exts.append( Extension('_hashlib', ['_hashopenssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto']) )
# these aren't strictly missing since they are unneeded.
#missing.extend(['_sha', '_md5'])
else:
# The _sha module implements the SHA1 hash algorithm.
exts.append( Extension('_sha', ['shamodule.c']) )
# The _md5 module implements the RSA Data Security, Inc. MD5
# Message-Digest Algorithm, described in RFC 1321. The
# necessary files md5.c and md5.h are included here.
exts.append( Extension('_md5',
sources = ['md5module.c', 'md5.c'],
depends = ['md5.h']) )
missing.append('_hashlib')
if (openssl_ver < 0x00908000):
# OpenSSL doesn't do these until 0.9.8 so we'll bring our own hash
exts.append( Extension('_sha256', ['sha256module.c']) )
exts.append( Extension('_sha512', ['sha512module.c']) )
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
# your machine, though none are defined by default because of library
# dependencies. The Python module anydbm.py provides an
# implementation independent wrapper for these; dumbdbm.py provides
# similar functionality (but slower of course) implemented in Python.
# Sleepycat^WOracle Berkeley DB interface.
# http://www.oracle.com/database/berkeley-db/db/index.html
#
# This requires the Sleepycat^WOracle DB code. The supported versions
# are set below. Visit the URL above to download
# a release. Most open source OSes come with one or more
# versions of BerkeleyDB already installed.
max_db_ver = (4, 7)
min_db_ver = (3, 3)
db_setup_debug = False # verbose debug prints from this script?
def allow_db_ver(db_ver):
"""Returns a boolean if the given BerkeleyDB version is acceptable.
Args:
db_ver: A tuple of the version to verify.
"""
if not (min_db_ver <= db_ver <= max_db_ver):
return False
# Use this function to filter out known bad configurations.
if (4, 6) == db_ver[:2]:
# BerkeleyDB 4.6.x is not stable on many architectures.
arch = platform_machine()
if arch not in ('i386', 'i486', 'i586', 'i686',
'x86_64', 'ia64'):
return False
return True
def gen_db_minor_ver_nums(major):
if major == 4:
for x in range(max_db_ver[1]+1):
if allow_db_ver((4, x)):
yield x
elif major == 3:
for x in (3,):
if allow_db_ver((3, x)):
yield x
else:
raise ValueError("unknown major BerkeleyDB version", major)
# construct a list of paths to look for the header file in on
# top of the normal inc_dirs.
db_inc_paths = [
'/usr/include/db4',
'/usr/local/include/db4',
'/opt/sfw/include/db4',
'/usr/include/db3',
'/usr/local/include/db3',
'/opt/sfw/include/db3',
# Fink defaults (http://fink.sourceforge.net/)
'/sw/include/db4',
'/sw/include/db3',
]
# 4.x minor number specific paths
for x in gen_db_minor_ver_nums(4):
db_inc_paths.append('/usr/include/db4%d' % x)
db_inc_paths.append('/usr/include/db4.%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x)
db_inc_paths.append('/usr/local/include/db4%d' % x)
db_inc_paths.append('/pkg/db-4.%d/include' % x)
db_inc_paths.append('/opt/db-4.%d/include' % x)
# MacPorts default (http://www.macports.org/)
db_inc_paths.append('/opt/local/include/db4%d' % x)
# 3.x minor number specific paths
for x in gen_db_minor_ver_nums(3):
db_inc_paths.append('/usr/include/db3%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x)
db_inc_paths.append('/usr/local/include/db3%d' % x)
db_inc_paths.append('/pkg/db-3.%d/include' % x)
db_inc_paths.append('/opt/db-3.%d/include' % x)
# Add some common subdirectories for Sleepycat DB to the list,
# based on the standard include directories. This way DB3/4 gets
# picked up when it is installed in a non-standard prefix and
# the user has added that prefix into inc_dirs.
std_variants = []
for dn in inc_dirs:
std_variants.append(os.path.join(dn, 'db3'))
std_variants.append(os.path.join(dn, 'db4'))
for x in gen_db_minor_ver_nums(4):
std_variants.append(os.path.join(dn, "db4%d"%x))
std_variants.append(os.path.join(dn, "db4.%d"%x))
for x in gen_db_minor_ver_nums(3):
std_variants.append(os.path.join(dn, "db3%d"%x))
std_variants.append(os.path.join(dn, "db3.%d"%x))
db_inc_paths = std_variants + db_inc_paths
db_inc_paths = [p for p in db_inc_paths if os.path.exists(p)]
db_ver_inc_map = {}
class db_found(Exception): pass
try:
# See whether there is a Sleepycat header in the standard
# search path.
for d in inc_dirs + db_inc_paths:
f = os.path.join(d, "db.h")
if db_setup_debug: print "db: looking for db.h in", f
if os.path.exists(f):
f = open(f).read()
m = re.search(r"#define\WDB_VERSION_MAJOR\W(\d+)", f)
if m:
db_major = int(m.group(1))
m = re.search(r"#define\WDB_VERSION_MINOR\W(\d+)", f)
db_minor = int(m.group(1))
db_ver = (db_major, db_minor)
# Avoid 4.6 prior to 4.6.21 due to a BerkeleyDB bug
if db_ver == (4, 6):
m = re.search(r"#define\WDB_VERSION_PATCH\W(\d+)", f)
db_patch = int(m.group(1))
if db_patch < 21:
print "db.h:", db_ver, "patch", db_patch,
print "being ignored (4.6.x must be >= 4.6.21)"
continue
if ( (not db_ver_inc_map.has_key(db_ver)) and
allow_db_ver(db_ver) ):
# save the include directory with the db.h version
# (first occurrence only)
db_ver_inc_map[db_ver] = d
if db_setup_debug:
print "db.h: found", db_ver, "in", d
else:
# we already found a header for this library version
if db_setup_debug: print "db.h: ignoring", d
else:
# ignore this header, it didn't contain a version number
if db_setup_debug:
print "db.h: no version number version in", d
db_found_vers = db_ver_inc_map.keys()
db_found_vers.sort()
while db_found_vers:
db_ver = db_found_vers.pop()
db_incdir = db_ver_inc_map[db_ver]
# check lib directories parallel to the location of the header
db_dirs_to_check = [
db_incdir.replace("include", 'lib64'),
db_incdir.replace("include", 'lib'),
]
db_dirs_to_check = filter(os.path.isdir, db_dirs_to_check)
                # Look for a version specific db-X.Y before an ambiguous dbX
# XXX should we -ever- look for a dbX name? Do any
# systems really not name their library by version and
# symlink to more general names?
for dblib in (('db-%d.%d' % db_ver),
('db%d%d' % db_ver),
('db%d' % db_ver[0])):
dblib_file = self.compiler.find_library_file(
db_dirs_to_check + lib_dirs, dblib )
if dblib_file:
dblib_dir = [ os.path.abspath(os.path.dirname(dblib_file)) ]
raise db_found
else:
if db_setup_debug: print "db lib: ", dblib, "not found"
except db_found:
if db_setup_debug:
print "bsddb using BerkeleyDB lib:", db_ver, dblib
print "bsddb lib dir:", dblib_dir, " inc dir:", db_incdir
db_incs = [db_incdir]
dblibs = [dblib]
# We add the runtime_library_dirs argument because the
# BerkeleyDB lib we're linking against often isn't in the
# system dynamic library search path. This is usually
# correct and most trouble free, but may cause problems in
# some unusual system configurations (e.g. the directory
# is on an NFS server that goes away).
exts.append(Extension('_bsddb', ['_bsddb.c'],
depends = ['bsddb.h'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
libraries=dblibs))
else:
if db_setup_debug: print "db: no appropriate library found"
db_incs = None
dblibs = []
dblib_dir = None
missing.append('_bsddb')
# The sqlite interface
sqlite_setup_debug = True # verbose debug prints from this script?
# We hunt for #define SQLITE_VERSION "n.n.n"
# We need to find >= sqlite version 3.0.8
sqlite_incdir = sqlite_libdir = None
sqlite_inc_paths = [ANDROID_SRC + '/external/sqlite/dist']
MIN_SQLITE_VERSION_NUMBER = (3, 0, 8)
MIN_SQLITE_VERSION = ".".join([str(x)
for x in MIN_SQLITE_VERSION_NUMBER])
# Scan the default include directories before the SQLite specific
# ones. This allows one to override the copy of sqlite on OSX,
# where /usr/include contains an old version of sqlite.
for d in sqlite_inc_paths:
f = os.path.join(d, "sqlite3.h")
if os.path.exists(f):
if sqlite_setup_debug: print "sqlite: found %s"%f
incf = open(f).read()
m = re.search(
r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"(.*)"', incf)
if m:
sqlite_version = m.group(1)
sqlite_version_tuple = tuple([int(x)
for x in sqlite_version.split(".")])
if sqlite_version_tuple >= MIN_SQLITE_VERSION_NUMBER:
# we win!
if sqlite_setup_debug:
print "%s/sqlite3.h: version %s"%(d, sqlite_version)
sqlite_incdir = d
break
else:
if sqlite_setup_debug:
print "%s: version %d is too old, need >= %s"%(d,
sqlite_version, MIN_SQLITE_VERSION)
elif sqlite_setup_debug:
print "sqlite: %s had no SQLITE_VERSION"%(f,)
if sqlite_incdir:
sqlite_libfile = os.path.join(ANDROID_SRC, 'out/target/product/generic/system/lib/libsqlite.so')
sqlite_libdir = [os.path.abspath(os.path.dirname(sqlite_libfile))]
if sqlite_incdir and sqlite_libdir:
sqlite_srcs = ['_sqlite/cache.c',
'_sqlite/connection.c',
'_sqlite/cursor.c',
'_sqlite/microprotocols.c',
'_sqlite/module.c',
'_sqlite/prepare_protocol.c',
'_sqlite/row.c',
'_sqlite/statement.c',
'_sqlite/util.c', ]
sqlite_defines = []
if sys.platform != "win32":
sqlite_defines.append(('MODULE_NAME', '"sqlite3"'))
else:
sqlite_defines.append(('MODULE_NAME', '\\"sqlite3\\"'))
if sys.platform == 'darwin':
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
                # for dynamic libraries on the entire path.
                # This way a statically linked custom sqlite gets picked up
# before the dynamic library in /usr/lib.
sqlite_extra_link_args = ('-Wl,-search_paths_first',)
else:
sqlite_extra_link_args = ()
exts.append(Extension('_sqlite3', sqlite_srcs,
define_macros=sqlite_defines,
include_dirs=["Modules/_sqlite",
sqlite_incdir],
library_dirs=sqlite_libdir,
runtime_library_dirs=sqlite_libdir,
extra_link_args=sqlite_extra_link_args,
libraries=["sqlite",]))
else:
missing.append('_sqlite3')
# Look for Berkeley db 1.85. Note that it is built as a different
# module name so it can be included even when later versions are
# available. A very restrictive search is performed to avoid
# accidentally building this module with a later version of the
        # underlying db library. Many BSD-ish Unixes incorporate db 1.85
# symbols into libc and place the include file in /usr/include.
#
# If the better bsddb library can be built (db_incs is defined)
# we do not build this one. Otherwise this build will pick up
# the more recent berkeleydb's db.h file first in the include path
# when attempting to compile and it will fail.
f = "/usr/include/db.h"
if os.path.exists(f) and not db_incs:
data = open(f).read()
m = re.search(r"#s*define\s+HASHVERSION\s+2\s*", data)
if m is not None:
# bingo - old version used hash file format version 2
### XXX this should be fixed to not be platform-dependent
### but I don't have direct access to an osf1 platform and
### seemed to be muffing the search somehow
libraries = platform == "osf1" and ['db'] or None
if libraries is not None:
exts.append(Extension('bsddb185', ['bsddbmodule.c'],
libraries=libraries))
else:
exts.append(Extension('bsddb185', ['bsddbmodule.c']))
else:
missing.append('bsddb185')
else:
missing.append('bsddb185')
# The standard Unix dbm module:
if platform not in ['cygwin']:
if find_file("ndbm.h", inc_dirs, []) is not None:
# Some systems have -lndbm, others don't
if self.compiler.find_library_file(lib_dirs, 'ndbm'):
ndbm_libs = ['ndbm']
else:
ndbm_libs = []
exts.append( Extension('dbm', ['dbmmodule.c'],
define_macros=[('HAVE_NDBM_H',None)],
libraries = ndbm_libs ) )
elif self.compiler.find_library_file(lib_dirs, 'gdbm'):
gdbm_libs = ['gdbm']
if self.compiler.find_library_file(lib_dirs, 'gdbm_compat'):
gdbm_libs.append('gdbm_compat')
if find_file("gdbm/ndbm.h", inc_dirs, []) is not None:
exts.append( Extension(
'dbm', ['dbmmodule.c'],
define_macros=[('HAVE_GDBM_NDBM_H',None)],
libraries = gdbm_libs ) )
elif find_file("gdbm-ndbm.h", inc_dirs, []) is not None:
exts.append( Extension(
'dbm', ['dbmmodule.c'],
define_macros=[('HAVE_GDBM_DASH_NDBM_H',None)],
libraries = gdbm_libs ) )
else:
missing.append('dbm')
elif db_incs is not None:
exts.append( Extension('dbm', ['dbmmodule.c'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
define_macros=[('HAVE_BERKDB_H',None),
('DB_DBM_HSEARCH',None)],
libraries=dblibs))
else:
missing.append('dbm')
# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm:
if (self.compiler.find_library_file(lib_dirs, 'gdbm')):
exts.append( Extension('gdbm', ['gdbmmodule.c'],
libraries = ['gdbm'] ) )
else:
missing.append('gdbm')
# Unix-only modules
if platform not in ['mac', 'win32']:
# Steen Lumholt's termios module
exts.append( Extension('termios', ['termios.c']) )
# Jeremy Hylton's rlimit interface
if platform not in ['atheos']:
exts.append( Extension('resource', ['resource.c']) )
else:
missing.append('resource')
# Sun yellow pages. Some systems have the functions in libc.
if platform not in ['cygwin', 'atheos', 'qnx6']:
if (self.compiler.find_library_file(lib_dirs, 'nsl')):
libs = ['nsl']
else:
libs = []
exts.append( Extension('nis', ['nismodule.c'],
libraries = libs) )
else:
missing.append('nis')
else:
missing.extend(['nis', 'resource', 'termios'])
# Curses support, requiring the System V version of curses, often
# provided by the ncurses library.
panel_library = 'panel'
if (self.compiler.find_library_file(lib_dirs, 'ncursesw')):
curses_libs = ['ncursesw']
# Bug 1464056: If _curses.so links with ncursesw,
# _curses_panel.so must link with panelw.
panel_library = 'panelw'
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
elif (self.compiler.find_library_file(lib_dirs, 'ncurses')):
curses_libs = ['ncurses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
elif (self.compiler.find_library_file(lib_dirs, 'curses')
and platform != 'darwin'):
# OSX has an old Berkeley curses, not good enough for
# the _curses module.
if (self.compiler.find_library_file(lib_dirs, 'terminfo')):
curses_libs = ['curses', 'terminfo']
elif (self.compiler.find_library_file(lib_dirs, 'termcap')):
curses_libs = ['curses', 'termcap']
else:
curses_libs = ['curses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
else:
missing.append('_curses')
# If the curses module is enabled, check for the panel module
if (module_enabled(exts, '_curses') and
self.compiler.find_library_file(lib_dirs, panel_library)):
exts.append( Extension('_curses_panel', ['_curses_panel.c'],
libraries = [panel_library] + curses_libs) )
else:
missing.append('_curses_panel')
# Andrew Kuchling's zlib module. Note that some versions of zlib
# 1.1.3 have security problems. See CERT Advisory CA-2002-07:
# http://www.cert.org/advisories/CA-2002-07.html
#
# zlib 1.1.4 is fixed, but at least one vendor (RedHat) has decided to
# patch its zlib 1.1.3 package instead of upgrading to 1.1.4. For
# now, we still accept 1.1.3, because we think it's difficult to
# exploit this in Python, and we'd rather make it RedHat's problem
# than our problem <wink>.
#
# You can upgrade zlib to version 1.1.4 yourself by going to
# http://www.gzip.org/zlib/
zlib_inc = find_file('zlib.h', [], inc_dirs)
have_zlib = False
if zlib_inc is not None:
zlib_h = zlib_inc[0] + '/zlib.h'
version = '"0.0.0"'
version_req = '"1.1.3"'
fp = open(zlib_h)
while 1:
line = fp.readline()
if not line:
break
if line.startswith('#define ZLIB_VERSION'):
version = line.split()[2]
break
if version >= version_req:
if (self.compiler.find_library_file(lib_dirs, 'z')):
if sys.platform == "darwin":
zlib_extra_link_args = ('-Wl,-search_paths_first',)
else:
zlib_extra_link_args = ()
exts.append( Extension('zlib', ['zlibmodule.c'],
libraries = ['z'],
extra_link_args = zlib_extra_link_args))
have_zlib = True
else:
missing.append('zlib')
else:
missing.append('zlib')
else:
missing.append('zlib')
# Helper module for various ascii-encoders. Uses zlib for an optimized
# crc32 if we have it. Otherwise binascii uses its own.
if have_zlib:
extra_compile_args = ['-DUSE_ZLIB_CRC32']
libraries = ['z']
extra_link_args = zlib_extra_link_args
else:
extra_compile_args = []
libraries = []
extra_link_args = []
exts.append( Extension('binascii', ['binascii.c'],
extra_compile_args = extra_compile_args,
libraries = libraries,
extra_link_args = extra_link_args) )
# Gustavo Niemeyer's bz2 module.
if (self.compiler.find_library_file(lib_dirs, 'bz2')):
if sys.platform == "darwin":
bz2_extra_link_args = ('-Wl,-search_paths_first',)
else:
bz2_extra_link_args = ()
exts.append( Extension('bz2', ['bz2module.c'],
libraries = ['bz2'],
extra_link_args = bz2_extra_link_args) )
else:
missing.append('bz2')
# Interface to the Expat XML parser
#
# Expat was written by James Clark and is now maintained by a
# group of developers on SourceForge; see www.libexpat.org for
# more information. The pyexpat module was written by Paul
# Prescod after a prototype by Jack Jansen. The Expat source
# is included in Modules/expat/. Usage of a system
# shared libexpat.so/expat.dll is not advised.
#
# More information on Expat can be found at www.libexpat.org.
#
define_macros = [('HAVE_EXPAT_CONFIG_H', '1')]
expat_include_dirs = [os.path.join(ANDROID_SRC, 'external', 'expat'),
os.path.join(ANDROID_SRC, 'external', 'expat', 'lib')]
exts.append(Extension('pyexpat',
define_macros = define_macros,
include_dirs = expat_include_dirs,
sources = ['pyexpat.c',
'expat/xmlparse.c',
'expat/xmlrole.c',
'expat/xmltok.c',
],
libraries=['expat']))
# Fredrik Lundh's cElementTree module. Note that this also
# uses expat (via the CAPI hook in pyexpat).
if os.path.isfile(os.path.join(srcdir, 'Modules', '_elementtree.c')):
define_macros.append(('USE_PYEXPAT_CAPI', None))
exts.append(Extension('_elementtree',
define_macros = define_macros,
include_dirs = expat_include_dirs,
sources = ['_elementtree.c'],
))
else:
missing.append('_elementtree')
# Hye-Shik Chang's CJKCodecs modules.
if have_unicode:
exts.append(Extension('_multibytecodec',
['cjkcodecs/multibytecodec.c']))
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
exts.append(Extension('_codecs_%s' % loc,
['cjkcodecs/_codecs_%s.c' % loc]))
else:
missing.append('_multibytecodec')
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
missing.append('_codecs_%s' % loc)
# Dynamic loading module
if True: #sys.maxint == 0x7fffffff:
# This requires sizeof(int) == sizeof(long) == sizeof(char*)
dl_inc = find_file('dlfcn.h', [], inc_dirs)
if (dl_inc is not None) and (platform not in ['atheos']):
exts.append( Extension('dl', ['dlmodule.c']) )
else:
missing.append('dl')
else:
missing.append('dl')
# Thomas Heller's _ctypes module
self.detect_ctypes(inc_dirs, lib_dirs)
# Richard Oudkerk's multiprocessing module
if platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
elif platform == 'darwin': # Mac OSX
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=0,
HAVE_FD_TRANSFER=1,
HAVE_BROKEN_SEM_GETVALUE=1
)
libraries = []
elif platform == 'cygwin': # Cygwin
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=0,
HAVE_BROKEN_SEM_UNLINK=1
)
libraries = []
elif platform in ('freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'freebsd8'):
# FreeBSD's P1003.1b semaphore support is very experimental
# and has many known problems. (as of June 2008)
macros = dict( # FreeBSD
HAVE_SEM_OPEN=0,
HAVE_SEM_TIMEDWAIT=0,
HAVE_FD_TRANSFER=1,
)
libraries = []
elif platform.startswith('openbsd'):
macros = dict( # OpenBSD
HAVE_SEM_OPEN=0, # Not implemented
HAVE_SEM_TIMEDWAIT=0,
HAVE_FD_TRANSFER=1,
)
libraries = []
elif platform.startswith('netbsd'):
macros = dict( # at least NetBSD 5
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=0,
HAVE_FD_TRANSFER=1,
HAVE_BROKEN_SEM_GETVALUE=1
)
libraries = []
else: # Linux and other unices
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=1
)
libraries = ['rt']
if platform == 'win32':
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/semaphore.c',
'_multiprocessing/pipe_connection.c',
'_multiprocessing/socket_connection.c',
'_multiprocessing/win32_functions.c'
]
else:
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/socket_connection.c'
]
if macros.get('HAVE_SEM_OPEN', False):
multiprocessing_srcs.append('_multiprocessing/semaphore.c')
exts.append ( Extension('_multiprocessing', multiprocessing_srcs,
define_macros=macros.items(),
include_dirs=["Modules/_multiprocessing"]))
# End multiprocessing
# Platform-specific libraries
if platform == 'linux2':
# Linux-specific modules
exts.append( Extension('linuxaudiodev', ['linuxaudiodev.c']) )
else:
missing.append('linuxaudiodev')
if platform in ('linux2', 'freebsd4', 'freebsd5', 'freebsd6',
'freebsd7', 'freebsd8'):
exts.append( Extension('ossaudiodev', ['ossaudiodev.c']) )
else:
missing.append('ossaudiodev')
if platform == 'sunos5':
# SunOS specific modules
exts.append( Extension('sunaudiodev', ['sunaudiodev.c']) )
else:
missing.append('sunaudiodev')
if platform == 'darwin' and ("--disable-toolbox-glue" not in
sysconfig.get_config_var("CONFIG_ARGS")):
if os.uname()[2] > '8.':
# We're on Mac OS X 10.4 or later, the compiler should
# support '-Wno-deprecated-declarations'. This will
                # suppress deprecation warnings for the Carbon extensions,
# these extensions wrap the Carbon APIs and even those
# parts that are deprecated.
carbon_extra_compile_args = ['-Wno-deprecated-declarations']
else:
carbon_extra_compile_args = []
# Mac OS X specific modules.
            def macSrcExists(name1, name2=''):
                # Return whether a source file exists under Mac/Modules,
                # given one or (optionally) two path components.
                # A falsy name1 short-circuits to None, which callers treat
                # the same as "does not exist".
                if not name1:
                    return None
                names = (name1,)
                if name2:
                    names = (name1, name2)
                # srcdir is the Python source directory, set elsewhere in
                # this script.
                path = os.path.join(srcdir, 'Mac', 'Modules', *names)
                return os.path.exists(path)
            def addMacExtension(name, kwds, extra_srcs=[]):
                # Locate the source file(s) for the Mac extension *name* and
                # append a matching Extension to the enclosing ``exts`` list.
                # *kwds* is passed through to the Extension constructor;
                # *extra_srcs* lists extra source files to compile in.
                # NOTE(review): the mutable default for extra_srcs is only
                # safe because this function never mutates it.
                dirname = ''
                if name[0] == '_':
                    # Underscore-prefixed extensions live in a subdirectory
                    # named after them without the underscore.
                    dirname = name[1:].lower()
                cname = name + '.c'
                cmodulename = name + 'module.c'
                # Check for NNN.c, NNNmodule.c, _nnn/NNN.c, _nnn/NNNmodule.c
                if macSrcExists(cname):
                    srcs = [cname]
                elif macSrcExists(cmodulename):
                    srcs = [cmodulename]
                elif macSrcExists(dirname, cname):
                    # XXX(nnorwitz): If all the names ended with module, we
                    # wouldn't need this condition. ibcarbon is the only one.
                    srcs = [os.path.join(dirname, cname)]
                elif macSrcExists(dirname, cmodulename):
                    srcs = [os.path.join(dirname, cmodulename)]
                else:
                    raise RuntimeError("%s not found" % name)
                # Here's the whole point: add the extension with sources
                exts.append(Extension(name, srcs + extra_srcs, **kwds))
# Core Foundation
core_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework', 'CoreFoundation'],
}
addMacExtension('_CF', core_kwds, ['cf/pycfbridge.c'])
addMacExtension('autoGIL', core_kwds)
# Carbon
carbon_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework', 'Carbon'],
}
CARBON_EXTS = ['ColorPicker', 'gestalt', 'MacOS', 'Nav',
'OSATerminology', 'icglue',
# All these are in subdirs
'_AE', '_AH', '_App', '_CarbonEvt', '_Cm', '_Ctl',
'_Dlg', '_Drag', '_Evt', '_File', '_Folder', '_Fm',
'_Help', '_Icn', '_IBCarbon', '_List',
'_Menu', '_Mlte', '_OSA', '_Res', '_Qd', '_Qdoffs',
'_Scrap', '_Snd', '_TE',
]
for name in CARBON_EXTS:
addMacExtension(name, carbon_kwds)
# Workaround for a bug in the version of gcc shipped with Xcode 3.
# The _Win extension should build just like the other Carbon extensions, but
# this actually results in a hard crash of the linker.
#
if '-arch ppc64' in cflags and '-arch ppc' in cflags:
win_kwds = {'extra_compile_args': carbon_extra_compile_args + ['-arch', 'i386', '-arch', 'ppc'],
'extra_link_args': ['-framework', 'Carbon', '-arch', 'i386', '-arch', 'ppc'],
}
addMacExtension('_Win', win_kwds)
else:
addMacExtension('_Win', carbon_kwds)
# Application Services & QuickTime
app_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework','ApplicationServices'],
}
addMacExtension('_Launch', app_kwds)
addMacExtension('_CG', app_kwds)
exts.append( Extension('_Qt', ['qt/_Qtmodule.c'],
extra_compile_args=carbon_extra_compile_args,
extra_link_args=['-framework', 'QuickTime',
'-framework', 'Carbon']) )
self.extensions.extend(exts)
# Call the method for detecting whether _tkinter can be compiled
self.detect_tkinter(inc_dirs, lib_dirs)
if '_tkinter' not in [e.name for e in self.extensions]:
missing.append('_tkinter')
return missing
def detect_tkinter_darwin(self, inc_dirs, lib_dirs):
# The _tkinter module, using frameworks. Since frameworks are quite
# different the UNIX search logic is not sharable.
from os.path import join, exists
framework_dirs = [
'/Library/Frameworks',
'/System/Library/Frameworks/',
join(os.getenv('HOME'), '/Library/Frameworks')
]
# Find the directory that contains the Tcl.framework and Tk.framework
# bundles.
# XXX distutils should support -F!
for F in framework_dirs:
# both Tcl.framework and Tk.framework should be present
for fw in 'Tcl', 'Tk':
if not exists(join(F, fw + '.framework')):
break
else:
# ok, F is now directory with both frameworks. Continure
# building
break
else:
# Tk and Tcl frameworks not found. Normal "unix" tkinter search
# will now resume.
return 0
# For 8.4a2, we must add -I options that point inside the Tcl and Tk
# frameworks. In later release we should hopefully be able to pass
# the -F option to gcc, which specifies a framework lookup path.
#
include_dirs = [
join(F, fw + '.framework', H)
for fw in 'Tcl', 'Tk'
for H in 'Headers', 'Versions/Current/PrivateHeaders'
]
# For 8.4a2, the X11 headers are not included. Rather than include a
# complicated search, this is a hard-coded path. It could bail out
# if X11 libs are not found...
include_dirs.append('/usr/X11R6/include')
frameworks = ['-framework', 'Tcl', '-framework', 'Tk']
# All existing framework builds of Tcl/Tk don't support 64-bit
# architectures.
cflags = sysconfig.get_config_vars('CFLAGS')[0]
archs = re.findall('-arch\s+(\w+)', cflags)
if 'x86_64' in archs or 'ppc64' in archs:
try:
archs.remove('x86_64')
except ValueError:
pass
try:
archs.remove('ppc64')
except ValueError:
pass
for a in archs:
frameworks.append('-arch')
frameworks.append(a)
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)],
include_dirs = include_dirs,
libraries = [],
extra_compile_args = frameworks[2:],
extra_link_args = frameworks,
)
self.extensions.append(ext)
return 1
def detect_tkinter(self, inc_dirs, lib_dirs):
    # The _tkinter module.
    #
    # Probes for Tcl/Tk libraries and headers and, when everything is
    # found, appends the configured Extension to self.extensions.
    # Returns None in all cases; failure is signalled only by the
    # announce() message and the missing extension.

    # Rather than complicate the code below, detecting and building
    # AquaTk is a separate method. Only one Tkinter will be built on
    # Darwin - either AquaTk, if it is found, or X11 based Tk.
    platform = self.get_platform()
    if (platform == 'darwin' and
        self.detect_tkinter_darwin(inc_dirs, lib_dirs)):
        return

    # Assume we haven't found any of the libraries or include files
    # The versions with dots are used on Unix, and the versions without
    # dots on Windows, for detection by cygwin.
    tcllib = tklib = tcl_includes = tk_includes = None
    for version in ['8.5', '85', '8.4', '84', '8.3', '83', '8.2',
                    '82', '8.1', '81', '8.0', '80']:
        tklib = self.compiler.find_library_file(lib_dirs, 'tk' + version)
        tcllib = self.compiler.find_library_file(lib_dirs, 'tcl' + version)
        if tklib and tcllib:
            # Exit the loop when we've found the Tcl/Tk libraries
            break
    # NOTE: 'version' deliberately leaks out of the loop and is reused
    # below to name the headers subdirectory and the link libraries.

    # Now check for the header files
    if tklib and tcllib:
        # Check for the include files on Debian and {Free,Open}BSD, where
        # they're put in /usr/include/{tcl,tk}X.Y
        dotversion = version
        if '.' not in dotversion and "bsd" in sys.platform.lower():
            # OpenBSD and FreeBSD use Tcl/Tk library names like libtcl83.a,
            # but the include subdirs are named like .../include/tcl8.3.
            dotversion = dotversion[:-1] + '.' + dotversion[-1]
        tcl_include_sub = []
        tk_include_sub = []
        for dir in inc_dirs:
            tcl_include_sub += [dir + os.sep + "tcl" + dotversion]
            tk_include_sub += [dir + os.sep + "tk" + dotversion]
        # tk.h may live next to tcl.h, so search the tcl subdirs too.
        tk_include_sub += tcl_include_sub
        tcl_includes = find_file('tcl.h', inc_dirs, tcl_include_sub)
        tk_includes = find_file('tk.h', inc_dirs, tk_include_sub)

    if (tcllib is None or tklib is None or
        tcl_includes is None or tk_includes is None):
        self.announce("INFO: Can't locate Tcl/Tk libs and/or headers", 2)
        return

    # OK... everything seems to be present for Tcl/Tk.
    include_dirs = [] ; libs = [] ; defs = [] ; added_lib_dirs = []
    for dir in tcl_includes + tk_includes:
        if dir not in include_dirs:
            include_dirs.append(dir)

    # Check for various platform-specific directories
    if platform == 'sunos5':
        include_dirs.append('/usr/openwin/include')
        added_lib_dirs.append('/usr/openwin/lib')
    elif os.path.exists('/usr/X11R6/include'):
        include_dirs.append('/usr/X11R6/include')
        added_lib_dirs.append('/usr/X11R6/lib64')
        added_lib_dirs.append('/usr/X11R6/lib')
    elif os.path.exists('/usr/X11R5/include'):
        include_dirs.append('/usr/X11R5/include')
        added_lib_dirs.append('/usr/X11R5/lib')
    else:
        # Assume default location for X11
        include_dirs.append('/usr/X11/include')
        added_lib_dirs.append('/usr/X11/lib')

    # If Cygwin, then verify that X is installed before proceeding
    if platform == 'cygwin':
        x11_inc = find_file('X11/Xlib.h', [], include_dirs)
        if x11_inc is None:
            # X not present: silently skip building _tkinter on Cygwin.
            return

    # Check for BLT extension
    if self.compiler.find_library_file(lib_dirs + added_lib_dirs,
                                       'BLT8.0'):
        defs.append( ('WITH_BLT', 1) )
        libs.append('BLT8.0')
    elif self.compiler.find_library_file(lib_dirs + added_lib_dirs,
                                         'BLT'):
        defs.append( ('WITH_BLT', 1) )
        libs.append('BLT')

    # Add the Tcl/Tk libraries
    libs.append('tk'+ version)
    libs.append('tcl'+ version)

    if platform in ['aix3', 'aix4']:
        libs.append('ld')

    # Finally, link with the X11 libraries (not appropriate on cygwin)
    if platform != "cygwin":
        libs.append('X11')

    ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
                    define_macros=[('WITH_APPINIT', 1)] + defs,
                    include_dirs = include_dirs,
                    libraries = libs,
                    library_dirs = added_lib_dirs,
                    )
    self.extensions.append(ext)

## # Uncomment these lines if you want to play with xxmodule.c
## ext = Extension('xx', ['xxmodule.c'])
## self.extensions.append(ext)

# XXX handle these, but how to detect?
#  *** Uncomment and edit for PIL (TkImaging) extension only:
#     -DWITH_PIL -I../Extensions/Imaging/libImaging  tkImaging.c \
#  *** Uncomment and edit for TOGL extension only:
#     -DWITH_TOGL togl.c \
#  *** Uncomment these for TOGL extension only:
#     -lGL -lGLU -lXext -lXmu \
def configure_ctypes_darwin(self, ext):
    """Wire the preconfigured Darwin libffi sources into the _ctypes extension.

    Darwin (OS X) does not run libffi's configure script; instead the
    checked-in files under Modules/_ctypes/libffi_osx are used directly.

    Args:
        ext: the _ctypes Extension object, augmented in place.

    Returns:
        True (this configuration path always succeeds).
    """
    (srcdir,) = sysconfig.get_config_vars('srcdir')
    ffi_srcdir = os.path.abspath(
        os.path.join(srcdir, 'Modules', '_ctypes', 'libffi_osx'))

    # Preconfigured C and preprocessed-assembly sources shipped in the tree.
    ffi_files = (
        'ffi.c',
        'x86/darwin64.S',
        'x86/x86-darwin.S',
        'x86/x86-ffi_darwin.c',
        'x86/x86-ffi64.c',
        'powerpc/ppc-darwin.S',
        'powerpc/ppc-darwin_closure.S',
        'powerpc/ppc-ffi_darwin.c',
        'powerpc/ppc64-darwin_closure.S',
    )

    # Teach the compiler to accept .S (preprocessed assembly) sources.
    self.compiler.src_extensions.append('.S')

    ext.include_dirs.extend([
        os.path.join(ffi_srcdir, 'include'),
        os.path.join(ffi_srcdir, 'powerpc'),
    ])
    ext.sources.extend(os.path.join(ffi_srcdir, name) for name in ffi_files)
    return True
def configure_ctypes(self, ext):
    # Configure the _ctypes extension against the bundled libffi: run
    # libffi's configure script into the build directory, then graft the
    # resulting source list / CFLAGS onto the Extension object.
    # Returns True on success, False if the configure step failed.
    #
    # NOTE: Python 2 code (``print`` statement, ``execfile``); kept
    # byte-identical.
    if not self.use_system_libffi:
        if sys.platform == 'darwin':
            # Darwin ships preconfigured libffi sources; no configure run.
            return self.configure_ctypes_darwin(ext)

        (srcdir,) = sysconfig.get_config_vars('srcdir')
        ffi_builddir = os.path.join(self.build_temp, 'libffi')
        ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
                                     '_ctypes', 'libffi'))
        ffi_configfile = os.path.join(ffi_builddir, 'fficonfig.py')

        from distutils.dep_util import newer_group
        # Re-run configure only when forced or when any libffi source is
        # newer than the generated fficonfig.py.
        config_sources = [os.path.join(ffi_srcdir, fname)
                          for fname in os.listdir(ffi_srcdir)
                          if os.path.isfile(os.path.join(ffi_srcdir, fname))]
        if self.force or newer_group(config_sources,
                                     ffi_configfile):
            from distutils.dir_util import mkpath
            mkpath(ffi_builddir)
            config_args = []

            # Pass empty CFLAGS because we'll just append the resulting
            # CFLAGS to Python's; -g or -O2 is to be avoided.
            cmd = "cd %s && env CFLAGS='' '%s/configure' %s" \
                  % (ffi_builddir, ffi_srcdir, " ".join(config_args))

            res = os.system(cmd)
            if res or not os.path.exists(ffi_configfile):
                print "Failed to configure _ctypes module"
                return False

        # fficonfig.py assigns ffi_srcdir/ffi_sources/ffi_cflags; execfile
        # collects them into the fficonfig dict.
        fficonfig = {}
        execfile(ffi_configfile, globals(), fficonfig)
        ffi_srcdir = os.path.join(fficonfig['ffi_srcdir'], 'src')

        # Add .S (preprocessed assembly) to C compiler source extensions.
        self.compiler.src_extensions.append('.S')

        include_dirs = [os.path.join(ffi_builddir, 'include'),
                        ffi_builddir, ffi_srcdir]
        extra_compile_args = fficonfig['ffi_cflags'].split()

        ext.sources.extend(fficonfig['ffi_sources'])
        ext.include_dirs.extend(include_dirs)
        ext.extra_compile_args.extend(extra_compile_args)
    return True
def detect_ctypes(self, inc_dirs, lib_dirs):
    # Register the _ctypes and _ctypes_test extensions.  When Python was
    # configured with --with-system-ffi, additionally probe for a system
    # libffi header/library and, if found, link against it instead of the
    # bundled copy (self.use_system_libffi flags the choice for
    # configure_ctypes above).
    self.use_system_libffi = False
    include_dirs = []
    extra_compile_args = []
    extra_link_args = []
    sources = ['_ctypes/_ctypes.c',
               '_ctypes/callbacks.c',
               '_ctypes/callproc.c',
               '_ctypes/stgdict.c',
               '_ctypes/cfield.c',
               '_ctypes/malloc_closure.c']
    depends = ['_ctypes/ctypes.h']

    if sys.platform == 'darwin':
        sources.append('_ctypes/darwin/dlfcn_simple.c')
        extra_compile_args.append('-DMACOSX')
        include_dirs.append('_ctypes/darwin')
# XXX Is this still needed?
##         extra_link_args.extend(['-read_only_relocs', 'warning'])

    elif sys.platform == 'sunos5':
        # XXX This shouldn't be necessary; it appears that some
        # of the assembler code is non-PIC (i.e. it has relocations
        # when it shouldn't. The proper fix would be to rewrite
        # the assembler code to be PIC.
        # This only works with GCC; the Sun compiler likely refuses
        # this option. If you want to compile ctypes with the Sun
        # compiler, please research a proper solution, instead of
        # finding some -z option for the Sun compiler.
        extra_link_args.append('-mimpure-text')

    elif sys.platform.startswith('hp-ux'):
        extra_link_args.append('-fPIC')

    ext = Extension('_ctypes',
                    include_dirs=include_dirs,
                    extra_compile_args=extra_compile_args,
                    extra_link_args=extra_link_args,
                    libraries=[],
                    sources=sources,
                    depends=depends)
    ext_test = Extension('_ctypes_test',
                         sources=['_ctypes/_ctypes_test.c'])
    self.extensions.extend([ext, ext_test])

    if not '--with-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS"):
        return

    if sys.platform == 'darwin':
        # OS X 10.5 comes with libffi.dylib; the include files are
        # in /usr/include/ffi
        inc_dirs.append('/usr/include/ffi')

    ffi_inc = find_file('ffi.h', [], inc_dirs)
    if ffi_inc is not None:
        ffi_h = ffi_inc[0] + '/ffi.h'
        # Scan ffi.h for its include guard to verify it is a real libffi
        # header; give up (ffi_inc = None) if the guard is never seen.
        # NOTE(review): fp is never closed -- relies on refcount cleanup.
        fp = open(ffi_h)
        while 1:
            line = fp.readline()
            if not line:
                ffi_inc = None
                break
            if line.startswith('#define LIBFFI_H'):
                break
    ffi_lib = None
    if ffi_inc is not None:
        # Prefer the convenience/PIC variants over plain libffi.
        for lib_name in ('ffi_convenience', 'ffi_pic', 'ffi'):
            if (self.compiler.find_library_file(lib_dirs, lib_name)):
                ffi_lib = lib_name
                break

    if ffi_inc and ffi_lib:
        ext.include_dirs.extend(ffi_inc)
        ext.libraries.append(ffi_lib)
        self.use_system_libffi = True
class PyBuildInstall(install):
    # Suppress the warning about installation into the lib_dynload
    # directory, which is not in sys.path when running Python during
    # installation:
    def initialize_options (self):
        install.initialize_options(self)
        # warn_dir=0 silences distutils' "not on sys.path" warning.
        self.warn_dir=0
class PyBuildInstallLib(install_lib):
    # Do exactly what install_lib does but make sure correct access modes get
    # set on installed directories and files. All installed files with get
    # mode 644 unless they are a shared library in which case they will get
    # mode 755. All installed directories will get mode 755.
    #
    # NOTE: Python 2 code -- legacy octal literals (0644) and os.path.walk
    # were both removed in Python 3; kept byte-identical.

    # Platform-specific shared-library suffix (e.g. '.so').
    so_ext = sysconfig.get_config_var("SO")

    def install(self):
        # Install as usual, then normalize permissions on everything copied.
        outfiles = install_lib.install(self)
        self.set_file_modes(outfiles, 0644, 0755)
        self.set_dir_modes(self.install_dir, 0755)
        return outfiles

    def set_file_modes(self, files, defaultMode, sharedLibMode):
        # chmod each installed regular file; shared libraries get the
        # executable sharedLibMode.  Symlinks are skipped.
        if not self.is_chmod_supported(): return
        if not files: return

        for filename in files:
            if os.path.islink(filename): continue
            mode = defaultMode
            if filename.endswith(self.so_ext): mode = sharedLibMode
            log.info("changing mode of %s to %o", filename, mode)
            if not self.dry_run: os.chmod(filename, mode)

    def set_dir_modes(self, dirname, mode):
        # Recursively chmod every directory under dirname.
        if not self.is_chmod_supported(): return
        os.path.walk(dirname, self.set_dir_modes_visitor, mode)

    def set_dir_modes_visitor(self, mode, dirname, names):
        # os.path.walk callback: chmod one directory (symlinks skipped).
        if os.path.islink(dirname): return
        log.info("changing mode of %s to %o", dirname, mode)
        if not self.dry_run: os.chmod(dirname, mode)

    def is_chmod_supported(self):
        # chmod is absent on some platforms (e.g. old Windows builds).
        return hasattr(os, 'chmod')
SUMMARY = """
Python is an interpreted, interactive, object-oriented programming
language. It is often compared to Tcl, Perl, Scheme or Java.
Python combines remarkable power with very clear syntax. It has
modules, classes, exceptions, very high level dynamic data types, and
dynamic typing. There are interfaces to many system calls and
libraries, as well as to various windowing systems (X11, Motif, Tk,
Mac, MFC). New built-in modules are easily written in C or C++. Python
is also usable as an extension language for applications that need a
programmable interface.
The Python implementation is portable: it runs on many brands of UNIX,
on Windows, DOS, OS/2, Mac, Amiga... If your favorite system isn't
listed here, it may still be supported, if there's a C compiler for
it. Ask around on comp.lang.python -- or just try compiling Python
yourself.
"""
CLASSIFIERS = """
Development Status :: 6 - Mature
License :: OSI Approved :: Python Software Foundation License
Natural Language :: English
Programming Language :: C
Programming Language :: Python
Topic :: Software Development
"""
def main():
    # Entry point: assemble the PyPI metadata and invoke distutils with the
    # custom build/install command classes defined above.

    # turn off warnings when deprecated modules are imported
    import warnings
    warnings.filterwarnings("ignore",category=DeprecationWarning)
    setup(# PyPI Metadata (PEP 301)
          name = "Python",
          version = sys.version.split()[0],
          # NOTE(review): sys.version[:3] yields "3.1" for "3.10"; fine for
          # the 2.x era this file targets, breaks on two-digit minors.
          url = "http://www.python.org/%s" % sys.version[:3],
          maintainer = "Guido van Rossum and the Python community",
          maintainer_email = "python-dev@python.org",
          description = "A high-level object-oriented programming language",
          long_description = SUMMARY.strip(),
          license = "PSF license",
          classifiers = filter(None, CLASSIFIERS.split("\n")),
          platforms = ["Many"],

          # Build info
          cmdclass = {'build_ext':PyBuildExt, 'install':PyBuildInstall,
                      'install_lib':PyBuildInstallLib},
          # The struct module is defined here, because build_ext won't be
          # called unless there's at least one extension module defined.
          ext_modules=[Extension('_struct', ['_struct.c'])],

          # Scripts to install
          scripts = ['Tools/scripts/pydoc', 'Tools/scripts/idle',
                     'Tools/scripts/2to3',
                     'Lib/smtpd.py']
          )

# --install-platlib
if __name__ == '__main__':
    main()
|
Lh4cKg/sl4a
|
python/src/setup.py
|
Python
|
apache-2.0
| 82,962
|
[
"VisIt"
] |
801f040a652d30bba1495bff67754a665446b49391f71c0283d1edec4b78a760
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains the class describing the coordination geometries that can exist in a given structure. These
"model" coordination geometries are described in the following articles :
- Pure Appl. Chem., Vol. 79, No. 10, pp. 1779--1799, 2007.
- Acta Cryst. A, Vol. 46, No. 1, pp. 1--11, 1990.
The module also contains descriptors of part of these geometries (plane of separation, ...) that are used in the
identification algorithms.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
import abc
import itertools
import json
import os
import numpy as np
from monty.json import MontyDecoder, MSONable
from scipy.special import factorial
module_dir = os.path.dirname(os.path.abspath(__file__))
UNKNOWN_ENVIRONMENT_SYMBOL = "UNKNOWN"
UNCLEAR_ENVIRONMENT_SYMBOL = "UNCLEAR"
EXPLICIT_PERMUTATIONS = "EXPLICIT_PERMUTATIONS"
SEPARATION_PLANE = "SEPARATION_PLANE"
class AbstractChemenvAlgorithm(MSONable, metaclass=abc.ABCMeta):
    """
    Base class for Chemenv algorithms that identify the correct permutation
    used in the computation of the Continuous Symmetry Measure.
    """

    def __init__(self, algorithm_type):
        """
        Base constructor for ChemenvAlgorithm.

        Args:
            algorithm_type (str): Type of algorithm.
        """
        self._algorithm_type = algorithm_type

    @property
    def algorithm_type(self):
        """
        Type of this algorithm.

        Returns: Type of the algorithm.
        """
        return self._algorithm_type

    @abc.abstractmethod
    def as_dict(self):
        """
        A JSON serializable dict representation of the algorithm.
        """

    @abc.abstractmethod
    def __str__(self):
        return ""
class ExplicitPermutationsAlgorithm(AbstractChemenvAlgorithm):
    """
    Algorithm that explicitly tries a fixed list of permutations when
    computing the Continuous Symmetry Measure.
    """

    def __init__(self, permutations):
        """
        Initializes a separation plane for a given perfect coordination geometry.

        Args:
            permutations: Permutations used for this algorithm.
        """
        super().__init__(algorithm_type=EXPLICIT_PERMUTATIONS)
        self._permutations = permutations

    @property
    def permutations(self):
        """
        Permutations to be performed for this algorithm.

        Returns: Permutations to be performed.
        """
        return self._permutations

    @property
    def as_dict(self):
        """
        JSON serializable dict representation of this
        ExplicitPermutationsAlgorithm.

        Returns: a JSON serializable dict representation of this algorithm.
        """
        return {
            "@module": type(self).__module__,
            "@class": type(self).__name__,
            "permutations": self._permutations,
        }

    @classmethod
    def from_dict(cls, dd):
        """
        Reconstructs an ExplicitPermutationsAlgorithm from its JSON
        serializable dict representation.

        Args:
            dd: a JSON serializable dict representation of an
                ExplicitPermutationsAlgorithm.

        Returns: an ExplicitPermutationsAlgorithm.
        """
        return cls(dd["permutations"])

    def __str__(self):
        return self.algorithm_type
class SeparationPlane(AbstractChemenvAlgorithm):
    """
    Class representing the algorithm using separation planes for the calculation of
    the Continuous Symmetry Measure.
    """

    def __init__(
        self,
        plane_points,
        mirror_plane=False,
        ordered_plane=False,
        point_groups=None,
        ordered_point_groups=None,
        explicit_permutations=None,
        minimum_number_of_points=None,
        explicit_optimized_permutations=None,
        multiplicity=None,
        other_plane_points=None,
    ):
        """
        Initializes a separation plane for a given perfect coordination geometry.

        Args:
            plane_points: Indices of the points that are in the plane in the perfect structure (and should be
                found in the defective one as well).
            mirror_plane: True if the separation plane is a mirror plane, in which case there is a correspondence
                of the points in each point_group (can reduce the number of permutations).
            ordered_plane: True if the order of the points in the plane can be taken into account to reduce the
                number of permutations.
            point_groups: Indices of the points in the two groups of points separated by the plane.
            ordered_point_groups: Whether the order of the points in each group of points can be taken into
                account to reduce the number of permutations.
            explicit_permutations: Explicit permutations to be performed in this separation plane algorithm.
            minimum_number_of_points: Minimum number of points needed to initialize a separation plane
                for this algorithm.
            explicit_optimized_permutations: Optimized set of explicit permutations to be performed in this
                separation plane algorithm.
            multiplicity: Number of such planes in the model geometry.
            other_plane_points: Indices of the points that are in the plane in the perfect structure for the other
                planes. The multiplicity should be equal to the length of this list + 1 ("main" separation plane +
                the other ones).

        Raises:
            RuntimeError: if the first point group is larger than the second one.
        """
        super().__init__(algorithm_type=SEPARATION_PLANE)
        self.mirror_plane = mirror_plane
        self.plane_points = plane_points
        self.point_groups = point_groups
        # Convention: the first group is the smaller (or equal-sized) one.
        if len(point_groups[0]) > len(point_groups[1]):
            raise RuntimeError(
                "The number of points in the first group should be\n"
                "less than or equal to the number of points in the second group"
            )
        # Compact hash encoding the separation signature
        # (#plane points, #group 0, #group 1).
        self._hash = 10000 * len(plane_points) + 100 * len(point_groups[0]) + len(point_groups[1])
        self.ordered_plane = ordered_plane
        self.ordered_point_groups = [False, False] if ordered_point_groups is None else ordered_point_groups
        self.explicit_permutations = explicit_permutations
        self.explicit_optimized_permutations = explicit_optimized_permutations
        # Lazy cache for safe_separation_permutations().
        self._safe_permutations = None
        # Prefer the optimized permutation set when one is available.
        if self.explicit_optimized_permutations is not None:
            self._permutations = self.explicit_optimized_permutations
        elif self.explicit_permutations is not None:
            self._permutations = self.explicit_permutations
        self.multiplicity = multiplicity
        self.other_plane_points = other_plane_points
        self.minimum_number_of_points = minimum_number_of_points
        self.maximum_number_of_points = len(self.plane_points)
        # Reference ordering of indices: group 0, then plane points, then group 1.
        self._ref_separation_perm = list(self.point_groups[0])
        self._ref_separation_perm.extend(list(self.plane_points))
        self._ref_separation_perm.extend(list(self.point_groups[1]))
        self._argsorted_ref_separation_perm = list(np.argsort(self._ref_separation_perm))
        # Separation signature as a tuple (#group 0, #plane points, #group 1).
        self.separation = (
            len(point_groups[0]),
            len(plane_points),
            len(point_groups[1]),
        )

    @property
    def permutations(self):
        """
        Permutations used for this separation plane algorithm.

        Returns: List of permutations to be performed.
        """
        return self._permutations

    @property
    def ref_separation_perm(self):
        """
        Ordered indices of the separation plane.

        Examples:
            For a separation plane of type 2|4|3, with plane_points indices [0, 3, 5, 8] and
            point_groups indices [1, 4] and [2, 7, 6], the list of ordered indices is :
            [0, 3, 5, 8, 1, 4, 2, 7, 6].

        Returns: list of ordered indices of this separation plane.
        """
        return self._ref_separation_perm

    @property
    def argsorted_ref_separation_perm(self):
        """
        "Arg sorted" ordered indices of the separation plane.

        This is used in the identification of the final permutation to be used.

        Returns: list of the "arg sorted" ordered indices of the separation plane.
        """
        return self._argsorted_ref_separation_perm

    def safe_separation_permutations(self, ordered_plane=False, ordered_point_groups=None, add_opposite=False):
        """
        Simple and safe permutations for this separation plane.

        This is not meant to be used in production. Default configuration for ChemEnv does not use this method.

        Args:
            ordered_plane: Whether the order of the points in the plane can be used to reduce the
                number of permutations.
            ordered_point_groups: Whether the order of the points in each point group can be used to reduce the
                number of permutations.
            add_opposite: Whether to add the permutations from the second group before the first group as well.

        Returns: List of safe permutations.
        """
        # The result is cached on first call and returned as-is afterwards,
        # whatever the arguments (same caching semantics as before, but the
        # candidate permutations are no longer recomputed only to be
        # discarded).
        if self._safe_permutations is not None:
            return self._safe_permutations
        # Contiguous index ranges for group 0, the plane points and group 1.
        s0 = list(range(len(self.point_groups[0])))
        plane = list(
            range(
                len(self.point_groups[0]),
                len(self.point_groups[0]) + len(self.plane_points),
            )
        )
        s1 = list(
            range(
                len(self.point_groups[0]) + len(self.plane_points),
                len(self.point_groups[0]) + len(self.plane_points) + len(self.point_groups[1]),
            )
        )
        ordered_point_groups = [False, False] if ordered_point_groups is None else ordered_point_groups

        def rotate(s, n):
            # Cyclic rotation of list s by n positions.
            return s[-n:] + s[:-n]

        # When a set of points is "ordered", only its cyclic rotations (and
        # the rotations of the reversed order) are distinct permutations.
        if ordered_plane and self.ordered_plane:
            plane_perms = [rotate(plane, ii) for ii in range(len(plane))]
            inv_plane = plane[::-1]
            plane_perms.extend([rotate(inv_plane, ii) for ii in range(len(inv_plane))])
        else:
            plane_perms = list(itertools.permutations(plane))
        if ordered_point_groups[0] and self.ordered_point_groups[0]:
            s0_perms = [rotate(s0, ii) for ii in range(len(s0))]
            inv_s0 = s0[::-1]
            s0_perms.extend([rotate(inv_s0, ii) for ii in range(len(inv_s0))])
        else:
            s0_perms = list(itertools.permutations(s0))
        if ordered_point_groups[1] and self.ordered_point_groups[1]:
            s1_perms = [rotate(s1, ii) for ii in range(len(s1))]
            inv_s1 = s1[::-1]
            s1_perms.extend([rotate(inv_s1, ii) for ii in range(len(inv_s1))])
        else:
            s1_perms = list(itertools.permutations(s1))
        self._safe_permutations = []
        for perm_side1 in s0_perms:
            for perm_sep_plane in plane_perms:
                for perm_side2 in s1_perms:
                    perm = list(perm_side1)
                    perm.extend(list(perm_sep_plane))
                    perm.extend(list(perm_side2))
                    self._safe_permutations.append(perm)
                    if add_opposite:
                        perm = list(perm_side2)
                        perm.extend(list(perm_sep_plane))
                        perm.extend(list(perm_side1))
                        self._safe_permutations.append(perm)
        return self._safe_permutations

    @property
    def as_dict(self):
        """
        Return the JSON serializable dict representation of this SeparationPlane algorithm.

        Returns: a JSON serializable dict representation of this SeparationPlane algorithm.
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "plane_points": self.plane_points,
            "mirror_plane": self.mirror_plane,
            "ordered_plane": self.ordered_plane,
            "point_groups": self.point_groups,
            "ordered_point_groups": self.ordered_point_groups,
            "explicit_permutations": [eperm.tolist() for eperm in self.explicit_permutations]
            if self.explicit_permutations is not None
            else None,
            "explicit_optimized_permutations": [eoperm.tolist() for eoperm in self.explicit_optimized_permutations]
            if self.explicit_optimized_permutations is not None
            else None,
            "multiplicity": self.multiplicity,
            "other_plane_points": self.other_plane_points,
            "minimum_number_of_points": self.minimum_number_of_points,
        }

    @classmethod
    def from_dict(cls, dd):
        """
        Reconstructs the SeparationPlane algorithm from its JSON serializable dict representation.

        Args:
            dd: a JSON serializable dict representation of an SeparationPlane algorithm.

        Returns: a SeparationPlane algorithm.
        """
        eop = (
            [np.array(eoperm) for eoperm in dd["explicit_optimized_permutations"]]
            if ("explicit_optimized_permutations" in dd and dd["explicit_optimized_permutations"] is not None)
            else None
        )
        # Bug fix: as_dict emits None when no explicit permutations are
        # defined; the previously unguarded list comprehension raised
        # TypeError in that case, breaking the as_dict/from_dict round-trip.
        eperms = (
            [np.array(eperm) for eperm in dd["explicit_permutations"]]
            if dd.get("explicit_permutations") is not None
            else None
        )
        return cls(
            plane_points=dd["plane_points"],
            mirror_plane=dd["mirror_plane"],
            ordered_plane=dd["ordered_plane"],
            point_groups=dd["point_groups"],
            ordered_point_groups=dd["ordered_point_groups"],
            explicit_permutations=eperms,
            explicit_optimized_permutations=eop,
            multiplicity=dd["multiplicity"] if "multiplicity" in dd else None,
            other_plane_points=dd["other_plane_points"] if "other_plane_points" in dd else None,
            minimum_number_of_points=dd["minimum_number_of_points"],
        )

    def __str__(self):
        out = "Separation plane algorithm with the following reference separation :\n"
        # Bug fix: the original iterated over a one-element list wrapping each
        # group ([self.point_groups[0]]), so join received a single list and
        # printed its repr instead of the '-'-joined point indices.
        out += "[{}] | [{}] | [{}]".format(
            "-".join(str(pp) for pp in self.point_groups[0]),
            "-".join(str(pp) for pp in self.plane_points),
            "-".join(str(pp) for pp in self.point_groups[1]),
        )
        return out
class CoordinationGeometry:
"""
Class used to store the ideal representation of a chemical environment or "coordination geometry".
"""
# Default value of continuous symmetry measure beyond which no further
# search is performed for the separation plane algorithms
CSM_SKIP_SEPARATION_PLANE_ALGO = 10.0
class NeighborsSetsHints:
    """
    Class used to describe neighbors sets hints.

    This allows to possibly get a lower coordination from a capped-like model polyhedron.
    """

    # Supported hint flavors: removal of one, two or three "cap" neighbors.
    ALLOWED_HINTS_TYPES = ["single_cap", "double_cap", "triple_cap"]

    def __init__(self, hints_type, options):
        """
        Constructor for this NeighborsSetsHints.

        Args:
            hints_type: type of hint (single, double or triple cap)
            options: options for the "hinting", e.g. the maximum csm value beyond which no additional
                neighbors set could be found from a "cap hint".

        Raises:
            ValueError: if hints_type is not one of ALLOWED_HINTS_TYPES.
        """
        if hints_type not in self.ALLOWED_HINTS_TYPES:
            # Bug fix: the original message formatted the builtin ``type``
            # instead of the offending ``hints_type`` value.
            raise ValueError('Type "{}" for NeighborsSetsHints is not allowed'.format(hints_type))
        self.hints_type = hints_type
        self.options = options

    def hints(self, hints_info):
        """
        Return hints for an additional neighbors set, i.e. the voronoi indices that constitute this new
        neighbors set.

        Args:
            hints_info: Info needed to build new "hinted" neighbors set.

        Returns: Voronoi indices of the new "hinted" neighbors set (empty list
            when the csm exceeds the configured csm_max).
        """
        if hints_info["csm"] > self.options["csm_max"]:
            return []
        # Dispatch to single_cap_hints / double_cap_hints / triple_cap_hints.
        return getattr(self, "{}_hints".format(self.hints_type))(hints_info)

    def _strip_caps(self, nb_set, caps_to_remove):
        # Return a copy of the neighbors set's voronoi indices with the
        # given cap voronoi indices removed.
        new_indices = list(nb_set.site_voronoi_indices)
        for cap_voronoi_index in caps_to_remove:
            new_indices.remove(cap_voronoi_index)
        return new_indices

    def single_cap_hints(self, hints_info):
        """
        Return hints for an additional neighbors set, i.e. the voronoi indices that constitute this new
        neighbors set, in case of a "Single cap" hint.

        Args:
            hints_info: Info needed to build new "hinted" neighbors set.

        Returns: Voronoi indices of the new "hinted" neighbors set.
        """
        nb_set = hints_info["nb_set"]
        aligned = nb_set.get_neighb_voronoi_indices(permutation=hints_info["permutation"])
        cap_voronoi_index = aligned[self.options["cap_index"]]
        return [self._strip_caps(nb_set, [cap_voronoi_index])]

    def double_cap_hints(self, hints_info):
        """
        Return hints for an additional neighbors set, i.e. the voronoi indices that constitute this new
        neighbors set, in case of a "Double cap" hint.

        Args:
            hints_info: Info needed to build new "hinted" neighbors set.

        Returns: Voronoi indices of the new "hinted" neighbors set.
        """
        nb_set = hints_info["nb_set"]
        aligned = nb_set.get_neighb_voronoi_indices(permutation=hints_info["permutation"])
        cap1 = aligned[self.options["first_cap_index"]]
        cap2 = aligned[self.options["second_cap_index"]]
        # One new set per single cap, plus one with both caps removed
        # (historical order preserved).
        return [self._strip_caps(nb_set, caps) for caps in ([cap1], [cap2], [cap1, cap2])]

    def triple_cap_hints(self, hints_info):
        """
        Return hints for an additional neighbors set, i.e. the voronoi indices that constitute this new
        neighbors set, in case of a "Triple cap" hint.

        Args:
            hints_info: Info needed to build new "hinted" neighbors set.

        Returns: Voronoi indices of the new "hinted" neighbors set.
        """
        nb_set = hints_info["nb_set"]
        aligned = nb_set.get_neighb_voronoi_indices(permutation=hints_info["permutation"])
        cap1 = aligned[self.options["first_cap_index"]]
        cap2 = aligned[self.options["second_cap_index"]]
        cap3 = aligned[self.options["third_cap_index"]]
        # All non-empty subsets of the three caps, in the historical order:
        # singles, then pairs, then the full triple.
        removal_sets = (
            [cap1], [cap2], [cap3],
            [cap2, cap3], [cap1, cap3], [cap1, cap2],
            [cap1, cap2, cap3],
        )
        return [self._strip_caps(nb_set, caps) for caps in removal_sets]

    def as_dict(self):
        """
        A JSON serializable dict representation of this NeighborsSetsHints.
        """
        return {"hints_type": self.hints_type, "options": self.options}

    @classmethod
    def from_dict(cls, dd):
        """
        Reconstructs the NeighborsSetsHints from its JSON serializable dict representation.

        Args:
            dd: a JSON serializable dict representation of a NeighborsSetsHints.

        Returns: a NeighborsSetsHints.
        """
        return cls(hints_type=dd["hints_type"], options=dd["options"])
    def __init__(
        self,
        mp_symbol,
        name,
        alternative_names=None,
        IUPAC_symbol=None,
        IUCr_symbol=None,
        coordination=None,
        central_site=np.zeros(3),
        points=None,
        solid_angles=None,
        permutations_safe_override=False,
        deactivate=False,
        faces=None,
        edges=None,
        algorithms=None,
        equivalent_indices=None,
        neighbors_sets_hints=None,
    ):
        """
        Initializes one "coordination geometry" according to [Pure Appl. Chem., Vol. 79, No. 10, pp. 1779--1799, 2007]
        and [Acta Cryst. A, Vol. 46, No. 1, pp. 1--11, 1990].
        Args:
            mp_symbol: Symbol used internally for the coordination geometry.
            name: Name of the coordination geometry.
            alternative_names: Alternative names for this coordination geometry.
            IUPAC_symbol: The IUPAC symbol of this coordination geometry.
            IUCr_symbol: The IUCr symbol of this coordination geometry.
            coordination: The coordination number of this coordination geometry (number of neighboring atoms).
            central_site: The coordinates of the central site of this coordination geometry.
            points: The list of the coordinates of all the points of this coordination geometry.
            solid_angles: The list of solid angles for each neighbor in this coordination geometry.
            permutations_safe_override: Computes all the permutations if set to True (overrides the plane separation
                algorithms or any other algorithm, for testing purposes)
            deactivate: Whether to deactivate this coordination geometry
            faces: List of the faces with their vertices given in a clockwise or anticlockwise order, for drawing
                purposes.
            edges: List of edges, for drawing purposes.
            algorithms: Algorithms used to identify this coordination geometry.
            equivalent_indices: The equivalent sets of indices in this coordination geometry (can be used to skip
                equivalent permutations that have already been performed).
            neighbors_sets_hints: Neighbors sets hints for this coordination geometry.
        """
        self._mp_symbol = mp_symbol
        self.name = name
        self.alternative_names = alternative_names if alternative_names is not None else []
        # Stored without underscore in the name; exposed through the IUPAC_symbol /
        # IUCr_symbol properties further down the class.
        self.IUPACsymbol = IUPAC_symbol
        self.IUCrsymbol = IUCr_symbol
        self.coordination = coordination
        # np.array(...) copies its input, so the shared np.zeros(3) default
        # argument is never mutated (safe, if unconventional).
        self.central_site = np.array(central_site)
        self.points = points
        self._solid_angles = solid_angles
        self.permutations_safe_override = permutations_safe_override
        # self.plane_safe_permutations = plane_safe_permutations
        # self.setup_permutations(permutations)
        self.deactivate = deactivate
        self._faces = faces
        self._edges = edges
        self._algorithms = algorithms
        if points is not None:
            self.centroid = np.mean(np.array(points), axis=0)
        else:
            self.centroid = None
        self.equivalent_indices = equivalent_indices
        self.neighbors_sets_hints = neighbors_sets_hints
        # Lazily computed and cached by the pauling_stability_ratio property.
        self._pauling_stability_ratio = None
    def as_dict(self):
        """
        A JSON serializable dict representation of this CoordinationGeometry.
        """
        return {
            "mp_symbol": self._mp_symbol,
            "name": self.name,
            "alternative_names": self.alternative_names,
            "IUPAC_symbol": self.IUPACsymbol,
            "IUCr_symbol": self.IUCrsymbol,
            "coordination": self.coordination,
            # numpy scalars -> plain floats so the dict is JSON serializable
            "central_site": [float(xx) for xx in self.central_site],
            "points": [[float(xx) for xx in pp] for pp in self.points] if self.points is not None else None,
            "solid_angles": [float(ang) for ang in self._solid_angles] if self._solid_angles is not None else None,
            "deactivate": self.deactivate,
            "_faces": self._faces,
            "_edges": self._edges,
            # NOTE(review): `algo.as_dict` is not called here (no parentheses), unlike
            # `nbsh.as_dict()` below — presumably as_dict is a @property on the
            # algorithm classes; confirm against their definitions.
            "_algorithms": [algo.as_dict for algo in self._algorithms] if self._algorithms is not None else None,
            "equivalent_indices": self.equivalent_indices,
            "neighbors_sets_hints": [nbsh.as_dict() for nbsh in self.neighbors_sets_hints]
            if self.neighbors_sets_hints is not None
            else None,
        }
    @classmethod
    def from_dict(cls, dd):
        """
        Reconstructs the CoordinationGeometry from its JSON serializable dict representation.
        Args:
            dd: a JSON serializable dict representation of a CoordinationGeometry.
        Returns: a CoordinationGeometry.
        """
        # MontyDecoder turns the serialized algorithm dicts back into objects.
        dec = MontyDecoder()
        return cls(
            mp_symbol=dd["mp_symbol"],
            name=dd["name"],
            alternative_names=dd["alternative_names"],
            IUPAC_symbol=dd["IUPAC_symbol"],
            IUCr_symbol=dd["IUCr_symbol"],
            coordination=dd["coordination"],
            central_site=dd["central_site"],
            points=dd["points"],
            # Legacy dicts without solid angles get an even split of the sphere.
            solid_angles=(
                dd["solid_angles"] if "solid_angles" in dd else [4.0 * np.pi / dd["coordination"]] * dd["coordination"]
            ),
            deactivate=dd["deactivate"],
            faces=dd["_faces"],
            edges=dd["_edges"],
            algorithms=[dec.process_decoded(algo_d) for algo_d in dd["_algorithms"]]
            if dd["_algorithms"] is not None
            else None,
            equivalent_indices=dd["equivalent_indices"] if "equivalent_indices" in dd else None,
            neighbors_sets_hints=[cls.NeighborsSetsHints.from_dict(nbshd) for nbshd in dd["neighbors_sets_hints"]]
            if ("neighbors_sets_hints" in dd and dd["neighbors_sets_hints"] is not None)
            else None,
        )
def __str__(self):
symbol = ""
if self.IUPAC_symbol is not None:
symbol += " (IUPAC: {s}".format(s=self.IUPAC_symbol)
if self.IUCr_symbol is not None:
symbol += " || IUCr: {s})".format(s=self.IUCr_symbol)
else:
symbol += ")"
elif self.IUCr_symbol is not None:
symbol += " (IUCr: {s})".format(s=self.IUCr_symbol)
outs = [
"Coordination geometry type : {n}{s}\n".format(n=self.name, s=symbol),
" - coordination number : {c}".format(c=self.coordination),
]
if self.points is None:
outs.append("... not yet implemented")
else:
outs.append(" - list of points :")
for pp in self.points:
outs.append(" - {p}".format(p=pp))
outs.append("------------------------------------------------------------")
outs.append("")
return "\n".join(outs)
def __repr__(self):
symbol = ""
if self.IUPAC_symbol is not None:
symbol += " (IUPAC: {s}".format(s=self.IUPAC_symbol)
if self.IUCr_symbol is not None:
symbol += " || IUCr: {s})".format(s=self.IUCr_symbol)
else:
symbol += ")"
elif self.IUCr_symbol is not None:
symbol += " (IUCr: {s})".format(s=self.IUCr_symbol)
outs = [
"Coordination geometry type : {n}{s}\n".format(n=self.name, s=symbol),
" - coordination number : {c}".format(c=self.coordination),
]
outs.append("------------------------------------------------------------")
outs.append("")
return "\n".join(outs)
    def __len__(self):
        """Return the coordination number (number of neighbors) of this geometry."""
        return self.coordination
    def set_permutations_safe_override(self, permutations_safe_override):
        """
        Setup ChemEnv so that a safe set of permutations are used.
        Args:
            permutations_safe_override: Whether to use safe permutations.
        """
        # Takes effect through the number_of_permutations property (falls back
        # to the full factorial count when set).
        self.permutations_safe_override = permutations_safe_override
        # self.setup_permutations()
# @property
# def csm_skip_algo(self):
# return self.CSM_SKIP_SEPARATION_PLANE_ALGO
@property
def distfactor_max(self):
"""
The maximum distfactor for the perfect CoordinationGeometry.
Returns: Maximum distfactor for the perfect CoordinationGeometry (usually 1.0 for symmetric polyhedrons).
"""
dists = [np.linalg.norm(pp - self.central_site) for pp in self.points]
return np.max(dists) / np.min(dists)
    @property
    def coordination_number(self):
        """
        Returns the coordination number of this coordination geometry.
        """
        return self.coordination
    @property
    def pauling_stability_ratio(self):
        """
        Returns the theoretical Pauling stability ratio (rC/rA) for this environment.
        """
        # Computed once and cached in self._pauling_stability_ratio.
        if self._pauling_stability_ratio is None:
            if self.ce_symbol in ["S:1", "L:2"]:
                # Single-neighbor and linear environments: ratio not meaningful.
                self._pauling_stability_ratio = 0.0
            else:
                mindist_anions = 1000000.0
                mindist_cation_anion = 1000000.0
                for ipt1 in range(len(self.points)):  # pylint: disable=C0200
                    pt1 = np.array(self.points[ipt1])
                    mindist_cation_anion = min(mindist_cation_anion, np.linalg.norm(pt1 - self.central_site))
                    for ipt2 in range(ipt1 + 1, len(self.points)):
                        pt2 = np.array(self.points[ipt2])
                        mindist_anions = min(mindist_anions, np.linalg.norm(pt1 - pt2))
                # Anions (neighbors) are taken as touching spheres: radius is half the
                # smallest anion-anion distance; the cation fills the remaining gap.
                anion_radius = mindist_anions / 2.0
                cation_radius = mindist_cation_anion - anion_radius
                self._pauling_stability_ratio = cation_radius / anion_radius
        return self._pauling_stability_ratio
    @property
    def mp_symbol(self):
        """
        Returns the MP symbol of this coordination geometry.
        """
        return self._mp_symbol
    @property
    def ce_symbol(self):
        """
        Returns the symbol of this coordination geometry (alias of mp_symbol).
        """
        return self._mp_symbol
    def get_coordination_number(self):
        """
        Returns the coordination number of this coordination geometry.
        """
        return self.coordination
    def is_implemented(self):
        """
        Returns True if this coordination geometry is implemented.
        """
        # A geometry counts as implemented when it has explicit points
        # (an empty points list would also count as not implemented).
        return bool(self.points)
    def get_name(self):
        """
        Returns the name of this coordination geometry.
        """
        return self.name
    @property
    def IUPAC_symbol(self):
        """
        Returns the IUPAC symbol of this coordination geometry (or None if unset).
        """
        return self.IUPACsymbol
    @property
    def IUPAC_symbol_str(self):
        """
        Returns a string representation of the IUPAC symbol of this coordination geometry.
        """
        # Note: yields the literal string "None" when the symbol is unset.
        return str(self.IUPACsymbol)
    @property
    def IUCr_symbol(self):
        """
        Returns the IUCr symbol of this coordination geometry (or None if unset).
        """
        return self.IUCrsymbol
    @property
    def IUCr_symbol_str(self):
        """
        Returns a string representation of the IUCr symbol of this coordination geometry.
        """
        # Note: yields the literal string "None" when the symbol is unset.
        return str(self.IUCrsymbol)
    @property
    def number_of_permutations(self):
        """
        Returns the number of permutations of this coordination geometry.
        """
        # Both the safe override and the absence of precomputed permutations
        # fall back to the full factorial count.
        if self.permutations_safe_override:
            return factorial(self.coordination)
        # NOTE(review): self.permutations is assigned outside this class's
        # __init__ (hence the pylint disables) — confirm where it is set.
        if self.permutations is None:  # pylint: disable=E1101
            return factorial(self.coordination)
        return len(self.permutations)  # pylint: disable=E1101
def ref_permutation(self, permutation):
"""
Returns the reference permutation for a set of equivalent permutations.
Can be useful to skip permutations that have already been performed.
Args:
permutation: Current permutation
Returns: Reference permutation of the perfect CoordinationGeometry.
"""
perms = []
for eqv_indices in self.equivalent_indices:
perms.append(tuple(permutation[ii] for ii in eqv_indices))
perms.sort()
return perms[0]
    @property
    def algorithms(self):
        """
        Returns the list of algorithms that are used to identify this coordination geometry.
        """
        return self._algorithms
    def get_central_site(self):
        """
        Returns the central site of this coordination geometry.
        """
        return self.central_site
def faces(self, sites, permutation=None):
"""
Returns the list of faces of this coordination geometry. Each face is given as a
list of its vertices coordinates.
"""
if permutation is None:
coords = [site.coords for site in sites]
else:
coords = [sites[ii].coords for ii in permutation]
return [[coords[ii] for ii in f] for f in self._faces]
def edges(self, sites, permutation=None, input="sites"):
"""
Returns the list of edges of this coordination geometry. Each edge is given as a
list of its end vertices coordinates.
"""
if input == "sites":
coords = [site.coords for site in sites]
elif input == "coords":
coords = sites
# if permutation is None:
# coords = [site.coords for site in sites]
# else:
# coords = [sites[ii].coords for ii in permutation]
if permutation is not None:
coords = [coords[ii] for ii in permutation]
return [[coords[ii] for ii in e] for e in self._edges]
def solid_angles(self, permutation=None):
"""
Returns the list of "perfect" solid angles Each edge is given as a
list of its end vertices coordinates.
"""
if permutation is None:
return self._solid_angles
return [self._solid_angles[ii] for ii in permutation]
    def get_pmeshes(self, sites, permutation=None):
        """
        Returns the pmesh strings used for jmol to show this geometry.
        """
        pmeshes = []
        # _vertices = [site.coords for site in sites]
        if permutation is None:
            _vertices = [site.coords for site in sites]
        else:
            _vertices = [sites[ii].coords for ii in permutation]
        _face_centers = []
        number_of_faces = 0
        for face in self._faces:
            if len(face) in [3, 4]:
                # Triangles and quads are emitted directly as one pmesh polygon.
                number_of_faces += 1
            else:
                # Larger faces are fanned into len(face) triangles through an
                # extra vertex at the face center.
                number_of_faces += len(face)
                _face_centers.append(
                    np.array([np.mean([_vertices[face_vertex][ii] for face_vertex in face]) for ii in range(3)])
                )
        # pmesh header: total vertex count (sites + synthetic face centers).
        out = "{}\n".format(len(_vertices) + len(_face_centers))
        for vv in _vertices:
            out += "{:15.8f} {:15.8f} {:15.8f}\n".format(vv[0], vv[1], vv[2])
        for fc in _face_centers:
            out += "{:15.8f} {:15.8f} {:15.8f}\n".format(fc[0], fc[1], fc[2])
        out += "{:d}\n".format(number_of_faces)
        for iface, face in enumerate(self._faces):
            # pmesh polygons are closed: a triangle lists 4 indices, a quad 5.
            if len(face) == 3:
                out += "4\n"
            elif len(face) == 4:
                out += "5\n"
            else:
                # NOTE(review): `len(_vertices) + iface` indexes face centers by the
                # overall face index although centers are only appended for >4-vertex
                # faces — presumably correct only when such faces come first; confirm.
                for ii, f in enumerate(face):
                    out += "4\n"
                    out += "{:d}\n".format(len(_vertices) + iface)
                    out += "{:d}\n".format(f)
                    out += "{:d}\n".format(face[np.mod(ii + 1, len(face))])
                    out += "{:d}\n".format(len(_vertices) + iface)
            if len(face) in [3, 4]:
                for face_vertex in face:
                    out += "{:d}\n".format(face_vertex)
                out += "{:d}\n".format(face[0])
        pmeshes.append({"pmesh_string": out})
        return pmeshes
class AllCoordinationGeometries(dict):
    """
    Class used to store all the reference "coordination geometries" (list with instances of the CoordinationGeometry
    classes)
    """
    def __init__(self, permutations_safe_override=False, only_symbols=None):
        """
        Initializes the list of Coordination Geometries.
        Args:
            permutations_safe_override: Whether to use safe permutations.
            only_symbols: Whether to restrict the list of environments to be identified.
        """
        dict.__init__(self)
        self.cg_list = list()
        if only_symbols is None:
            # Load every geometry listed in the master allcg.txt index file.
            with open("{}/coordination_geometries_files/allcg.txt".format(module_dir), "r") as f:
                data = f.readlines()
            for line in data:
                cg_file = "{}/{}".format(module_dir, line.strip())
                with open(cg_file, "r") as f:
                    dd = json.load(f)
                self.cg_list.append(CoordinationGeometry.from_dict(dd))
        else:
            # Restrict loading to the requested symbols; ':' is encoded as '#'
            # in the on-disk file names.
            for symbol in only_symbols:
                fsymbol = symbol.replace(":", "#")
                cg_file = "{}/coordination_geometries_files/{}.json".format(module_dir, fsymbol)
                with open(cg_file, "r") as f:
                    dd = json.load(f)
                self.cg_list.append(CoordinationGeometry.from_dict(dd))
        # Sentinel geometries for unidentifiable / ambiguous environments.
        self.cg_list.append(CoordinationGeometry(UNKNOWN_ENVIRONMENT_SYMBOL, "Unknown environment", deactivate=True))
        self.cg_list.append(CoordinationGeometry(UNCLEAR_ENVIRONMENT_SYMBOL, "Unclear environment", deactivate=True))
        if permutations_safe_override:
            for cg in self.cg_list:
                cg.set_permutations_safe_override(True)
        # Precompute, per coordination number, the separation-plane bookkeeping
        # (min/max number of points and plane separations) used by the
        # identification algorithms.
        self.minpoints = {}
        self.maxpoints = {}
        self.separations_cg = {}
        for cn in range(6, 21):
            for cg in self.get_implemented_geometries(coordination=cn):
                if only_symbols is not None and cg.ce_symbol not in only_symbols:
                    continue
                if cn not in self.separations_cg:
                    self.minpoints[cn] = 1000
                    self.maxpoints[cn] = 0
                    self.separations_cg[cn] = {}
                for algo in cg.algorithms:
                    # A separation is (points on one side, points in plane, points on the other side).
                    sep = (
                        len(algo.point_groups[0]),
                        len(algo.plane_points),
                        len(algo.point_groups[1]),
                    )
                    if sep not in self.separations_cg[cn]:
                        self.separations_cg[cn][sep] = []
                    self.separations_cg[cn][sep].append(cg.mp_symbol)
                    self.minpoints[cn] = min(self.minpoints[cn], algo.minimum_number_of_points)
                    self.maxpoints[cn] = max(self.maxpoints[cn], algo.maximum_number_of_points)
        self.maxpoints_inplane = {cn: max([sep[1] for sep in seps.keys()]) for cn, seps in self.separations_cg.items()}
    def __getitem__(self, key):
        """Return the CoordinationGeometry whose mp_symbol is ``key`` (dict-style access)."""
        return self.get_geometry_from_mp_symbol(key)
    def __contains__(self, item):
        """Return True if a coordination geometry with mp_symbol ``item`` exists."""
        try:
            self[item]
            return True
        except LookupError:
            return False
def __repr__(self):
"""
Returns a string with the list of coordination geometries.
"""
outs = [
"",
"#=================================#",
"# List of coordination geometries #",
"#=================================#",
"",
]
for cg in self.cg_list:
outs.append(repr(cg))
return "\n".join(outs)
def __str__(self):
"""
Returns a string with the list of coordination geometries that are implemented.
"""
outs = [
"",
"#=======================================================#",
"# List of coordination geometries currently implemented #",
"#=======================================================#",
"",
]
for cg in self.cg_list:
if cg.is_implemented():
outs.append(str(cg))
return "\n".join(outs)
def get_geometries(self, coordination=None, returned="cg"):
"""
Returns a list of coordination geometries with the given coordination number.
Args:
coordination: The coordination number of which the list of coordination geometries are returned.
returned: Type of objects in the list.
"""
geom = list()
if coordination is None:
for gg in self.cg_list:
if returned == "cg":
geom.append(gg)
elif returned == "mp_symbol":
geom.append(gg.mp_symbol)
else:
for gg in self.cg_list:
if gg.get_coordination_number() == coordination:
if returned == "cg":
geom.append(gg)
elif returned == "mp_symbol":
geom.append(gg.mp_symbol)
return geom
def get_symbol_name_mapping(self, coordination=None):
"""
Return a dictionary mapping the symbol of a CoordinationGeometry to its name.
Args:
coordination: Whether to restrict the dictionary to a given coordination.
Returns: Dictionary mapping the symbol of a CoordinationGeometry to its name.
"""
geom = {}
if coordination is None:
for gg in self.cg_list:
geom[gg.mp_symbol] = gg.name
else:
for gg in self.cg_list:
if gg.get_coordination_number() == coordination:
geom[gg.mp_symbol] = gg.name
return geom
def get_symbol_cn_mapping(self, coordination=None):
"""
Return a dictionary mapping the symbol of a CoordinationGeometry to its coordination.
Args:
coordination: Whether to restrict the dictionary to a given coordination.
Returns: Dictionary mapping the symbol of a CoordinationGeometry to its coordination.
"""
geom = {}
if coordination is None:
for gg in self.cg_list:
geom[gg.mp_symbol] = gg.coordination_number
else:
for gg in self.cg_list:
if gg.get_coordination_number() == coordination:
geom[gg.mp_symbol] = gg.coordination_number
return geom
def get_implemented_geometries(self, coordination=None, returned="cg", include_deactivated=False):
"""
Returns a list of the implemented coordination geometries with the given coordination number.
Args:
coordination: The coordination number of which the list of implemented coordination geometries
are returned.
returned: Type of objects in the list.
include_deactivated: Whether to include CoordinationGeometry that are deactivated.
"""
geom = list()
if coordination is None:
for gg in self.cg_list:
if gg.points is not None and ((not gg.deactivate) or include_deactivated):
if returned == "cg":
geom.append(gg)
elif returned == "mp_symbol":
geom.append(gg.mp_symbol)
else:
for gg in self.cg_list:
if (
gg.get_coordination_number() == coordination
and gg.points is not None
and ((not gg.deactivate) or include_deactivated)
):
if returned == "cg":
geom.append(gg)
elif returned == "mp_symbol":
geom.append(gg.mp_symbol)
return geom
def get_not_implemented_geometries(self, coordination=None, returned="mp_symbol"):
"""
Returns a list of the implemented coordination geometries with the given coordination number.
Args:
coordination: The coordination number of which the list of implemented coordination geometries
are returned.
returned: Type of objects in the list.
"""
geom = list()
if coordination is None:
for gg in self.cg_list:
if gg.points is None:
if returned == "cg":
geom.append(gg)
elif returned == "mp_symbol":
geom.append(gg.mp_symbol)
else:
for gg in self.cg_list:
if gg.get_coordination_number() == coordination and gg.points is None:
if returned == "cg":
geom.append(gg)
elif returned == "mp_symbol":
geom.append(gg.mp_symbol)
return geom
def get_geometry_from_name(self, name):
"""
Returns the coordination geometry of the given name.
Args:
name: The name of the coordination geometry.
"""
for gg in self.cg_list:
if gg.name == name or name in gg.alternative_names:
return gg
raise LookupError('No coordination geometry found with name "{name}"'.format(name=name))
def get_geometry_from_IUPAC_symbol(self, IUPAC_symbol):
"""
Returns the coordination geometry of the given IUPAC symbol.
Args:
IUPAC_symbol: The IUPAC symbol of the coordination geometry.
"""
for gg in self.cg_list:
if gg.IUPAC_symbol == IUPAC_symbol:
return gg
raise LookupError('No coordination geometry found with IUPAC symbol "{symbol}"'.format(symbol=IUPAC_symbol))
def get_geometry_from_IUCr_symbol(self, IUCr_symbol):
"""
Returns the coordination geometry of the given IUCr symbol.
Args:
IUCr_symbol: The IUCr symbol of the coordination geometry.
"""
for gg in self.cg_list:
if gg.IUCr_symbol == IUCr_symbol:
return gg
raise LookupError('No coordination geometry found with IUCr symbol "{symbol}"'.format(symbol=IUCr_symbol))
def get_geometry_from_mp_symbol(self, mp_symbol):
"""
Returns the coordination geometry of the given mp_symbol.
Args:
mp_symbol: The mp_symbol of the coordination geometry.
"""
for gg in self.cg_list:
if gg.mp_symbol == mp_symbol:
return gg
raise LookupError('No coordination geometry found with mp_symbol "{symbol}"'.format(symbol=mp_symbol))
def is_a_valid_coordination_geometry(self, mp_symbol=None, IUPAC_symbol=None, IUCr_symbol=None, name=None, cn=None):
"""
Checks whether a given coordination geometry is valid (exists) and whether the parameters are coherent with
each other.
Args:
mp_symbol: The mp_symbol of the coordination geometry.
IUPAC_symbol: The IUPAC_symbol of the coordination geometry.
IUCr_symbol: The IUCr_symbol of the coordination geometry.
name: The name of the coordination geometry.
cn: The coordination of the coordination geometry.
"""
if name is not None:
raise NotImplementedError("is_a_valid_coordination_geometry not implemented for the name")
if mp_symbol is None and IUPAC_symbol is None and IUCr_symbol is None:
raise SyntaxError(
"missing argument for is_a_valid_coordination_geometry : at least one of mp_symbol, "
"IUPAC_symbol and IUCr_symbol must be passed to the function"
)
if mp_symbol is not None:
try:
cg = self.get_geometry_from_mp_symbol(mp_symbol)
if IUPAC_symbol is not None:
if IUPAC_symbol != cg.IUPAC_symbol:
return False
if IUCr_symbol is not None:
if IUCr_symbol != cg.IUCr_symbol:
return False
if cn is not None:
if int(cn) != int(cg.coordination_number):
return False
return True
except LookupError:
return False
elif IUPAC_symbol is not None:
try:
cg = self.get_geometry_from_IUPAC_symbol(IUPAC_symbol)
if IUCr_symbol is not None:
if IUCr_symbol != cg.IUCr_symbol:
return False
if cn is not None:
if cn != cg.coordination_number:
return False
return True
except LookupError:
return False
elif IUCr_symbol is not None:
try:
cg = self.get_geometry_from_IUCr_symbol(IUCr_symbol)
if cn is not None:
if cn != cg.coordination_number:
return False
return True
except LookupError:
return True
raise Exception("Should not be here !")
    def pretty_print(self, type="implemented_geometries", maxcn=8, additional_info=None):
        """
        Return a string with a list of the Coordination Geometries.
        Args:
            type: Type of string to be returned (all_geometries, all_geometries_latex_images, all_geometries_latex,
                implemented_geometries). Note: the parameter shadows the builtin `type`
                but is kept for backward compatibility.
            maxcn: Maximum coordination.
            additional_info: Whether to add some additional info for each coordination geometry.
        Returns: String describing the list of coordination geometries.
        """
        if type == "all_geometries_latex_images":
            # LaTeX output with one \includegraphics per implemented geometry.
            mystring = ""
            for cn in range(1, maxcn + 1):
                mystring += "\\section*{{Coordination {cn}}}\n\n".format(cn=cn)
                for cg in self.get_implemented_geometries(coordination=cn, returned="cg"):
                    mystring += "\\subsubsection*{{{mp} : {name}}}\n\n".format(mp=cg.mp_symbol, name=cg.get_name())
                    mystring += "IUPAC : {iupac}\n\nIUCr : {iucr}\n\n".format(
                        iupac=cg.IUPAC_symbol, iucr=cg.IUCr_symbol
                    )
                    mystring += "\\begin{center}\n"
                    # Image file name is derived from the mp_symbol "<letters>:<cn>".
                    mystring += "\\includegraphics[scale=0.15]{{images/{let}_{cif}.png}}\n".format(
                        let=cg.mp_symbol.split(":")[0], cif=cg.mp_symbol.split(":")[1]
                    )
                    mystring += "\\end{center}\n\n"
                for cg in self.get_not_implemented_geometries(cn, returned="cg"):
                    mystring += "\\subsubsection*{{{mp} : {name}}}\n\n".format(mp=cg.mp_symbol, name=cg.get_name())
                    mystring += "IUPAC : {iupac}\n\nIUCr : {iucr}\n\n".format(
                        iupac=cg.IUPAC_symbol, iucr=cg.IUCr_symbol
                    )
        elif type == "all_geometries_latex":
            # LaTeX itemized list (no images).
            mystring = ""
            for cn in range(1, maxcn + 1):
                mystring += "\\subsection*{{Coordination {cn}}}\n\n".format(cn=cn)
                mystring += "\\begin{itemize}\n"
                for cg in self.get_implemented_geometries(coordination=cn, returned="cg"):
                    mystring += "\\item {mp} $\\rightarrow$ {name} ".format(
                        mp=cg.mp_symbol.replace("_", "\\_"), name=cg.get_name()
                    )
                    mystring += "(IUPAC : {iupac} - IUCr : {iucr})\n".format(
                        iupac=cg.IUPAC_symbol_str,
                        iucr=cg.IUCr_symbol_str.replace("[", "$[$").replace("]", "$]$"),
                    )
                for cg in self.get_not_implemented_geometries(cn, returned="cg"):
                    mystring += "\\item {mp} $\\rightarrow$ {name} ".format(
                        mp=cg.mp_symbol.replace("_", "\\_"), name=cg.get_name()
                    )
                    mystring += "(IUPAC : {iupac} - IUCr : {iucr})\n".format(
                        iupac=cg.IUPAC_symbol_str,
                        iucr=cg.IUCr_symbol_str.replace("[", "$[$").replace("]", "$]$"),
                    )
                mystring += "\\end{itemize}\n\n"
        else:
            # Plain-text output ("implemented_geometries" or "all_geometries").
            mystring = "+-------------------------+\n| Coordination geometries |\n+-------------------------+\n\n"
            for cn in range(1, maxcn + 1):
                mystring += "==>> CN = {cn} <<==\n".format(cn=cn)
                if type == "implemented_geometries":
                    for cg in self.get_implemented_geometries(coordination=cn):
                        # " *" marks geometries that carry neighbors-sets hints.
                        if additional_info is not None:
                            if "nb_hints" in additional_info:
                                if cg.neighbors_sets_hints is not None:
                                    addinfo = " *"
                                else:
                                    addinfo = ""
                            else:
                                addinfo = ""
                        else:
                            addinfo = ""
                        mystring += " - {mp} : {name}{addinfo}\n".format(
                            mp=cg.mp_symbol, name=cg.get_name(), addinfo=addinfo
                        )
                elif type == "all_geometries":
                    for cg in self.get_geometries(coordination=cn):
                        mystring += " - {mp} : {name}\n".format(mp=cg.mp_symbol, name=cg.get_name())
                mystring += "\n"
        return mystring
|
gmatteo/pymatgen
|
pymatgen/analysis/chemenv/coordination_environments/coordination_geometries.py
|
Python
|
mit
| 56,971
|
[
"Jmol",
"pymatgen"
] |
4c24076357bad7f266ad84402b145097dc2ffc18b93e6b7a52141670e0dc4d35
|
# -*- coding: utf-8 -*-
#
# precise_spiking.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Comparing precise and grid-based neuron models
----------------------------------------------
In traditional time-driven simulations, spikes are constrained to the
time grid at a user-defined resolution. The precise spiking models
overcome this by handling spikes in continuous time [1]_ and [2]_.
The precise spiking neuron models in NEST include: ``iaf_psc_exp_ps``,
``iaf_psc_alpha_ps`` and ``iaf_psc_delta_ps``.
More detailed information about the precise spiking models can be
found here:
https://www.nest-simulator.org/simulations-with-precise-spike-times/
This example compares the conventional grid-constrained model and the
precise version for an integrate-and-fire neuron model with exponential
post-synaptic currents [2]_.
References
~~~~~~~~~~~
.. [1] Morrison A, Straube S, Plesser HE, Diesmann M. 2007. Exact subthreshold
integration with continuous spike times in discrete-time neural network
simulations. Neural Computation. 19(1):47-79.
https://doi.org/10.1162/neco.2007.19.1.47
.. [2] Hanuschkin A, Kunkel S, Helias M, Morrison A and Diesmann M. 2010. A
general and efficient method for incorporating precise spike times in
globally time-driven simulations. Frontiers in Neuroinformatics. 4:113.
https://doi.org/10.3389/fninf.2010.00113
"""
###############################################################################
# First, we import all necessary modules for simulation, analysis, and
# plotting.
import nest
import matplotlib.pyplot as plt
###############################################################################
# Second, we assign the simulation parameters to variables.
simtime = 100.0  # ms - total simulated time per run
stim_current = 700.0  # pA - DC stimulation amplitude
resolutions = [0.1, 0.5, 1.0]  # ms - simulation time steps to compare
###################################################################################
# Now, we simulate the two versions of the neuron models (i.e. discrete-time:
# ``iaf_psc_exp``; precise: ``iaf_psc_exp_ps``) for each of the defined
# resolutions. The neurons use their default parameters and we stimulate them
# by injecting a current using a ``dc_generator`` device. The membrane
# potential is recorded by a ``voltmeter``, the spikes are recorded by
# a ``spike_detector``. The data is stored in a dictionary for later
# use.
# data[resolution][model] -> recorded traces for one simulation run
data = {}
for h in resolutions:
    data[h] = {}
    for model in ["iaf_psc_exp", "iaf_psc_exp_ps"]:
        # Fresh kernel per run so the resolution can be set before node creation.
        nest.ResetKernel()
        nest.SetKernelStatus({'resolution': h})
        neuron = nest.Create(model)
        # Sample the membrane potential on the same grid as the simulation step.
        voltmeter = nest.Create("voltmeter", params={"interval": h})
        dc = nest.Create("dc_generator", params={"amplitude": stim_current})
        sd = nest.Create("spike_detector")
        nest.Connect(voltmeter, neuron)
        nest.Connect(dc, neuron)
        nest.Connect(neuron, sd)
        nest.Simulate(simtime)
        vm_status = voltmeter.events
        sd_status = sd.events
        # Keep the raw traces plus the threshold so spikes can be drawn at V_th.
        data[h][model] = {"vm_times": vm_status['times'],
                          "vm_values": vm_status['V_m'],
                          "spikes": sd_status['times'],
                          "V_th": neuron.V_th}
###############################################################################
# After simulation, we plot the results from the simulation. The figure
# illustrates the membrane potential excursion of the two models due to
# injected current simulated for 100 ms for a different timestep in each panel.
# The blue line is the voltage trace of the discrete-time neuron, the red line
# is that of the precise spiking version of the same model.
#
# Please note that the temporal differences between the traces in the different
# panels is caused by the different resolutions used.
colors = ["#3465a4", "#cc0000"]  # blue: grid-based model, red: precise model
for v, h in enumerate(sorted(data)):
    # One panel per resolution, stacked vertically.
    plot = plt.subplot(len(data), 1, v + 1)
    plot.set_title("Resolution: {0} ms".format(h))
    for i, model in enumerate(data[h]):
        times = data[h][model]["vm_times"]
        potentials = data[h][model]["vm_values"]
        spikes = data[h][model]["spikes"]
        # Draw spike markers at threshold height.
        spikes_y = [data[h][model]["V_th"]] * len(spikes)
        plot.plot(times, potentials, "-", c=colors[i], ms=5, lw=2, label=model)
        plot.plot(spikes, spikes_y, ".", c=colors[i], ms=5, lw=2)
    if v == 2:
        # Only the bottom panel keeps its x tick labels and shows the legend.
        plot.legend(loc=4)
    else:
        plot.set_xticklabels('')
plt.show()
|
weidel-p/nest-simulator
|
pynest/examples/precise_spiking.py
|
Python
|
gpl-2.0
| 5,092
|
[
"NEURON"
] |
211791df98bb1e13c427d7976b7cc8372bdf6d1adb208a6e482315d0475dab84
|
#!/bin/env/python
import collections
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sb
import pandas as pd
import pathlib as pl
# from . import files
from . import amm_results2 as res
# from . import amm_methods as ameth
# sb.set_context('poster')
# sb.set_context('talk')
# sb.set_cmap('tab10')
# All figures produced by this module are written below this directory.
FIGS_SAVE_DIR = pl.Path('../figs/amm')
USE_FONT = 'DejaVu Sans'
mpl.rcParams['font.family'] = 'sans-serif'
mpl.rcParams['font.sans-serif'] = [USE_FONT]
# to avoid type3 fonts; 42 = truetype, which is more flexible than type1
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
def fix_ticks():
    """Re-enable bottom/left tick marks that seaborn's white style disables."""
    # recover from seaborn white style messing this up
    plt.rcParams['xtick.bottom'] = True
    plt.rcParams['ytick.left'] = True
# Create the figure output directory up front so save_fig can assume it exists;
# exist_ok replaces the previous racy exists()-then-mkdir check.
FIGS_SAVE_DIR.mkdir(parents=True, exist_ok=True)
def set_seaborn_style(stylename):
    """Apply a seaborn style, then restore tick visibility (see fix_ticks)."""
    sb.set_style(stylename)
    fix_ticks()
def save_fig(name):
    """Save the current figure to FIGS_SAVE_DIR as <name>.pdf with a tight bounding box."""
    # plt.savefig(os.path.join(FIGS_SAVE_DIR, name + '.png'),
    #             dpi=300, bbox_inches='tight')
    plt.savefig(os.path.join(FIGS_SAVE_DIR, name + '.pdf'),
                bbox_inches='tight')
def _xlabel_for_xmetric(x_metric):
return {'d': 'Sketch Size',
'secs': 'Time (s)',
'muls': 'Number of Multiplies',
'nlookups': 'Number of Lookups',
'ops': 'Number of Operations',
'Latency': 'Latency (ms)',
'Speedup': 'Speedup Over Exact Matrix Multiply',
'NormalizedTime': 'Normalized Latency',
'Throughput': 'Throughput (elements/s)'}[x_metric]
def _ylabel_for_xmetric(y_metric):
if y_metric == 'Relative Accuracy':
return 'Normalized\nAccuracy'
if y_metric == 'Accuracy':
return 'Classification\nAccuracy'
return y_metric
def add_ylabels_on_right(axes, fmt, vals):
    """Add a right-hand-side label to each axis using a hidden twin y-axis.

    Args:
        axes: Sequence of matplotlib axes.
        fmt: Format string applied to each entry of vals.
        vals: One value per axis, interpolated into fmt for the label text.
    """
    for i, ax in enumerate(axes):
        lbl = fmt.format(vals[i])
        # The twin axis is used purely as a label carrier; hide all its
        # spines, ticks and tick labels below.
        ax2 = ax.twinx()
        ax2.get_xaxis().set_visible(False)
        ax2.yaxis.set_label_position('right')
        ax2.set_ylabel(lbl, fontsize=14, family=USE_FONT, labelpad=5)
        sb.despine(ax=ax2, top=True, left=True, bottom=True, right=True)
        plt.setp(ax2.get_xticklabels(), visible=False)
        plt.setp(ax2.get_yticklabels(), visible=False)
        ax2.set_yticks([])
        ax2.tick_params(axis='y', which='y', length=0)
def scan_speed_fig(save=True):
    """Bar chart of scan (f()) throughput per algorithm and code size.

    Reads timing results via amm_results2.scan_timings(), renames the raw
    algorithm names for display, and plots billions of dot products per
    second, grouped by algorithm with one bar per encoding size B.

    Args:
        save: if True, write FIGS_SAVE_DIR/scan_speed.pdf; otherwise show
            the figure interactively.
    """
    # ================================ data cleaning
    df = res.scan_timings()

    # map raw algo names from the timing results to display names; this
    # ordered dict also fixes the bar-group order on the x axis
    name_map = collections.OrderedDict()
    name_map['mithral scan'] = 'MADDNESS'
    name_map['bolt scan safe uint16'] = 'Bolt'
    name_map['popcount scan'] = 'Popcount'
    name_map['pq scan'] = 'PQ / OPQ'
    df = res.rename_values_in_col(df, 'algo', name_map)
    df = res.melt_times(df)
    # alright, can't get stds to show without really screwing with stuff

    # throughput = dot products per run / time; times are in ms, so
    # dividing by 1e6 yields billions of dot products per second
    df['thruput'] = df['N'] * df['M'] / df['time']
    df['thruput'] /= 1e6  # just use units of billions; times are in ms

    # ================================ fig creation
    sb.set_context("talk")
    fig, ax = plt.subplots(1, 1, figsize=(8, 4))
    axes = [ax]
    # one bar group per algo, one bar per code size; error bars = std dev
    # across timing trials
    sb.barplot(data=df, x='algo', y='thruput', units='timing_trial',
               hue='B', hue_order=[8, 16, 32, 64], order=name_map.values(),
               ax=ax, ci='sd')

    # ------------------------ clean up / format axes
    for ax in axes[:-1]:
        # remove x labels except for bottom axis
        plt.setp(ax.get_xticklabels(), visible=False)
        ax.get_xaxis().set_visible(False)

    # single shared legend below the plot instead of a per-axes legend
    handles, labels = axes[0].get_legend_handles_labels()
    labels = ['8B Codes', '16B Codes', '32B Codes', '64B Codes']
    plt.figlegend(handles, labels, loc='lower center', ncol=4, fontsize=14)
    for ax in axes:
        ax.set_ylabel('Billion Dot Products/s', family=USE_FONT)
        ax.get_legend().remove()

    # ------------------------ have bottom / top axes print title, x info
    axes[0].set_title('Speed of f() Functions for Different Encoding Sizes',
                      y=1.04, family=USE_FONT, fontsize=20)
    axes[-1].tick_params(axis='x', which='major', pad=4)
    axes[-1].set_xlabel("", labelpad=-30)
    # `ax` here is the last axis from the loop above
    ax.xaxis.set_ticks_position('none')

    # ------------------------ save / show plot
    plt.tight_layout()
    plt.subplots_adjust(bottom=.23)
    if save:
        save_fig('scan_speed')
    else:
        plt.show()
def encode_speed_fig(save=True):
    """Line plot of encoding (g()) throughput vs number of matrix columns.

    Reads timings from amm_results2.encode_timings(), renames algorithms
    for display, and plots encoding speed in GB/s for encoding sizes of
    8, 16, and 32 bytes, one subplot per size (shared y axis).

    Args:
        save: if True, write FIGS_SAVE_DIR/encode_speed.pdf; otherwise
            show the figure interactively.
    """
    # ================================ data cleaning
    df = res.encode_timings()
    df = df.loc[df['algo'] != 'mithral encode i16']

    # raw algo name -> display name; insertion order fixes legend order
    name_map = collections.OrderedDict()
    name_map['mithral encode f32'] = 'MADDNESS'
    name_map['bolt encode'] = 'Bolt'
    name_map['pq encode'] = 'PQ'
    name_map['opq encode'] = 'OPQ'
    df = res.rename_values_in_col(df, 'algo', name_map)
    df = res.melt_times(df, ntimes=5)

    time_secs = (df['time'] * .001)  # times are in ms
    # bytes per input scalar: f32 (4) by default, overridden for the
    # integer variants, which keep their raw names after the rename above.
    # NOTE(fix): assign via df.loc[mask, col] instead of the old chained
    # df[col].loc[mask] = v, which pandas may apply to a copy and drop.
    df['elemsz'] = 4
    df.loc[df['algo'].str.endswith('i8'), 'elemsz'] = 1
    df.loc[df['algo'].str.endswith('i16'), 'elemsz'] = 2
    df['thruput'] = df['N'] * df['D'] * df['elemsz'] / time_secs  # B/sec
    df['thruput'] /= 1e9  # convert to GB/s

    # ================================ fig creation
    sb.set_context('talk')
    set_seaborn_style('white')
    use_nbytes = [8, 16, 32]
    fig, axes = plt.subplots(len(use_nbytes), 1, figsize=(6, 6.5),
                             sharey=True)

    order = name_map.values()
    # solid line for MADDNESS, dashed lines for the baselines; this dict
    # is loop-invariant, so build it once (was rebuilt per subplot)
    dashes = {name: ([] if name.lower().startswith('maddness') else
                     mpl.rcParams['lines.dashed_pattern'])
              for name in order}
    for i, nbytes in enumerate(use_nbytes):
        data = df.loc[df['B'] == nbytes]
        sb.lineplot(data=data, x='D', y='thruput', hue='algo',
                    ax=axes[i], ci='sd', estimator='mean', hue_order=order,
                    style='algo', style_order=order, dashes=dashes,
                    palette=my_colors_list)

    # ------------------------ axis cleanup
    axes[0].set_title('Speed of g() Functions\nfor Different Encoding Sizes',
                      y=1.04, family=USE_FONT, fontsize=16)
    # shared legend below the subplots; drop seaborn's df-column-name entry
    handles, labels = axes[0].get_legend_handles_labels()
    handles, labels = handles[1:], labels[1:]  # rm df column name
    plt.figlegend(handles, labels, loc='lower center', ncol=4, fontsize=13)
    for ax in axes:
        ax.semilogy()
        ax.set_ylim([.02, 1000])
        ax.set_yticks([.1, 10, 1000])
        ax.get_legend().remove()
        ax.set_ylabel('Encoding\nSpeed (GB/s)',
                      family=USE_FONT, fontsize=14)
    for ax in axes[:-1]:
        # remove x labels except for bottom axis
        # NOTE(fix): `which` must be 'major', 'minor', or 'both'; the old
        # which='x' is rejected by modern matplotlib
        ax.tick_params(axis='x', which='both', length=0)
        plt.setp(ax.get_xticklabels(), visible=False)
        ax.set_xlabel("", visible=False)
    axes[-1].set_xlabel('Number of Columns in Matrix A',
                        family=USE_FONT, fontsize=14)

    # add byte counts on the right
    add_ylabels_on_right(axes, "{}B Encodings", use_nbytes)

    plt.tight_layout()
    plt.subplots_adjust(bottom=.17, hspace=.15)
    if save:
        save_fig('encode_speed')
    else:
        plt.show()
def lut_speed_fig(save=True):
    """Line plot of LUT-construction (h()) throughput vs rows of matrix B.

    Reads timings from amm_results2.lut_timings(), renames algorithms for
    display (MADDNESS variants are distinguished by their lookup-table
    constant L, with -1 meaning unrestricted, shown as ∞), and plots
    scalars encoded per second for encoding sizes 8, 16, and 32 bytes.

    Args:
        save: if True, write FIGS_SAVE_DIR/lut_speed.pdf; otherwise show
            the figure interactively.
    """
    # ================================ data cleaning
    df = res.lut_timings()

    name_map = collections.OrderedDict()
    name_map['mithral lut dense'] = 'MADDNESS'
    name_map['mithral lut sparse'] = 'MADDNESS'
    name_map['bolt lut'] = 'Bolt'
    name_map['pq lut'] = 'PQ'
    name_map['opq lut'] = 'OPQ'
    df = res.rename_values_in_col(df, 'algo', name_map)

    names = list(df['algo'])
    consts = np.array(df['lutconst'])

    # display names for the MADDNESS lut-constant variants; -1 encodes an
    # unrestricted (dense) lookup table
    mithral_const_to_name = collections.OrderedDict()
    mithral_const_to_name[-1] = 'MADDNESS, L = ∞'
    mithral_const_to_name[4] = 'MADDNESS, L = 4'
    mithral_const_to_name[2] = 'MADDNESS, L = 2'
    mithral_names = list(mithral_const_to_name.values())

    # add lut constant into the name for the MADDNESS variations.
    # NOTE(fix): this used to test startswith('Mithral'), which never
    # matched after the rename to 'MADDNESS' above, so the L = {2, 4, ∞}
    # variants were silently collapsed into one series.
    new_names = []
    ismithral = []
    for i, name in enumerate(names):
        if not name.startswith('MADDNESS'):
            new_names.append(name)
            ismithral.append(False)
            continue
        new_names.append(mithral_const_to_name[int(consts[i])])
        ismithral.append(True)
    df['algo'] = new_names
    df['ismithral'] = ismithral

    df = res.melt_times(df, ntimes=5)
    df['thruput'] = df['N'] * df['D'] / df['time']

    # ================================ fig creation
    sb.set_context('talk')
    set_seaborn_style('white')
    use_nbytes = [8, 16, 32]
    fig, axes = plt.subplots(len(use_nbytes), 1, figsize=(6, 8), sharey=True)

    # interleave MADDNESS variants with the baselines so the three legend
    # columns line up nicely
    order = [mithral_names[2], 'Bolt',
             mithral_names[1], 'PQ',
             mithral_names[0], 'OPQ']
    # solid lines for MADDNESS variants, dashed for everything else.
    # NOTE(fix): match on 'maddness' (was 'mithral', which never matched
    # the renamed series, drawing every line dashed); also hoisted out of
    # the loop since it doesn't depend on the subplot. A dead, immediately
    # overwritten dashes dict of '-'/'--' strings was removed.
    dashes = {name: ([] if name.lower().startswith('maddness') else
                     mpl.rcParams['lines.dashed_pattern'])
              for name in order}

    for i, nbytes in enumerate(use_nbytes):
        data = df.loc[df['B'] == nbytes]
        sb.lineplot(data=data, x='D', y='thruput', hue='algo',
                    units='timing_trial', ax=axes[i], ci='sd',
                    estimator=None, hue_order=order,
                    style='algo', style_order=order, dashes=dashes)

    # ------------------------ axis cleanup
    axes[0].set_title('Speed of h() Functions\nfor Different Encoding Sizes',
                      y=1.04, family=USE_FONT, fontsize=18)
    handles, labels = axes[-1].get_legend_handles_labels()
    handles, labels = handles[1:], labels[1:]  # rm df column name
    plt.figlegend(handles, labels, loc='lower center', ncol=3, fontsize=13)
    for ax in axes:
        ax.semilogy()
        ax.get_legend().remove()
        ax.set_ylabel('Scalars Encoded/s',
                      family=USE_FONT, fontsize=14)
    for ax in axes[:-1]:
        # remove x labels except for bottom axis
        # NOTE(fix): which='x' is invalid; must be 'major'/'minor'/'both'
        ax.tick_params(axis='x', which='both', length=0)
        plt.setp(ax.get_xticklabels(), visible=False)
        ax.set_xlabel("", visible=False)
    axes[-1].set_xlabel('Number of Rows in Matrix B',
                        family=USE_FONT, fontsize=14)

    # add byte counts on the right
    add_ylabels_on_right(axes, "{}B Encodings", use_nbytes)

    plt.tight_layout()
    plt.subplots_adjust(bottom=.18, hspace=.15)
    if save:
        save_fig('lut_speed')
    else:
        plt.show()
def lotsa_colors_cmap(value):
    """Map a value in [0, 1] to a color drawn from tab20/tab20b/tab20c.

    Splits the unit interval into (roughly) thirds, each covering one of
    the three 20-color qualitative colormaps, yielding ~60 distinct
    colors in total.
    """
    assert 0 <= value <= 1  # if this throws, I don't understand cmaps
    if value < .3333:
        return plt.get_cmap('tab20')(3 * value)
    if value < .6666:
        return plt.get_cmap('tab20b')((3 * value) - 1)
    return plt.get_cmap('tab20c')((3 * value) - 2)
# Fixed qualitative palette shared by all figures: Set1, then Set3 minus
# its pale yellow, then the first six Dark2 colors.
my_colors_list = (plt.get_cmap('Set1').colors
                  + plt.get_cmap('Set3').colors[:1]  # skip light yellow
                  + plt.get_cmap('Set3').colors[2:]
                  + plt.get_cmap('Dark2').colors[:6])
# darker yellow that reads better on a white background than Set1's
# bright yellow
new_yellow = (204. / 255, 204. / 255, 0. / 255)
# swap the 6th palette entry (the bright yellow) for the darker yellow
my_colors_list = my_colors_list[:5] + (new_yellow,) + my_colors_list[6:]
# Methods shown by default in the accuracy-vs-speed figures; this order is
# also the legend and z-order used by lineplot() below.
DEFAULT_PLOT_METHODS = (
    'MADDNESS', 'MADDNESS-PQ', 'Exact', 'ScalarQuantize', 'Bolt',
    'FastJL', 'HashJL', 'PCA', 'SparsePCA')
def lineplot(data, ax, x_metric, y_metric, units=None, scatter=False,
             plot_methods=DEFAULT_PLOT_METHODS, first_two_same_marker=True,
             **kwargs):
    """Draw one series per method on `ax`, as lines or as a scatter.

    Args:
        data: DataFrame with at least a 'method' column plus the
            x_metric and y_metric columns.
        ax: matplotlib Axes to draw on.
        x_metric, y_metric: column names for the x / y values.
        units: seaborn `units` column name; when given, raw observations
            are drawn (estimator=None) instead of a mean estimate.
        scatter: if True, draw a scatterplot and return early.
        plot_methods: methods to include, in legend order; pass None to
            keep every method present in `data`, with MADDNESS variants
            and 'Exact' moved to the front.
        first_two_same_marker: give the first two series the same marker
            (so MADDNESS and MADDNESS-PQ look alike).
        **kwargs: forwarded to sb.lineplot (e.g. ci=None).
    """
    # seaborn requires estimator=None when plotting individual units
    estimator = 'mean' if units is None else None
    if plot_methods is not None:
        data = data.loc[data['method'].isin(set(plot_methods))]
        order = plot_methods
    else:
        order = list(data['method'].unique())
        # collect MADDNESS variants (reversed so they lead the legend)
        mithral_methods = [method for method in order
                           if method.lower().startswith('maddness')][::-1]
        move_methods_to_front = mithral_methods[:]
        move_methods_to_front.append('Exact')
        # pull the front-runners out of `order` if present; drop any that
        # aren't actually in the data
        for elem in move_methods_to_front[:]:
            if elem in order:
                order.remove(elem)
            else:
                move_methods_to_front.remove(elem)
        order = move_methods_to_front + sorted(order)
    # keep only methods actually present so seaborn's style/hue mapping
    # doesn't choke on missing categories
    order = [method for method in order if method in data['method'].unique()]

    # have to specify markers or seaborn freaks out because it doesn't
    # have enough of them
    initial_markers = ('D', 'D', 's') if first_two_same_marker else ('D', 's')
    use_markers = initial_markers + (
        'o', 'v', '^', '<', '>', '8', 'p', 'h', 'd', 'P', 'X', '*', 'D')
    if scatter:
        # NOTE(review): scatterplot receives `estimator=`; seaborn's
        # scatterplot has no such parameter, so this presumably relies on
        # the installed seaborn version tolerating it -- confirm.
        sb.scatterplot(alpha=.25,  # seems to suck the least
                       data=data, x=x_metric, y=y_metric, hue='method',
                       style='method', style_order=order, hue_order=order,
                       markers=use_markers, estimator=estimator,
                       palette=my_colors_list, ax=ax)
        return
    kwargs.setdefault('ci', 'sd')
    sb.lineplot(data=data, x=x_metric, y=y_metric, hue='method',
                style='method', style_order=order, hue_order=order,
                markers=use_markers, estimator=estimator,
                dashes=False, palette=my_colors_list, ax=ax, **kwargs)
    # draw earlier (higher-priority) series on top of later ones
    lines = ax.get_lines()
    for i, line in enumerate(lines):
        line.set_zorder(10 - i)
def cifar_fig(save=False, x_metric='Speedup', y_metric='Accuracy'):
    """Accuracy-vs-speedup figure for the CIFAR-10/100 softmax classifiers.

    Two stacked subplots (CIFAR-10 on top, CIFAR-100 below) sharing the x
    axis, one series per approximation method.

    NOTE(review): the `save` parameter is currently ignored -- the figure
    is always written via save_fig() and never shown interactively.
    """
    df10 = res.cifar10_amm()
    df100 = res.cifar100_amm()
    sb.set_context('poster')
    fig, axes = plt.subplots(2, 1, figsize=(11, 8.5), sharex=True)
    lineplot(df10, axes[0], x_metric=x_metric, y_metric=y_metric)
    lineplot(df100, axes[1], x_metric=x_metric, y_metric=y_metric)
    xlbl = _xlabel_for_xmetric(x_metric)
    plt.suptitle('Approximating Softmax Classifiers', family=USE_FONT)
    axes[0].set_title('CIFAR-10', family=USE_FONT)
    for ax in axes:
        ax.set_ylabel(_ylabel_for_xmetric(y_metric), family=USE_FONT)
    axes[0].set_xlabel(None)
    axes[1].set_xlabel(xlbl, family=USE_FONT)
    axes[1].set_title('CIFAR-100', family=USE_FONT)
    # one shared legend below the subplots instead of per-axes legends
    handles, labels = axes[0].get_legend_handles_labels()
    handles, labels = handles[1:], labels[1:]  # rm 'Method' title
    for ax in axes.ravel():
        ax.get_legend().remove()
    # clamp y limits so near-chance results don't stretch the axes
    if y_metric == 'Accuracy':
        axes[0].set_ylim([.09, .96])
        axes[1].set_ylim([.009, .73])
    elif y_metric == '1 - NMSE':
        axes[0].set_ylim([0, 1.02])
        axes[1].set_ylim([0, 1.02])
    plt.figlegend(handles, labels, loc='lower center', ncol=3)
    axes[0].semilogx()
    for ax in axes:
        if x_metric == 'Speedup':
            ax.set_xlim([.94, ax.get_xlim()[1]])
        elif x_metric == 'NormalizedTime':
            ax.set_xlim([ax.get_xlim()[0], 1.06])
    plt.tight_layout()
    plt.subplots_adjust(top=.89, bottom=.32)
    save_fig('cifar_{}_{}'.format(x_metric, y_metric))
def fig1(save=False, x_metric='Speedup', y_metric='Accuracy'):
    """Headline (paper Figure 1) version of the CIFAR comparison.

    Same data as cifar_fig(), but restricted to our method, the exact
    baseline, and the two strongest rivals, renamed to the labels used in
    the paper's introduction.

    NOTE(review): the `save` parameter is currently ignored -- the figure
    is always written via save_fig('fig1').
    """
    df10 = res.cifar10_amm()
    df100 = res.cifar100_amm()
    sb.set_context('poster')
    fig, axes = plt.subplots(2, 1, figsize=(11, 10), sharex=True)
    # rename methods to the attribution-style labels used in Figure 1
    replace_names_dict = {'MADDNESS': 'Ours',
                          'SparsePCA': 'Mairal et al.',
                          'HashJL': 'Dasgupta et al.',
                          'Exact': 'Exact Matrix Multiply'
                          }
    df10 = res.rename_values_in_col(df10, 'method', replace_names_dict)
    df100 = res.rename_values_in_col(df100, 'method', replace_names_dict)
    plot_methods = ['Ours', 'Exact Matrix Multiply', 'Mairal et al.',
                    'Dasgupta et al.']
    lineplot(df10, axes[0], x_metric=x_metric, y_metric=y_metric,
             plot_methods=plot_methods, ci=None, first_two_same_marker=False)
    lineplot(df100, axes[1], x_metric=x_metric, y_metric=y_metric,
             plot_methods=plot_methods, ci=None, first_two_same_marker=False)
    xlbl = _xlabel_for_xmetric(x_metric)
    plt.suptitle('Approximating Softmax Classifiers', family=USE_FONT)
    axes[0].set_title('CIFAR-10', family=USE_FONT)
    for ax in axes:
        ax.set_ylabel(_ylabel_for_xmetric(y_metric), family=USE_FONT)
    axes[0].set_xlabel(None)
    axes[1].set_xlabel(xlbl, family=USE_FONT)
    axes[1].set_title('CIFAR-100', family=USE_FONT)
    # one shared legend below the subplots instead of per-axes legends
    handles, labels = axes[0].get_legend_handles_labels()
    handles, labels = handles[1:], labels[1:]  # rm 'Method' title
    for ax in axes.ravel():
        ax.get_legend().remove()
    # clamp y limits so near-chance results don't stretch the axes
    if y_metric == 'Accuracy':
        axes[0].set_ylim([.09, .96])
        axes[1].set_ylim([.009, .73])
    elif y_metric == '1 - NMSE':
        axes[0].set_ylim([0, 1.02])
        axes[1].set_ylim([0, 1.02])
    plt.figlegend(handles, labels, loc='lower center', ncol=2)
    axes[0].semilogx()
    for ax in axes:
        if x_metric == 'Speedup':
            ax.set_xlim([.94, ax.get_xlim()[1]])
        elif x_metric == 'NormalizedTime':
            ax.set_xlim([ax.get_xlim()[0], 1.06])
    plt.tight_layout()
    plt.subplots_adjust(top=.89, bottom=.23)
    save_fig('fig1')
def caltech_fig(x_metric='Speedup', y_metric='1 - NMSE'):
    """Approximation quality vs speedup on the Caltech image-filter task.

    Two subplots: a Sobel filter (top) and a 5x5 difference-of-Gaussians
    filter (bottom), each plotting 1 - normalized MSE against speedup for
    the kept methods.
    """
    df0 = res.caltech_amm(filt='sobel')
    df1 = res.caltech_amm(filt='dog5x5')
    sb.set_context('poster')
    fig, axes = plt.subplots(2, 1, figsize=(12, 8))
    # other baselines perform too poorly here to be informative; even
    # ScalarQuantize is slower than the custom exact matmul (and in the
    # 5x5 plot it's occluded by MADDNESS near 1x speedup)
    keep_methods = ['MADDNESS', 'MADDNESS-PQ', 'SparsePCA']
    df0 = df0.loc[df0['method'].isin(keep_methods)]
    df1 = df1.loc[df1['method'].isin(keep_methods)]
    lineplot(df0, axes[0], x_metric=x_metric, y_metric=y_metric,
             plot_methods=keep_methods)
    lineplot(df1, axes[1], x_metric=x_metric, y_metric=y_metric,
             plot_methods=keep_methods)
    # one shared legend below the subplots instead of per-axes legends
    handles, labels = axes[-1].get_legend_handles_labels()
    handles, labels = handles[1:], labels[1:]  # rm 'Method' title
    plt.figlegend(handles, labels, loc='lower center', ncol=len(keep_methods))
    for ax in axes:
        ax.set_xlabel(_xlabel_for_xmetric(x_metric), fontsize=20)
        ax.set_ylabel(y_metric)
        ax.get_legend().remove()
        ax.set_ylim([-.01, 1.01])
        # dashed vertical line marking the exact-matmul (1x) speedup
        ax.plot([1, 1], ax.get_ylim(), 'k--')
    axes[0].set_title('Approximating a Sobel Filter', y=1.02, fontsize=28)
    axes[1].set_title('Approximating a Gaussian Filter', y=1.02, fontsize=28)
    plt.tight_layout()
    plt.subplots_adjust(bottom=.22, hspace=.7)
    save_fig('caltech_{}_{}'.format(x_metric, '1 - NMSE'))
def ucr_fig(x_metric='Speedup', y_metric='Relative Accuracy'):
    """Per-dataset scatter of accuracy vs speedup on the UCR archive.

    One subplot per k in {64, 128, 256} passed to res.ucr_amm (the
    meaning of k is defined in amm_results2 -- presumably a size budget;
    confirm there). Each point is one UCR dataset under one method.
    """
    df0 = res.ucr_amm(k=64)
    df1 = res.ucr_amm(k=128)
    df2 = res.ucr_amm(k=256)
    sb.set_context('poster')
    fig, axes = plt.subplots(3, 1, figsize=(12, 13), sharex=True)

    def clean_df(df):
        # accuracy change relative to the raw 1-nn accuracy per dataset
        df['Change in Accuracy'] = df['Accuracy'] - df['acc-1nn-raw']
        # keep only MADDNESS and the competitive baselines
        is_mithral = df['method'] == 'MADDNESS'
        others_to_keep = df['method'].isin([
            'PCA', 'SparsePCA', 'Bolt', 'HashJL', 'OSNAP'])
        return df.loc[is_mithral | others_to_keep]

    df0 = clean_df(df0)
    df1 = clean_df(df1)
    df2 = clean_df(df2)
    lineplot(df0, axes[0], x_metric=x_metric, y_metric=y_metric, scatter=True)
    lineplot(df1, axes[1], x_metric=x_metric, y_metric=y_metric, scatter=True)
    lineplot(df2, axes[2], x_metric=x_metric, y_metric=y_metric, scatter=True)
    plt.suptitle('Approximating an RBF Kernel Classifier')
    axes[-1].set_xlabel(_xlabel_for_xmetric(x_metric))
    # one shared legend below the subplots instead of per-axes legends
    handles, labels = axes[-1].get_legend_handles_labels()
    handles, labels = handles[1:], labels[1:]  # rm 'Method' title
    plt.figlegend(handles, labels, loc='lower center', ncol=3)
    for ax in axes:
        ax.set_ylabel(_ylabel_for_xmetric(y_metric))
        ax.get_legend().remove()
        ax.semilogx()
        ax.set_xlim([.9, ax.get_xlim()[1]])
    plt.tight_layout()
    plt.subplots_adjust(top=.94, bottom=.25)
    save_fig('ucr_{}_{}'.format(x_metric, y_metric))
def ucr_fig2(x_metric='Speedup', y_metric='Relative Accuracy',
             problem='rbf'):
    """Fraction of UCR datasets above a quality threshold, vs speedup.

    For thresholds .5, .75, .95 on `y_metric`, plots (one subplot per
    threshold) the fraction of datasets on which each method clears that
    threshold, as a function of `x_metric`.

    Args:
        x_metric: column used for the x axis (e.g. 'Speedup').
        y_metric: quality column the thresholds apply to.
        problem: 'rbf' or 'softmax'; selects which classifier is being
            approximated and appears in the title and output filename.
    """
    df = res.ucr_amm(k=128, problem=problem)
    sb.set_context('poster')
    fig, axes = plt.subplots(3, 1, figsize=(12, 12), sharex=True)

    def clean_df(df):
        # accuracy change relative to the raw 1-nn accuracy per dataset
        df['Change in Accuracy'] = df['Accuracy'] - df['acc-1nn-raw']
        return df

    def frac_above_thresh(df, thresh):
        # per-method fraction of task_ids whose y_metric clears thresh,
        # as a function of x_metric (computation lives in amm_results2)
        return res.frac_above_thresh(
            df, x_metric, y_metric, 'method', 'task_id', thresh)

    df = clean_df(df)
    y_frac_thresholds = [.5, .75, .95]
    df0 = frac_above_thresh(df, y_frac_thresholds[0])
    df1 = frac_above_thresh(df, y_frac_thresholds[1])
    df2 = frac_above_thresh(df, y_frac_thresholds[2])
    ycol = 'frac_above_thresh'
    lineplot(df0, axes[0], x_metric=x_metric, y_metric=ycol, scatter=False)
    lineplot(df1, axes[1], x_metric=x_metric, y_metric=ycol, scatter=False)
    lineplot(df2, axes[2], x_metric=x_metric, y_metric=ycol, scatter=False)
    kind = 'a Softmax' if problem == 'softmax' else 'an RBF Kernel'
    plt.suptitle(f'Approximating {kind} Classifier')
    axes[-1].set_xlabel(_xlabel_for_xmetric(x_metric))
    # one shared legend below the subplots instead of per-axes legends
    handles, labels = axes[-1].get_legend_handles_labels()
    handles, labels = handles[1:], labels[1:]  # rm 'Method' title
    plt.figlegend(handles, labels, loc='lower center', ncol=3)
    for i, ax in enumerate(axes):
        ax.set_ylabel(f"Fraction > {y_frac_thresholds[i]}")
        ax.get_legend().remove()
        ax.semilogx()
        ax.set_xlim([.9, ax.get_xlim()[1]])
        ax.set_ylim([0, 1.03])
    plt.tight_layout()
    plt.subplots_adjust(top=.94, bottom=.22)
    save_fig('ucr2_{}_{}_{}'.format(x_metric, y_metric, problem))
def main():
    """Regenerate every figure for the paper, one after another."""
    figure_makers = (
        scan_speed_fig,
        encode_speed_fig,
        lut_speed_fig,
        fig1,
        ucr_fig2,
        caltech_fig,
        cifar_fig,
    )
    for make_figure in figure_makers:
        make_figure()


if __name__ == '__main__':
    main()
|
dblalock/bolt
|
experiments/python/amm_figs2.py
|
Python
|
mpl-2.0
| 41,039
|
[
"Gaussian"
] |
14dbf2e3a286a344f5f2d49f29a0c657156ecf89d4a4b827e76bc61d7069694d
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RA4(RPackage):
    """Automated Affymetrix Array Analysis Umbrella Package."""
    # Bioconductor landing page and git source repository for the a4 package.
    homepage = "https://www.bioconductor.org/packages/a4/"
    git = "https://git.bioconductor.org/packages/a4.git"
    # Pin the release to an exact commit in the Bioconductor git mirror.
    version('1.24.0', commit='79b5143652176787c85a0d587b3bbfad6b4a19f4')
    # This release only builds against R 3.4.x.
    depends_on('r@3.4.0:3.4.9', when='@1.24.0')
    # The a4 sub-packages bundled by this umbrella, needed at build and run time.
    depends_on('r-a4base', type=('build', 'run'))
    depends_on('r-a4preproc', type=('build', 'run'))
    depends_on('r-a4classif', type=('build', 'run'))
    depends_on('r-a4core', type=('build', 'run'))
    depends_on('r-a4reporting', type=('build', 'run'))
|
mfherbst/spack
|
var/spack/repos/builtin/packages/r-a4/package.py
|
Python
|
lgpl-2.1
| 1,848
|
[
"Bioconductor"
] |
49b81c0f0b876e2a5706bba572e449a0a87d8acc418bc13aa3711a91b0e95aa8
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGeoquery(RPackage):
    """The NCBI Gene Expression Omnibus (GEO) is a public repository of
    microarray data. Given the rich and varied nature of this resource,
    it is only natural to want to apply BioConductor tools to these data.
    GEOquery is the bridge between GEO and BioConductor."""
    # Bioconductor landing page; NOTE(review): url points at the git
    # repository rather than a release tarball - the version() below
    # supplies the git source explicitly, so url is effectively unused.
    homepage = "https://bioconductor.org/packages/GEOquery/"
    url = "https://git.bioconductor.org/packages/GEOquery"
    list_url = homepage
    # Pin the release to an exact commit in the Bioconductor git mirror.
    version('2.42.0', git='https://git.bioconductor.org/packages/GEOquery', commit='c26adef8d3ddbd6932a3170f2f84f6e4327641fb')
    # R package dependencies, needed at build and run time.
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-xml', type=('build', 'run'))
    depends_on('r-rcurl', type=('build', 'run'))
    depends_on('r-httr', type=('build', 'run'))
    # This release only builds against R 3.4.x.
    depends_on('r@3.4.0:3.4.9', when='@2.42.0')
|
skosukhin/spack
|
var/spack/repos/builtin/packages/r-geoquery/package.py
|
Python
|
lgpl-2.1
| 2,093
|
[
"Bioconductor"
] |
fd34d56bac1923e7bd63f284909443f3379c4dfc2b02b20b77168d1d397c2d9b
|
#!/usr/local/sci/bin/python
# PYTHON23
#
# Author: Kate Willett
# Created: 22 June 2015
# Last update: 15 Feb 2021
# Location: /home/h04/hadkw/HadISDH_Code/CLIMEXPLORER/PYTHON/
# GitHub: https://github.com/Kate-Willett/Climate_Explorer/tree/master/PYTHON/
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This reads in a netCDF file of a gridded dataset and plots a map of either a single field or a processed field (average etc)
# It can renormalise data to the desired climatology period (and deal with actual monthly means)
# It can plot:
# a single month,
# an average of months within a year (or adjacent for DJF) up to annual - set minimum data presence
# an average of single months across a period of years (climatology) - set minimum data presence
# an average of several months across a period of years (climatology) up to annual - set minimum data presence
# It also saves a netCDF of the plot for replotting if desired.
#
# -----------------------
# LIST OF MODULES
# -----------------------
# Inbuilt: (may not all be required actually)
# import matplotlib.pyplot as plt
# import numpy as np
# import numpy.ma as ma
# import sys, os
# import scipy.stats
# import struct
# import cartopy.crs as ccrs # NO MORE BASEMAP
# import cartopy.feature as cpf
# import datetime as dt
# from matplotlib.dates import date2num,num2date
# from scipy.io import netcdf
# import matplotlib.colors as mc
# import matplotlib.cm as mpl_cm
# import pdb # pdb.set_trace() or c
#
# Kate's:
# from ReadNetCDF import GetGrid - written by Kate Willett, reads in any netCDF grid, can cope with multiple fields
# from ReformatGrids import GetAnomalies - written by Kate Willett, converts gridded fields to anomalies or renormalises to a different period
# import SelectSlice - written by Kate Willett, takes a single month or average of months/years for each gridbox
#
# -----------------------
# DATA
# -----------------------
# The code requires a monthly resolution gridded dataset (anomalies or monthly means) in netCDF format
# Currently it works with:
# candidate='HadISDH.landq.2.0.1.2014p_FLATgridIDPHA5by5_JAN2015_cf'
# candidate='HadISDH.landRH.2.0.1.2014p_FLATgridIDPHA5by5_JAN2015_cf'
# candidate='HadISDH.landT.2.0.1.2014p_FLATgridRAW5by5_JAN2015_cf'
# candidate='HadISDH.landT.2.0.1.2014p_FLATgridIDPHA5by5_JAN2015_cf'
# candidate='BERKELEY_T_5by519762005clim_anoms_19732014'
# candidate='GISS_T_5by519762005clim_anoms_19732014'
# candidate='CRUTEM.4.3.0.0.anomalies'
# candidate='GHCNM_18802014'
#
# Current directories are:
# INDIRC='/data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/GRIDS/'
# #INDIRC='/data/local/hadkw/HADCRUH2/UPDATE2014/OTHERDATA/'
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# Set up your choices:
# PctDataPresent = 0.6 # can be any value between 0 and 1
# StYr = 1973 # Start Year of dataset (assumes start in January)
# EdYr = 2014 # End Year of dataset
# EdMon = 10 # End Month of dataset (1 to 12)
# # Are you reading in actuals or anomalies? 'anoms' for anomalies, 'abs' for actuals
# vartype = 'anoms'
# # Choose your start year of climatology: 0 if not relevant, not important if DoReClim = False
# ChooseClimSt = 1976
# # Choose your start year of climatology: 0 if relevant, not important if DoReClim = False
# ChooseClimEd = 2005
# # Does the data need renormalising to the chosen climatology? True if need to renormalise, False if not
# DoReClim = False
# # Are we plotting anomalies or absolute values? 'actual' for actual values, 'anomaly' for anomaly values
# PlotType = 'actual'
# # Are we saving the plot as a netCDF too?
# SaveData = True
#
# Set up correct file paths:
# # Input Directories:
# INDIRC='/data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/GRIDS/'
# #INDIRC='/data/local/hadkw/HADCRUH2/UPDATE2014/OTHERDATA/'
# # Output Directories:
# OUTDIR='/data/local/hadkw/HADCRUH2/UPDATE2014/IMAGES/MAPS/'
#
# Set up correct files and info bundles:
# # Files and bundle example:
# # Input file:
# candidate = 'HadISDH.landq.2.0.1.2014p_FLATgridIDPHA5by5_JAN2015_cf'
# # Output file:
# OUTPLOT = 'Map_HadISDH.landq.2.0.1.2014p_clim'+str(ChooseClimSt)+str(ChooseClimEd)+'_'+MonStr+YrStr
# # LIST Variable name used in netCDF file, based on vartype
# NameOfVar = ['q_'+vartype]
# # Long name of variable for plot
# VarName = 'Specific Humidity'
# # Unit for variable - used for plot
# Unit = 'g kg$^{-1}$'
# # Plot label
# Namey = 'HadISDH.landq.2.0.1.2014p '+PlotType+' '+MonStr+' '+YrStr
# # Latitude info: LIST of variable name OR number of latitudes, start latitude
# LatInfo = ['latitude'] # [36,-87.5]
# # Longitude info: LIST of variable name OR number of longitudes, start longitude
# LonInfo = ['longitude'] # [72,-177.5])
# # Choice of colourmap for plot and whether it should be flipped: 'flip','noflip'
# ColourMapChoice = ('BrBG','noflip')
# # Optional hardwire info for plot - can be 0,0,0,' ' - use if wanting to make identical colour bars
# PlotInfo = [0,28,14,' '] # vmin,vmax,nsteps,letter_for_plot_label
# Then from the command line select var, month (range) and year (range):
# --var q (rh, t, td, tw, e, dpd
# --mchoice 0 (0 to 11)
# --mchoiceend '' (0 to 11 if you want to plot an average over months or each individual month in a range)
# --mmult False (True for every month in range, False for the average)
# --ychoice 2020 (a single year)
# --ychoiceend '' (a single year if you want to plot an average over years or each individual year in a range)
# --ymult False (True for every year in range, False for the average)
#
# > module load scitools/default-current
# > python PlotAnyMap_JUN2015.py --var q --mchoice 0 --mchoiceend '' --mmult False --ychoice 2020 --ychoiceend '' --ymult False
#
# Or use ./submit_spice_PlotAnyMap.bash
#
# -----------------------
# OUTPUT
# -----------------------
# An .eps and .png map
# OUTDIR = '/scratch/hadkw/UPDATE2020/IMAGES/MAPS/'
# OUTDIR = '/data/users/hadkw/WORKING_HADISDH/UPDATE2020/IMAGES/MAPS/'
# Example output filename built from run choices:
# OUTPLOT = 'Map_HadISDH.landq.4.3.0.2020f_clim'+str(ChooseClimSt)+str(ChooseClimEd)+'_'+PlotType+MonStr+YrStr
#
# A .nc
# OUTDIR = '/scratch/hadkw/UPDATE2020/STATISTICS/GRIDS/'
# OUTDIR = '/data/users/hadkw/WORKING_HADISDH/UPDATE2020/STATISTICS/GRIDS/'
# Example output filename built from run choices:
# OUTPLOT = 'Map_HadISDH.landq.4.3.0.2020f_clim'+str(ChooseClimSt)+str(ChooseClimEd)+'_'+PlotType+MonStr+YrStr
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 3 (15th Feb 2015)
# ---------
#
# Enhancements
# Now call var month and year choices from command line so that it can run on spice in batch mode
# Use with 'submit_spice_PlotAnyMap.bash'
# Now also has SaveData=True option to output a netCDF file - replaces function of old IDL code
#
# Changes
#
# Bug fixes
#
#
# Version 2 (20th July 2020)
# ---------
#
# Enhancements
# Now Python 3 rather than 2.7
#
# Changes
#
# Bug fixes
#
#
# Version 1 (13th October 2015)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# Set up python imports
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import sys, os, getopt
import scipy.stats
import struct
import cartopy.crs as ccrs # NO MORE BASEMAP
import cartopy.feature as cpf
import datetime as dt
from matplotlib.dates import date2num,num2date
#from scipy.io import netcdf
from netCDF4 import Dataset
import matplotlib.colors as mc
import matplotlib.cm as mpl_cm
import pdb # pdb.set_trace() or c
from ReadNetCDF import GetGrid4
from ReformatGrids import GetAnomalies
from SelectSlice import SelectSlice
# Set in stone things:
# Month names indexed 0 (January) to 11 (December) - used for plot/file labels.
MonArr = ['January','February','March','April','May','June','July','August','September','October','November','December']
# Per-variable plotting bundle keyed by short variable name:
# [netCDF variable prefix, unit string, long name, (colourmap, flip?),
#  hard-wired anomaly plot range dict].
VarDict = dict([('q',['q','g kg$^{-1}$','Specific Humidity',('BrBG','noflip'),dict([('MinVal',-2.),('MaxVal',2.),('StepVal',9.),('LetterVal','')])]),
                ('rh',['RH','%rh','Relative Humidity',('BrBG','noflip'),dict([('MinVal',-12.),('MaxVal',12.),('StepVal',9.),('LetterVal','')])]),
                ('e',['e','hPa','Vapour Pressure',('BrBG','noflip'),dict([('MinVal',-2.),('MaxVal',2.),('StepVal',9.),('LetterVal','')])]),
                ('t',['T','$^{o}$C','Air Temperature',('coolwarm','noflip'),dict([('MinVal',-2.5),('MaxVal',2.5),('StepVal',9.),('LetterVal','')])]),
                ('tw',['Tw','$^{o}$C','Wetbulb Temperature',('BrBG','noflip'),dict([('MinVal',-2.),('MaxVal',2.),('StepVal',9.),('LetterVal','')])]),
                ('td',['Td','$^{o}$C','Dew Point Temperature',('BrBG','noflip'),dict([('MinVal',-2.),('MaxVal',2.),('StepVal',9.),('LetterVal','')])]),
                ('dpd',['DPD','$^{o}$C','Dew Point Depression',('BrBG','flip'),dict([('MinVal',-3.),('MaxVal',3.),('StepVal',9.),('LetterVal','')])])])
# EDITABLES!!!
# Domain if HadISDH
Domain = 'land' # 'land', 'marine',' blend'
#Domain = 'blend' # 'land', 'marine',' blend'
#Domain = 'marine' # 'land', 'marine',' blend'
# Version strings and experiment types per domain (land/marine/blend).
lversion = '4.3.1.2020f'
lExpType = 'FLATgridHOM5by5'
mversion = '1.1.0.2020f'
mExpType = 'BClocalSHIP5by5both'
bversion = '1.1.1.2020f'
bExpType = 'FLATgridHOMBClocalSHIPboth5by5'
# Working Year (e.g. for HadISDH this would correspond to UPDATE<YYYY>
workingyear = '2020'
# Missing data
mdi = -1e30 # may set up as masked arrays later
# Set up initial run choices
# What is the minimum percent of data present for each gridbox value? This can be any value between >0 and 1
PctDataPresent = 0.6
# Start Year of dataset (assumes January)
StYr = 1973
# End Year of dataset
EdYr = 2020
# End Month of dataset (assumes it starts in January) - 1 to 12
EdMon = 12
# Are you reading in actuals or anomalies? 'anoms' or 'anomalies' for anomalies, 'abs' for actuals
vartype = 'anoms' # Check PlotType = 'actuals' if we want to plot actuals
#vartype = 'abs' # Check PlotType = 'actuals' if we want to plot actuals
# Choose your start year of climatology: 0 if not relevant, not important if DoReClim = False
ChooseClimSt = 1981
# Choose your start year of climatology: 0 if relevant, not important if DoReClim = False
ChooseClimEd = 2010
# Does the data need renormalising to the chosen climatology? True if need to renormalise, False if not
DoReClim = False
# Are we plotting anomalies or absolute values? 'actual' for actual values, 'anomaly' for anomaly values
PlotType = 'anomaly' # Check RangeDict is set to 0,0,0,'?' or it will default to anomalies ranges that have been hard wired.
#PlotType = 'actual' # Check RangeDict is set to 0,0,0,'?' or it will default to anomalies ranges that have been hard wired.
# Are we saving the data for the plot?
SaveData = True # True for save to netCDF, False for plot only
# Set up forced vmin,vmax,nsteps and plotlabel letter if needed or leave as default
RangeDict = dict([('MinVal',0.),('MaxVal',0.),('StepVal',0.),('LetterVal','c)')]) # default = 0., 0. ,0., ''
#Fix vals ,
#RangeDict = VarDict[Var][4]
# Set up directories:
INDIRC = '/scratch/hadkw/UPDATE'+workingyear+'/STATISTICS/GRIDS/'
#INDIRC = '/data/users/hadkw/WORKING_HADISDH/UPDATE'+workingyear+'/STATISTICS/GRIDS/'
#INDIRC = '/data/local/hadkw/HADCRUH2/UPDATE'+workingyear+'/OTHERDATA/'
#OUTDIRP = '/data/users/hadkw/WORKING_HADISDH/UPDATE'+workingyear+'/IMAGES/MAPS/'
OUTDIRP = '/scratch/hadkw/UPDATE'+workingyear+'/IMAGES/MAPS/'
#OUTDIRD = '/data/users/hadkw/WORKING_HADISDH/UPDATE'+workingyear+'/STATISTICS/GRIDS/'
OUTDIRD = '/scratch/hadkw/UPDATE'+workingyear+'/STATISTICS/GRIDS/'
#DataSetShort = 'BERKELEY_T_'
#DataSet = 'BERKELEY_T_'
#candidate = 'BERKELEY_T_5by519762005clim_anoms_19732014'
#NameOfVar = ['t_'+vartype]
#LatInfo = ['latitude']
#LonInfo = ['longitude']
#DataSetShort = 'BERKELEY_T_'
#DataSet = 'BERKELEY_T_'
#candidate = 'GISS_T_5by519762005clim_anoms_19732014'
#NameOfVar = ['t_'+vartype]
#LatInfo = ['latitude']
#LonInfo = ['longitude']
#DataSetShort = 'BERKELEY_T_'
#DataSet = 'BERKELEY_T_'
#candidate = 'CRUTEM.4.3.0.0.anomalies'
#NameOfVar = ['t_'+vartype]
#LatInfo = ['latitude']
#LonInfo = ['longitude']
#DataSetShort = 'BERKELEY_T_'
#DataSet = 'BERKELEY_T_'
#candidate = 'HadCRUT.4.4.0.0.median'
#NameOfVar = ['temperature_anomaly']
#LatInfo = ['latitude']
#LonInfo = ['longitude']
#DataSet = 'BERKELEY_T_'
#candidate = 'GHCNM_18802014'
#NameOfVar = ['t_'+vartype]
#LatInfo = ['latitude']
#LonInfo = ['longitude']
#************************************************************************
# Subroutines
#************************************************************************
# PlotAnyMap
def PlotAnyMap(TheFile,TheLatList,TheLonList,TheCandData,TheUnitee,TheNamee,TheCmap,TheTypee,TheVar,TheMDI,ThePlotInfo):
    ''' Plot a 2D gridded field on a Robinson-projection world map.

    TheFile: output path WITHOUT extension - both .eps and .png are written
    TheLatList, TheLonList: 1D arrays of gridbox-centre latitudes/longitudes
    TheCandData: 2D (lat, lon) array of values to plot; TheMDI marks missing
    TheUnitee: unit string for the colourbar label
    TheNamee: plot title
    TheCmap: tuple of (colourmap name, 'flip'/'noflip')
    TheTypee: 'anomaly' for a symmetric autoscaled range about zero,
              anything else autoscales between floor(min) and ceil(max)
    TheVar: long variable name for the colourbar label
    TheMDI: missing data indicator value
    ThePlotInfo: [vmin, vmax, nsteps, letter]; nsteps == 0 means autoscale,
                 letter != ' ' is printed as a panel label
    '''
    # Mask missing data so pcolor leaves those boxes blank.
    # BUGFIX: mask against the TheMDI argument rather than the module-level
    # mdi so the function honours whatever missing value the caller supplies.
    MSKTheCandData=ma.masked_where(TheCandData == TheMDI,TheCandData)
    # Build gridbox *corner* coordinates for pcolor: nudge centres by half a
    # box to the south/west and append one extra row/column to bound the data.
    HalfLon=(TheLonList[1]-TheLonList[0])/2.
    HalfLat=(TheLatList[1]-TheLatList[0])/2.
    LngArrLons,LngArrLats=np.meshgrid(np.append(TheLonList-HalfLon,180.),np.append(TheLatList-HalfLat,90.))
    # set up plot
    plt.clf()
    fig=plt.figure(figsize=(10,6))
    plt1=plt.axes([0.05,0.12,0.9,0.8],projection=ccrs.Robinson()) # left, bottom, width, height
    plt1.coastlines()
    plt1.gridlines(draw_labels=False) # default gridline spacing/labels
    # Build the colourmap: drop the 40 palest colours from the middle so
    # near-zero values remain visible, optionally reversing the ramp.
    # BUGFIX: np.int was removed in NumPy 1.24 - use the builtin int instead.
    cmap=plt.get_cmap(TheCmap[0])
    cmaplist=[cmap(i) for i in range(cmap.N)]
    for loo in range(int((cmap.N/2)-20),int((cmap.N/2)+20)):
        cmaplist.remove(cmaplist[int((cmap.N/2)-20)]) # remove the very pale colours in the middle
    if (TheCmap[1] == 'flip'): # then reverse the colours
        cmaplist.reverse()
    cmap=cmap.from_list('this_cmap',cmaplist,cmap.N)
    # If there isn't a hardwired vmin/vmax/nsteps then derive a range from
    # the data; nsteps stays at 9 to avoid too many colours.
    nsteps = 9
    if (ThePlotInfo[2] == 0):
        if (TheTypee == 'anomaly'):
            # symmetric about zero, rounded up to the next 0.1
            vmax=int(np.ceil(np.max(abs(MSKTheCandData))*10))/10.
            vmin=-vmax
            print(vmin,vmax)
        else:
            # actuals: round outwards to whole units
            vmax=np.ceil(np.max(MSKTheCandData))
            vmin=np.floor(np.min(MSKTheCandData))
            print(vmin,vmax,vmax-vmin)
    else:
        vmin = ThePlotInfo[0]
        vmax = ThePlotInfo[1]
        nsteps = ThePlotInfo[2]
    print(vmin,vmax,nsteps)
    bounds=np.linspace(vmin,vmax,nsteps)
    norm=mpl_cm.colors.BoundaryNorm(bounds,cmap.N)
    # Only pcolor can do boxing on masked arrays (pcolormesh cannot)
    grids = plt1.pcolor(LngArrLons,LngArrLats,MSKTheCandData,transform = ccrs.PlateCarree(),cmap=cmap,norm=norm)
    # Horizontal colourbar beneath the map, ticked at the boundary values
    cbax=fig.add_axes([0.05,0.08,0.9,0.03])
    cb=plt.colorbar(grids,cax=cbax,orientation='horizontal',ticks=bounds)
    cb.ax.tick_params(labelsize=12)
    plt.figtext(0.5,0.01,TheVar+' ('+TheUnitee+')',size=14,ha='center')
    plt.figtext(0.5,0.95,TheNamee,size=16,ha='center')
    # If a panel letter is supplied then print it on the plot
    if (ThePlotInfo[3] != ' '):
        plt.figtext(0.1,0.92,ThePlotInfo[3],size=16,ha='center')
    plt.savefig(TheFile+".eps")
    plt.savefig(TheFile+".png")
    plt.clf()
    return #PlotAnyMap
#**********************************************************************
# WriteNetCDF
def WriteNetCDF(Filename,TheGrids,TheMDI,TheVar,TheLongName,TheUnit,TheLats,TheLons):
    '''
    This function writes a single 2D (latitude, longitude) field out to a
    NetCDF-4 (classic model) file.
    INPUTS:
    Filename - string file name
    TheGrids[:,:] - 2D array of values (latitude, longitude)
    TheMDI - the missing data value (used as the _FillValue)
    TheVar - string short name of var q (currently not written to the file)
    TheLongName - string long name of variable
    TheUnit - string unit of variable
    TheLats[:] - vector of latitudes from -90 to 90
    TheLons[:] - vector of longitudes from -180 to 180
    OUTPUTS:
    None
    '''
    # Create a new netCDF file - float data are packed by the library itself
    # ((V-offset)/scale), so no manual conversion is needed here.
    ncfw = Dataset(Filename,'w',format='NETCDF4_CLASSIC')
    # Set up the dimension names and quantities
    ncfw.createDimension('latitude',len(TheLats))
    ncfw.createDimension('longitude',len(TheLons))
    # Latitude coordinate variable
    MyVarLt = ncfw.createVariable('latitude','f4',('latitude',))
    MyVarLt.standard_name = 'latitude'
    MyVarLt.long_name = 'latitude'
    MyVarLt.units = 'degrees_north'
    MyVarLt.point_spacing = 'even'
    # BUGFIX: latitude is the Y axis (was incorrectly 'X', same as longitude)
    MyVarLt.axis = 'Y'
    MyVarLt[:] = TheLats
    # Longitude coordinate variable
    MyVarLn = ncfw.createVariable('longitude','f4',('longitude',))
    MyVarLn.standard_name = 'longitude'
    MyVarLn.long_name = 'longitude'
    # BUGFIX: CF-conventions spelling (was 'degrees east')
    MyVarLn.units = 'degrees_east'
    MyVarLn.point_spacing = 'even'
    MyVarLn.axis = 'X'
    MyVarLn[:] = TheLons
    # Data variable: zlib=True compresses the file and
    # least_significant_digit=4 drops precision we do not need to store.
    MyVar = ncfw.createVariable('anomalies','f4',('latitude','longitude',),fill_value = TheMDI,zlib=True,least_significant_digit=4)
    MyVar.long_name = TheLongName
    MyVar.units = TheUnit
    # Provide the data to the variable
    MyVar[:] = TheGrids[:,:]
    ncfw.close()
    return
#************************************************************************
# MAIN PROGRAM
#************************************************************************
def _slice_and_output(CandFields,LatList,LonList,TheMons,TheYrs,MonStr,YrStr,
                      DataSet,DataSetShort,Var,VarName,Unit,ColourMapChoice,
                      PlotInfo,Namey=None):
    ''' Extract one time slice from CandFields, plot the map and optionally
    save the gridded slice to netCDF.
    TheMons/TheYrs are the month/year selections passed to SelectSlice;
    MonStr/YrStr are the matching strings used in file names and titles.
    Namey, when not None, overrides the auto-built plot title. '''
    CandData = SelectSlice(CandFields,StYr,EdYr,TheMons,TheYrs,mdi,PctDataPresent)
    OUTPLOT = 'Map_'+DataSet+'_clim'+str(ChooseClimSt)+str(ChooseClimEd)+'_'+PlotType+MonStr+YrStr
    if (Namey is None):
        Namey = DataSetShort+' '+PlotType+' '+MonStr+' '+YrStr
    PlotAnyMap(OUTDIRP+OUTPLOT,LatList,LonList,CandData,Unit,Namey,ColourMapChoice,PlotType,VarName,mdi,PlotInfo)
    if (SaveData):
        WriteNetCDF(OUTDIRD+OUTPLOT+'.nc',CandData,mdi,Var,VarName,Unit,LatList,LonList)
    return


def main(argv):
    ''' Parse command-line choices, read in the gridded dataset and produce
    one or more map plots (and optional netCDF output files).
    Command line (all values passed as strings):
    --var q|rh|e|t|tw|td|dpd
    --mchoice 0-11   --mchoiceend ''|0-11   --mmult True|False
    --ychoice YYYY   --ychoiceend ''|YYYY   --ymult True|False '''
    # Defaults for each option.
    # BUGFIX: bind defaults to the capitalised names actually used below -
    # previously the defaults were assigned to lowercase names (var, mchoice,
    # ...), so omitting any flag raised a NameError.
    Var = 'q'            # 'q','rh','e','td','tw','t','dpd'
    Mchoice = 0          # month (0-11) or start of month range (can span 11-1)
    Mchoiceend = ''      # '' for a single month, else end of range (0-11)
    Mmult = False        # 'True' to plot every month in range, else the average
    Ychoice = 1973       # year or start of year range
    Ychoiceend = ''      # '' for a single year, else end of range
    Ymult = False        # 'True' to plot every year in range, else the average
    try:
        opts, args = getopt.getopt(argv, "hi:",
                                   ["var=","mchoice=","mchoiceend=","mmult=","ychoice=","ychoiceend=","ymult="])
    except getopt.GetoptError:
        print('Usage (as strings) PlotAnyMap.py --var <q> --mchoice <0> --mchoiceend <11> --mmult False --ychoice <1973> --ychoiceend <''> --ymult True')
        sys.exit(2)
    for opt, arg in opts:
        if opt == "--var":
            Var = arg
        elif opt == "--mchoice":
            Mchoice = arg
        elif opt == "--mchoiceend":
            Mchoiceend = arg
        elif opt == "--mmult":
            Mmult = arg
        elif opt == "--ychoice":
            Ychoice = arg
        elif opt == "--ychoiceend":
            Ychoiceend = arg
        elif opt == "--ymult":
            Ymult = arg
    print(Var,Mchoice,Mchoiceend,Mmult,Ychoice,Ychoiceend,Ymult)
    # Pick the version/experiment strings for the chosen HadISDH domain
    if (Domain == 'land'):
        Version = lversion
        ExpType = lExpType
    elif (Domain == 'marine'):
        Version = mversion
        ExpType = mExpType
    elif (Domain == 'blend'):
        Version = bversion
        ExpType = bExpType
    # Month selection: [m] for a single month, [m1,m2] for a range.
    # For month ranges that span 11 to 0, December is taken from the first
    # year of ChooseYr - will NOT work for the last year!
    if (Mchoiceend != ''):
        ChooseMon = [int(Mchoice),int(Mchoiceend)]
    else:
        ChooseMon = [int(Mchoice)]
    # True: plot each individual month in range; False: plot the average
    PlotMonMultiple = (Mmult == 'True')
    # Year selection: [y] for a single year, [y1,y2] for a range
    if (Ychoiceend != ''):
        ChooseYr = [int(Ychoice),int(Ychoiceend)]
    else:
        ChooseYr = [int(Ychoice)]
    # True: plot each individual year in range; False: plot the average
    PlotYrMultiple = (Ymult == 'True')
    # Build the HadISDH file/variable bundle for the chosen domain/variable
    DataSetShort = 'HadISDH.'+Domain+VarDict[Var][0]+'.'+Version
    DataSet = DataSetShort+'_'+ExpType
    candidate = DataSet+'_anoms8110'
    NameOfVar = [Var+'_'+vartype]
    LatInfo = ['latitude'] # variable name or number of latitudes, start latitude
    LonInfo = ['longitude'] # variable name or number of longitudes, start longitude
    VarName = VarDict[Var][2]
    Unit = VarDict[Var][1]
    ColourMapChoice = VarDict[Var][3]
    PlotInfo = [RangeDict['MinVal'], RangeDict['MaxVal'], RangeDict['StepVal'], RangeDict['LetterVal']]
    # Read in the gridded fields
    MyFile = INDIRC+candidate+'.nc'
    TmpCandFields,LatList,LonList = GetGrid4(MyFile,NameOfVar,LatInfo,LonInfo)
    # If the data do not end in December then pad the final year with missing data
    if (EdMon < 12):
        TmpCandFields = np.concatenate((TmpCandFields,np.reshape(np.repeat(mdi,((12-EdMon)*len(LonList)*len(LatList))),(12-EdMon,len(LatList),len(LonList)))))
    # Force anything below mdi to equal mdi exactly (guards against floating
    # point errors in the stored missing values)
    TmpCandFields[TmpCandFields < mdi] = mdi
    # Renormalise to the chosen climatology (or build anomalies from actuals)
    # if requested
    if (DoReClim):
        CandFields = GetAnomalies(TmpCandFields,StYr,EdYr,ChooseClimSt,ChooseClimEd,mdi,PctDataPresent)
    else:
        CandFields = TmpCandFields
    # Label strings for averaged (non-looping) month/year selections
    if (len(ChooseMon) == 1):
        MonStrAvg = MonArr[ChooseMon[0]]
    else:
        MonStrAvg = MonArr[ChooseMon[0]]+'-'+MonArr[ChooseMon[1]]
    if (len(ChooseYr) == 1):
        YrStrAvg = str(ChooseYr[0])
    else:
        YrStrAvg = str(ChooseYr[0])+'-'+str(ChooseYr[1])
    # Now produce the plots: loop over years and/or months when multiples are
    # requested, otherwise plot the single value or range average.
    if (PlotYrMultiple):
        for yy in range(ChooseYr[0],ChooseYr[1]+1):
            YrStr = str(yy)
            if (PlotMonMultiple):
                for mm in range(ChooseMon[0],ChooseMon[1]+1):
                    _slice_and_output(CandFields,LatList,LonList,[mm],[yy],MonArr[mm],YrStr,
                                      DataSet,DataSetShort,Var,VarName,Unit,ColourMapChoice,PlotInfo)
            else:
                _slice_and_output(CandFields,LatList,LonList,ChooseMon,[yy],MonStrAvg,YrStr,
                                  DataSet,DataSetShort,Var,VarName,Unit,ColourMapChoice,PlotInfo)
    else:
        if (PlotMonMultiple):
            for mm in range(ChooseMon[0],ChooseMon[1]+1):
                _slice_and_output(CandFields,LatList,LonList,[mm],ChooseYr,MonArr[mm],YrStrAvg,
                                  DataSet,DataSetShort,Var,VarName,Unit,ColourMapChoice,PlotInfo)
        else:
            # Single plot of the selection/average - title deliberately blank
            _slice_and_output(CandFields,LatList,LonList,ChooseMon,ChooseYr,MonStrAvg,YrStrAvg,
                              DataSet,DataSetShort,Var,VarName,Unit,ColourMapChoice,PlotInfo,
                              Namey='')
    print("And, we are done!")


if __name__ == '__main__':
    main(sys.argv[1:])
|
Kate-Willett/Climate_Explorer
|
PYTHON/PlotAnyMap.py
|
Python
|
cc0-1.0
| 33,032
|
[
"NetCDF"
] |
a23bf612de5f922893e548009e9bcb0f1ab744657063d310c6cb96fea10bb19a
|
"""
This module contains code to conduct iterative HMMer searches in Goat. From a
starting HMM or MSA, a forward HMMer search is conducted, followed by reverse
BLAST to identify any positive hits. Positive hits are added to the HMM and the
HMMer search is conducted again. This process is iterated until no more hits
are added to the HMM.
"""
|
chris-klinger/Goat
|
analyses/iterative_hmmer_blast.py
|
Python
|
gpl-3.0
| 343
|
[
"BLAST"
] |
d2742c810d229f4c982d559c56f6426650fbbcbc4960b4a6545b044d6c1969ab
|
from django.core.urlresolvers import reverse
from django.db import models
from edc_base.audit_trail import AuditTrail
from edc_base.model.models import BaseUuidModel
from edc_export.models import ExportTrackingFieldsMixin
from edc_sync.models import SyncModelMixin
from lis.specimen.lab_aliquot.managers import AliquotManager
from lis.specimen.lab_aliquot.models import BaseAliquot
from .aliquot_condition import AliquotCondition
from .aliquot_type import AliquotType
from .receive import Receive
class Aliquot(BaseAliquot, SyncModelMixin, ExportTrackingFieldsMixin, BaseUuidModel):
    """A laboratory aliquot tied to a received specimen.

    On save, derives its subject_identifier from the linked Receive record's
    registered subject, and exposes convenience properties to reach the
    related visit and requisition records.
    """
    # Receive record this aliquot was drawn from; fixed after creation.
    receive = models.ForeignKey(
        Receive,
        editable=False)
    aliquot_type = models.ForeignKey(
        AliquotType,
        verbose_name="Aliquot Type",
        null=True)
    aliquot_condition = models.ForeignKey(
        AliquotCondition,
        verbose_name="Aliquot Condition",
        null=True,
        blank=True)
    is_rejected = models.BooleanField(
        verbose_name='rejected',
        default=False)
    objects = AliquotManager()
    history = AuditTrail()
    def save(self, *args, **kwargs):
        """Copy the subject identifier from the receive's registered subject, then save."""
        self.subject_identifier = self.receive.registered_subject.subject_identifier
        super(Aliquot, self).save(*args, **kwargs)
    @property
    def specimen_identifier(self):
        """The aliquot identifier minus its trailing 4-character suffix."""
        return self.aliquot_identifier[:-4]
    @property
    def registered_subject(self):
        """Registered subject taken from the linked receive record."""
        return self.receive.registered_subject
    @property
    def visit_code(self):
        """Visit code recorded on the linked receive record."""
        return self.receive.visit
    @property
    def subject_visit(self):
        """Return the MaternalVisit matching this aliquot's visit code and subject, or None."""
        MaternalVisit = models.get_model('maternal', 'MaternalVisit')
        try:
            return MaternalVisit.objects.get(
                appointment__visit_definition__code=self.visit_code,
                appointment__registered_subject=self.registered_subject)
        except MaternalVisit.DoesNotExist:
            return None
    @property
    def subject_requisition(self):
        """Return the requisition referenced by the receive record, or None if not found."""
        model = self.receive.requisition_model_name
        RequisitionModel = models.get_model('mb_lab', model)
        try:
            return RequisitionModel.objects.get(
                requisition_identifier=self.receive.requisition_identifier)
        except RequisitionModel.DoesNotExist:
            return None
    @property
    def optional_description(self):
        """See PackingListHelper."""
        # AttributeError also covers subject_requisition being None.
        try:
            return self.subject_requisition.optional_description
        except AttributeError:
            return None
    def processing(self):
        """Admin changelist link to add a processing record pre-filled with this aliquot."""
        url = reverse('admin:mb_lab_aliquotprocessing_add')
        return '<a href="{0}?aliquot={1}">process</a>'.format(url, self.pk)
    processing.allow_tags = True
    def related(self):
        """Admin changelist link to other aliquots sharing this receive identifier."""
        url = reverse('admin:mb_lab_aliquot_changelist')
        return '<a href="{0}?q={1}">related</a>'.format(url, self.receive.receive_identifier)
    related.allow_tags = True
    class Meta:
        app_label = 'mb_lab'
        unique_together = (('receive', 'count'), )
|
botswana-harvard/microbiome
|
microbiome/apps/mb_lab/models/aliquot.py
|
Python
|
gpl-2.0
| 2,996
|
[
"VisIt"
] |
4804c58f5d899f3be51046654b31003cc6ae37ea8930bf3a39109066ad0a13aa
|
# Copyright (C) 2012,2013,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import espressopp
def getAllParticles(system, *properties):
    """
    Return a list of per-particle property lists for all particles of the
    system (currently no atomistic AdResS particles are included).

    Each entry of *properties* is a case-insensitive property name out of:
    id, pos, type, mass, v, f, q, adrat.

    Raises ValueError for an unknown property name. (The original code did
    ``raise "..."`` with a plain string, which itself raises TypeError on
    Python 3 — string exceptions are not allowed.)
    """
    # Property names map 1:1 onto Particle attribute names.
    known = ('id', 'pos', 'type', 'mass', 'v', 'f', 'q', 'adrat')
    allParticles = []
    maxParticleID = int(espressopp.analysis.MaxPID(system).compute())
    for pid in range(maxParticleID + 1):
        particle = system.storage.getParticle(pid)
        # Particles without a position (e.g. not stored locally) are skipped,
        # matching the original behavior of only collecting when pos is truthy.
        if not particle.pos:
            continue
        part = []
        for val in properties:
            key = val.lower()
            if key not in known:
                raise ValueError("unknown particle property: %s" % val)
            part.append(getattr(particle, key))
        allParticles.append(part)
    return allParticles
def getAllBonds(system):
    """
    Return a flat list of all bonds of the system (currently only
    FixedPairLists are supported).

    Bonded interactions that do not expose a FixedPairList are skipped.
    """
    bonds = []
    for i in range(system.getNumberOfInteractions()):
        interaction = system.getInteraction(i)
        if not interaction.isBonded():
            continue
        try:
            pair_lists = interaction.getFixedPairList().getBonds()
        except Exception:
            # Best-effort: bonded interactions without a FixedPairList are
            # silently skipped — but unlike the original bare `except:`, this
            # no longer traps SystemExit / KeyboardInterrupt.
            continue
        for pair_list in pair_lists:
            bonds.extend(pair_list)
    return bonds
|
espressopp/espressopp
|
src/tools/info.py
|
Python
|
gpl-3.0
| 2,821
|
[
"ESPResSo"
] |
76dbf716f61e106b8692dad4122025d3b33058129d51d315cd9c378887e3929b
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
This implements the method described in Tang et al. 2010 PNAS paper,
<http://www.pnas.org/content/107/1/472>
Angiosperm genome comparisons reveal early polyploidy in the monocot lineage
The main pipeline assumes starting with defined synteny blocks in .anchors
format (use compara.synteny.scan()), then segment the chromosomes and cluster
segments according to the matching patterns. Finally the putative ancestral
regions (PAR) are identified and visualized.
"""
import os.path as op
import sys
import logging
from math import log
import numpy as np
from more_itertools import pairwise
from jcvi.compara.synteny import AnchorFile, check_beds
from jcvi.formats.bed import Bed
from jcvi.formats.blast import BlastLine
from jcvi.apps.base import OptionParser, ActionDispatcher, need_update, sh
def main():
    """Dispatch the requested subcommand to its handler in this module."""
    dispatcher = ActionDispatcher(
        (
            ("cluster", "cluster the segments"),
            ("pad", "test and reconstruct candidate PADs"),
        )
    )
    dispatcher.dispatch(globals())
def make_arrays(blastfile, qpadbed, spadbed, qpadnames, spadnames):
    """
    Build the (m x n) matrix of -log(P) significance scores for every
    query-PAD x subject-PAD pair.

    Three matrices are computed internally: observed (BLAST dot counts per
    cell), expected (dots distributed in proportion to cell area), and logmp,
    the Poisson-model significance of observed vs. expected. Only logmp is
    returned.
    """
    m, n = len(qpadnames), len(spadnames)
    qpadorder, spadorder = qpadbed.order, spadbed.order
    # Map PAD name -> row/column index in the matrices.
    qpadid = dict((a, i) for i, a in enumerate(qpadnames))
    spadid = dict((a, i) for i, a in enumerate(spadnames))
    # Number of genes in each PAD.
    qpadlen = dict((a, len(b)) for a, b in qpadbed.sub_beds())
    spadlen = dict((a, len(b)) for a, b in spadbed.sub_beds())
    qsize, ssize = len(qpadbed), len(spadbed)
    # The PADs must jointly cover every gene exactly once.
    assert sum(qpadlen.values()) == qsize
    assert sum(spadlen.values()) == ssize
    # Populate arrays of observed counts and expected counts
    logging.debug("Initialize array of size ({0} x {1})".format(m, n))
    observed = np.zeros((m, n))
    fp = open(blastfile)
    all_dots = 0
    for row in fp:
        b = BlastLine(row)
        qi, q = qpadorder[b.query]
        si, s = spadorder[b.subject]
        qseqid, sseqid = q.seqid, s.seqid
        qsi, ssi = qpadid[qseqid], spadid[sseqid]
        observed[qsi, ssi] += 1
        all_dots += 1
    assert int(round(observed.sum())) == all_dots
    logging.debug("Total area: {0} x {1}".format(qsize, ssize))
    S = qsize * ssize
    expected = np.zeros((m, n))
    qsum = 0
    for i, a in enumerate(qpadnames):
        alen = qpadlen[a]
        qsum += alen
        for j, b in enumerate(spadnames):
            blen = spadlen[b]
            # Uniform-density null model: dots land in each cell in
            # proportion to its area (alen * blen) out of the total area S.
            expected[i, j] = all_dots * alen * blen * 1.0 / S
    assert int(round(expected.sum())) == all_dots
    # Calculate the statistical significance for each cell
    from scipy.stats.distributions import poisson
    logmp = np.zeros((m, n))
    for i in range(m):
        for j in range(n):
            obs, exp = observed[i, j], expected[i, j]
            pois = max(poisson.pmf(obs, exp), 1e-250)  # Underflow
            logmp[i, j] = max(-log(pois), 0)
    return logmp
def pad(args):
    """
    %prog pad blastfile cdtfile --qbed q.pad.bed --sbed s.pad.bed
    Test and reconstruct candidate PADs.
    """
    from jcvi.formats.cdt import CDT
    p = OptionParser(pad.__doc__)
    p.set_beds()
    p.add_option(
        "--cutoff",
        default=0.3,
        type="float",
        help="The clustering cutoff to call similar",
    )
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    cutoff = opts.cutoff
    blastfile, cdtfile = args
    qbed, sbed, qorder, sorder, is_self = check_beds(blastfile, p, opts)
    cdt = CDT(cdtfile)
    # Cut the clustering tree at `cutoff` to obtain merged partitions
    # (gtr=False presumably selects the other axis's tree — confirm in CDT).
    qparts = list(cdt.iter_partitions(cutoff=cutoff))
    sparts = list(cdt.iter_partitions(cutoff=cutoff, gtr=False))
    # Map each original PAD name to the index of its merged partition.
    qid, sid = {}, {}
    for i, part in enumerate(qparts):
        qid.update(dict((x, i) for x in part))
    for i, part in enumerate(sparts):
        sid.update(dict((x, i) for x in part))
    # Without writing files, conversion from PAD to merged PAD is done in memory
    for q in qbed:
        q.seqid = qid[q.seqid]
    for s in sbed:
        s.seqid = sid[s.seqid]
    qnames = range(len(qparts))
    snames = range(len(sparts))
    logmp = make_arrays(blastfile, qbed, sbed, qnames, snames)
    m, n = logmp.shape
    # Keep only cells whose -log(P) clears the significance bar.
    pvalue_cutoff = 1e-30
    cutoff = -log(pvalue_cutoff)
    significant = []
    for i in range(m):
        for j in range(n):
            score = logmp[i, j]
            if score < cutoff:
                continue
            significant.append((qparts[i], sparts[j], score))
    for a, b, score in significant:
        print("|".join(a), "|".join(b), score)
    logging.debug(
        "Collected {0} PAR comparisons significant at (P < {1}).".format(
            len(significant), pvalue_cutoff
        )
    )
    return significant
def get_segments(ranges, extra, minsegment=40):
    """
    Given a list of Range, perform chaining on the ranges and select a highest
    scoring subset and cut based on their boundaries. Let's say the projection
    of the synteny blocks onto one axis look like the following.
    1=====10......20====30....35====~~
    Then the segmentation will yield a block [1, 20), [20, 35), using an
    arbitrary right extension rule. Extra are additional end breaks for
    chromosomes.
    """
    from jcvi.utils.range import range_chain, LEFT, RIGHT
    # Third endpoint tag alongside LEFT/RIGHT: the start of a selected block.
    NUL = 2
    selected, score = range_chain(ranges)
    # Collect every breakpoint: selected block starts, plus the chromosome
    # start (LEFT) and end (RIGHT) breaks supplied in `extra`.
    endpoints = [(x.start, NUL) for x in selected]
    endpoints += [(x[0], LEFT) for x in extra]
    endpoints += [(x[1], RIGHT) for x in extra]
    endpoints.sort()
    current_left = 0
    # Left-to-right sweep, emitting a segment at each closing breakpoint.
    for a, ai in endpoints:
        if ai == LEFT:
            # Chromosome start: open a new segment here.
            current_left = a
        if ai == RIGHT:
            # Chromosome end: always close out the running segment.
            yield current_left, a
        elif ai == NUL:
            # Block start: close the running segment, unless it is too short.
            if a - current_left < minsegment:
                continue
            yield current_left, a - 1
            current_left = a
def write_PAD_bed(bedfile, prefix, pads, bed):
fw = open(bedfile, "w")
padnames = ["{0}:{1:05d}-{2:05d}".format(prefix, a, b) for a, b in pads]
for a, b in pairwise(padnames):
assert a != b, a
j = 0
# Assign all genes to new partitions
for i, x in enumerate(bed):
if i > b:
j += 1
print("\t".join((padnames[j], str(i), str(i + 1), x.accn)), file=fw)
fw.close()
npads = len(pads)
logging.debug("{0} partition written in `{1}`.".format(npads, bedfile))
return npads, padnames
def cluster(args):
    """
    %prog cluster blastfile anchorfile --qbed qbedfile --sbed sbedfile
    Cluster the segments and form PAD. This is the method described in Tang et
    al. (2010) PNAS paper. The anchorfile defines a list of synteny blocks,
    based on which the genome on one or both axis can be chopped up into pieces
    and clustered.
    """
    from jcvi.utils.range import Range
    p = OptionParser(cluster.__doc__)
    p.set_beds()
    p.add_option(
        "--minsize", default=10, type="int", help="Only segment using blocks >= size"
    )
    p.add_option(
        "--path", default="~/scratch/bin", help="Path to the CLUSTER 3.0 binary"
    )
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    blastfile, anchorfile = args
    qbed, sbed, qorder, sorder, is_self = check_beds(blastfile, p, opts)
    minsize = opts.minsize
    ac = AnchorFile(anchorfile)
    qranges, sranges = [], []
    # Chromosome start/end breakpoints, used as forced segment boundaries.
    qextra = [x[1:] for x in qbed.get_breaks()]
    sextra = [x[1:] for x in sbed.get_breaks()]
    # Project each synteny block onto the query and subject axes as Ranges.
    # (NOTE(review): `id` shadows the builtin; kept unchanged here.)
    id = 0
    for block in ac.iter_blocks(minsize=minsize):
        q, s = list(zip(*block))[:2]
        q = [qorder[x][0] for x in q]
        s = [sorder[x][0] for x in s]
        minq, maxq = min(q), max(q)
        mins, maxs = min(s), max(s)
        id += 1
        qr = Range("0", minq, maxq, maxq - minq, id)
        sr = Range("0", mins, maxs, maxs - mins, id)
        qranges.append(qr)
        sranges.append(sr)
    # Chop each axis into PAD segments at the chained block boundaries.
    qpads = list(get_segments(qranges, qextra))
    spads = list(get_segments(sranges, sextra))
    suffix = ".pad.bed"
    qpf = opts.qbed.split(".")[0]
    spf = opts.sbed.split(".")[0]
    qpadfile = qpf + suffix
    spadfile = spf + suffix
    qnpads, qpadnames = write_PAD_bed(qpadfile, qpf, qpads, qbed)
    snpads, spadnames = write_PAD_bed(spadfile, spf, spads, sbed)
    qpadbed, spadbed = Bed(qpadfile), Bed(spadfile)
    # Score every PAD x PAD cell, then dump the matrix for CLUSTER 3.0.
    logmp = make_arrays(blastfile, qpadbed, spadbed, qpadnames, spadnames)
    m, n = logmp.shape
    matrixfile = ".".join((qpf, spf, "logmp.txt"))
    fw = open(matrixfile, "w")
    header = ["o"] + spadnames
    print("\t".join(header), file=fw)
    for i in range(m):
        row = [qpadnames[i]] + ["{0:.1f}".format(x) for x in logmp[i, :]]
        print("\t".join(row), file=fw)
    fw.close()
    # Run CLUSTER 3.0 (Pearson correlation, average linkage)
    cmd = op.join(opts.path, "cluster")
    cmd += " -g 2 -e 2 -m a -f {0}".format(matrixfile)
    pf = matrixfile.rsplit(".", 1)[0]
    cdtfile = pf + ".cdt"
    if need_update(matrixfile, cdtfile):
        sh(cmd)
if __name__ == "__main__":
main()
|
tanghaibao/jcvi
|
jcvi/compara/pad.py
|
Python
|
bsd-2-clause
| 9,149
|
[
"BLAST"
] |
cd32a8ddd18a25a6cb0d73da710e4949def877ad90c31c28cfb0609b51369b05
|
"""
Default Django settings. Override these with settings in the module pointed to
by the DJANGO_SETTINGS_MODULE environment variable.
"""
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
    """Return *s* unchanged.

    Stand-in for django.utils.translation.gettext_noop: that module cannot
    be imported here because it depends on the settings.
    """
    return s
####################
# CORE #
####################
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('dsb', gettext_noop('Lower Sorbian')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-co', gettext_noop('Colombian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gd', gettext_noop('Scottish Gaelic')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hsb', gettext_noop('Upper Sorbian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kab', gettext_noop('Kabyle')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmål')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = False
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
TEMPLATES = []
# Default form rendering class.
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search'),
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
##############
# MIDDLEWARE #
##############
# List of middleware to use. Order is important; in the request phase, these
# middleware will be applied in the order given, and in the response
# phase the middleware will be applied in reverse order.
MIDDLEWARE = []
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like "example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to set the flag restricting cookie leaks on cross-site requests.
# This can be 'Lax', 'Strict', or None to disable the flag.
SESSION_COOKIE_SAMESITE = 'Lax'
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGOUT_REDIRECT_URL = None
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_SAMESITE = 'Lax'
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
|
frankvdp/django
|
django/conf/global_settings.py
|
Python
|
bsd-3-clause
| 21,955
|
[
"VisIt"
] |
88dd30e166c2439864e368c6787a77740d3b4660cc9987c60185970ba1d57f87
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Auto-generated language profile for Danish ('da') used by the pylid
# language-identification package. NOTE(review): pylid is a project-local
# module; the meaning of its constructor argument (3 — presumably the
# n-gram length) should be confirmed against the pylid API.
import pylid
from collections import Counter
da = pylid.PyLID(3)
# Total number of n-grams observed in the training corpus for this profile.
da.total_ngrams = 276913871
# ISO 639-1 language code this profile identifies.
da.lang = 'da'
da.ngrams = Counter({
u'er#': 5057266,
u'#de': 3325690,
u'en#': 3164079,
u'et#': 3077357,
u'for': 2560007,
u'#fo': 2199578,
u'de#': 1968899,
u'der': 1670476,
u'at#': 1655815,
u'#og': 1621346,
u'#at': 1556194,
u'ing': 1543432,
u'det': 1519446,
u'og#': 1503669,
u'den': 1381686,
u'nde': 1352705,
u'#me': 1275676,
u'or#': 1245301,
u'#i#': 1200880,
u're#': 1193255,
u'om#': 1162027,
u'il#': 1159504,
u'#vi': 1143571,
u'#af': 1139255,
u'til': 1131152,
u'#ti': 1107126,
u'#er': 1103717,
u'ere': 1100234,
u'ke#': 1072706,
u'ne#': 1039766,
u'lig': 975742,
u'#en': 952819,
u'ter': 950006,
u'af#': 942944,
u'ed#': 938257,
u'and': 916799,
u'ger': 871592,
u'es#': 855731,
u'ge#': 846711,
u'r#d': 830494,
u'nin': 822956,
u'lle': 817087,
u'nge': 810637,
u'#be': 800507,
u'ng#': 795490,
u'te#': 782811,
u'kke': 766954,
u'e#s': 756113,
u'ske': 751106,
u'ion': 743363,
u'els': 738396,
u'e#f': 738239,
u'med': 730762,
u'end': 728820,
u'e#a': 713201,
u'men': 704118,
u'#ha': 703968,
u'som': 702477,
u'le#': 690246,
u'lse': 688592,
u'tte': 685422,
u'e#o': 681872,
u'gen': 680450,
u'#st': 677300,
u'ede': 673379,
u'#om': 669722,
u'ind': 664779,
u'r#e': 660260,
u'ern': 657449,
u'r#a': 652536,
u'ige': 645079,
u'#in': 640605,
u'ste': 639316,
u'#so': 637556,
u'se#': 633263,
u'ar#': 622714,
u'e#e': 618711,
u'r#f': 611060,
u'r#s': 600907,
u'#ko': 597546,
u'ikk': 597517,
u'e#m': 596549,
u'ig#': 594422,
u'rne': 593744,
u'#p\xe5': 590837,
u'isk': 572582,
u'p\xe5#': 569622,
u'ver': 567170,
u'vi#': 567001,
u'e#d': 565436,
u't#s': 563535,
u't#e': 553279,
u't#f': 552698,
u'r#i': 538398,
u'r#o': 537611,
u'ent': 534658,
u'eg#': 527772,
u'jeg': 526572,
u'#ud': 515257,
u'#re': 513775,
u'del': 513707,
u'#si': 508611,
u'ret': 503589,
u'res': 503545,
u'igt': 499646,
u'#fr': 498876,
u'hed': 497909,
u't#d': 494651,
u'har': 493070,
u'ler': 492289,
u'vil': 489819,
u'#eu': 488639,
u'an#': 486424,
u't#a': 477727,
u'e#i': 467657,
u'mme': 464974,
u'nne': 463555,
u'#ik': 461182,
u't#o': 460200,
u'rin': 460163,
u'ska': 451887,
u'#sk': 447982,
u'age': 432574,
u'gt#': 428219,
u'nte': 427610,
u'one': 426833,
u'e#t': 408799,
u'man': 406772,
u'ser': 405600,
u'nd#': 402828,
u'e#p': 402458,
u'ner': 398435,
u'und': 398090,
u'tio': 394024,
u'n#f': 393580,
u'kom': 393493,
u'ati': 391562,
u'sta': 389896,
u'r#m': 388185,
u'tet': 387523,
u't#v': 385346,
u't#i': 380316,
u'#an': 380159,
u'lan': 379424,
u'iss': 379419,
u'get': 375496,
u'e#b': 373647,
u'ene': 373340,
u'n#a': 372726,
u'#mi': 366609,
u'ens': 366349,
u'#he': 364276,
u'omm': 360846,
u'\xf8re': 359487,
u'#et': 358878,
u'ive': 357736,
u'r#v': 357401,
u'enn': 357253,
u'n#s': 352925,
u'ett': 348357,
u'#ve': 346385,
u'ell': 346224,
u'e#v': 343533,
u'mis': 341387,
u's\xe5#': 337610,
u'tig': 337242,
u'eur': 335753,
u'e#k': 335432,
u'ren': 334289,
u'g#a': 333269,
u'#hv': 332614,
u'uro': 332385,
u'#pr': 330818,
u'rer': 330654,
u'e#h': 330485,
u'ord': 329684,
u'mer': 329301,
u'ors': 328670,
u'g#s': 328582,
u'lem': 328014,
u't#m': 327251,
u'al#': 325963,
u'ore': 323470,
u'#je': 323294,
u'n#o': 320937,
u'#sa': 319691,
u'ker': 317734,
u'ove': 316282,
u'orm': 313469,
u'i#d': 312260,
u'nen': 310119,
u'v\xe6r': 308389,
u'rop': 307552,
u'\xe6re': 303411,
u'ale': 302731,
u'#al': 302633,
u'#op': 302154,
u'ans': 296302,
u'kon': 295770,
u'eri': 295235,
u'ang': 293806,
u'#ka': 293620,
u'vor': 293065,
u'#ma': 292917,
u'ghe': 292421,
u'r#b': 291885,
u'igh': 290678,
u'pro': 289813,
u'vis': 289172,
u'eli': 287058,
u'str': 286444,
u'tal': 283895,
u'l#a': 283656,
u'f\xf8r': 283089,
u'dle': 282456,
u'g#f': 281387,
u'\xe5r#': 281194,
u'sig': 280229,
u'g#d': 279205,
u'r#h': 278581,
u'#bl': 276795,
u'n#e': 274423,
u'kan': 273982,
u'kal': 272466,
u'em#': 272458,
u'ngs': 269984,
u't#t': 268026,
u'sen': 266728,
u'#v\xe6': 265282,
u'dre': 264896,
u'lin': 264463,
u'iti': 263170,
u'rbe': 262509,
u'#ge': 262054,
u'r#t': 261953,
u'g#o': 260269,
u'ssi': 259347,
u'tni': 259329,
u'ndr': 258599,
u'dig': 257331,
u'\xe5de': 256175,
u'bet': 255865,
u'n#i': 254190,
u'est': 251833,
u'ege': 250872,
u'ved': 250032,
u've#': 247239,
u'sam': 246973,
u'red': 246742,
u'par': 245534,
u'tat': 244840,
u'#ov': 243226,
u'ate': 243208,
u'bes': 242403,
u'#un': 242323,
u'ide': 239954,
u'ill': 239656,
u'fre': 239361,
u'sio': 239144,
u'ag#': 238693,
u'lit': 238002,
u'nes': 236957,
u'lag': 236541,
u'han': 236447,
u't#h': 236447,
u'g#i': 234463,
u'#la': 234237,
u'mmi': 232268,
u'ers': 232212,
u'tag': 231031,
u'n#m': 230096,
u'e#r': 230054,
u'g#v': 229162,
u'ven': 228915,
u't#b': 228443,
u'lde': 228020,
u'var': 227643,
u'f#d': 227472,
u'sse': 226379,
u'r\xe5d': 226257,
u'#se': 224772,
u'min': 219616,
u't#p': 219097,
u'#pa': 216146,
u'ndl': 216125,
u'ten': 215923,
u'ekt': 214954,
u'ame': 213759,
u'rma': 212726,
u'e#u': 212690,
u'd#a': 212438,
u'dt#': 211849,
u'g#e': 211614,
u'g#m': 211391,
u'kti': 211359,
u'rem': 210725,
u'ns#': 209347,
u'#s\xe5': 208448,
u'rke': 207760,
u'r#k': 207283,
u'tra': 206398,
u'r#p': 205463,
u'n#t': 204426,
u'ogs': 204200,
u'tid': 203956,
u'is#': 203850,
u'kni': 203441,
u'#tr': 203354,
u'ort': 203170,
u'kel': 202967,
u'st#': 202959,
u'gs\xe5': 202929,
u'on#': 202554,
u'#ta': 202532,
u'old': 202382,
u'oli': 202273,
u'g#h': 202036,
u'arb': 201647,
u'tiv': 201225,
u't#k': 201205,
u'rla': 200496,
u'ejd': 200445,
u'm#d': 200374,
u'#m\xe5': 200252,
u'hr#': 199887,
u'rt#': 199873,
u'all': 199762,
u'bej': 199630,
u'sla': 199444,
u'#di': 198686,
u'i#s': 197942,
u'#gr': 197551,
u'ble': 197456,
u'fte': 196838,
u'fra': 195982,
u'ist': 195728,
u'tis': 195370,
u'arl': 192898,
u'sti': 192537,
u'g#t': 190619,
u'sk#': 190378,
u'd#d': 190122,
u'e#l': 188720,
u'tem': 188272,
u'rde': 186924,
u'alt': 186179,
u'el#': 185587,
u'des': 183494,
u'reg': 182641,
u'e#g': 182027,
u'udv': 180974,
u'lt#': 180697,
u'n#d': 180617,
u'lam': 180402,
u'#f\xf8': 179307,
u'pol': 178860,
u'nsk': 178644,
u'n#h': 178494,
u'ra#': 177704,
u'hol': 177517,
u'#va': 177073,
u'n#v': 176901,
u'#sp': 175770,
u'e#n': 174949,
u'gel': 174057,
u'ems': 174046,
u'orb': 173413,
u'l#d': 173364,
u'\xe5#d': 172733,
u'emm': 171309,
u'g\xf8r': 171296,
u'm\xe5l': 171090,
u'i#e': 170452,
u'tor': 170415,
u'me#': 169893,
u't#g': 169645,
u'rst': 169644,
u'kri': 164977,
u'i#f': 164625,
u'bli': 164569,
u'mod': 163403,
u'#fa': 163395,
u'r#n': 163359,
u'kab': 163167,
u'esk': 163162,
u'gru': 163027,
u'm#a': 161081,
u's#f': 161077,
u'op\xe6': 160897,
u'#ar': 160675,
u'nst': 160141,
u'd#f': 159184,
u'#no': 158925,
u'#li': 158865,
u'#fi': 158845,
u'lev': 158685,
u'#da': 158490,
u'ets': 157973,
u'edl': 157695,
u'm#e': 157651,
u'gti': 155942,
u'mel': 155852,
u's#a': 155625,
u'ite': 155568,
u'dis': 155001,
u'\xe6is': 154996,
u'rli': 154850,
u'p\xe6i': 154821,
u'd#o': 154372,
u'ise': 153669,
u'gge': 153560,
u'art': 152125,
u'hen': 151610,
u'\xe6nd': 151240,
u'hvi': 150947,
u'#po': 150805,
u'r#u': 150577,
u's#s': 150141,
u't#u': 150004,
u'gan': 149719,
u'rat': 149705,
u'dem': 149377,
u'ts#': 149344,
u'amm': 149120,
u'akt': 148796,
u'rsl': 148770,
u'sto': 148247,
u'giv': 147725,
u'sik': 147645,
u'nds': 147643,
u'st\xf8': 147617,
u'#mo': 147470,
u'n#b': 147092,
u'hvo': 146814,
u'#ku': 146730,
u's#o': 146187,
u'ndi': 145146,
u'yde': 144101,
u'val': 143646,
u'sst': 143116,
u'dri': 143075,
u'dst': 142535,
u'ber': 141959,
u't\xe6n': 141809,
u'eks': 141672,
u'\xf8r#': 141577,
u'les': 141182,
u'l#s': 141081,
u'#vo': 140985,
u'jde': 140864,
u'ran': 140847,
u'i#h': 140626,
u'nat': 140444,
u'ele': 140332,
u'are': 139193,
u'nem': 139169,
u'bru': 138849,
u'dvi': 138655,
u'tik': 138355,
u'opa': 137958,
u'#hr': 137727,
u'ken': 137722,
u'vet': 137223,
u'#kr': 136442,
u'elt': 135941,
u'fin': 135535,
u'eds': 135340,
u'liv': 135301,
u'mar': 134453,
u'n#k': 134238,
u'rre': 133786,
u'lut': 132361,
u'int': 132311,
u'lli': 132242,
u'd#h': 131726,
u'ffe': 131296,
u's#i': 131190,
u'nal': 131012,
u'rel': 130831,
u'l#f': 130581,
u'#ef': 130447,
u'her': 130072,
u'ris': 129773,
u'lge': 128522,
u'tel': 128124,
u's\xe6r': 128098,
u'r#g': 126696,
u'kun': 126351,
u'rug': 126083,
u'sid': 126039,
u'\xe6nk': 125294,
u'\xf8rg': 124984,
u'dli': 124350,
u'#el': 124080,
u'rig': 124067,
u'dni': 123972,
u'nog': 123819,
u'per': 123744,
u'gsm': 123410,
u'ess': 123276,
u'run': 122736,
u'ad#': 122133,
u'led': 121485,
u'sm\xe5': 121062,
u'rsk': 120662,
u'f\xe6l': 120312,
u'#na': 119818,
u't#n': 119279,
u'n#p': 119115,
u'orh': 119016,
u'sel': 118835,
u'ons': 118824,
u'erf': 118781,
u'd#e': 118580,
u'tre': 118292,
u'sat': 116069,
u'rti': 115894,
u'mul': 115482,
u'vid': 115375,
u'rdi': 115274,
u'sp\xf8': 115263,
u'kte': 115230,
u'#br': 115177,
u'g#b': 115138,
u'p\xf8r': 114948,
u's#m': 114686,
u'eve': 114439,
u'\xe5#a': 114192,
u'm#f': 114039,
u'g#k': 113917,
u'rfo': 112294,
u'slu': 112246,
u'org': 112061,
u'ert': 111957,
u'hel': 111846,
u'd#i': 111746,
u'n#g': 111726,
u'eft': 111603,
u'ik#': 111024,
u'tan': 110437,
u's\xe6t': 110378,
u'\xe5l#': 109837,
u'nt#': 109717,
u'len': 109478,
u'os#': 109181,
u'd#t': 109112,
u'att': 108966,
u'meg': 108883,
u'l#o': 108631,
u'#r\xe5': 108606,
u'ppe': 108515,
u'i#m': 108233,
u'ona': 107994,
u'#mu': 107739,
u'r#r': 107669,
u'pri': 107586,
u'let': 107323,
u'uli': 107174,
u'tur': 106935,
u'ali': 106840,
u'od#': 106808,
u'ine': 106650,
u'syn': 106613,
u'kla': 106408,
u'tli': 106157,
u'gle': 106069,
u'#g\xf8': 105992,
u'nu#': 105927,
u's#d': 105733,
u'et\xe6': 105453,
u'nse': 105202,
u'kra': 105093,
u'rgs': 105061,
u'g#p': 104712,
u'omi': 104658,
u'esl': 104517,
u'hav': 104322,
u'nis': 104309,
u'#f\xe6': 104019,
u'dan': 103850,
u'\xe5#e': 103106,
u'l#e': 103042,
u's#e': 102960,
u'vig': 102681,
u'eu#': 102538,
u'tro': 102478,
u'rge': 102350,
u'#ny': 101873,
u'agt': 101867,
u'abe': 101845,
u'ien': 101758,
u'in#': 101749,
u't#l': 101455,
u'ndt': 101210,
u'd#s': 100934,
u'kt#': 100880,
u'dag': 100822,
u'#ne': 100116,
u'd#m': 100046,
u'r#l': 100005,
u'vir': 99924,
u'irk': 99675,
u'sfo': 99643,
u'###': 99620,
u'gte': 99533,
u'i#a': 99244,
u'ier': 99195,
u'#\xe5r': 99098,
u'\xe6rk': 99067,
u'ld#': 98942,
u'f#e': 98919,
u'tti': 98802,
u'mr\xe5': 98436,
u'nkn': 98382,
u'id#': 98287,
u'omr': 98258,
u'l\xe6g': 97959,
u'ude': 97093,
u'l#v': 97059,
u'ket': 96839,
u'alg': 96703,
u's#b': 96586,
u'g#g': 96576,
u'\xe6ll': 96558,
u'sag': 96497,
u'skr': 96483,
u't\xf8t': 96450,
u'tr\xe6': 96407,
u'#ga': 96343,
u'ina': 96282,
u'\xf8tt': 96232,
u'mss': 96211,
u'nsi': 95992,
u'\xf8rs': 95917,
u'aft': 95891,
u'#or': 95646,
u'm\xe5#': 95220,
u'ave': 95040,
u'edr': 94904,
u'egi': 94866,
u'i#b': 94846,
u'gra': 93583,
u'kre': 93536,
u'#kl': 93519,
u'r#j': 93432,
u'ast': 92937,
u'#os': 92388,
u'#b\xf8': 92063,
u'ono': 91904,
u'bed': 91496,
u'sva': 91440,
u'sit': 90748,
u'nom': 90680,
u'n#r': 90363,
u'b\xf8r': 90189,
u'\xf8ko': 90115,
u'erh': 89728,
u'n#n': 88896,
u'ant': 88795,
u'ked': 88608,
u'\xe6tt': 88244,
u'#ba': 88233,
u'eme': 88060,
u'elv': 88023,
u't#r': 88019,
u'nce': 87886,
u's#v': 87684,
u'mid': 87659,
u'l#g': 87518,
u'i#v': 87027,
u'df\xf8': 86900,
u'ikl': 86777,
u'vik': 86509,
u'ytt': 86462,
u'idl': 86287,
u'\xf8de': 86239,
u'nkt': 86180,
u'jer': 86012,
u'rag': 85924,
u'mil': 85731,
u'ram': 85713,
u'dva': 85581,
u'itu': 85417,
u'#nu': 85288,
u'lad': 85075,
u'oge': 84985,
u'rit': 84772,
u'\xe6ng': 84579,
u'sni': 84243,
u'm#s': 84042,
u's#p': 83422,
u'ted': 83324,
u'beh': 83241,
u'oll': 83170,
u'#gi': 83095,
u'nve': 83076,
u'dta': 82748,
u'kli': 82702,
u'eni': 82652,
u'rte': 82612,
u'st\xe5': 82601,
u'#n\xf8': 82557,
u'#f\xe5': 82539,
u'ode': 82302,
u'g#u': 82166,
u'#\xf8k': 82151,
u'\xe6ld': 82145,
u'l#b': 81933,
u'#ho': 81827,
u'm#v': 81624,
u'#le': 81428,
u'lar': 81252,
u'\xe5#v': 81220,
u'ges': 81194,
u'ial': 80993,
u'udg': 80780,
u'lov': 80633,
u'r##': 80577,
u'rek': 80455,
u'i#k': 80236,
u'#bi': 79799,
u'#to': 79585,
u'dsk': 79361,
u'spe': 79336,
u'#fl': 79318,
u'sin': 79266,
u'#\xe6n': 79016,
u'#lo': 78937,
u'spo': 78923,
u'sky': 78765,
u'lke': 78595,
u'#sy': 78458,
u'ntr': 78432,
u'\xe6r#': 78404,
u'#te': 78230,
u'ald': 77960,
u'unk': 77835,
u'emt': 77741,
u'ids': 77721,
u'ark': 77595,
u'f\xf8l': 77401,
u'ure': 77178,
u'uni': 77098,
u'net': 76852,
u'afs': 76655,
u'fat': 76505,
u'\xe6rd': 76501,
u'rme': 76032,
u'\xe5le': 75808,
u'\xf8ge': 75782,
u'\xf8je': 75780,
u'ukt': 75580,
u'#bo': 75559,
u'rak': 75549,
u'm#h': 75428,
u'bin': 75379,
u'tyd': 75324,
u'n#u': 75224,
u'lis': 75149,
u'ses': 75065,
u'pla': 74997,
u'n\xf8d': 74885,
u'#n\xe6': 74848,
u'm#m': 74816,
u'#pe': 74526,
u'idt': 74503,
u'ud#': 74262,
u'ori': 74256,
u'bor': 74142,
u'\xe5#s': 74092,
u'iv#': 74044,
u'teg': 74028,
u'obl': 73944,
u'#go': 73786,
u'l#i': 73761,
u'\xe6gg': 73677,
u'god': 73625,
u'#h\xf8': 73559,
u'ins': 73425,
u's#t': 73397,
u'#s\xe6': 73297,
u'gsf': 73256,
u'#bu': 73116,
u'#n\xe5': 72974,
u'i#i': 72822,
u'tru': 72719,
u'\xf8ns': 72331,
u'ire': 72278,
u'rbi': 72087,
u's#h': 72087,
u'van': 71956,
u'pa#': 71928,
u'un#': 71750,
u'r\xe6n': 71729,
u'm#i': 71703,
u'mpe': 71531,
u'bek': 71523,
u'nke': 71241,
u'uge': 71193,
u'pun': 71156,
u'#im': 71093,
u'spr': 71071,
u'nta': 70891,
u'rks': 70867,
u's\xf8g': 70798,
u'utn': 70745,
u'erv': 70721,
u'i#o': 70506,
u'ntl': 70149,
u'g#r': 70134,
u'por': 70134,
u'g\xe5r': 70086,
u'fek': 70035,
u'ora': 69781,
u'ikr': 69630,
u'ini': 69620,
u'us#': 69516,
u'rob': 69197,
u'ont': 69183,
u'fri': 69177,
u'beg': 69134,
u'fru': 68887,
u'ils': 68783,
u'ete': 68533,
u'ror': 68190,
u'cer': 68163,
u'n\xe5r': 67302,
u'olk': 67165,
u'vad': 66618,
u'#fu': 66569,
u'l#h': 66487,
u'tri': 66396,
u'rod': 66357,
u'mig': 66314,
u'hva': 66197,
u'tyr': 66080,
u'met': 65801,
u'\xe5#f': 65426,
u'fol': 65332,
u'nan': 65303,
u'bud': 65082,
u'ru#': 65051,
u'jds': 64956,
u'rol': 64635,
u'#dr': 64530,
u'opf': 64484,
u'\xf8lg': 64430,
u'rie': 64370,
u'ev#': 64248,
u'ned': 64109,
u'nli': 63961,
u'ilk': 63936,
u'upp': 63884,
u'sek': 63829,
u'eta': 63809,
u'#ra': 63653,
u'efo': 63592,
u'#of': 63573,
u'raf': 63546,
u'edt': 63149,
u'm#o': 63114,
u'f#a': 63098,
u'#l\xf8': 63078,
u'egr': 63051,
u'm\xe6r': 63031,
u'l\xf8s': 62840,
u'rid': 62826,
u'ult': 62799,
u'\xe6kk': 62403,
u'r\xe6k': 62369,
u'dve': 62320,
u'vel': 62153,
u'ade': 61879,
u'dr\xf8': 61869,
u'l#m': 61834,
u'rs#': 61831,
u'rup': 61628,
u'n#l': 61444,
u'a#d': 61429,
u'mt#': 61338,
u'hve': 61121,
u'enh': 61073,
u'era': 60949,
u'#ek': 60818,
u'cen': 60603,
u'rha': 60474,
u'#do': 60379,
u'dir': 60299,
u'kso': 60257,
u'eje': 59974,
u'rhe': 59839,
u'f#s': 59700,
u'rho': 59629,
u'nye': 59610,
u'unn': 59554,
u'jen': 59444,
u'#\xf8n': 59430,
u'f#f': 59397,
u'emo': 59379,
u'idi': 59379,
u'#gl': 59342,
u't\xf8r': 59070,
u'lta': 59026,
u'evi': 58968,
u'tin': 58921,
u'vin': 58819,
u'gne': 58752,
u'rts': 58697,
u'sku': 58443,
u'lik': 58285,
u'kol': 58172,
u'bev': 57974,
u'bel': 57834,
u'ari': 57821,
u'tes': 57723,
u'rog': 57704,
u'oci': 57652,
u'soc': 57631,
u'uds': 57601,
u'fle': 57544,
u'ili': 57534,
u'ltn': 57525,
u'ral': 57447,
u'n##': 57426,
u'ane': 57417,
u'#hu': 57405,
u'#su': 57391,
u's#r': 57382,
u'#fe': 57296,
u'sis': 57098,
u's#k': 57014,
u'ety': 57002,
u'\xe5be': 56839,
u'pr\xe6': 56829,
u'nel': 56784,
u'#l\xe6': 56667,
u'roc': 56603,
u'##o': 56590,
u'op#': 56551,
u'cia': 56394,
u'gr\xe6': 56315,
u'emf': 56312,
u'gsp': 56206,
u'err': 56093,
u'pe#': 56087,
u'#pl': 56014,
u'\xe5#m': 55965,
u'l#k': 55796,
u'fta': 55793,
u'oce': 55706,
u'd#v': 55657,
u'lv#': 55589,
u'mf\xf8': 55520,
u'to#': 55435,
u'ama': 55414,
u'ynd': 55054,
u'e##': 55049,
u'pen': 55048,
u'bil': 55000,
u'tak': 54942,
u'apa': 54855,
u'i#t': 54741,
u'omh': 54709,
u'fun': 54652,
u'ye#': 54619,
u'je#': 54573,
u'i#p': 54539,
u'ani': 54467,
u'\xe6de': 54429,
u'dra': 54329,
u'm#b': 54231,
u'#h\xe5': 54174,
u'nti': 54121,
u'sty': 54044,
u'#kv': 53950,
u'da#': 53944,
u'sme': 53909,
u'\xf8dv': 53846,
u'amt': 53826,
u'enc': 53694,
u'di#': 53659,
u'\xe5#g': 53602,
u'leg': 53599,
u'onk': 53464,
u'nio': 53442,
u'#g\xe5': 53408,
u'g#n': 53390,
u'e#\xf8': 53371,
u'mti': 53353,
u'mst': 53349,
u'egn': 53284,
u'dsp': 53274,
u'erk': 53073,
u'm#k': 52827,
u'nsy': 52827,
u'igg': 52772,
u'afg': 52679,
u'esu': 52643,
u'yn#': 52630,
u'erd': 52448,
u'erg': 52433,
u'ika': 52382,
u'erl': 52313,
u'#ri': 52193,
u'eho': 52159,
u'dsa': 52104,
u'sie': 52098,
u'm\xf8d': 52055,
u'kr\xe6': 51892,
u'rs\xf8': 51890,
u'f#m': 51867,
u'fas': 51704,
u'\xe5#i': 51666,
u'pre': 51542,
u'off': 51499,
u'ats': 51427,
u'ogl': 51325,
u'h\xf8j': 51324,
u'm#t': 51196,
u'tab': 51176,
u'uti': 51155,
u'##m': 50993,
u'ogr': 50832,
u'dar': 50749,
u'g##': 50577,
u'd#p': 50543,
u'ss\xe6': 50511,
u'eng': 50508,
u'ben': 50395,
u'eus': 50328,
u'yst': 50266,
u'lid': 50123,
u'odu': 50118,
u'kst': 50118,
u'ab#': 49982,
u'udt': 49969,
u'adi': 49963,
u'atu': 49922,
u'l#t': 49889,
u'\xe5et': 49805,
u'ful': 49788,
u'#ty': 49716,
u'kat': 49652,
u'kul': 49615,
u'pap': 49551,
u'#sv': 49424,
u'yrk': 49297,
u'fal': 49225,
u'iel': 49144,
u'ivi': 49132,
u'g#l': 49051,
u'v#o': 48966,
u'mhe': 48778,
u'dde': 48727,
u'ilf': 48376,
u'\xe6ve': 48297,
u'bef': 48214,
u'log': 48141,
u'uld': 48138,
u'imi': 48131,
u'\xe6ns': 48091,
u'okr': 48084,
u'rve': 48068,
u'ilj': 47951,
u'hov': 47943,
u'eha': 47851,
u'f\xe5#': 47832,
u'nsp': 47826,
u'ann': 47816,
u'tje': 47800,
u'ks#': 47650,
u'fer': 47617,
u'tek': 47579,
u'rdr': 47411,
u'rdn': 47400,
u'i#l': 47225,
u'nit': 47145,
u'emn': 47135,
u'vej': 47010,
u'\xe6st': 46921,
u'd#k': 46910,
u'oms': 46902,
u'\xf8rt': 46880,
u'rna': 46863,
u'st\xe6': 46815,
u'etn': 46797,
u'lys': 46763,
u'r\xf8r': 46752,
u'tit': 46707,
u'mok': 46490,
u'\xe6se': 46343,
u'rso': 46322,
u'sor': 46158,
u'#is': 46099,
u'ift': 46039,
u'orv': 45817,
u'#ak': 45792,
u'uat': 45784,
u'eml': 45728,
u'igs': 45689,
u'tua': 45616,
u'm#p': 45607,
u'g#j': 45559,
u'it#': 45551,
u'bag': 45472,
u'ds#': 45429,
u'kyt': 45419,
u'l#j': 45414,
u'dom': 45403,
u't##': 45376,
u'ass': 45312,
u'h\xf8r': 45139,
u'odt': 45058,
u'anv': 44971,
u'd#b': 44661,
u'set': 44612,
u'ldt': 44148,
u'ves': 44092,
u'\xe5#b': 44029,
u'#yd': 44002,
u's#u': 43989,
u'#ad': 43950,
u'\xe5#h': 43835,
u'i#g': 43818,
u'eva': 43588,
u'vne': 43326,
u'dge': 43183,
u'gni': 43183,
u'e#\xe5': 43183,
u't\xe6r': 43171,
u'jor': 42890,
u'duk': 42865,
u'pas': 42813,
u'gio': 42810,
u'pek': 42785,
u'dbr': 42712,
u'akk': 42700,
u'\xe5#o': 42697,
u'ndb': 42682,
u'm\xe5d': 42630,
u'ank': 42375,
u'utt': 42179,
u'#hj': 42147,
u'ktu': 42138,
u'opr': 42037,
u'kto': 41859,
u'r\xe6s': 41770,
u'l#p': 41740,
u'rgi': 41650,
u'g\xe6l': 41615,
u'ndh': 41428,
u'rga': 41395,
u'bar': 41350,
u'yld': 41331,
u'k#f': 41171,
u'tut': 40975,
u'\xe5#t': 40967,
u'avn': 40954,
u'sys': 40946,
u'lte': 40824,
u'vni': 40812,
u'#pu': 40801,
u'f#k': 40705,
u'u#f': 40698,
u'nyt': 40696,
u'ykk': 40652,
u'son': 40622,
u'#ro': 40609,
u'#sl': 40593,
u'fti': 40581,
u'##s': 40573,
u'mat': 40562,
u'top': 40545,
u'ndf': 40533,
u'ces': 40483,
u'mli': 40441,
u'fen': 40342,
u'ume': 40341,
u'\xe6ft': 40321,
u'\xe6rl': 40244,
u'igv': 40226,
u's\xe5d': 40186,
u'\xe5da': 40185,
u'lst': 40180,
u'gst': 40157,
u't\xe5r': 40133,
u'be#': 40031,
u'#eg': 40022,
u'ovg': 39934,
u'ksi': 39917,
u'vgi': 39727,
u'rik': 39711,
u'mes': 39694,
u'ug#': 39608,
u'#hi': 39535,
u'kor': 39496,
u'nig': 39491,
u'ndn': 39438,
u'h\xe6n': 39425,
u'l##': 39413,
u'ivn': 39361,
u'eff': 39333,
u'f#p': 39287,
u'nsv': 39085,
u'orl': 39015,
u'pli': 39015,
u'a#e': 38835,
u'esp': 38829,
u'lj\xf8': 38817,
u'l#u': 38792,
u'gvi': 38778,
u'k#o': 38627,
u's#l': 38600,
u'sul': 38589,
u'rad': 38583,
u'bal': 38501,
u'l#r': 38498,
u'edu': 38495,
u'ref': 38418,
u'tad': 38414,
u'omp': 38338,
u'rio': 38284,
u'dnu': 38085,
u'isi': 38065,
u'ela': 38059,
u'f#b': 38054,
u'kta': 38038,
u'ara': 37980,
u'ust': 37924,
u'sk\xe6': 37817,
u'lba': 37681,
u'anl': 37624,
u'opm': 37623,
u'rbr': 37579,
u'far': 37441,
u'k#s': 37427,
u'orf': 37239,
u'rea': 37054,
u'ssa': 37004,
u'gis': 36943,
u'ilb': 36915,
u'try': 36911,
u'cce': 36894,
u'sl\xe5': 36886,
u'dte': 36769,
u'#ly': 36673,
u'\xe6ss': 36648,
u'nci': 36643,
u'k#p': 36456,
u'##h': 36436,
u'ryk': 36396,
u'm\xe6s': 36380,
u'rej': 36310,
u'i#n': 36259,
u'ung': 36147,
u'epr': 36113,
u'ism': 36069,
u's#g': 36052,
u'\xe6vn': 36044,
u'abs': 35905,
u'h\xe5b': 35865,
u'inc': 35751,
u'dtr': 35663,
u'n\xe6v': 35654,
u'rep': 35647,
u'g\xe5#': 35646,
u't#j': 35631,
u'kur': 35627,
u'tsa': 35613,
u'lsk': 35610,
u'byg': 35584,
u'ruk': 35579,
u'riv': 35556,
u'ssk': 35521,
u'##a': 35488,
u'gjo': 35472,
u'rda': 35448,
u'fis': 35419,
u'rdf': 35366,
u'ban': 35333,
u'imo': 35303,
u'u#e': 35270,
u'pel': 35244,
u'ept': 35214,
u'i#r': 35189,
u'san': 35158,
u'a#o': 35084,
u'eto': 35076,
u'rev': 35065,
u'i#\xf8': 35057,
u'i#u': 35037,
u'ce#': 34972,
u'cip': 34961,
u'ldn': 34891,
u'j\xe6l': 34856,
u'nda': 34724,
u'enk': 34690,
u'sol': 34678,
u'rim': 34664,
u'ita': 34548,
u'iet': 34462,
u'ibe': 34401,
u'omf': 34320,
u'f#v': 34286,
u'd#j': 34222,
u'rd#': 34070,
u'n\xe6s': 33980,
u'gso': 33658,
u'ndg': 33610,
u'r\xe6f': 33571,
u'e#\xe6': 33472,
u'mni': 33467,
u'blo': 33433,
u'rum': 33306,
u'as#': 33290,
u'mfa': 33273,
u'ygt': 33264,
u'sko': 33191,
u'rkn': 33171,
u'sud': 33117,
u'vt#': 32982,
u'ri#': 32952,
u'pos': 32900,
u'd#u': 32876,
u'f\xe6r': 32845,
u'udf': 32816,
u'eti': 32791,
u'opl': 32712,
u'dl\xe6': 32675,
u'\xe6lp': 32627,
u'ods': 32585,
u'tie': 32553,
u'hj\xe6': 32544,
u'ndv': 32538,
u'fst': 32496,
u'#jo': 32398,
u'gre': 32377,
u'i##': 32358,
u'ifi': 32297,
u'kle': 32265,
u'cep': 32264,
u'#id': 32255,
u'\xf8st': 32253,
u'bri': 32245,
u'is\xe6': 32217,
u'#g\xe6': 32182,
u'rus': 32038,
u'lf\xe6': 31974,
u'ygg': 31857,
u'fg\xf8': 31845,
u's#n': 31717,
u'urd': 31686,
u'\xe5#k': 31609,
u'gi#': 31557,
u'inf': 31518,
u'ft#': 31510,
u'urr': 31457,
u'dda': 31447,
u'lie': 31410,
u'kvi': 31368,
u'ike': 31354,
u'vit': 31294,
u'tol': 31264,
u'n#j': 31221,
u'ple': 31210,
u'spu': 31160,
u's\xe5l': 31152,
u'sun': 31107,
u'a#s': 31067,
u'\xe6mp': 31035,
u'#tj': 30973,
u'#m\xf8': 30931,
u'dse': 30908,
u'erm': 30872,
u'anc': 30843,
u'#ru': 30764,
u'k\xe6m': 30741,
u'nor': 30697,
u'ivt': 30524,
u'ced': 30401,
u'am#': 30375,
u'#as': 30361,
u'spl': 30335,
u'url': 30232,
u'##e': 30228,
u'acc': 30134,
u'#us': 30106,
u'etr': 30053,
u'sli': 29970,
u'alv': 29961,
u'l\xe6n': 29955,
u'nku': 29952,
u'#tv': 29950,
u'olo': 29866,
u'fak': 29858,
u'msk': 29802,
u'spi': 29745,
u'idr': 29737,
u'v#f': 29733,
u'pet': 29687,
u'gav': 29654,
u'ov#': 29652,
u'g\xe5e': 29646,
u'lkn': 29626,
u'l#n': 29514,
u'ost': 29508,
u'ime': 29459,
u'dhe': 29295,
u'#em': 29283,
u'afh': 29213,
u'ice': 29165,
u'amf': 29088,
u'f#h': 29063,
u'vde': 29047,
u'\xe6gt': 29020,
u'dla': 28936,
u'lel': 28905,
u'm\xe6n': 28887,
u'r\xe6d': 28886,
u'e#j': 28881,
u'm#r': 28826,
u'udd': 28797,
u'm#j': 28785,
u'd#r': 28716,
u'orr': 28687,
u'ur#': 28648,
u'ags': 28610,
u'uel': 28610,
u'aml': 28559,
u'inv': 28557,
u'sre': 28525,
u'l#l': 28525,
u'dg\xe5': 28518,
u'dam': 28494,
u'lyk': 28489,
u'ega': 28485,
u'\xf8rr': 28481,
u'kse': 28475,
u'tia': 28389,
u'#gj': 28387,
u'#ke': 28380,
u'ejl': 28364,
u'dog': 28335,
u'lot': 28285,
u'ule': 28285,
u'bid': 28283,
u'dga': 28277,
u'sbe': 28228,
u'\xf8ft': 28219,
u'##d': 28179,
u'sv\xe6': 28108,
u'm#u': 28086,
u'ipp': 27999,
u'dsm': 27769,
u'mfu': 27733,
u'ogi': 27675,
u'rud': 27617,
u'ng\xe5': 27616,
u'ti#': 27565,
u't#\xf8': 27535,
u'fik': 27437,
u'\xe5#n': 27433,
u'din': 27430,
u'n\xe5#': 27392,
u'die': 27377,
u'orp': 27375,
u'l\xe6r': 27355,
u'iat': 27346,
u'ato': 27345,
u'lds': 27294,
u'f#o': 27294,
u'ot#': 27286,
u'r\xe6v': 27208,
u'gar': 27187,
u'aff': 27184,
u'\xe5#l': 27183,
u'rom': 27175,
u'#sm': 27167,
u'oru': 27128,
u'our': 27088,
u'ski': 27070,
u'fly': 27054,
u'jek': 27032,
u'ond': 26998,
u'rm#': 26964,
u'jel': 26905,
u'#s\xf8': 26858,
u'lat': 26839,
u'opn': 26816,
u'pn\xe5': 26786,
u'oni': 26782,
u'enb': 26743,
u'osi': 26742,
u'rav': 26735,
u'l\xf8b': 26706,
u'emb': 26664,
u'#ju': 26639,
u'\xe5#p': 26603,
u'#lu': 26560,
u'ole': 26525,
u'deb': 26517,
u'onf': 26499,
u'ikt': 26494,
u'hin': 26485,
u'gif': 26405,
u'mon': 26386,
u'ekn': 26295,
u'f#t': 26292,
u'ndu': 26259,
u'erb': 26235,
u'##i': 26235,
u'mor': 26216,
u'\xf8sn': 26210,
u'gie': 26207,
u'bla': 26196,
u'#\xf8j': 26185,
u'ign': 26103,
u'myn': 26050,
u'\xe5nd': 26047,
u'sm\xe6': 25957,
u'lek': 25938,
u'gs#': 25905,
u'\xe6tn': 25846,
u'v\xe6g': 25778,
u'rkl': 25764,
u'\xf8be': 25691,
u'lvo': 25641,
u'urt': 25593,
u'kva': 25579,
u'mit': 25485,
u'edi': 25454,
u'emi': 25452,
u'ard': 25443,
u'k\xe6f': 25424,
u'#ig': 25423,
u'kos': 25367,
u'dur': 25346,
u'rsi': 25336,
u'dus': 25278,
u'kes': 25178,
u'app': 25138,
u'u#o': 25079,
u'#ce': 25064,
u'ork': 25061,
u'\xf8se': 25059,
u'g#\xf8': 25041,
u'av#': 25010,
u'pfo': 25008,
u'nfo': 24938,
u'eau': 24927,
u'pil': 24922,
u'rfa': 24892,
u'ull': 24859,
u'em\xe6': 24817,
u'gde': 24813,
u'pte': 24807,
u'ilt': 24800,
u'dat': 24791,
u'vat': 24747,
u'm#g': 24741,
u'f\xe5r': 24717,
u'n#\xf8': 24701,
u'lti': 24700,
u'##f': 24667,
u'ydn': 24654,
u'f#r': 24647,
u'#r\xe6': 24646,
u'tar': 24631,
u'm#l': 24625,
u'spa': 24551,
u'v\xe6k': 24541,
u'#ua': 24465,
u'egl': 24463,
u'hur': 24398,
u'reb': 24350,
u'abi': 24313,
u'#b\xe5': 24268,
u'b\xe5d': 24231,
u'nie': 24213,
u'nar': 24210,
u'#t\xe6': 24178,
u'idd': 24165,
u'usi': 24093,
u'avd': 24089,
u'f#l': 24086,
u't\xe5e': 24084,
u'nav': 24074,
u'm#n': 24068,
u'n\xe6r': 24035,
u'orn': 23986,
u'abo': 23908,
u'#by': 23867,
u'ase': 23864,
u'd#n': 23857,
u'ads': 23840,
u'k#i': 23795,
u'enl': 23766,
u'ts\xe6': 23734,
u'ek\xe6': 23720,
u'ol#': 23710,
u'a#f': 23688,
u'\xf8ve': 23685,
u'dje': 23659,
u'edj': 23618,
u'#dy': 23615,
u'tvi': 23598,
u'#am': 23542,
u'vol': 23524,
u'lla': 23524,
u'#ac': 23514,
u'#ir': 23501,
u'uda': 23493,
u'u#s': 23445,
u'#\xe5b': 23434,
u'r#\xf8': 23411,
u'oba': 23389,
u'nss': 23384,
u'omk': 23266,
u'svi': 23214,
u'\xe5rs': 23189,
u'ev\xe6': 23182,
u'nkl': 23146,
u'rih': 23102,
u'l\xe6d': 23082,
u'ata': 23072,
u'oku': 23069,
u'ote': 23038,
u'anm': 23025,
u'lg#': 23019,
u'gsl': 22945,
u'\xe6ge': 22930,
u'bre': 22924,
u'rva': 22918,
u'sle': 22888,
u'rsa': 22852,
u'd#g': 22794,
u'a#h': 22761,
u'dfo': 22729,
u'h\xe6v': 22704,
u'uft': 22683,
u'pm\xe6': 22655,
u'ds\xe6': 22453,
u'mun': 22446,
u'skn': 22402,
u'yse': 22380,
u'fon': 22370,
u'\xe5en': 22307,
u'gl\xe6': 22307,
u'sma': 22275,
u'niv': 22225,
u'ply': 22210,
u'#k\xf8': 22207,
u'esv': 22178,
u'r\xf8f': 22153,
u'yne': 22144,
u'rn#': 22121,
u'nkr': 21977,
u'eno': 21970,
u'b\xe6r': 21947,
u'ukk': 21919,
u'rif': 21914,
u'bur': 21905,
u'isa': 21895,
u'fsl': 21886,
u'um#': 21865,
u'roj': 21849,
u'usa': 21843,
u'jeb': 21819,
u'tne': 21813,
u'dss': 21736,
u'a#m': 21675,
u'rpl': 21654,
u'fic': 21643,
u'aks': 21577,
u'ro#': 21573,
u'\xe6rr': 21564,
u'oro': 21551,
u'fan': 21488,
u'ugs': 21479,
u'ngl': 21439,
u'bas': 21409,
u'a#a': 21322,
u'ngi': 21312,
u'duc': 21271,
u'eba': 21231,
u'ile': 21225,
u'lob': 21223,
u'uce': 21210,
u'tim': 21175,
u'f#i': 21170,
u'ala': 21052,
u'#my': 21017,
u'ndd': 21007,
u'nmo': 20965,
u'mbe': 20960,
u'kas': 20953,
u'bat': 20907,
u'egy': 20872,
u'rro': 20834,
u'mte': 20833,
u'#m\xe6': 20753,
u'u#i': 20748,
u'h\xe5n': 20714,
u'\xe5sk': 20601,
u'uri': 20598,
u'dev': 20561,
u'eru': 20549,
u'\xf8ri': 20535,
u'ana': 20481,
u'oje': 20467,
u'k#a': 20455,
u'ebl': 20413,
u'ild': 20391,
u'rsv': 20386,
u'ihe': 20381,
u'dia': 20368,
u'emp': 20361,
u'mne': 20353,
u'd##': 20349,
u'#ki': 20345,
u'ak#': 20313,
u'uss': 20310,
u'pra': 20309,
u'nga': 20301,
u'f#u': 20289,
u'gdo': 20288,
u'ngt': 20250,
u'agd': 20209,
u'uv\xe6': 20202,
u'e#c': 20140,
u'dsf': 20120,
u'stn': 20087,
u'k#e': 20051,
u'arm': 20050,
u'\xf8rn': 20040,
u'#\xf8g': 19998,
u'dgi': 19965,
u'ops': 19912,
u'ima': 19903,
u'glo': 19891,
u'sso': 19882,
u'ebe': 19845,
u'k#m': 19795,
u'eky': 19795,
u'nhe': 19787,
u'f\xf8d': 19733,
u'kym': 19695,
u'ymr': 19686,
u'lim': 19657,
u'ldi': 19655,
u'sts': 19636,
u'ltu': 19608,
u'\xe5#u': 19597,
u'rta': 19569,
u'nuv': 19565,
u'rm\xe5': 19415,
u'fh\xe6': 19408,
u'dyg': 19341,
u'mle': 19308,
u'vnt': 19301,
u'\xf8dt': 19218,
u'\xf8mm': 19209,
u'rki': 19195,
u'ysn': 19028,
u'bem': 19002,
u'd#l': 18995,
u'ras': 18986,
u'ior': 18962,
u'sa#': 18960,
u'ie#': 18914,
u'u#h': 18907,
u'pta': 18893,
u'ejs': 18873,
u'sem': 18867,
u'eko': 18829,
u'rot': 18827,
u'ami': 18819,
u'fyl': 18801,
u'vnl': 18775,
u'kus': 18757,
u'luk': 18743,
u'gss': 18718,
u'yk#': 18675,
u'm\xe5s': 18624,
u'v\xe6s': 18588,
u'yre': 18556,
u'opt': 18550,
u'fje': 18536,
u'eso': 18515,
u'lpe': 18511,
u'stl': 18405,
u'vea': 18370,
u'tsm': 18344,
u'a#i': 18343,
u'kyl': 18305,
u'\xe6ri': 18289,
u'rtn': 18280,
u'kam': 18265,
u'edy': 18259,
u'ppo': 18244,
u'ekv': 18241,
u'iod': 18180,
u'atn': 18150,
u'edd': 18097,
u'dti': 18074,
u'ej#': 18063,
u'fe#': 18003,
u't#\xe6': 17987,
u'rce': 17987,
u'nsa': 17976,
u'a#v': 17973,
u'ny#': 17956,
u'kl\xe6': 17933,
u'gyn': 17865,
u'#fj': 17862,
u'vur': 17858,
u'ero': 17846,
u'gat': 17819,
u'tss': 17819,
u'ia#': 17793,
u'nbl': 17779,
u'opg': 17722,
u'#co': 17721,
u'gtn': 17648,
u'sch': 17635,
u'eci': 17560,
u'efi': 17548,
u'amp': 17544,
u'na#': 17528,
u'#ul': 17501,
u'kve': 17477,
u'ema': 17468,
u'mpl': 17451,
u'usl': 17442,
u'#ni': 17418,
u'rap': 17384,
u'adg': 17338,
u'v#s': 17324,
u'lve': 17323,
u'f#n': 17318,
u'#tu': 17312,
u'oto': 17280,
u'ici': 17248,
u'#ja': 17241,
u'oft': 17237,
u'kil': 17228,
u'smi': 17221,
u'onv': 17211,
u'ars': 17191,
u'pec': 17190,
u'\xe6ff': 17182,
u'rvi': 17174,
u'bis': 17172,
u'k#h': 17159,
u'enr': 17130,
u'lok': 17109,
u'k\xe6r': 17083,
u't\xe5#': 17082,
u'#d\xf8': 17058,
u'lav': 17015,
u'ekr': 17011,
u'a#k': 16950,
u'alu': 16948,
u'rtr': 16946,
u'ida': 16941,
u'uer': 16928,
u'pfa': 16917,
u'kin': 16902,
u'lio': 16886,
u'ksp': 16839,
u'als': 16829,
u'lej': 16825,
u'#ca': 16824,
u'k#t': 16783,
u'esi': 16752,
u'tue': 16739,
u'#ev': 16690,
u'it\xe6': 16657,
u'lib': 16644,
u'asp': 16598,
u'rf\xf8': 16596,
u'lk#': 16578,
u'ira': 16560,
u'hun': 16543,
u'gig': 16518,
u'v#i': 16490,
u'tog': 16489,
u'cha': 16482,
u'rbu': 16372,
u'\xe5#r': 16340,
u'ok#': 16298,
u'k#d': 16295,
u'\xe6ks': 16249,
u'#ci': 16244,
u'kie': 16224,
u'alo': 16191,
u'ryd': 16165,
u'dyr': 16101,
u'che': 16087,
u'#k\xe6': 16082,
u'vri': 16076,
u'u#m': 16044,
u'ria': 16042,
u'dne': 16033,
u'f\xf8j': 16033,
u'sl\xf8': 16029,
u'imp': 16014,
u'env': 15990,
u'cie': 15977,
u'dsl': 15973,
u'ldr': 15955,
u'agn': 15954,
u'\xe5ne': 15903,
u'she': 15880,
u'udn': 15847,
u'#ch': 15835,
u'f#g': 15829,
u'lje': 15772,
u'mal': 15754,
u'#b\xe6': 15726,
u'v#d': 15713,
u'n\xe5e': 15707,
u'ged': 15678,
u'hje': 15634,
u'rof': 15630,
u'#\xf8v': 15628,
u'evn': 15620,
u'ivl': 15582,
u'nop': 15569,
u'k\xf8n': 15565,
u'a##': 15558,
u'bon': 15549,
u'emg': 15482,
u'pfy': 15463,
u'tas': 15455,
u'edn': 15450,
u'\xe6rt': 15426,
u'nol': 15398,
u'viv': 15383,
u'pan': 15377,
u'jo#': 15289,
u'k#v': 15285,
u'#h\xe6': 15285,
u'mag': 15210,
u'tod': 15174,
u'r\xe6b': 15174,
u'#\xf8s': 15160,
u'kap': 15102,
u'tom': 15082,
u'u#k': 15070,
u'lus': 15046,
u'\xe5d#': 15017,
u'mpr': 15004,
u'olu': 14966,
u'nvi': 14959,
u'nla': 14955,
u'lp#': 14944,
u'#sn': 14936,
u'kno': 14933,
u'ltr': 14924,
u'pga': 14893,
u'ekl': 14876,
u'kad': 14829,
u'tv\xe6': 14805,
u'\xe5re': 14805,
u'hus': 14727,
u'rri': 14717,
u'odk': 14663,
u'peg': 14661,
u'm\xe5n': 14637,
u'r#c': 14589,
u'#ug': 14539,
u'ink': 14536,
u'eg\xe5': 14536,
u'sou': 14531,
u'udb': 14531,
u'#ut': 14483,
u'dke': 14474,
u'etf': 14470,
u'tof': 14454,
u'dok': 14443,
u'nfe': 14432,
u'ntu': 14429,
u'luf': 14391,
u'oka': 14380,
u'sal': 14357,
u'ree': 14308,
u'deh': 14305,
u'kis': 14293,
u'mi#': 14290,
u'klu': 14269,
u'\xf8vr': 14262,
u'cis': 14245,
u'##\xe5': 14242,
u'\xe5ls': 14235,
u'nok': 14218,
u'ut#': 14213,
u'au#': 14190,
u'yns': 14180,
u'#vu': 14172,
u'tla': 14163,
u'rmi': 14160,
u'ssy': 14122,
u'gev': 14117,
u'sab': 14098,
u'bne': 14085,
u's#\xf8': 14077,
u'ilg': 14022,
u't#\xe5': 14020,
u'urc': 14010,
u'\xe5##': 14006,
u'arg': 13979,
u'abl': 13938,
u'#ag': 13933,
u'nov': 13931,
u'kum': 13922,
u'a#t': 13899,
u'nri': 13898,
u'nho': 13845,
u'eel': 13836,
u'mie': 13788,
u'\xf8j#': 13773,
u'ros': 13743,
u'a#b': 13719,
u'deg': 13704,
u'ka#': 13700,
u'pak': 13679,
u'nts': 13677,
u'ta#': 13673,
u'p#t': 13662,
u'l\xe6s': 13640,
u'def': 13544,
u'##j': 13520,
u'gsk': 13517,
u'sra': 13516,
u'opb': 13506,
u'iko': 13493,
u'kar': 13492,
u'l\xe5e': 13479,
u'tif': 13455,
u'la#': 13440,
u'saf': 13400,
u'ova': 13381,
u'elh': 13317,
u'dny': 13275,
u'tus': 13275,
u'\xe5bn': 13237,
u'the': 13213,
u'\xe6be': 13213,
u'\xe5el': 13205,
u'mko': 13171,
u'inj': 13155,
u'nfl': 13151,
u'ndo': 13148,
u'dit': 13147,
u'pst': 13142,
u'mas': 13139,
u'alm': 13132,
u'v#t': 13107,
u'v#e': 13085,
u'##v': 13081,
u'mmu': 13079,
u'dg\xf8': 13062,
u'omt': 13045,
u'oma': 13043,
u'o#m': 13035,
u'avs': 13030,
u'eal': 13019,
u'oph': 13006,
u'nje': 13005,
u'k#b': 12988,
u'\xe5rl': 12957,
u'rse': 12936,
u'k#k': 12899,
u'afv': 12882,
u'rib': 12878,
u'cit': 12875,
u'rmo': 12855,
u'lgt': 12824,
u'ded': 12810,
u'edf': 12793,
u'#sc': 12791,
u'gem': 12768,
u'bry': 12715,
u'##t': 12715,
u'el\xe6': 12709,
u'sgr': 12685,
u'arr': 12681,
u'nhv': 12669,
u'sna': 12668,
u'hos': 12652,
u'tf\xe6': 12633,
u'mbi': 12622,
u'siv': 12604,
u'oko': 12578,
u'rhv': 12553,
u't#c': 12529,
u'afi': 12528,
u'ls\xe6': 12508,
u'lyd': 12498,
u'ysk': 12463,
u'nbe': 12461,
u'v\xe6l': 12461,
u'gli': 12458,
u'o#o': 12429,
u'tse': 12426,
u'ese': 12425,
u'mkr': 12423,
u'\xe5s#': 12418,
u'his': 12388,
u'mre': 12348,
u'ni#': 12344,
u'tok': 12311,
u'uta': 12310,
u'i#j': 12309,
u'iva': 12262,
u'byd': 12262,
u'etl': 12260,
u'#\xe6r': 12253,
u'oen': 12248,
u'pat': 12235,
u'api': 12193,
u'#fn': 12181,
u'rov': 12178,
u'o#f': 12174,
u'dec': 12172,
u'rk#': 12152,
u'#ab': 12147,
u'gul': 12108,
u'jse': 12070,
u'gal': 12055,
u'opp': 12036,
u'\xe6lg': 12022,
u'rka': 12020,
u'isn': 12006,
u'egu': 12005,
u'gri': 12002,
u'ls#': 11994,
u'ong': 11983,
u'slo': 11975,
u't\xf8j': 11946,
u'fej': 11929,
u'ron': 11918,
u'el\xf8': 11864,
u'pm\xf8': 11846,
u'dsr': 11839,
u'bra': 11804,
u'msi': 11803,
u'las': 11748,
u'lko': 11734,
u'er\xf8': 11726,
u'uaf': 11726,
u'a#p': 11720,
u'\xe6rm': 11717,
u'\xe6g#': 11674,
u'mri': 11585,
u'bun': 11567,
u'dmi': 11566,
u'\xe6si': 11561,
u'fgi': 11539,
u'tho': 11488,
u'lfr': 11458,
u'amb': 11421,
u'd\xf8m': 11415,
u'edv': 11412,
u'fli': 11408,
u'v#a': 11392,
u'eld': 11385,
u'tys': 11384,
u'v\xe5g': 11380,
u'i\xf8s': 11377,
u'syd': 11375,
u'afr': 11367,
u'jle': 11365,
u'dm\xe6': 11342,
u'dsi': 11314,
u'ask': 11308,
u'b#o': 11275,
u'nh\xe6': 11272,
u'lso': 11251,
u'ila': 11247,
u'dko': 11198,
u'v#v': 11163,
u'tsp': 11153,
u'#iv': 11140,
u'l\xe5r': 11135,
u'vem': 11088,
u'civ': 11087,
u'rsp': 11083,
u'\xf8b#': 11030,
u'aci': 11022,
u'eth': 11020,
u'hal': 11008,
u'd\xf8d': 10997,
u'nik': 10983,
u'gsb': 10979,
u'asi': 10978,
u'lts': 10963,
u'jli': 10959,
u'ham': 10958,
u'elu': 10946,
u'eyd': 10933,
u'o#s': 10921,
u'ton': 10878,
u'vli': 10866,
u'ukr': 10853,
u'odi': 10851,
u'#it': 10849,
u'use': 10843,
u'lga': 10812,
u'oor': 10793,
u'jem': 10783,
u'nsm': 10783,
u'dsg': 10768,
u'eor': 10757,
u'koo': 10729,
u'ulo': 10716,
u'rv\xe5': 10716,
u'vl#': 10703,
u'ovl': 10694,
u'urg': 10686,
u'uff': 10657,
u'igd': 10648,
u'rtu': 10633,
u'ute': 10619,
u'rko': 10618,
u'ope': 10615,
u'haf': 10614,
u'pag': 10604,
u'i#\xe5': 10591,
u'roe': 10575,
u'sak': 10566,
u'cif': 10565,
u'ygn': 10552,
u'tey': 10552,
u'jur': 10550,
u'syg': 10544,
u'eby': 10533,
u'ngr': 10529,
u'rhi': 10523,
u'gg\xf8': 10518,
u'u#b': 10510,
u'k\xf8b': 10494,
u'oso': 10491,
u'tsl': 10459,
u'oks': 10458,
u'gr\xf8': 10454,
u'ifa': 10425,
u'amh': 10417,
u's\xf8r': 10410,
u'r\xe6c': 10405,
u'\xf8nn': 10383,
u'lyt': 10377,
u'omb': 10373,
u's##': 10368,
u'elk': 10358,
u'inn': 10357,
u'#ur': 10350,
u'pit': 10341,
u'fam': 10329,
u'nno': 10321,
u'n\xf8j': 10298,
u'##b': 10282,
u'ef\xf8': 10282,
u'udl': 10278,
u'dso': 10277,
u'\xe5ds': 10272,
u'dby': 10257,
u'veg': 10239,
u'ofe': 10229,
u'ugt': 10221,
u'iga': 10200,
u'rni': 10175,
u'onc': 10170,
u'fem': 10158,
u'gsa': 10142,
u'fok': 10141,
u'onl': 10118,
u'bol': 10112,
u'tfo': 10105,
u'\xf8d#': 10088,
u'ryg': 10079,
u'lsl': 10072,
u'vn#': 10064,
u'f\xe5e': 10056,
u'emh': 10049,
u'u#v': 10037,
u'\xf8jt': 10031,
u'pby': 10030,
u'gla': 10027,
u'eka': 10025,
u'bt#': 10024,
u'k#u': 10014,
u'gik': 10003,
u'ilp': 9997,
u'uan': 9983,
u'n#c': 9961,
u'm##': 9946,
u'f##': 9946,
u't\xe6t': 9924,
u'itt': 9920,
u'lia': 9919,
u'dho': 9917,
u'iv\xe6': 9838,
u'bif': 9833,
u'ftl': 9818,
u'ga#': 9814,
u'ns\xf8': 9804,
u'rae': 9795,
u'ts\xe5': 9789,
u'afb': 9765,
u'usk': 9748,
u'mpo': 9746,
u'nl\xe6': 9718,
u'\xe6ci': 9717,
u'r\xf8m': 9716,
u'lpa': 9715,
u'ucc': 9714,
u'gn#': 9714,
u'##n': 9687,
u'nle': 9684,
u'uen': 9665,
u'mh\xf8': 9650,
u'#ob': 9642,
u'ggr': 9628,
u'iar': 9627,
u'mg\xe5': 9609,
u'kud': 9608,
u'v#h': 9524,
u'ko#': 9512,
u'lsy': 9504,
u'rvs': 9502,
u'v#m': 9488,
u'fes': 9473,
u'ddr': 9456,
u'ece': 9452,
u'ype': 9434,
u'o#e': 9429,
u'igi': 9424,
u'k\xf8r': 9418,
u'kyn': 9416,
u'a#u': 9405,
u'agg': 9405,
u'fvi': 9401,
u'kro': 9385,
u'ael': 9384,
u'fsk': 9376,
u'nsf': 9375,
u'mh\xe6': 9371,
u'b#a': 9345,
u'pal': 9338,
u'r#y': 9323,
u'sar': 9318,
u'epa': 9313,
u'rle': 9311,
u'ola': 9296,
u'mek': 9293,
u'uma': 9293,
u'#au': 9293,
u'e#y': 9290,
u'v\xe5b': 9283,
u'\xf8le': 9270,
u'suc': 9261,
u'l\xe5#': 9251,
u'adm': 9230,
u'lmi': 9224,
u'ofr': 9216,
u'dik': 9203,
u'vok': 9199,
u'geo': 9198,
u'rkt': 9191,
u'oti': 9166,
u'#uk': 9152,
u'lvf': 9144,
u'aly': 9141,
u'isr': 9108,
u'ome': 9101,
u'sov': 9093,
u'bit': 9080,
u'nyl': 9077,
u'egg': 9075,
u'ena': 9061,
u'efa': 9048,
u'nso': 9041,
u'p#a': 9028,
u'yli': 9015,
u's#j': 9013,
u'u#a': 8996,
u'r\xf8n': 8988,
u'#uf': 8969,
u'mla': 8966,
u'byr': 8964,
u'sfr': 8956,
u'pti': 8953,
u'fir': 8947,
u'ubl': 8947,
u'\xe5#j': 8940,
u'skl': 8935,
u'##p': 8929,
u'een': 8916,
u'kt\xf8': 8912,
u'\xf8g#': 8909,
u'r#\xe6': 8892,
u'lsa': 8891,
u'fet': 8862,
u'r#\xe5': 8846,
u'#ol': 8828,
u'\xf8ds': 8773,
u'kaf': 8764,
u'bio': 8749,
u'tso': 8724,
u'rg#': 8708,
u'rsy': 8681,
u'\xe5ed': 8678,
u'id\xe9': 8662,
u'don': 8658,
u'rv\xe6': 8655,
u'yr#': 8631,
u'nba': 8614,
u'tud': 8587,
u'gts': 8586,
u'rur': 8571,
u'msa': 8570,
u'\xe9n#': 8548,
u'eny': 8547,
u'i#c': 8539,
u'uxe': 8517,
u'jet': 8515,
u'ufo': 8506,
u'alb': 8496,
u'aue': 8484,
u'smu': 8449,
u'd\xe5r': 8449,
u'bso': 8446,
u'avi': 8427,
u'ein': 8414,
u'\xe6t#': 8409,
u'mga': 8409,
u'bou': 8405,
u'kut': 8401,
u'yrd': 8389,
u'hum': 8389,
u'sve': 8383,
u'cem': 8375,
u'io#': 8373,
u'sbo': 8369,
u'rds': 8368,
u'edb': 8355,
u'rnu': 8353,
u'vf\xf8': 8351,
u'##k': 8321,
u'lud': 8308,
u'ain': 8305,
u'gsi': 8304,
u'olv': 8299,
u'#um': 8283,
u'sib': 8280,
u'l\xf8n': 8278,
u'sim': 8257,
u'ulg': 8251,
u'obi': 8247,
u'tob': 8245,
u'via': 8245,
u'lon': 8244,
u'tun': 8240,
u'uar': 8234,
u'#gu': 8227,
u'nuf': 8224,
u'jul': 8222,
u'#ap': 8212,
u'vek': 8209,
u'yk\xf8': 8204,
u'nsn': 8201,
u'j\xf8e': 8200,
u'#\xe9n': 8163,
u'r\xf8v': 8158,
u'eb#': 8152,
u'rba': 8148,
u'neg': 8142,
u'opo': 8129,
u'nfr': 8121,
u'pr\xf8': 8111,
u'rh\xe5': 8109,
u'lom': 8101,
u'epu': 8098,
u'a#r': 8052,
u'ms\xe6': 8037,
u'dyb': 7998,
u't#y': 7974,
u'\xf8gt': 7962,
u'ja#': 7961,
u'ebr': 7961,
u'#d\xe5': 7952,
u'gag': 7944,
u'nab': 7939,
u'n#\xe6': 7903,
u'ube': 7890,
u'div': 7868,
u'ovo': 7854,
u'\xf8et': 7850,
u'p#o': 7842,
u'o#a': 7830,
u'o#i': 7828,
u'dsv': 7820,
u'ium': 7808,
u'ml\xe6': 7808,
u'd\xe6k': 7803,
u'\xe5di': 7801,
u'aki': 7800,
u'jun': 7792,
u'p\xe5p': 7790,
u'sda': 7787,
u'eda': 7775,
u'\xe5vi': 7767,
u'sep': 7756,
u'r\xe5r': 7742,
u'dic': 7722,
u'p\xe5v': 7721,
u'hid': 7711,
u'sha': 7702,
u'ose': 7700,
u'ygd': 7698,
u'gsr': 7693,
u'erp': 7691,
u'eke': 7683,
u'v#b': 7681,
u'\xe5pe': 7679,
u'dto': 7676,
u'lha': 7664,
u'ngd': 7662,
u'#ub': 7659,
u'pes': 7654,
u'sth': 7642,
u'see': 7640,
u'j\xf8#': 7639,
u'ula': 7624,
u'gas': 7622,
u'k#l': 7621,
u'yt#': 7621,
u'\xe5rd': 7614,
u'oer': 7580,
u'mak': 7565,
u'p\xe5g': 7562,
u'imu': 7547,
u'#d\xe6': 7545,
u'ndk': 7544,
u'ipo': 7532,
u'esa': 7532,
u'p#i': 7527,
u'lf\xf8': 7516,
u'\xe6rs': 7512,
u'fho': 7510,
u'nam': 7475,
u'bak': 7470,
u'k\xe5r': 7468,
u'v#u': 7462,
u'mbo': 7456,
u'ks\xe6': 7443,
u'tst': 7431,
u'tam': 7419,
u'al\xe6': 7414,
u'kov': 7412,
u'#du': 7407,
u'alk': 7388,
u'#v\xe5': 7369,
u'gse': 7365,
u'u#t': 7365,
u'jan': 7360,
u'\xf8sh': 7355,
u'pub': 7354,
u'n#\xe5': 7342,
u'eaf': 7339,
u'ma#': 7318,
u'lic': 7307,
u'nto': 7306,
u'jes': 7281,
u'fel': 7281,
u'#ej': 7280,
u'mad': 7259,
u'g#c': 7254,
u'h\xe5r': 7247,
u'ols': 7247,
u'anb': 7240,
u'adv': 7239,
u'adt': 7232,
u'dsb': 7228,
u'lyg': 7218,
u'nr#': 7205,
u'ynl': 7204,
u'kl#': 7188,
u'nvo': 7177,
u'tfa': 7177,
u'f\xe6n': 7169,
u'urf': 7166,
u'irl': 7152,
u'nni': 7148,
u'rra': 7148,
u'abt': 7135,
u'\xe5ka': 7130,
u'lod': 7112,
u'\xe5g\xe6': 7108,
u'o#t': 7107,
u'ito': 7107,
u'nsr': 7105,
u'a#n': 7101,
u'lue': 7098,
u'if\xf8': 7097,
u'lil': 7093,
u'esf': 7090,
u'\xe6ni': 7088,
u'elf': 7086,
u'#nr': 7085,
u'\xe5gn': 7064,
u'eak': 7055,
u'u#d': 7035,
u'dka': 7021,
u'n#y': 7009,
u'yni': 7007,
u'eev': 7006,
u's\xe5k': 7006,
u'vsl': 6981,
u'tl\xe6': 6979,
u'fts': 6970,
u'teb': 6968,
u'odn': 6966,
u'mob': 6964,
u'ksl': 6922,
u'vag': 6917,
u'\xe9t#': 6905,
u'#\xf8d': 6904,
u'rto': 6899,
u'efe': 6895,
u'udm': 6893,
u'gam': 6891,
u'pot': 6889,
u'nsu': 6882,
u'vs#': 6870,
u'nim': 6870,
u'p\xe5t': 6869,
u'ktr': 6852,
u'br\xe6': 6846,
u'asy': 6846,
u'kot': 6828,
u'ffa': 6812,
u'aut': 6807,
u'typ': 6800,
u'a#l': 6786,
u'dif': 6785,
u'dek': 6782,
u'fry': 6766,
u's\xe5s': 6750,
u'#kn': 6745,
u'kib': 6733,
u'sub': 6720,
u'tug': 6720,
u'ped': 6713,
u'dfl': 6710,
u'yri': 6692,
u'hil': 6691,
u'gna': 6690,
u'gsv': 6685,
u'd\xf8v': 6680,
u'm#\xf8': 6679,
u'fla': 6678,
u'msl': 6671,
u'\xf8s#': 6662,
u'sup': 6652,
u'atl': 6628,
u'ftf': 6626,
u'#il': 6621,
u'efr': 6617,
u'#ky': 6606,
u'lk\xe5': 6604,
u'fns': 6599,
u'emr': 6598,
u'jl#': 6598,
u'#av': 6589,
u'\xe5so': 6584,
u'ddi': 6580,
u'm#\xe5': 6576,
u'fod': 6574,
u'#ec': 6572,
u'bog': 6561,
u'elo': 6557,
u'l\xf8f': 6555,
u'dt\xe6': 6554,
u'tyk': 6544,
u'mta': 6531,
u'job': 6528,
u'dbe': 6525,
u'#pi': 6523,
u'a#j': 6517,
u'agl': 6516,
u'ura': 6506,
u'bos': 6503,
u'o#h': 6499,
u'dsy': 6495,
u'eum': 6486,
u'git': 6476,
u'#a#': 6462,
u'b#i': 6461,
u'arn': 6461,
u'k#r': 6459,
u'ofi': 6458,
u'ibi': 6458,
u'epo': 6450,
u'syl': 6447,
u'obe': 6437,
u'gfo': 6425,
u'ian': 6425,
u'mpa': 6419,
u'ilh': 6411,
u'gsu': 6395,
u'nsb': 6387,
u'uns': 6384,
u'dim': 6362,
u'auk': 6359,
u'stu': 6357,
u'b#s': 6347,
u'bsi': 6336,
u'bye': 6326,
u'uto': 6311,
u'o#p': 6299,
u'aro': 6299,
u'ndm': 6292,
u'p#f': 6290,
u'\xe6k#': 6286,
u'ud\xf8': 6276,
u's\xe5v': 6260,
u'irs': 6254,
u'ear': 6249,
u'jst': 6249,
u'\xf8jd': 6243,
u'cee': 6236,
u'p\xe5l': 6229,
u'j#g': 6223,
u'\xe5ve': 6223,
u'tr\xe5': 6186,
u'eh\xf8': 6174,
u'rp\xe5': 6162,
u't\xe6g': 6158,
u'rtj': 6158,
u'pir': 6137,
u'anu': 6135,
u'\xe6er': 6132,
u'rnb': 6129,
u't\xe6l': 6116,
u'ica': 6115,
u'u#g': 6114,
u'p#p': 6114,
u'lif': 6108,
u'los': 6104,
u'alp': 6096,
u'has': 6092,
u'obb': 6092,
u'rip': 6092,
u'arv': 6075,
u'isb': 6073,
u'alj': 6066,
u'rai': 6058,
u'a#g': 6049,
u'lre': 6043,
u'mra': 6036,
u'ruf': 6032,
u'#ok': 6028,
u'k##': 6027,
u'bse': 6025,
u'rab': 6022,
u'ip#': 6021,
u'd\xf8r': 6005,
u'obs': 5983,
u'ech': 5982,
u'sum': 5969,
u'g\xe6n': 5965,
u'rue': 5960,
u'wto': 5957,
u'uac': 5937,
u'apr': 5924,
u'f#c': 5924,
u'riu': 5915,
u'ilo': 5899,
u'eud': 5892,
u'bbe': 5891,
u'r\xf8d': 5885,
u's\xe5r': 5882,
u'ubs': 5874,
u'eag': 5865,
u'im\xe6': 5848,
u'rd\xf8': 5838,
u'm\xe5t': 5827,
u'opd': 5816,
u'seo': 5804,
u'um\xe6': 5799,
u'tsf': 5798,
u'omg': 5796,
u'edo': 5794,
u'ca#': 5793,
u'#\xe9t': 5786,
u's#\xe6': 5782,
u'eb\xe6': 5780,
u'yer': 5777,
u'lvi': 5776,
u'dov': 5774,
u'li#': 5768,
u'pf\xf8': 5755,
u'nko': 5754,
u'b#f': 5747,
u'ams': 5746,
u'utr': 5745,
u'ma\xe6': 5743,
u'a\xe6n': 5742,
u'elm': 5741,
u'tsb': 5739,
u'iso': 5733,
u'elb': 5724,
u'tsk': 5723,
u'##l': 5713,
u'##c': 5710,
u'df\xe6': 5703,
u'edk': 5703,
u'sce': 5697,
u'#wt': 5689,
u'jt#': 5685,
u'v#p': 5684,
u'p#d': 5683,
u'nua': 5681,
u'k#g': 5680,
u'sbr': 5673,
u'pac': 5671,
u'ivs': 5666,
u'sge': 5655,
u'ov\xe6': 5655,
u'umm': 5647,
u'ngo': 5645,
u'rl\xe6': 5638,
u'ttr': 5632,
u'dbo': 5624,
u'lhe': 5620,
u'air': 5614,
u'mtr': 5613,
u'es\xf8': 5611,
u'ril': 5607,
u'ssp': 5603,
u'b#t': 5594,
u'##u': 5592,
u'urs': 5591,
u'\xe5tt': 5586,
u'mba': 5581,
u'p\xe6e': 5581,
u'gum': 5579,
u'sfa': 5560,
u'rgu': 5552,
u'ott': 5550,
u'isl': 5537,
u'ejr': 5520,
u'ich': 5519,
u'p\xe5s': 5519,
u'idn': 5516,
u'udk': 5507,
u'gns': 5505,
u'cin': 5503,
u'n\xe6g': 5498,
u'ppl': 5496,
u'lvs': 5489,
u'p#m': 5475,
u'maj': 5466,
u'xel': 5465,
u'stk': 5464,
u'rar': 5462,
u'fag': 5461,
u'eul': 5457,
u'bni': 5456,
u'#ry': 5456,
u'rux': 5456,
u'asn': 5451,
u'ric': 5451,
u'mot': 5442,
u'vot': 5440,
u'dep': 5431,
u'et\xf8': 5425,
u'dob': 5421,
u't\xf8d': 5414,
u'ego': 5410,
u'erc': 5403,
u'kep': 5398,
u'aus': 5395,
u'mov': 5394,
u'ipl': 5380,
u'uin': 5373,
u'ul#': 5373,
u'pin': 5367,
u'ir#': 5358,
u'rmu': 5347,
u'k#n': 5337,
u'ifo': 5329,
u'pt#': 5326,
u'v#k': 5326,
u'esm': 5322,
u'nej': 5300,
u'tsg': 5294,
u'klo': 5294,
u'akn': 5291,
u'lsb': 5283,
u'l#\xf8': 5279,
u'nk#': 5270,
u'#uh': 5263,
u'#hy': 5261,
u'#dv': 5258,
u'nsl': 5251,
u'jre': 5246,
u'isu': 5246,
u'\xe6ke': 5241,
u'aer': 5235,
u'enf': 5235,
u'l\xe5s': 5229,
u'rt\xe6': 5221,
u'sev': 5209,
u'#es': 5208,
u'fba': 5206,
u'mio': 5197,
u'mum': 5196,
u'bul': 5191,
u'vom': 5181,
u'mik': 5178,
u'#cy': 5176,
u'd\xe9e': 5174,
u'dio': 5169,
u'ffi': 5164,
u'dvs': 5164,
u'ovi': 5156,
u'\xf8nt': 5155,
u'eov': 5139,
u'pon': 5137,
u'rok': 5117,
u'esh': 5115,
u'jne': 5105,
u'#ue': 5102,
u'ano': 5102,
u'ace': 5102,
u'fil': 5087,
u'#uo': 5087,
u'ti\xf8': 5087,
u'\xe6kn': 5079,
u'uly': 5069,
u'rut': 5066,
u'tha': 5059,
u'mae': 5058,
u'igr': 5053,
u'kem': 5046,
u'teu': 5042,
u'#fy': 5040,
u'o#d': 5037,
u'pis': 5028,
u'fsp': 5014,
u'etu': 5010,
u'u#p': 5002,
u'vo#': 5000,
u'or\xe5': 5000,
u'pej': 4996,
u'inu': 4990,
u'\xe6gn': 4986,
u'sed': 4979,
u'rny': 4976,
u'uga': 4972,
u'etv': 4971,
u'gim': 4955,
u'vic': 4954,
u'vst': 4948,
u'd#\xf8': 4948,
u'pie': 4942,
u'afk': 4939,
u'stf': 4938,
u'mha': 4915,
u'sv#': 4914,
u'lsi': 4889,
u's#c': 4881,
u'dal': 4880,
u'bsp': 4875,
u'lly': 4875,
u'kny': 4870,
u'sus': 4860,
u'k\xf8d': 4854,
u'dtl': 4849,
u'elg': 4843,
u'sdi': 4834,
u'bs#': 4831,
u'vun': 4829,
u'sp\xe6': 4825,
u'sk\xf8': 4821,
u'sf\xf8': 4816,
u'cyp': 4801,
u'rac': 4789,
u'sue': 4785,
u'lgi': 4783,
u'geb': 4783,
u'b#m': 4782,
u'umu': 4776,
u'j\xf8m': 4770,
u'\xf8n#': 4747,
u'eub': 4726,
u'aj#': 4707,
u'tr\xf8': 4701,
u'\xf8mt': 4700,
u'ean': 4693,
u'car': 4686,
u'eg\xf8': 4682,
u'im\xf8': 4678,
u'r#\xe9': 4676,
u'ako': 4669,
u'cap': 4668,
u'rns': 4658,
u'umi': 4657,
u'kue': 4651,
u'fro': 4632,
u'stj': 4629,
u'rbl': 4627,
u'r\xf8g': 4619,
u'okt': 4615,
u'uve': 4607,
u'##g': 4604,
u'sba': 4600,
u'rn\xe6': 4598,
u'eup': 4586,
u'#\xe6g': 4586,
u'sbu': 4582,
u'did': 4576,
u'amo': 4571,
u'#th': 4562,
u'gha': 4560,
u'lfo': 4558,
u'#if': 4550,
u'dr\xe6': 4548,
u'plo': 4536,
u'oke': 4531,
u'kvo': 4529,
u'sym': 4528,
u'oss': 4520,
u'kea': 4520,
u'etp': 4518,
u'lpo': 4513,
u'tsi': 4507,
u'p#s': 4496,
u'sic': 4493,
u'rhu': 4486,
u'v#g': 4485,
u'\xe5ge': 4484,
u'sas': 4480,
u'dsh': 4476,
u'iki': 4461,
u'adf': 4459,
u'lgs': 4454,
u'gno': 4449,
u'con': 4446,
u'tko': 4438,
u'okk': 4433,
u'o#b': 4432,
u'bro': 4426,
u'ota': 4419,
u'\xe5b#': 4417,
u'nut': 4415,
u'ph\xe6': 4412,
u's\xe6d': 4409,
u'lbe': 4408,
u'bje': 4404,
u'vie': 4402,
u'roa': 4396,
u'\xe5ta': 4391,
u'g#\xe6': 4390,
u'vak': 4386,
u'sr\xe5': 4382,
u'jag': 4377,
u'rt\xf8': 4371,
u'rdt': 4371,
u'#on': 4370,
u'em\xf8': 4354,
u'oom': 4351,
u'not': 4350,
u'\xf8m\xe6': 4347,
u'sil': 4345,
u'ums': 4344,
u'udo': 4344,
u'\xf8jn': 4329,
u'lme': 4326,
u'p\xe6n': 4324,
u'mus': 4322,
u'lka': 4319,
u'ksu': 4319,
u'\xe5st': 4315,
u'dsd': 4313,
u'ino': 4287,
u'rgr': 4287,
u'\xe5ri': 4280,
u'gip': 4271,
u'#t\xf8': 4267,
u'tum': 4261,
u'ntn': 4259,
u'p#h': 4257,
u's#\xe5': 4256,
u'anf': 4256,
u'o#v': 4251,
u'geh': 4243,
u'lg\xe6': 4241,
u'rsd': 4230,
u'l#\xe6': 4226,
u'aru': 4225,
u'#wa': 4213,
u'jed': 4203,
u'cel': 4199,
u'h\xf8v': 4193,
u'udr': 4181,
u'ksa': 4180,
u'uts': 4169,
u'isc': 4168,
u'nha': 4162,
u'iot': 4156,
u'its': 4156,
u'm\xf8s': 4152,
u'eku': 4151,
u'iks': 4150,
u'nic': 4142,
u'sfu': 4141,
u'or\xf8': 4136,
u'gor': 4134,
u'nf\xf8': 4130,
u'tpr': 4128,
u'rv#': 4127,
u'afl': 4117,
u'vre': 4099,
u'ief': 4098,
u'mve': 4096,
u'kne': 4086,
u'tk#': 4086,
u'ngf': 4082,
u'ob#': 4081,
u'fn#': 4079,
u'gef': 4071,
u'v#r': 4071,
u'esb': 4071,
u'b#p': 4070,
u'at\xf8': 4067,
u'u#l': 4067,
u'due': 4058,
u'\xe5rh': 4057,
u'lby': 4054,
u'keh': 4050,
u'dv\xe6': 4049,
u'tpo': 4044,
u'osv': 4043,
u'rpo': 4034,
u'nch': 4031,
u'chi': 4030,
u'fl\xf8': 4030,
u'lea': 4029,
u'\xf8ls': 4027,
u'd#c': 4026,
u'm#\xe6': 4019,
u'sne': 4015,
u'idu': 4012,
u'rpr': 4011,
u'rby': 4010,
u'#pp': 4005,
u'#d\xe9': 4004,
u'yge': 3995,
u'ns\xe6': 3993,
u'epl': 3993,
u'can': 3989,
u'r#w': 3984,
u'arh': 3981,
u'bus': 3979,
u'f#j': 3970,
u'ybe': 3969,
u'b#e': 3966,
u'er\xe5': 3961,
u'itl': 3960,
u'ys#': 3959,
u'b#d': 3958,
u'dip': 3957,
u'f#\xf8': 3956,
u'ci#': 3955,
u'\xe5li': 3952,
u'arc': 3952,
u'vle': 3949,
u'\xf8jl': 3945,
u'syv': 3945,
u'goe': 3938,
u'lm\xe6': 3929,
u'roo': 3923,
u'etm': 3923,
u'\xf8gn': 3916,
u'lsv': 3907,
u'osk': 3905,
u'unt': 3899,
u'epe': 3892,
u'opi': 3887,
u'lau': 3870,
u'sad': 3869,
u'ake': 3865,
u'lac': 3853,
u'ptr': 3849,
u'stb': 3848,
u'fr#': 3837,
u'hul': 3830,
u'#sj': 3829,
u'b#h': 3829,
u'uba': 3822,
u'fbr': 3819,
u'uet': 3814,
u'kod': 3813,
u'tvu': 3812,
u'es\xe6': 3810,
u'eom': 3809,
u'mos': 3809,
u'rdo': 3805,
u'jou': 3801,
u'll\xe6': 3791,
u'dgr': 3790,
u'\xe6bn': 3784,
u'rtf': 3783,
u'fgh': 3782,
u'or\xe6': 3779,
u'hef': 3778,
u'eut': 3778,
u'ukl': 3769,
u'bie': 3764,
u'yd#': 3753,
u'gro': 3753,
u'mp#': 3743,
u'ias': 3742,
u'ash': 3742,
u'laf': 3736,
u'tai': 3735,
u'pr#': 3730,
u'#b#': 3728,
u'boe': 3713,
u'tva': 3709,
u'n\xe5s': 3709,
u'hor': 3703,
u's\xf8j': 3702,
u'ef#': 3693,
u'sej': 3692,
u'nap': 3687,
u'r\xe6g': 3685,
u'tau': 3681,
u'plu': 3666,
u'imm': 3665,
u'dl\xf8': 3661,
u'ekk': 3657,
u'm\xf8n': 3656,
u'kys': 3655,
u'ies': 3654,
u'vfo': 3654,
u'ly#': 3647,
u'lsp': 3646,
u'une': 3643,
u'aud': 3642,
u'mep': 3630,
u'gyl': 3629,
u'tot': 3629,
u'gsc': 3629,
u'#ii': 3615,
u'kev': 3603,
u'dly': 3598,
u'asb': 3596,
u'u##': 3594,
u'#l\xe5': 3586,
u'#\xe6l': 3583,
u'alf': 3580,
u'er\xe6': 3570,
u'ki#': 3568,
u'teo': 3565,
u'gun': 3559,
u'\xe6sk': 3556,
u'r\xe6l': 3556,
u'skv': 3555,
u'ilv': 3551,
u'yve': 3530,
u'mbu': 3529,
u'rh\xf8': 3526,
u'rfl': 3524,
u'#r\xf8': 3519,
u'isf': 3519,
u'dh\xe6': 3518,
u'lpr': 3516,
u'rru': 3514,
u'p#e': 3513,
u'l#c': 3508,
u'j#t': 3506,
u'ch#': 3506,
u'etj': 3499,
u'cho': 3496,
u'n\xf8g': 3496,
u'kv\xe6': 3490,
u'rl\xf8': 3486,
u'h\xe6r': 3477,
u'ri\xf8': 3476,
u'efl': 3472,
u'lth': 3471,
u's#y': 3471,
u'm#c': 3466,
u'nep': 3463,
u'lip': 3461,
u'tib': 3460,
u'd\xe9#': 3459,
u'ftt': 3454,
u'ez#': 3453,
u'o#\xe5': 3453,
u'd#\xe6': 3450,
u'#\xe5n': 3447,
u'ibl': 3444,
u'tbe': 3441,
u'mse': 3440,
u'y#f': 3433,
u'suv': 3432,
u'#c#': 3428,
u'l\xe5n': 3425,
u'v#n': 3422,
u'ms#': 3419,
u'u#r': 3407,
u'nsh': 3393,
u'pl#': 3393,
u'tle': 3388,
u'so#': 3385,
u'tep': 3384,
u'ack': 3382,
u'va#': 3377,
u'ykt': 3376,
u'abr': 3371,
u'upt': 3370,
u'\xf8js': 3364,
u'kuf': 3358,
u'nyd': 3353,
u'rdl': 3349,
u'ump': 3346,
u'msn': 3343,
u'of#': 3339,
u'rmn': 3339,
u'ea#': 3336,
u'#cu': 3332,
u'omo': 3332,
u'stp': 3331,
u'\xf8gl': 3329,
u'ggj': 3327,
u'pho': 3322,
u'ytr': 3321,
u'ecu': 3317,
u'tsu': 3313,
u'osl': 3312,
u'\xe5#\xf8': 3310,
u'usp': 3310,
u'tto': 3307,
u'nre': 3306,
u'daf': 3306,
u'eog': 3305,
u'\xe9er': 3302,
u'sef': 3295,
u'emk': 3292,
u'\xf8ja': 3286,
u'ck#': 3282,
u'#we': 3280,
u'taf': 3276,
u'ada': 3271,
u'seu': 3271,
u'y#o': 3267,
u'dsu': 3265,
u'oat': 3263,
u'nek': 3262,
u'#ps': 3259,
u'\xf8#o': 3255,
u'co#': 3252,
u'urm': 3248,
u'udi': 3245,
u'dtg': 3241,
u'g#\xe5': 3237,
u'r\xe6t': 3236,
u'gv\xe6': 3231,
u'non': 3230,
u'kif': 3227,
u'sur': 3224,
u'koh': 3205,
u'fab': 3204,
u'ipr': 3201,
u'koe': 3198,
u'leo': 3196,
u'eat': 3191,
u'had': 3190,
u'y#s': 3187,
u'lh\xf8': 3185,
u'ytn': 3184,
u'ydi': 3183,
u'\xe5dt': 3177,
u'\xe9r#': 3171,
u'lux': 3171,
u'vls': 3159,
u'esr': 3158,
u'ngv': 3158,
u'akr': 3144,
u'flo': 3143,
u'shi': 3138,
u'nma': 3136,
u'pul': 3133,
u'jus': 3133,
u'tg\xf8': 3129,
u'tve': 3128,
u'urn': 3127,
u'#uu': 3121,
u'irr': 3110,
u'nty': 3110,
u'\xe5dg': 3107,
u'dfa': 3105,
u'i#w': 3102,
u'\xf8rl': 3099,
u'u#n': 3098,
u'usu': 3094,
u'lss': 3091,
u'\xe6dv': 3087,
u't\xe5l': 3085,
u'tef': 3083,
u'eui': 3073,
u'agr': 3073,
u'lef': 3072,
u'xem': 3072,
u'isp': 3071,
u'kki': 3067,
u'sm\xf8': 3064,
u'ph\xf8': 3062,
u'ysi': 3062,
u'tos': 3060,
u'gos': 3058,
u'\xf8er': 3054,
u'd\xe9r': 3050,
u'smv': 3044,
u'\xe5rt': 3043,
u'#uv': 3041,
u's\xe6n': 3040,
u'edm': 3040,
u'ugo': 3035,
u'cu#': 3035,
u'nn#': 3035,
u'nos': 3033,
u'ubo': 3030,
u'yet': 3025,
u'mpi': 3021,
u'#ng': 3017,
u'\xe5l\xe6': 3017,
u'g\xe5s': 3012,
u'hyg': 3006,
u'ynt': 3006,
u'ftv': 3004,
u'iha': 3003,
u'dum': 2997,
u'zon': 2993,
u'in\xe6': 2992,
u'mom': 2992,
u'tm\xe6': 2990,
u'euf': 2989,
u'olt': 2984,
u'fth': 2979,
u'ibs': 2978,
u'nas': 2978,
u'yna': 2971,
u'l\xf8r': 2961,
u'g#y': 2959,
u'feb': 2953,
u'ftr': 2952,
u'com': 2950,
u'gsg': 2946,
u's\xf8k': 2946,
u'ugi': 2939,
u'inl': 2938,
u'#wi': 2937,
u'##r': 2936,
u'nsd': 2935,
u'llo': 2934,
u'ndp': 2933,
u'pmu': 2932,
u'big': 2931,
u'gsd': 2925,
u'dbu': 2924,
u'o#g': 2919,
u'ava': 2918,
u'nbo': 2909,
u'eol': 2904,
u'udp': 2901,
u'ilr': 2898,
u'arf': 2894,
u'o#k': 2893,
u'sri': 2892,
u'f#\xe6': 2890,
u'dyn': 2889,
u'mia': 2887,
u'b#v': 2886,
u'ydd': 2878,
u'afo': 2876,
u'vbe': 2873,
u'nef': 2873,
u'fi#': 2870,
u'dad': 2870,
u'smo': 2868,
u'col': 2855,
u'esy': 2855,
u'ife': 2853,
u'ybt': 2852,
u'omv': 2848,
u'fed': 2848,
u'thv': 2847,
u'gi\xf8': 2845,
u'ykl': 2845,
u'utv': 2839,
u'tei': 2838,
u'lor': 2832,
u'u#u': 2831,
u'kyg': 2830,
u'yen': 2829,
u'rys': 2828,
u'lbr': 2827,
u'bst': 2825,
u'bsr': 2821,
u'tta': 2820,
u'seg': 2816,
u'skj': 2810,
u'ii#': 2809,
u'ock': 2807,
u'go#': 2805,
u'apo': 2798,
u'rsb': 2796,
u'fac': 2783,
u'ifu': 2775,
u'sdo': 2775,
u'jug': 2774,
u'gue': 2768,
u'kop': 2767,
u'no#': 2759,
u'rnt': 2757,
u'sof': 2756,
u'm\xe6l': 2753,
u'ipi': 2752,
u'\xe9en': 2751,
u'\xe6mm': 2751,
u'rmt': 2751,
u'ush': 2751,
u'rn\xf8': 2746,
u'tpa': 2745,
u'#yt': 2739,
u'deo': 2736,
u'rci': 2734,
u'ebo': 2726,
u'rua': 2724,
u'rn\xe5': 2714,
u's\xe6l': 2705,
u'evo': 2704,
u'\xe6lk': 2701,
u'vsk': 2700,
u'ado': 2692,
u'he#': 2690,
u'i#\xe6': 2687,
u'beb': 2684,
u'uhe': 2683,
u'cht': 2681,
u'smy': 2680,
u'\xe5#c': 2678,
u'rpe': 2676,
u'esn': 2676,
u'dpr': 2675,
u'fai': 2673,
u'aku': 2665,
u'tav': 2663,
u'sga': 2658,
u'nea': 2654,
u'sgi': 2652,
u'oet': 2651,
u'dor': 2650,
u'\xe6rp': 2648,
u'avl': 2647,
u'lks': 2646,
u'eop': 2644,
u'etk': 2642,
u'\xe6rg': 2641,
u'npr': 2641,
u'dsn': 2640,
u'itr': 2640,
u'fsa': 2639,
u'v#l': 2639,
u'j\xf8b': 2638,
u'lgr': 2636,
u'mea': 2635,
u'#bn': 2635,
u'#ot': 2631,
u'bom': 2631,
u'oda': 2631,
u'lva': 2631,
u'sby': 2629,
u'ds\xf8': 2626,
u'sci': 2622,
u'\xe6vd': 2621,
u'cub': 2618,
u'pid': 2618,
u'jap': 2616,
u'fl\xe5': 2615,
u'uun': 2614,
u'ugu': 2613,
u'pda': 2612,
u'eun': 2611,
u'tsv': 2608,
u'l\xf8j': 2597,
u'imr': 2595,
u'aka': 2593,
u'fkl': 2592,
u'sv\xf8': 2591,
u'rf\xe6': 2588,
u'ugl': 2586,
u'cas': 2586,
u'maf': 2585,
u'cor': 2585,
u'dn\xe6': 2579,
u'\xe5lr': 2573,
u'p\xe5b': 2571,
u'ymb': 2565,
u'agi': 2562,
u'uop': 2561,
u'lda': 2558,
u'vas': 2558,
u'l\xe5d': 2555,
u'lvd': 2555,
u'ehe': 2554,
u'pur': 2553,
u'tac': 2551,
u'\xe5rb': 2544,
u'ukn': 2535,
u'kju': 2535,
u'\xe6no': 2533,
u'agh': 2532,
u'rlo': 2531,
u'\xf8en': 2529,
u'ba#': 2528,
u'c##': 2525,
u'r\xe5b': 2524,
u'br\xf8': 2522,
u'adr': 2518,
u'dpe': 2518,
u'put': 2517,
u'rgo': 2516,
u'sop': 2513,
u'un\xf8': 2513,
u'aga': 2510,
u'tic': 2510,
u'fve': 2509,
u'omd': 2509,
u'fus': 2503,
u'lyv': 2503,
u'fys': 2503,
u'ss#': 2502,
u'bsf': 2498,
u'agf': 2497,
u'osn': 2490,
u'bad': 2481,
u'kyd': 2476,
u'urk': 2475,
u'iud': 2473,
u'ed\xf8': 2473,
u'do#': 2472,
u'lbu': 2470,
u'#sh': 2469,
u'gin': 2469,
u'dol': 2466,
u'ap#': 2466,
u'rs\xe6': 2465,
u'obj': 2460,
u'\xf8nb': 2458,
u'pha': 2449,
u'sbi': 2448,
u'maa': 2447,
u'iom': 2447,
u'yl#': 2440,
u'cos': 2440,
u'ldo': 2439,
u'lsm': 2437,
u'j\xf8p': 2437,
u'oga': 2435,
u'ek\xf8': 2435,
u'iol': 2435,
u'sia': 2433,
u'vso': 2432,
u'll#': 2432,
u'f#\xe5': 2424,
u'\xf8di': 2421,
u'iaf': 2420,
u'ovb': 2418,
u'sea': 2416,
u'sho': 2414,
u'upl': 2413,
u'o#l': 2412,
u'onn': 2405,
u'fsn': 2402,
u'yot': 2401,
u'o#u': 2396,
u'wal': 2391,
u'tba': 2389,
u'urp': 2385,
u'o\xf8k': 2385,
u'lh\xe6': 2380,
u'kef': 2378,
u'uko': 2377,
u'ue#': 2375,
u'v##': 2373,
u'hem': 2371,
u'aen': 2369,
u'tfr': 2369,
u'yv#': 2367,
u'ogn': 2364,
u'k\xe6d': 2364,
u'mbr': 2359,
u'oha': 2359,
u'nad': 2359,
u'lto': 2358,
u'ymp': 2354,
u'pig': 2354,
u'lgk': 2347,
u'dry': 2347,
u'kko': 2347,
u'cil': 2338,
u'stt': 2337,
u'rr\xe5': 2333,
u'nbr': 2332,
u'bsk': 2328,
u'oin': 2321,
u'enz': 2315,
u'cef': 2314,
u'vog': 2314,
u'edh': 2307,
u'#up': 2305,
u'aha': 2302,
u'opk': 2302,
u'asa': 2296,
u'lfu': 2295,
u'eir': 2294,
u'e#w': 2293,
u'nks': 2290,
u'l\xf8d': 2289,
u'fgr': 2288,
u'np#': 2284,
u'kyo': 2276,
u'olm': 2273,
u'#\xf8e': 2273,
u'gol': 2273,
u'rm\xf8': 2273,
u'bte': 2268,
u'ail': 2265,
u'bsa': 2265,
u'it\xe9': 2265,
u'neu': 2263,
u'a#c': 2259,
u'jts': 2257,
u'mfo': 2256,
u'reh': 2253,
u'pse': 2249,
u'js#': 2246,
u'hau': 2246,
u'#ai': 2245,
u'ng\xe6': 2245,
u'fs\xe6': 2245,
u'mug': 2241,
u'gep': 2240,
u'nuk': 2236,
u'ydl': 2233,
u'ldb': 2231,
u'bnp': 2230,
u'\xe5#\xe5': 2229,
u'\xe6bt': 2227,
u'ogt': 2226,
u'cam': 2222,
u'y#t': 2220,
u'kir': 2217,
u'reu': 2215,
u'nfa': 2211,
u'mtl': 2210,
u'uis': 2207,
u'atr': 2204,
u'edg': 2202,
u'msr': 2200,
u'chw': 2199,
u'suk': 2199,
u'mol': 2198,
u'iba': 2194,
u'nna': 2191,
u'l#y': 2190,
u'dro': 2187,
u'mpu': 2186,
u'by#': 2186,
u'ydr': 2184,
u'ry#': 2183,
u'idb': 2182,
u'rgt': 2180,
u'hle': 2179,
u'y#i': 2176,
u'iag': 2175,
u'msp': 2174,
u'aba': 2173,
u'ofa': 2171,
u'bod': 2171,
u'ilm': 2171,
u'fit': 2170,
u'b\xf8d': 2169,
u'j##': 2168,
u'mhy': 2168,
u'ypt': 2168,
u'\xe5n#': 2168,
u'ty#': 2166,
u'ppi': 2164,
u'o#n': 2163,
u'flu': 2157,
u'cio': 2151,
u'lep': 2148,
u'mef': 2146,
u'ach': 2141,
u'wei': 2140,
u'sif': 2140,
u'sj\xe6': 2135,
u'kof': 2130,
u'sf\xe6': 2129,
u'fia': 2124,
u'cre': 2121,
u'nka': 2120,
u'#mc': 2119,
u'bsb': 2115,
u'oho': 2113,
u'lne': 2113,
u'v\xf8m': 2111,
u'vsu': 2110,
u'fgj': 2096,
u'sdy': 2095,
u'pba': 2093,
u'emy': 2092,
u'dk\xf8': 2092,
u'fhj': 2092,
u'dba': 2088,
u'rku': 2086,
u'mog': 2084,
u'gua': 2082,
u'sav': 2080,
u'#bs': 2077,
u'hom': 2074,
u'syr': 2072,
u'lap': 2071,
u'naf': 2068,
u'd#\xe5': 2068,
u'\xf8jr': 2066,
u'j#o': 2063,
u's\xf8e': 2060,
u'lgn': 2057,
u'aid': 2056,
u'llu': 2055,
u'bib': 2043,
u'r\xe6m': 2043,
u'but': 2042,
u'vhu': 2037,
u'\xe6gs': 2036,
u'yla': 2035,
u'sn\xe6': 2033,
u'wan': 2032,
u'rno': 2030,
u'gud': 2023,
u'pgi': 2023,
u'\xf8bi': 2020,
u'bia': 2017,
u'aas': 2015,
u'hok': 2015,
u'nak': 2015,
u'rei': 2014,
u'v\xe6b': 2012,
u'ivh': 2007,
u'kub': 2006,
u'imb': 2005,
u'una': 2004,
u'\xf8dn': 2004,
u'hu#': 2002,
u'kit': 2000,
u'#gy': 2000,
u'qui': 1999,
u'gma': 1997,
u'y#m': 1988,
u'#\xf8m': 1988,
u'ar\xe5': 1988,
u'yda': 1983,
u'eln': 1980,
u'gme': 1979,
u'#\xf8r': 1978,
u'uka': 1977,
u'\xf8rk': 1976,
u'byt': 1973,
u'geg': 1970,
u'gyp': 1969,
u't\xf8v': 1969,
u'rfu': 1968,
u'bab': 1964,
u'j\xf8v': 1956,
u'ish': 1953,
u'vse': 1952,
u'osp': 1946,
u'kob': 1946,
u'olf': 1945,
u'hai': 1941,
u'agm': 1941,
u'\xe6pp': 1937,
u'dbl': 1937,
u'aza': 1936,
u'odb': 1936,
u'hes': 1934,
u'edp': 1932,
u'j#k': 1929,
u'#ep': 1923,
u'bss': 1921,
u'lv\xe6': 1921,
u'oml': 1920,
u'pde': 1919,
u'asm': 1917,
u'ey#': 1914,
u'num': 1913,
u'lgp': 1911,
u'yel': 1909,
u'cs#': 1909,
u'bby': 1909,
u'y#a': 1907,
u'rkr': 1900,
u'ath': 1893,
u'av\xe6': 1891,
u'ps\xe6': 1890,
u'oul': 1890,
u'j#s': 1889,
u's\xf8f': 1886,
u'oak': 1884,
u'rao': 1881,
u'zim': 1879,
u'neo': 1877,
u'za#': 1875,
u'rfr': 1868,
u'ibu': 1868,
u'alr': 1866,
u'j\xf8r': 1866,
u'hon': 1864,
u'ecb': 1863,
u'p#v': 1862,
u'sru': 1859,
u'dop': 1858,
u'rvr': 1857,
u'iff': 1856,
u'sje': 1856,
u'lma': 1850,
u'#gm': 1847,
u'#zi': 1844,
u'ofo': 1843,
u'lab': 1840,
u'sh#': 1837,
u'j#i': 1836,
u'que': 1833,
u'yhe': 1831,
u'epi': 1830,
u'upe': 1830,
u'dav': 1828,
u'nia': 1827,
u'iep': 1826,
u'j#f': 1825,
u'vsa': 1824,
u'rtv': 1824,
u'nyh': 1824,
u'teh': 1823,
u'nd\xf8': 1822,
u'#h#': 1819,
u'osc': 1816,
u'lvt': 1815,
u'ceo': 1809,
u'sde': 1809,
u'#cr': 1807,
u's\xf8m': 1806,
u'keb': 1802,
u'e\xe5r': 1794,
u'chu': 1793,
u'dpu': 1793,
u'ddy': 1791,
u'eug': 1790,
u'opv': 1790,
u'sco': 1789,
u'cci': 1788,
u'kna': 1787,
u'ic#': 1787,
u'tn\xe6': 1785,
u'gaz': 1781,
u'ovs': 1774,
u'aor': 1774,
u'k\xe6b': 1774,
u'gad': 1772,
u'j\xf8s': 1770,
u'rsm': 1767,
u'ipa': 1765,
u'new': 1765,
u'iln': 1759,
u'uso': 1759,
u'rfe': 1754,
u'bsm': 1752,
u'gto': 1752,
u'exi': 1752,
u'\xe6fe': 1752,
u'bsl': 1750,
u'gui': 1749,
u'lof': 1747,
u'al\xf8': 1746,
u'svu': 1744,
u'#e\xf8': 1740,
u'lai': 1740,
u'tch': 1738,
u'gtf': 1736,
u'yds': 1734,
u'b#b': 1734,
u'lyn': 1731,
u'ues': 1730,
u'kha': 1726,
u'#ei': 1722,
u'vbr': 1722,
u'aur': 1721,
u'mma': 1720,
u'rm\xe6': 1712,
u'#t\xe5': 1712,
u'#wo': 1710,
u'chr': 1710,
u'ew#': 1710,
u'cid': 1709,
u'vac': 1709,
u'p#n': 1709,
u'obr': 1707,
u'tg\xe5': 1707,
u'f\xe6s': 1706,
u'nev': 1705,
u'k#\xf8': 1702,
u'reo': 1702,
u'add': 1701,
u'yng': 1701,
u'dlo': 1700,
u'vov': 1699,
u'cyk': 1698,
u'#bj': 1696,
u'kry': 1694,
u'y#d': 1691,
u'j#m': 1688,
u'mur': 1688,
u'y#b': 1687,
u'vss': 1682,
u'hwe': 1678,
u'd#\xe9': 1677,
u'ley': 1674,
u'lak': 1667,
u'ape': 1665,
u'dvo': 1663,
u'm\xe6g': 1662,
u'mvi': 1662,
u'bl\xf8': 1659,
u'fpr': 1658,
u'poi': 1658,
u'stv': 1657,
u'abw': 1656,
u'eiz': 1654,
u'tpe': 1653,
u'p\xe5k': 1651,
u'h\xe6m': 1650,
u'lug': 1650,
u'ejn': 1650,
u'nly': 1649,
u'j\xf8u': 1648,
u'lm#': 1647,
u'uvi': 1646,
u'uls': 1644,
u'mai': 1643,
u'keg': 1642,
u'nl#': 1641,
u'usg': 1634,
u'kl\xf8': 1633,
u'\xf8fa': 1632,
u'y#e': 1631,
u'lo#': 1630,
u'ago': 1630,
u'tae': 1630,
u'tmo': 1629,
u'gpr': 1628,
u'ek#': 1628,
u'afd': 1626,
u'f#w': 1626,
u'hne': 1625,
u'amr': 1622,
u'oed': 1617,
u'ypr': 1616,
u'dfr': 1615,
u'\xe5#\xe6': 1615,
u'lsf': 1615,
u'dee': 1614,
u'ivf': 1612,
u'urb': 1612,
u'em\xe5': 1611,
u'\xf8ud': 1605,
u'rtm': 1604,
u'agu': 1604,
u'eek': 1602,
u'arp': 1602,
u'ckl': 1601,
u'iro': 1599,
u'oya': 1599,
u'ex#': 1596,
u'uov': 1592,
u'aug': 1590,
u'chl': 1590,
u'esd': 1587,
u'gus': 1586,
u'cke': 1585,
u'bwe': 1584,
u'\xe6rh': 1584,
u'rec': 1584,
u'ubb': 1581,
u'\xe6ne': 1579,
u'coe': 1579,
u'psy': 1576,
u'loy': 1575,
u'tmi': 1575,
u'ual': 1575,
u'msb': 1575,
u'k#j': 1574,
u'rvu': 1573,
u'pl\xf8': 1571,
u'\xf8mu': 1570,
u'b\xe5n': 1570,
u'ovj': 1569,
u'kho': 1568,
u'act': 1568,
u'sl\xe6': 1568,
u'ims': 1567,
u'dha': 1567,
u'kru': 1565,
u'wie': 1564,
u'tsy': 1562,
u'vje': 1561,
u'hyp': 1561,
u'gkr': 1561,
u'gov': 1560,
u'xic': 1559,
u'\xe6rf': 1558,
u'ldf': 1557,
u'gmo': 1557,
u'gib': 1557,
u'lmo': 1557,
u'iby': 1556,
u'toe': 1555,
u'gsh': 1554,
u'ln\xe6': 1554,
u'on\xe6': 1553,
u'we#': 1552,
u'nul': 1552,
u'vsm': 1547,
u'hei': 1546,
u'yrr': 1544,
u'e#\xe9': 1544,
u'tgr': 1544,
u'ul\xe6': 1540,
u'mgi': 1540,
u'niu': 1539,
u'ib#': 1536,
u'rpa': 1536,
u'nfi': 1536,
u'dun': 1533,
u'cet': 1530,
u'l#\xe5': 1527,
u'pni': 1526,
u'umf': 1525,
u'e\xf8f': 1524,
u'ps#': 1521,
u'kau': 1513,
u'gsn': 1513,
u's\xe5f': 1513,
u'ajo': 1513,
u'rsf': 1512,
u'\xe5fr': 1512,
u'tbi': 1511,
u'gpe': 1509,
u'afp': 1509,
u'lfa': 1508,
u'ff\xf8': 1508,
u'ou#': 1508,
u'j#d': 1506,
u'yal': 1506,
u'mic': 1505,
u'upa': 1504,
u'us\xe6': 1502,
u'psp': 1501,
u'#t#': 1501,
u'sca': 1500,
u'o##': 1500,
u'ub#': 1499,
u'chm': 1497,
u'r\xf8s': 1497,
u'rth': 1497,
u'pop': 1494,
u'uru': 1489,
u'oth': 1489,
u'tap': 1485,
u'th#': 1483,
u'hag': 1482,
u'y#h': 1482,
u'mir': 1480,
u'b#k': 1479,
u'\xf8f#': 1476,
u'lgo': 1472,
u'ngn': 1470,
u'ovf': 1469,
u'bs\xe5': 1468,
u'gva': 1468,
u'out': 1466,
u'ico': 1465,
u'ebu': 1464,
u'onb': 1462,
u'vsr': 1461,
u'jr#': 1461,
u't\xe9#': 1460,
u'svo': 1460,
u'sei': 1456,
u'atf': 1450,
u'efu': 1444,
u'tsr': 1443,
u'#ph': 1441,
u'#pt': 1441,
u'uit': 1440,
u'mue': 1439,
u'kek': 1438,
u'esc': 1437,
u'nh\xf8': 1436,
u'leb': 1436,
u'ido': 1430,
u'\xf8sl': 1426,
u'noc': 1424,
u'jdn': 1422,
u'#za': 1421,
u'\xe8ve': 1421,
u'cat': 1420,
u'\xf8lv': 1419,
u'efd': 1419,
u'im#': 1418,
u'#ia': 1418,
u'dru': 1417,
u'n\xe8v': 1416,
u'ppa': 1416,
u'en\xe8': 1416,
u's\xf8s': 1414,
u'n#w': 1413,
u'hre': 1410,
u'iz#': 1405,
u'iov': 1405,
u'o#r': 1404,
u'sau': 1404,
u'mka': 1402,
u'tma': 1401,
u'\xf8nd': 1401,
u'gid': 1398,
u'sdr': 1398,
u'war': 1397,
u'vkr': 1396,
u'dub': 1396,
u'ait': 1395,
u'kig': 1395,
u'yan': 1394,
u'lur': 1394,
u'hyl': 1391,
u'abu': 1390,
u'syk': 1389,
u'hyk': 1388,
u'ceg': 1387,
u'sfl': 1387,
u'upr': 1386,
u'ha#': 1385,
u'cla': 1385,
u'\xe5kr': 1385,
u'k#c': 1383,
u'toc': 1383,
u'ho#': 1382,
u'm#y': 1378,
u'eib': 1376,
u'ely': 1376,
u'otl': 1376,
u'rdk': 1375,
u'\xe6gl': 1374,
u'ubu': 1372,
u'lun': 1363,
u'ldk': 1360,
u'akh': 1359,
u'tt#': 1356,
u'\xf8om': 1356,
u'dea': 1356,
u'leu': 1355,
u'iob': 1355,
u'#ui': 1354,
u'bow': 1353,
u'stg': 1352,
u'leh': 1351,
u'en\xe6': 1351,
u'muk': 1348,
u'tip': 1348,
u'enm': 1347,
u'opu': 1346,
u'upo': 1345,
u'\xe6rn': 1344,
u'vsp': 1342,
u'meb': 1340,
u're\xe5': 1340,
u'auf': 1339,
u'fos': 1339,
u'vmo': 1336,
u'a#\xf8': 1335,
u'k\xe6l': 1335,
u'vns': 1335,
u'nme': 1334,
u'iop': 1334,
u'irm': 1333,
u'bi#': 1333,
u'lsr': 1332,
u'gia': 1330,
u'wat': 1329,
u'bsu': 1329,
u'\xe6mi': 1328,
u'rmp': 1325,
u'amv': 1322,
u'sly': 1322,
u'gko': 1322,
u'ead': 1321,
u'ro\xf8': 1317,
u'nau': 1317,
u'lvm': 1317,
u'hto': 1316,
u'lri': 1315,
u'j\xf8o': 1315,
u'j#h': 1313,
u'eer': 1311,
u'odv': 1309,
u'aet': 1308,
u'deu': 1308,
u'cb#': 1306,
u'tir': 1305,
u'\xe6le': 1304,
u'mo#': 1304,
u'mex': 1303,
u'aul': 1302,
u'nys': 1302,
u'ovp': 1299,
u'fs#': 1298,
u'iin': 1298,
u'dhj': 1298,
u'y#p': 1297,
u'nsc': 1296,
u't\xf8m': 1295,
u'stm': 1295,
u'uhy': 1295,
u'enu': 1294,
u'y#v': 1293,
u'dts': 1293,
u'neb': 1292,
u'anh': 1290,
u'eon': 1289,
u'tov': 1288,
u'rco': 1288,
u'##\xe6': 1288,
u'yko': 1286,
u'ypp': 1282,
u'orc': 1281,
u'd\xf8s': 1280,
u'n\xe6p': 1280,
u'#cl': 1279,
u'y#k': 1279,
u'#aj': 1276,
u'd#y': 1276,
u'ieb': 1273,
u'ibr': 1272,
u'sah': 1271,
u'iri': 1270,
u'nob': 1270,
u'pfi': 1269,
u'j\xe6v': 1269,
u'eac': 1269,
u'd\xe6m': 1268,
u'tna': 1267,
u'pea': 1267,
u'\xf8po': 1267,
u'heu': 1266,
u'gyd': 1264,
u'och': 1264,
u'cco': 1264,
u'eam': 1262,
u'rou': 1261,
u'hyr': 1261,
u'tea': 1261,
u'vpr': 1259,
u'row': 1259,
u'tdi': 1259,
u'ivr': 1257,
u'joh': 1257,
u'cal': 1256,
u'het': 1255,
u'fdo': 1253,
u'eas': 1252,
u'doh': 1252,
u'pdr': 1250,
u'u#j': 1248,
u'fka': 1245,
u'#sw': 1244,
u'usd': 1242,
u'cir': 1240,
u'ehj': 1240,
u'nki': 1239,
u'nil': 1233,
u'\xf8#f': 1232,
u'\xe6rv': 1232,
u'p#b': 1232,
u'isv': 1228,
u'hmi': 1228,
u'osf': 1227,
u'uud': 1226,
u'mtv': 1226,
u'r\xe5l': 1226,
u'j#e': 1225,
u'iem': 1224,
u'hma': 1223,
u'\xf8m#': 1222,
u'h#o': 1221,
u'cou': 1220,
u'\xf8fo': 1219,
u'bei': 1219,
u'sht': 1219,
u'dgy': 1213,
u'lei': 1213,
u'bo#': 1212,
u'b##': 1212,
u'atm': 1211,
u'hju': 1209,
u'tex': 1209,
u'pou': 1208,
u'asu': 1208,
u'nez': 1208,
u'tsh': 1207,
u'gab': 1206,
u'b\xf8l': 1205,
u'hug': 1204,
u'dpa': 1204,
u'ivk': 1203,
u'nib': 1201,
u'ejt': 1200,
u'cot': 1199,
u'hab': 1198,
u'ioa': 1198,
u'os\xe6': 1197,
u'rg\xe5': 1196,
u'sk\xe5': 1194,
u'#p\xe6': 1193,
u'\xe6l#': 1192,
u'itv': 1191,
u'osy': 1188,
u'nag': 1188,
u'giu': 1187,
u'dma': 1187,
u'yrt': 1186,
u'haa': 1184,
u'oxi': 1183,
u'cqu': 1182,
u'ckh': 1180,
u'rtp': 1178,
u'j\xf8k': 1175,
u'acq': 1175,
u'ul\xf8': 1174,
u'web': 1171,
u'ldl': 1166,
u'iam': 1166,
u'nva': 1165,
u'gda': 1165,
u'rg\xe6': 1164,
u'jtr': 1163,
u'e#z': 1161,
u'ivb': 1159,
u'kao': 1156,
u'lvb': 1156,
u'mac': 1155,
u'ksk': 1155,
u'#yo': 1153,
u'vev': 1150,
u'ity': 1149,
u'iku': 1149,
u'jin': 1148,
u'\xe9#o': 1145,
u'mgr': 1144,
u'#ed': 1142,
u'pva': 1141,
u'oly': 1140,
u'pau': 1140,
u'dos': 1139,
u'gtb': 1139,
u'dap': 1139,
u'eij': 1136,
u'gtt': 1134,
u'gom': 1134,
u'hn#': 1133,
u'map': 1133,
u'oem': 1132,
u'#qu': 1131,
u'lou': 1131,
u'aos': 1131,
u'poe': 1129,
u'ous': 1129,
u'obo': 1128,
u'esg': 1128,
u'tsn': 1127,
u'#wh': 1126,
u'yor': 1124,
u'ovr': 1121,
u'ig\xf8': 1121,
u'j#v': 1119,
u'#ih': 1118,
u'asc': 1118,
u'lz#': 1117,
u'\xe5#y': 1116,
u'n#\xe9': 1116,
u'#vr': 1115,
u'fl#': 1114,
u'jte': 1113,
u'hie': 1112,
u'osa': 1111,
u'r\xf6m': 1111,
u'fyr': 1111,
u'tr\xf6': 1110,
u'tde': 1110,
u'ai#': 1108,
u'tkr': 1107,
u'nip': 1105,
u'vsd': 1104,
u'lls': 1103,
u'jac': 1102,
u'diu': 1102,
u'uom': 1101,
u'd\xe9t': 1101,
u'\xe6lv': 1098,
u'k\xf8l': 1098,
u'dsj': 1097,
u'tou': 1096,
u'amn': 1094,
u'jlt': 1092,
u'\xf8dd': 1090,
u'vsf': 1089,
u'\xe5la': 1089,
u'\xe5ga': 1087,
u's\xe5g': 1086,
u'rdb': 1086,
u'von': 1085,
u'ohn': 1085,
u'h\xf8s': 1085,
u'ulz': 1085,
u'rss': 1083,
u'ois': 1083,
u'dtv': 1082,
u'oal': 1082,
u'\xf8dh': 1082,
u'i#y': 1081,
u's\xf8l': 1080,
u'dt\xf8': 1076,
u'rui': 1074,
u'ebs': 1074,
u'shj': 1073,
u'etb': 1072,
u'p\xe6d': 1070,
u'y#r': 1069,
u'rft': 1067,
u'enp': 1067,
u'avt': 1067,
u'gek': 1066,
u'rk\xe6': 1065,
u'lae': 1065,
u'ef\xe6': 1064,
u'nou': 1063,
u'yll': 1063,
u't#w': 1062,
u'psa': 1062,
u'gs\xe6': 1062,
u'j\xf8f': 1060,
u'cod': 1060,
u'u#w': 1059,
u'kts': 1059,
u'j\xf8a': 1058,
u'tmy': 1056,
u'mty': 1055,
u'rtl': 1055,
u'iwa': 1054,
u'\xf8bs': 1053,
u'fur': 1051,
u'fl\xe6': 1050,
u'sfi': 1049,
u'#zo': 1049,
u'ypi': 1049,
u'tul': 1048,
u'htt': 1047,
u'nud': 1047,
u'ehn': 1046,
u'vos': 1044,
u's\xf8n': 1044,
u'anp': 1044,
u'gea': 1043,
u'glg': 1043,
u'k\xe6v': 1043,
u'm\xf8r': 1043,
u'avo': 1043,
u'lmu': 1042,
u'wsk': 1042,
u'iji': 1041,
u'pef': 1041,
u'igu': 1041,
u'koa': 1040,
u'ylp': 1038,
u'dfi': 1036,
u'si#': 1036,
u'ttj': 1036,
u'rbo': 1035,
u'aiw': 1035,
u'v#\xe5': 1035,
u'dmy': 1035,
u'v\xe5r': 1034,
u'mfr': 1034,
u'\xf8dr': 1032,
u'pto': 1031,
u'pkr': 1028,
u'odo': 1028,
u'#g#': 1027,
u'hea': 1026,
u'tyn': 1025,
u'ows': 1024,
u'azi': 1023,
u'tbr': 1023,
u'yd\xf8': 1022,
u'omn': 1021,
u'jos': 1021,
u'i#z': 1020,
u'\xf6m#': 1019,
u'io\xf8': 1019,
u'\xf8bn': 1019,
u'z#f': 1019,
u'oun': 1017,
u'pus': 1016,
u'ssu': 1013,
u'yvn': 1011,
u'j\xf8d': 1010,
u'rah': 1010,
u'ony': 1008,
u'him': 1008,
u'tfu': 1005,
u'isd': 1005,
u'du#': 1005,
u'p\xe6l': 1004,
u'elp': 1004,
u'mfl': 1003,
u'nsg': 1002,
u'rey': 1002,
u'ay#': 999,
u'b\xf8n': 997,
u'fav': 997,
u'hj\xf8': 996,
u'\xe5lm': 995,
u'#oe': 994,
u'p##': 991,
u'mud': 990,
u'gej': 989,
u'ots': 989,
u'otu': 989,
u'd#w': 988,
u'hiv': 987,
u'ldg': 986,
u'mcc': 985,
u'jni': 984,
u'r\xf8b': 984,
u'ipu': 984,
u'uig': 983,
u'mso': 983,
u'nt\xe6': 982,
u'ikp': 982,
u'nus': 982,
u'b\xf8j': 980,
u'myr': 979,
u'kr\xf8': 978,
u'#az': 978,
u'inr': 977,
u'#sr': 975,
u'#mf': 974,
u'jla': 972,
u'euk': 970,
u'gta': 970,
u'po#': 970,
u'mde': 970,
u'v#j': 970,
u'ied': 969,
u'ick': 969,
u'aph': 968,
u'tf\xf8': 967,
u'who': 966,
u'lex': 966,
u'fga': 965,
u'osm': 963,
u'ml\xf8': 963,
u't#\xe9': 961,
u'iii': 961,
u'\xe5#\xe9': 960,
u'moe': 960,
u'#j\xe6': 959,
u'oec': 959,
u't\xe6d': 958,
u'gon': 958,
u'lym': 955,
u'iox': 955,
u'eua': 954,
u'bl\xe5': 954,
u'idv': 953,
u'ht#': 951,
u'rk\xf8': 949,
u'#yn': 949,
u'cks': 949,
u'dak': 947,
u'#oo': 945,
u'zis': 945,
u'b#u': 945,
u'z#d': 945,
u'usc': 945,
u'ecd': 944,
u'ahe': 943,
u'wee': 942,
u'psi': 942,
u'#b\xf6': 940,
u'#ic': 939,
u'lub': 939,
u'hat': 937,
u'sno': 937,
u'ngk': 935,
u'ipe': 935,
u'up#': 934,
u'oud': 932,
u'yls': 931,
u'\xf8lt': 931,
u'm\xe5e': 928,
u'j#a': 925,
u'ozo': 924,
u'c#o': 923,
u'pi#': 923,
u'was': 922,
u'inp': 921,
u'yon': 921,
u'dja': 920,
u'ltp': 920,
u'ary': 919,
u'eo#': 918,
u'nai': 918,
u'jea': 917,
u'f#y': 917,
u'mou': 916,
u'sp#': 915,
u'rtg': 914,
u'aar': 912,
u'eim': 911,
u'yra': 910,
u'fde': 910,
u'ulp': 910,
u'eck': 909,
u'rfi': 908,
u'lms': 906,
u'p#k': 904,
u'mem': 903,
u'lv\xe5': 903,
u'aag': 901,
u'oel': 901,
u'hs#': 901,
u'\xe6rb': 901,
u'ktf': 901,
u'fko': 900,
u'otp': 899,
u'tz#': 898,
u'how': 898,
u'rez': 897,
u'rsr': 896,
u'eki': 896,
u'jar': 895,
u'\xf8gs': 895,
u'nju': 895,
u'gka': 895,
u'dfy': 894,
u'u#c': 894,
u'y#g': 893,
u'apn': 892,
u'lah': 892,
u'r\xe6#': 892,
u'ac#': 889,
u'j#p': 887,
u'ynn': 887,
u'nz#': 885,
u'afa': 885,
u'nza': 884,
u'fug': 884,
u'k\xe6n': 883,
u'fv\xe6': 880,
u'#g\xf6': 880,
u'bsv': 880,
u'tl\xf8': 880,
u'p#u': 880,
u'p\xe5h': 880,
u'rly': 879,
u'ao#': 879,
u'gil': 878,
u'eye': 877,
u'unf': 877,
u'lum': 877,
u'mfi': 877,
u'#ib': 876,
u'gho': 876,
u'hir': 876,
u'\xe5tv': 874,
u'\xf3n#': 873,
u'mda': 873,
u'okl': 873,
u'nue': 873,
u'nr\xe5': 872,
u'ff#': 870,
u'm\xe5r': 870,
u'das': 870,
u'oca': 870,
u'arz': 870,
u'\xf8nm': 869,
u'pl\xe6': 868,
u'il\xe6': 866,
u'ntv': 863,
u'ydk': 861,
u'aca': 861,
u'ffo': 860,
u'ldm': 860,
u'\xf8pr': 860,
u'mv\xe5': 859,
u'\xe6lt': 857,
u'udh': 857,
u'mv#': 856,
u'ah#': 856,
u'ulm': 855,
u'mpt': 853,
u'dna': 852,
u'tev': 852,
u'imf': 851,
u'b\xf8g': 850,
u'r\xe5v': 850,
u'nn\xe6': 850,
u'aek': 849,
u'umo': 849,
u'tkv': 849,
u'o#c': 848,
u'cns': 848,
u't\xe5s': 846,
u'kik': 846,
u'mam': 845,
u'ks\xf8': 844,
u'\xe5va': 843,
u'gir': 843,
u'ios': 842,
u'h#s': 841,
u'phe': 840,
u'gfr': 838,
u'nid': 837,
u'z#o': 836,
u'hop': 835,
u'y#l': 835,
u'aun': 833,
u'vhe': 833,
u'akl': 833,
u'l\xf8k': 831,
u'#ts': 831,
u'\xe6dr': 828,
u'kee': 828,
u'#ex': 828,
u'w#y': 824,
u'mau': 824,
u'\xe9#a': 824,
u'osu': 822,
u'iru': 821,
u'sja': 821,
u'etd': 821,
u'p\xe5f': 821,
u'yrs': 819,
u'oaf': 819,
u'ebi': 819,
u'km#': 818,
u'dme': 818,
u'dei': 818,
u'onp': 817,
u'tv#': 817,
u'euu': 817,
u'gau': 816,
u'kog': 816,
u'ksn': 815,
u'sac': 814,
u'ntd': 814,
u'yis': 813,
u'tub': 813,
u'shm': 811,
u'pad': 811,
u'bea': 810,
u'fie': 809,
u'c#s': 809,
u'jti': 805,
u'\xe6dn': 804,
u'mut': 804,
u'f\xe6g': 803,
u'koz': 803,
u'vsy': 802,
u'mru': 800,
u'wor': 800,
u'thu': 800,
u'skm': 799,
u'igo': 797,
u's#w': 795,
u'ev\xe5': 795,
u'baj': 795,
u'cea': 794,
u'cca': 794,
u'ejk': 794,
u'#km': 793,
u'lay': 792,
u'lb\xf8': 792,
u'o#j': 791,
u'lal': 790,
u'uth': 789,
u'hri': 788,
u'lgm': 786,
u'beu': 786,
u'ood': 786,
u'sh\xe5': 784,
u'ajd': 783,
u'g\xf6t': 779,
u'swo': 779,
u'\xf6te': 777,
u'l\xe6k': 777,
u'pbe': 776,
u'fig': 775,
u'#j\xf8': 775,
u'odj': 775,
u'ryo': 773,
u'bde': 772,
u'wob': 771,
u'jal': 770,
u'lho': 770,
u'onm': 769,
u'f\xe6d': 768,
u'\xf8tr': 767,
u'oua': 767,
u'ney': 767,
u'\xe5hv': 766,
u'nze': 765,
u'gur': 765,
u'lhj': 765,
u'ow#': 765,
u'psv': 764,
u'eco': 763,
u'rch': 762,
u'lee': 761,
u'vmi': 761,
u'ksf': 761,
u'owa': 760,
u'i#\xe9': 759,
u'b#n': 759,
u'iqu': 758,
u'ds\xe5': 758,
u'kid': 756,
u'nei': 756,
u'om\xe9': 755,
u'zen': 753,
u'ppr': 753,
u'kei': 752,
u'dtu': 752,
u'byl': 752,
u'rze': 751,
u'rtk': 751,
u'mab': 750,
u'naz': 749,
u'dm\xf8': 748,
u'ikv': 748,
u'gog': 747,
u'onj': 746,
u'tme': 746,
u'id\xf8': 746,
u'owi': 746,
u'obu': 745,
u'g#w': 745,
u'myg': 744,
u'tsc': 743,
u'avm': 743,
u'kak': 742,
u'keo': 742,
u'seb': 742,
u'lop': 741,
u'ubr': 741,
u'b#g': 741,
u'byi': 741,
u'ceb': 740,
u'nck': 740,
u'sod': 738,
u'uor': 737,
u'cev': 737,
u'ued': 737,
u'kfo': 735,
u'gba': 735,
u'tka': 735,
u'xid': 735,
u'bot': 734,
u'dud': 734,
u'\xe6di': 733,
u'emu': 732,
u'jf#': 732,
u'ydt': 732,
u'an\xf8': 732,
u'sfe': 731,
u'hik': 731,
u'nif': 730,
u'eja': 730,
u'cra': 729,
u'dkv': 729,
u'#u\xf8': 728,
u'akv': 728,
u'vra': 728,
u'ool': 727,
u'#kh': 727,
u'oby': 726,
u'ldu': 726,
u'ad\xf8': 726,
u'mf#': 726,
u'\xf8mn': 725,
u'sv\xe5': 723,
u'eb\xf8': 722,
u'#jf': 721,
u'sua': 720,
u'thy': 720,
u'hoc': 719,
u'thi': 718,
u'#uc': 718,
u'nee': 718,
u'eis': 716,
u'apl': 714,
u'odr': 714,
u'jke': 713,
u'btg': 713,
u'#gd': 712,
u'\xe6so': 712,
u'meu': 711,
u't\xe9e': 710,
u'ydm': 710,
u'dil': 709,
u'ya#': 708,
u'a#w': 707,
u'm#w': 707,
u's\xf8t': 707,
u'wes': 706,
u'dh\xf8': 706,
u'dez': 706,
u'mby': 704,
u'uke': 704,
u'\xe1na': 704,
u'uch': 703,
u'drg': 703,
u'lez': 703,
u'anz': 703,
u'nbu': 703,
u'#cs': 702,
u't\xe1n': 702,
u'nya': 702,
u'h#f': 702,
u'h#h': 702,
u'msy': 701,
u'nt\xe1': 700,
u'\xe6ts': 699,
u'rwa': 699,
u'yki': 698,
u'nnu': 698,
u'ieu': 697,
u'\xe6ce': 694,
u'twa': 694,
u'rsu': 693,
u'cig': 692,
u'ftk': 692,
u'zan': 692,
u'ays': 691,
u'alz': 691,
u'd\xe6r': 690,
u'sex': 690,
u'#u\xe6': 688,
u'gmu': 687,
u'hit': 687,
u'gty': 686,
u'jam': 684,
u'win': 684,
u'onu': 683,
u'dm\xe5': 683,
u'r#z': 683,
u'vsv': 682,
u'gbe': 682,
u'awa': 681,
u'vsh': 680,
u'ius': 680,
u'scr': 679,
u'ngu': 679,
u'rau': 679,
u'usb': 679,
u'#cc': 678,
u'wit': 677,
u'roi': 677,
u'zue': 677,
u'nui': 676,
u'eil': 675,
u'omu': 674,
u'dgj': 673,
u'ufa': 673,
u'dtj': 673,
u'u\xf8n': 673,
u'lr\xe5': 672,
u'inm': 672,
u'ery': 672,
u'kup': 672,
u'\xe5tr': 671,
u'sok': 670,
u'nra': 670,
u'cle': 670,
u'fma': 669,
u'urv': 669,
u'zin': 668,
u'eps': 668,
u'kok': 668,
u'ezu': 667,
u'lsh': 667,
u'nd\xe6': 667,
u'y#u': 667,
u'rry': 667,
u'skt': 666,
u'ky#': 665,
u'ais': 664,
u'cab': 664,
u'itn': 664,
u'tyv': 664,
u'iej': 663,
u'kka': 663,
u'se\xf8': 662,
u'zem': 660,
u'ght': 660,
u'asf': 660,
u'rul': 660,
u'mdr': 659,
u'agk': 659,
u'o#\xe6': 658,
u'\xe6vh': 658,
u'dlu': 658,
u'atk': 658,
u'jka': 658,
u'hbe': 657,
u'\xf8vn': 657,
u'yom': 657,
u'aje': 657,
u'unc': 656,
u'uf#': 656,
u'rps': 656,
u'byo': 656,
u'evt': 654,
u'aya': 653,
u'odf': 653,
u'ftw': 652,
u'cbs': 652,
u'mce': 652,
u'sir': 651,
u'pr\xe5': 651,
u'oos': 649,
u'rr\xe6': 649,
u'kba': 649,
u'lvr': 649,
u'mg\xe6': 648,
u'gob': 648,
u'cur': 647,
u'jev': 647,
u'j\xf8l': 647,
u'sec': 647,
u'tsd': 645,
u'tee': 645,
u'sjo': 644,
u'#ah': 644,
u'iab': 643,
u'yss': 643,
u'lkr': 642,
u'kah': 641,
u't\xe6p': 641,
u'azo': 640,
u'kma': 640,
u'ygi': 640,
u'ogm': 640,
u'e\xf8d': 639,
u'ngh': 638,
u'ogo': 638,
u'rtt': 637,
u'gr\xe5': 637,
u'u#\xf8': 636,
u'bha': 636,
u'iek': 635,
u'os\xe9': 634,
u'v#\xf8': 634,
u'phi': 633,
u'ox#': 633,
u'sg\xe6': 632,
u'umr': 632,
u'itk': 631,
u'p#g': 630,
u'\xe5t\xe6': 630,
u'l#w': 630,
u'beo': 630,
u'sro': 630,
u'knu': 629,
u'imt': 629,
u'p\xe6r': 628,
u'r\xe5s': 628,
u'bug': 627,
u'amy': 625,
u'aps': 625,
u'gii': 624,
u'tpl': 624,
u'rii': 624,
u'#cn': 622,
u'r\xf8j': 621,
u'oju': 620,
u'psu': 620,
u'iak': 620,
u'rkb': 620,
u'sd#': 620,
u'ct#': 619,
u'lul': 619,
u'msf': 619,
u'r\xf3n': 618,
u'ovt': 618,
u'uy#': 617,
u'olb': 617,
u'dou': 617,
u'olp': 616,
u'khe': 616,
u'ybd': 616,
u'yes': 615,
u'hec': 615,
u'll\xf8': 615,
u'itz': 615,
u'n\xf8v': 614,
u'zio': 614,
u'eeu': 613,
u'eys': 613,
u'dhu': 613,
u'yb#': 613,
u'\xf8bt': 612,
u'eot': 612,
u'\xe5nt': 610,
u'qua': 609,
u'loc': 608,
u'aes': 607,
u'kye': 607,
u'atc': 607,
u'exa': 607,
u'buk': 607,
u'aaf': 606,
u'dkr': 606,
u'#lt': 606,
u'\xf8lo': 606,
u'ojk': 604,
u'atp': 604,
u'ktl': 604,
u'nym': 604,
u'amc': 603,
u'\xf8#s': 603,
u'cto': 603,
u'dys': 603,
u'ffr': 602,
u'rgm': 602,
u'bac': 602,
u'efs': 601,
u'ssv': 600,
u'z#m': 598,
u'#gn': 596,
u'psk': 596,
u'tfi': 596,
u'tts': 596,
u'sn\xf8': 595,
u'sos': 594,
u'z#s': 594,
u'fr\xf8': 593,
u'#gs': 593,
u'mej': 593,
u'iik': 593,
u'uho': 593,
u'skc': 593,
u'inh': 592,
u'nru': 591,
u'dr#': 591,
u'whi': 591,
u'cau': 590,
u'\xe6ds': 590,
u'hof': 590,
u'ulv': 590,
u'bue': 590,
u'#nl': 589,
u'lni': 589,
u'#ji': 588,
u'ktp': 588,
u'mof': 588,
u'dy#': 588,
u'ih\xe6': 587,
u'kcy': 587,
u'nf\xe6': 587,
u'fsi': 586,
u'ips': 586,
u'hi#': 585,
u'avr': 585,
u'inb': 584,
u'\xe6sn': 584,
u'iae': 583,
u'aja': 583,
u'nzi': 582,
u'uks': 582,
u'bsd': 580,
u'lii': 579,
u'cop': 578,
u'en\xe5': 577,
u'miu': 576,
u'toa': 575,
u'nm\xe5': 575,
u'tgj': 574,
u'ufr': 574,
u'oh\xe6': 573,
u'my#': 573,
u'avp': 571,
u'l\xf8g': 570,
u'pio': 570,
u'c\xfan': 569,
u'nc\xfa': 569,
u'lfl': 569,
u'm#\xe9': 567,
u'umb': 567,
u'g\xe6s': 566,
u'unh': 566,
u'\xfan#': 566,
u'kpr': 566,
u'ozy': 565,
u'\xe6vr': 565,
u'#ye': 565,
u'afm': 565,
u'emd': 564,
u'adu': 564,
u'##\xf8': 564,
u'#ou': 561,
u'nco': 561,
u'veb': 561,
u'coh': 559,
u'kku': 559,
u'msg': 558,
u'uad': 557,
u'rsg': 556,
u'rty': 556,
u'dpl': 556,
u'ivo': 555,
u'\xf8sp': 555,
u'emv': 554,
u'hot': 554,
u'\xe6n#': 554,
u'\xe5f\xf8': 553,
u'iec': 552,
u'rvo': 552,
u'\xf8nf': 551,
u'ydh': 550,
u'bav': 550,
u'\xe9s#': 549,
u'swi': 549,
u'ufu': 549,
u'nns': 547,
u'aiv': 547,
u'oja': 546,
u'iog': 546,
u'zhe': 545,
u'uij': 545,
u'ody': 545,
u'lgf': 544,
u'\xf8jk': 544,
u'vai': 544,
u'xin': 544,
u'vef': 543,
u'blu': 543,
u'kbe': 543,
u'zor': 541,
u'\xf6ge': 541,
u'ceu': 541,
u'gth': 541,
u'\xe6ra': 541,
u'j\xf8h': 540,
u'ijt': 540,
u'ldp': 540,
u'ahm': 540,
u'ugb': 539,
u'tvs': 539,
u'vte': 539,
u'wil': 538,
u'z#h': 537,
u'ynk': 537,
u'ugg': 536,
u'\xe9#m': 536,
u'uml': 536,
u'rlu': 536,
u'pg\xf8': 535,
u'bam': 535,
u'kkr': 534,
u'cro': 533,
u's\xe9#': 533,
u'euo': 533,
u'ep\xe6': 531,
u'etg': 530,
u'ucl': 528,
u'boo': 528,
u'jon': 528,
u'phn': 528,
u'cao': 527,
u'g#\xe9': 527,
u'gtp': 527,
u'onz': 526,
u'\xe9#s': 526,
u'hnb': 526,
u'hek': 525,
u'lzh': 525,
u'sap': 525,
u'epp': 525,
u'fb\xf8': 524,
u'ut\xe5': 523,
u'zy#': 523,
u'b\xf6g': 523,
u'kae': 521,
u'lps': 521,
u'bop': 521,
u'h#i': 521,
u'itc': 520,
u'afe': 519,
u'fnr': 519,
u'ovu': 518,
u'iev': 518,
u'gsy': 518,
u'km\xe6': 518,
u'ews': 518,
u'mu#': 517,
u'\xe6kv': 517,
u'irg': 515,
u'j#u': 515,
u'fou': 515,
u'neh': 515,
u'owe': 514,
u'irt': 513,
u'#r\xfc': 513,
u'eid': 512,
u'ufi': 512,
u'hou': 512,
u'ijs': 511,
u'hra': 511,
u'wis': 511,
u'rdv': 511,
u'ijk': 510,
u'roz': 510,
u'rpt': 510,
u'nbi': 510,
u'kn\xe6': 509,
u'pkl': 509,
u'#tc': 509,
u'agy': 509,
u'gs\xf8': 509,
u'sh\xe6': 509,
u'aso': 508,
u'hip': 508,
u'hae': 507,
u'now': 507,
u'chn': 507,
u'l\xe6b': 507,
u'dzi': 506,
u'\xf8ff': 505,
u'ihj': 505,
u'h#m': 505,
u'ngp': 504,
u'iew': 504,
u'olg': 504,
u'ua#': 504,
u'oop': 504,
u'ar\xf3': 504,
u'h#e': 504,
u'ovm': 502,
u's\xf8i': 502,
u'hys': 502,
u'uk#': 501,
u'kai': 501,
u'oui': 501,
u'gh#': 501,
u'weg': 500,
u'aye': 500,
u'rck': 500,
u'\xf8si': 500,
u'g\xe6r': 499,
u'ubi': 499,
u'wle': 499,
u'\xe9#f': 499,
u'gd#': 499,
u'kss': 499,
u'db\xf8': 498,
u'#s#': 498,
u'rsn': 498,
u'#rw': 498,
u'xan': 498,
u'\xf8is': 497,
u'yle': 497,
u'boy': 495,
u'eos': 495,
u'ccr': 495,
u'ygh': 495,
u'd\xf8#': 495,
u'kou': 495,
u'ze#': 494,
u'\xf8jh': 493,
u'evy': 492,
u'yem': 491,
u'\xf8fl': 491,
u'nlu': 491,
u'cti': 491,
u'szo': 491,
u'ofs': 490,
u'vod': 490,
u'jko': 490,
u'lmp': 490,
u'skf': 490,
u'db\xe5': 489,
u'\xf6de': 489,
u'\xe8s#': 489,
u'rms': 489,
u'may': 488,
u'quo': 488,
u'pem': 488,
u'ysa': 488,
u's#q': 487,
u'egj': 487,
u'luc': 487,
u'ufl': 486,
u'oye': 486,
u'u\xe6n': 486,
u'iav': 485,
u'pep': 485,
u'dac': 485,
u'ajs': 485,
u'#pn': 485,
u'#m\xfc': 484,
u'ec#': 484,
u'\xe6sr': 484,
u'bl\xe6': 483,
u'ntm': 483,
u'#py': 483,
u'igm': 483,
u'rp#': 482,
u'ftn': 482,
u'x#o': 482,
u'fob': 481,
u'ttl': 481,
u'v\xf8b': 480,
u'hro': 479,
u's\xe6s': 479,
u'#wu': 478,
u'ed\xe5': 478,
u'nm\xe6': 478,
u'ns\xe5': 477,
u'lec': 477,
u'ff\xe6': 476,
u'rkv': 476,
u'#gh': 476,
u'voi': 476,
u'wol': 476,
u'bau': 476,
u'isy': 475,
u'mei': 474,
u'ut\xe6': 474,
u'goo': 474,
u'uha': 474,
u'pnr': 473,
u'loa': 473,
u'ndz': 472,
u'n\xe5b': 472,
u'y#n': 472,
u'yrl': 471,
u'dof': 471,
u'bir': 471,
u'vpo': 470,
u'ith': 470,
u'sny': 470,
u'goi': 470,
u'asl': 469,
u'rgh': 469,
u'fei': 469,
u'taj': 469,
u'bys': 469,
u'aco': 468,
u'j#b': 467,
u'tvo': 467,
u'if\xe6': 467,
u'pka': 467,
u'own': 467,
u'lfe': 466,
u'#ze': 466,
u'hst': 466,
u'v\xe6v': 466,
u'h\xf8f': 466,
u'wog': 466,
u'pyr': 466,
u'h#a': 465,
u'ehr': 464,
u'lfi': 464,
u'ws#': 463,
u'\xf8#i': 462,
u'elr': 462,
u'saa': 462,
u'asr': 462,
u'sf#': 461,
u'atz': 461,
u'um\xe5': 461,
u'yed': 460,
u'oir': 460,
u'vm\xe6': 460,
u'v\xe6n': 460,
u'dpo': 460,
u'r\xe6r': 460,
u'#oc': 459,
u'\xe1le': 459,
u'h\xe6f': 459,
u'z\xe1l': 459,
u'nz\xe1': 458,
u'\xe6dd': 458,
u'iei': 458,
u'zar': 458,
u'vy#': 458,
u'dyd': 458,
u'gve': 458,
u'lgu': 457,
u'#eb': 457,
u'k\xf8e': 457,
u'oek': 456,
u'egf': 455,
u'izi': 455,
u'kiv': 453,
u'kme': 453,
u'#gt': 453,
u'ymm': 452,
u'h\xe6d': 452,
u'\xf8ll': 452,
u'igl': 452,
u'onr': 451,
u'evu': 451,
u'ahr': 451,
u'dej': 451,
u'ltm': 450,
u'asj': 450,
u'yi#': 449,
u'lsd': 449,
u'yg#': 449,
u'cek': 448,
u'aub': 448,
u'geu': 448,
u'cd#': 448,
u'thb': 448,
u'nca': 447,
u'\xe5lt': 447,
u'\xf8sk': 447,
u'aat': 446,
u'bay': 446,
u'paa': 446,
u'#e#': 444,
u'hob': 443,
u'vif': 443,
u'npu': 443,
u'zai': 443,
u'fpa': 442,
u'yte': 442,
u'zna': 442,
u'lye': 442,
u'vgr': 441,
u'lpl': 441,
u'ook': 441,
u'wif': 440,
u'ln#': 440,
u'uo#': 439,
u'odp': 439,
u'cox': 438,
u'uca': 437,
u'#k\xf6': 437,
u'eh\xe6': 436,
u'khs': 436,
u'exc': 436,
u'ntp': 436,
u'zap': 436,
u'agv': 435,
u'\xe8re': 435,
u't\xe5b': 434,
u'pso': 434,
u'c#i': 434,
u'ioe': 434,
u'm\xe9n': 433,
u'agb': 433,
u'vti': 432,
u'thl': 432,
u'\xf8pa': 432,
u'\xf8mi': 431,
u'lze': 431,
u'ulk': 431,
u'rf#': 430,
u'euh': 430,
u'chs': 430,
u'#oz': 429,
u'sgo': 429,
u'kyi': 429,
u'mhu': 429,
u'buc': 429,
u'igb': 429,
u'\xedn#': 427,
u'#eq': 427,
u'mt\xe6': 427,
u'phu': 426,
u'peo': 426,
u'hme': 426,
u'g\xe6v': 425,
u'r\xf8#': 425,
u'uu#': 425,
u'amk': 425,
u'k#\xe5': 424,
u'om\xf8': 424,
u'uzz': 424,
u'urh': 423,
u'c\xeda': 423,
u'fd\xe6': 423,
u'equ': 423,
u'jha': 423,
u'zzo': 423,
u'#eh': 422,
u'icu': 422,
u'y#c': 421,
u'ntb': 421,
u'aom': 421,
u'#d#': 421,
u'#zu': 420,
u'fad': 420,
u'ypo': 420,
u'yrn': 419,
u'cta': 419,
u'z#b': 419,
u'jav': 418,
u'tpu': 418,
u'g\xf8d': 418,
u'#od': 417,
u'kim': 417,
u'ugh': 417,
u'fkr': 417,
u'usr': 417,
u'ayo': 416,
u'yto': 416,
u'tlo': 416,
u'ogu': 416,
u'p\xe5r': 416,
u'eit': 415,
u'hwa': 415,
u'#ip': 415,
u'\xe6ro': 415,
u'\xf8df': 415,
u'ar\xed': 415,
u'wai': 414,
u'sr\xf8': 414,
u'\xf8ml': 413,
u'eed': 412,
u'sai': 412,
u'owl': 412,
u'fjo': 411,
u'ocr': 411,
u'irb': 410,
u'zam': 410,
u'pic': 410,
u'rc\xed': 409,
u'ueg': 409,
u'day': 409,
u'suu': 408,
u'oyk': 408,
u'\xf8rh': 408,
u'hti': 408,
u'dij': 408,
u'\xf8dp': 408,
u'odg': 407,
u'wyn': 407,
u'uag': 407,
u'owh': 407,
u'ehu': 406,
u'bui': 406,
u'k\xf6l': 405,
u'jsr': 405,
u'b#l': 405,
u'\xf6ln': 405,
u'#wy': 404,
u'dul': 404,
u'bym': 404,
u'tbo': 403,
u'elz': 402,
u'umk': 402,
u'#\xf8#': 402,
u'r#\u0161': 402,
u'r\xe6e': 402,
u'vsb': 401,
u'n\xf8r': 401,
u'mt\xe5': 401,
u'ugn': 400,
u'lpn': 400,
u'nhu': 399,
u'apt': 399,
u'ssc': 399,
u'bly': 399,
u'h#v': 399,
u'h#d': 399,
u'dr\xe5': 398,
u'ouc': 398,
u'md\xf8': 398,
u'gap': 397,
u'\xf8ha': 397,
u'gf#': 396,
u'ifr': 396,
u'\xe6do': 396,
u'a#\xe5': 395,
u'iip': 395,
u'\xf6sc': 395,
u'raj': 395,
u'rir': 395,
u'b\xf6s': 394,
u'mfe': 394,
u'skh': 394,
u'jov': 393,
u'#ya': 393,
u'oty': 393,
u'usy': 393,
u'gbr': 392,
u'\xe5rv': 392,
u'ei#': 392,
u'ee#': 392,
u'c#h': 392,
u'ltv': 392,
u'zas': 392,
u'pav': 392,
u'lgd': 391,
u'wer': 391,
u's\xe5n': 391,
u'rtb': 391,
u'\xf8rd': 391,
u'npl': 391,
u'fne': 390,
u'\xeda#': 390,
u'alq': 390,
u'hte': 390,
u'anr': 390,
u'ymi': 389,
u'mev': 389,
u'sbl': 389,
u'\xf8kr': 389,
u'uei': 389,
u'cad': 388,
u'aak': 388,
u'bah': 388,
u'tuz': 388,
u'\xfcbi': 387,
u'pgr': 387,
u'cus': 386,
u'jep': 386,
u'jak': 386,
u'ofl': 386,
u'dow': 386,
u'uil': 386,
u'yti': 386,
u'r\xe5n': 386,
u'pru': 386,
u'\xf8in': 385,
u'c#e': 385,
u'lsu': 384,
u'vul': 384,
u'ndy': 384,
u'kly': 384,
u'\xe5se': 384,
u'pbr': 383,
u'\xe6th': 383,
u'mv\xe6': 383,
u'mya': 383,
u'hy#': 383,
u'tiu': 383,
u'lku': 382,
u'nwe': 382,
u'raw': 382,
u'\xf8#e': 381,
u'#ea': 381,
u'r\xfcb': 381,
u'mdi': 381,
u'oco': 381,
u'lvk': 381,
u'puy': 380,
u'wij': 380,
u'ngb': 380,
u'hez': 379,
u'bg#': 379,
u'oan': 379,
u'nlo': 379,
u'\xe5t#': 379,
u'igp': 379,
u'cli': 378,
u'ix#': 378,
u'#m#': 378,
u'spy': 378,
u'j\xf8i': 377,
u'\xf8ka': 377,
u'alc': 377,
u'guy': 377,
u'mf\xe6': 377,
u'ng\xf8': 376,
u'gop': 376,
u'atb': 376,
u'rdy': 376,
u'ect': 375,
u'aby': 375,
u'lyp': 375,
u'dpi': 375,
u'ogg': 374,
u'fot': 374,
u'pei': 374,
u'izo': 374,
u'yme': 373,
u'v#\xe6': 373,
u'r\xe8s': 373,
u'\xe6nt': 373,
u'\xf8lj': 373,
u'#\xe6d': 372,
u'ig\xe5': 372,
u'dk\xe6': 371,
u'i\xe8r': 371,
u'\xe9ko': 371,
u'ogp': 371,
u'\xf8nl': 370,
u'sug': 370,
u'myt': 370,
u'mps': 370,
u'eg\xe6': 370,
u'hl#': 370,
u'h#t': 370,
u'ldh': 369,
u'pve': 369,
u'lf#': 369,
u'dui': 369,
u'aty': 367,
u'ckx': 367,
u'tl\xe5': 367,
u'ogh': 367,
u'jri': 366,
u'kag': 366,
u'cy#': 366,
u'fnk': 366,
u'\xe9#d': 366,
u'eph': 366,
u'p\xe5m': 366,
u'pam': 365,
u'vve': 365,
u'bsn': 364,
u'key': 363,
u'\xe9#i': 363,
u'\xf8v#': 363,
u'\xe5to': 363,
u'rub': 363,
u'tuk': 363,
u'wa#': 361,
u'aui': 361,
u'coo': 361,
u'rhj': 361,
u'tzi': 360,
u'\xe9nd': 360,
u'cey': 360,
u'oes': 360,
u'm\xe9k': 360,
u'en\xf8': 360,
u'aef': 359,
u'\xe5#w': 359,
u'squ': 359,
u'nt\xf8': 359,
u'hic': 359,
u't\xedn': 358,
u'\xf8#d': 358,
u'ext': 358,
u'fa#': 358,
u'jub': 358,
u'ylr': 357,
u'at\xe6': 357,
u'tmu': 357,
u'i\xe6r': 356,
u'er\xe8': 356,
u'\xf8no': 356,
u'pay': 356,
u'dtb': 355,
u'tty': 355,
u'izb': 355,
u'sg\xe5': 354,
u'a#z': 354,
u'bee': 354,
u'jls': 354,
u'abk': 354,
u'ksr': 354,
u'tec': 354,
u'zbo': 353,
u'h\xe6l': 352,
u'\xe6v#': 352,
u'\xe6b#': 352,
u'#\xe6k': 351,
u'ovn': 351,
u'sfj': 351,
u'abb': 351,
u'r\xe6p': 351,
u'avf': 351,
u'mah': 350,
u's\xf8v': 350,
u'uze': 349,
u'bkh': 349,
u'hud': 349,
u'az#': 348,
u'z#r': 347,
u'uh\xf8': 347,
u'fid': 346,
u'soj': 346,
u'\xe5ma': 346,
u'ayt': 346,
u'pk\xf8': 346,
u'zu#': 346,
u'uhi': 346,
u'pbl': 345,
u'oza': 345,
u'c#f': 344,
u'ltb': 344,
u'ogb': 344,
u'ksy': 344,
u'zo#': 343,
u'rt\xed': 343,
u'kpu': 343,
u'ohl': 342,
u'b\xe5r': 342,
u'p#r': 342,
u'hiz': 342,
u'\xe5mi': 341,
u'ogf': 341,
u'noy': 341,
u'\xe6nn': 341,
u'j#n': 340,
u'oeu': 340,
u'gja': 340,
u'toi': 340,
u'sl#': 339,
u'bip': 339,
u'ibo': 338,
u'cun': 338,
u'atd': 338,
u'zer': 338,
u'jas': 338,
u'lyb': 338,
u'oam': 337,
u'\u0161pi': 337,
u'n\xe5d': 337,
u'#\xf8p': 337,
u'gtr': 337,
u'k\xe5n': 336,
u'nkf': 336,
u'doy': 336,
u'rkm': 336,
u'odd': 336,
u'yro': 336,
u'upu': 336,
u'jaf': 335,
u'uck': 335,
u'nv\xe6': 335,
u'azn': 335,
u'rc#': 335,
u'\xe6dt': 335,
u'jol': 335,
u'lao': 335,
u'b#r': 335,
u'roy': 334,
u'zel': 334,
u'bu#': 334,
u'sot': 333,
u'loo': 333,
u'mmo': 333,
u'pud': 333,
u'rnp': 332,
u'l#\xe9': 332,
u'vsg': 331,
u'law': 331,
u'r\xe5m': 331,
u'uh\xe6': 331,
u'hia': 331,
u'jtn': 330,
u'ffs': 330,
u'oar': 330,
u's\xe1n': 330,
u'meh': 330,
u'npo': 330,
u'nac': 330,
u'ksd': 330,
u'dup': 330,
u'gm\xf8': 329,
u'uay': 329,
u'r\xe5#': 329,
u'mck': 329,
u'fib': 328,
u'y##': 328,
u'pom': 328,
u'fao': 328,
u'ebh': 328,
u'sks': 328,
u'\xe5rn': 327,
u'k#\xe6': 327,
u'\xe6dl': 327,
u'mys': 327,
u'iou': 327,
u'dj\xe6': 326,
u'bok': 326,
u'adl': 326,
u'el\xe5': 326,
u'j\xf8t': 326,
u'\xe6mn': 326,
u'\xe5lb': 325,
u'llm': 325,
u'ntf': 325,
u'woo': 325,
u'dau': 325,
u'#pv': 325,
u'duf': 325,
u'heo': 324,
u'c#d': 324,
u'mm#': 324,
u'\xf6st': 324,
u'utu': 324,
u'z#g': 324,
u'usm': 324,
u'aht': 323,
u'rsh': 322,
u'rsc': 322,
u'too': 322,
u'oyl': 322,
u'itm': 322,
u'ulf': 322,
u'yfo': 321,
u'eue': 321,
u'rcy': 321,
u'iap': 321,
u'ghr': 321,
u'x#s': 320,
u'lgl': 320,
u'ics': 320,
u'#kg': 320,
u'rpu': 319,
u'uie': 319,
u'ntk': 319,
u'hub': 319,
u'lkl': 318,
u'ldv': 318,
u'ltt': 318,
u'biv': 318,
u'peu': 318,
u'scu': 317,
u'wel': 317,
u'aum': 317,
u'lln': 317,
u'kpo': 317,
u'mkl': 317,
u'\xe1n#': 317,
u'fef': 316,
u'icc': 316,
u'poo': 315,
u'r\xedn': 315,
u'enw': 315,
u'umt': 315,
u'dl\xe5': 314,
u'#m\xe9': 314,
u'et\xe5': 314,
u'ygr': 314,
u'shr': 313,
u'sey': 313,
u'p\xf8g': 313,
u'#dj': 313,
u'ssl': 313,
u'byp': 313,
u'\xe5df': 312,
u'utb': 311,
u'ypa': 311,
u'ccn': 311,
u'eca': 311,
u'otr': 311,
u'efp': 311,
u'aup': 310,
u'vez': 310,
u'lco': 309,
u'ngm': 309,
u'e\xe6r': 309,
u'hyd': 309,
u'vla': 308,
u'lyr': 308,
u'i\xe5r': 308,
u'obn': 307,
u'sy#': 307,
u'jk#': 307,
u'#\u0161p': 307,
u'ryb': 307,
u'#mv': 307,
u'aig': 306,
u'uvu': 306,
u'jsn': 306,
u'tp\xe5': 306,
u'iza': 306,
u'abd': 306,
u'p#l': 306,
u'aw#': 305,
u'lra': 304,
u'xce': 304,
u'vta': 304,
u'hut': 304,
u'#ee': 304,
u'yks': 304,
u'#o#': 303,
u'iea': 303,
u'sza': 303,
u'z#a': 303,
u'm\xf8l': 303,
u'ybn': 303,
u'amu': 302,
u'\xf8#h': 302,
u'swa': 302,
u'hr\xf6': 301,
u'#ae': 301,
u'foo': 301,
u'coc': 301,
u'ict': 301,
u'lvv': 301,
u't#z': 300,
u'vpa': 300,
u'gaa': 300,
u'u#y': 300,
u'gd\xf8': 300,
u'itj': 300,
u'cki': 300,
u'r\xf6d': 300,
u'ewa': 300,
u'pvc': 299,
u'tge': 299,
u'#iz': 299,
u'kx#': 299,
u'gah': 298,
u'x#f': 298,
u'\xf8af': 297,
u'pry': 297,
u'nk\xe6': 296,
u'\xe9#h': 296,
u'z#i': 296,
u'nuc': 296,
u'byv': 296,
u'yru': 295,
u'rgl': 295,
u'ti\xe5': 295,
u'dkl': 294,
u'rdp': 294,
u'\xe3o#': 294,
u'c\xf4t': 294,
u'uku': 293,
u'ofp': 293,
u'yvi': 293,
u'eet': 293,
u'\xf4te': 293,
u'\xe5lg': 293,
u'rb\xf8': 293,
u'dwa': 293,
u'\xf8sr': 293,
u'moz': 293,
u'#d\xfc': 293,
u'vc#': 293,
u'uki': 292,
u'euv': 292,
u'rca': 292,
u's\xf8d': 292,
u'npa': 292,
u'ep#': 292,
u'bba': 292,
u'\xe6tl': 291,
u'rnd': 291,
u'c#a': 291,
u'eob': 291,
u't\xf8k': 291,
u'shs': 291,
u'gku': 291,
u'ylo': 290,
u'anj': 290,
u'nth': 290,
u'#cd': 290,
u'fg\xe5': 289,
u'\xf8sa': 289,
u'\xf8op': 289,
u'rl#': 289,
u'dot': 288,
u'm\xe9#': 288,
u'rmf': 288,
u'mcm': 288,
u'oog': 288,
u'erz': 287,
u'vep': 287,
u'hts': 287,
u'cch': 287,
u'di\xe6': 287,
u'vga': 286,
u'ymn': 286,
u'ydo': 286,
u'hak': 285,
u'nro': 285,
u'nl\xf8': 285,
u'zid': 285,
u'nmi': 285,
u'pae': 285,
u'\xf8dl': 284,
u'ykn': 284,
u'\xfchr': 283,
u'psl': 283,
u'\xeate': 283,
u'sfp': 283,
u'scy': 282,
u'nkv': 282,
u'adn': 282,
u'pok': 282,
u'lch': 282,
u'msu': 282,
u'lvh': 282,
u'kjo': 281,
u'ceh': 281,
u'nmu': 281,
u'tao': 281,
u'nkk': 280,
u'hrk': 280,
u'sji': 280,
u'rtz': 280,
u'vko': 279,
u'\xf8ag': 279,
u'inz': 279,
u'yin': 279,
u'eic': 279,
u'#ay': 279,
u'kej': 279,
u'vlt': 279,
u'cc#': 279,
u'd\xfch': 279,
u'wn#': 279,
u't\xeat': 278,
u'tce': 278,
u'up\xe5': 278,
u'zqu': 278,
u'ohr': 278,
u'\xe5bl': 278,
u'kay': 277,
u'ls\xf8': 277,
u'uid': 277,
u'got': 277,
u'ivg': 276,
u'hee': 276,
u'yud': 276,
u'#rh': 276,
u'vud': 276,
u'izq': 276,
u'o#y': 275,
u'#io': 275,
u'#mm': 275,
u'\xf8or': 275,
u'agp': 275,
u'et\xea': 275,
u'std': 275,
u'dua': 275,
u'ebb': 275,
u'lg\xf8': 274,
u'kip': 274,
u'iad': 274,
u'ryl': 274,
u'f#\xe9': 274,
u'huk': 274,
u'uno': 274,
u'jss': 273,
u'fp#': 273,
u'z#e': 273,
u'ecr': 273,
u'biq': 273,
u'sb\xf8': 273,
u'oub': 272,
u'itb': 272,
u'lv\xf8': 272,
u'#gb': 271,
u'nox': 271,
u'\xf8rv': 271,
u'm\xfcn': 271,
u'#c\xf4': 270,
u'aed': 270,
u'#s\xe1': 270,
u'bob': 270,
u'\xe6ls': 270,
u'\xf8nh': 270,
u'\xe6da': 270,
u'jsi': 270,
u'#v#': 270,
u'itf': 270,
u'lbi': 270,
u'\xf8#m': 269,
u'ooi': 269,
u'mav': 269,
u'vsc': 268,
u'\xe4ne': 268,
u'am\xf8': 268,
u'#vl': 268,
u'hoo': 268,
u'fto': 268,
u'nyv': 268,
u'ikm': 268,
u'ss\xf8': 268,
u'y#j': 268,
u'rb\xe6': 267,
u'hja': 267,
u'iml': 267,
u'vhj': 267,
u'i\xf8r': 267,
u'v#c': 267,
u'thr': 267,
u'p\xe5d': 267,
u'jik': 266,
u'nkh': 266,
u'low': 266,
u'eef': 266,
u'fmi': 266,
u'apu': 266,
u'kg#': 266,
u'ksv': 266,
u'skp': 266,
u'haj': 265,
u'mbl': 265,
u'eab': 265,
u'maz': 265,
u'rld': 265,
u'mop': 265,
u'if#': 264,
u'luo': 264,
u'#xi': 264,
u'riz': 264,
u'ugy': 263,
u'#gp': 263,
u'db\xe6': 263,
u'\xfcnc': 263,
u'uja': 263,
u'msm': 263,
u'mao': 262,
u'mix': 262,
u'oup': 262,
u'lew': 262,
u'dab': 262,
u'ksh': 262,
u'a#\xe6': 261,
u'\xf8#a': 261,
u'sj\xf6': 261,
u'ayr': 261,
u'dsc': 261,
u'h#b': 261,
u'j\xf6s': 260,
u'qae': 260,
u'\xe1nc': 260,
u'\xf8he': 260,
u'sd\xe6': 260,
u'gtv': 259,
u's#\xe9': 259,
u'bec': 259,
u'\xf8li': 259,
u'msv': 259,
u'\xe5bt': 259,
u'dg\xe6': 258,
u'sog': 257,
u'pui': 257,
u'kug': 257,
u'hcr': 257,
u'#pk': 257,
u'\xe1nd': 257,
u'heg': 256,
u'pod': 256,
u'r\xe6a': 256,
u'lro': 255,
u'gut': 255,
u'wma': 255,
u'ncl': 255,
u'cov': 255,
u'vvi': 255,
u'ize': 255,
u'fse': 254,
u'uf\xf8': 254,
u'yt\xe6': 254,
u'lup': 254,
u'lyo': 254,
u'gtk': 253,
u'cr#': 253,
u'gby': 253,
u'#\xf8o': 253,
u'lqa': 253,
u'yas': 252,
u'fnm': 252,
u'imy': 252,
u'vii': 252,
u'rqu': 252,
u'nhc': 251,
u'iof': 251,
u'aij': 251,
u'nny': 251,
u'vsi': 250,
u'ov\xe1': 250,
u'iis': 250,
u'eh#': 250,
u'hth': 250,
u'udj': 250,
u'aot': 250,
u's\xf8o': 249,
u'rdm': 249,
u'nun': 249,
u'zog': 248,
u'gdr': 248,
u'yke': 248,
u'h\xf8n': 248,
u'#sd': 248,
u'hig': 248,
u'cav': 248,
u'wen': 247,
u'sye': 247,
u'aho': 247,
u'gtm': 247,
u'ftp': 247,
u'ttb': 247,
u'x#h': 247,
u'fup': 247,
u'\xf8db': 247,
u'osb': 246,
u'yva': 246,
u'meo': 246,
u'gpa': 246,
u'#k#': 246,
u'p\xe9r': 245,
u'fo#': 245,
u'ni\xf8': 245,
u'oad': 244,
u'ahi': 244,
u'olr': 244,
u'zek': 244,
u'bp#': 244,
u'sp\xe5': 244,
u'n#z': 244,
u'es\xe5': 244,
u'tm\xe5': 244,
u'pp\xe4': 243,
u'p\xe4n': 243,
u'#p\xf6': 243,
u'\xe5r\xf8': 242,
u'oeb': 242,
u'chb': 242,
u'tga': 242,
u'utm': 242,
u'#d\xed': 242,
u'rml': 242,
u'dfu': 241,
u'cyb': 241,
u'sju': 241,
u'kia': 241,
u'sfy': 241,
u'\u0144sk': 241,
u'uhn': 241,
u'vva': 241,
u'tja': 241,
u'#s\xf6': 240,
u'cya': 240,
u'eav': 240,
u'om\xe6': 240,
u'\xf8vl': 240,
u'acs': 240,
u'awi': 240,
u'\xf8l#': 240,
u'#p\xe9': 240,
u'voe': 239,
u'rsz': 239,
u'ktg': 239,
u'zaw': 239,
u'nm\xf8': 239,
u'#h\xe4': 239,
u'snu': 239,
u'\xf6ca': 239,
u'zoo': 238,
u'ijw': 238,
u'jwe': 238,
u'tth': 238,
u'vug': 238,
u'any': 238,
u'oep': 237,
u'rza': 237,
u'w#o': 237,
u'#sz': 237,
u'ych': 237,
u'\xe6#o': 237,
u'vio': 237,
u'gic': 237,
u'ulj': 237,
u'isg': 237,
u'rdu': 237,
u'etc': 237,
u'gl#': 237,
u'ghi': 237,
u'egm': 237,
u'ybr': 237,
u'\xedne': 236,
u'gf\xf8': 236,
u'oij': 236,
u'n\xe1n': 236,
u'xtu': 235,
u'osh': 235,
u'bve': 235,
u'sob': 235,
u'eh\xe5': 235,
u'ioc': 235,
u'kfu': 234,
u'gfl': 234,
u'w#d': 234,
u't\xf8n': 234,
u'a#y': 234,
u'fha': 234,
u'oon': 234,
u'wur': 233,
u'o#\xf8': 233,
u'\xe6db': 233,
u'shu': 233,
u'weu': 232,
u'rn\xe1': 232,
u'ax#': 232,
u'raz': 232,
u'ryp': 232,
u'hac': 231,
u'lfs': 231,
u'ubv': 231,
u'yus': 231,
u'cri': 230,
u'dwi': 230,
u'#\xf8l': 230,
u'buz': 230,
u'\xe5rr': 229,
u'ory': 229,
u'#f#': 229,
u'#i\xf8': 229,
u'u#z': 229,
u'seh': 229,
u'zea': 229,
u'x#a': 229,
u'piu': 229,
u'm#z': 228,
u'eem': 228,
u'#\xf6c': 228,
u'v\xe1c': 228,
u'e\xf8s': 228,
u'htk': 228,
u'awl': 228,
u'tj\xe6': 227,
u'ije': 227,
u'ogk': 227,
u'un\xe6': 227,
u'\xf8jf': 227,
u'ux#': 227,
u'tej': 227,
u'\xf8p\xe5': 227,
u'jso': 226,
u'ipc': 226,
u'h\xe4n': 226,
u'usz': 226,
u'c#m': 225,
u'oum': 225,
u'#vg': 225,
u'eok': 225,
u'\xe5bu': 225,
u'\xe4ns': 224,
u'\xe6d#': 224,
u'ovh': 224,
u'#bh': 224,
u'gaf': 224,
u'\xe9#e': 224,
u'\xf8gr': 224,
u'\xe9re': 223,
u'\xf8t#': 223,
u'oi#': 223,
u'dc#': 223,
u'ecc': 223,
u'gg\xe5': 223,
u'yar': 222,
u'#j\xf6': 222,
u'fsh': 222,
u'g\xf8j': 222,
u'ryt': 222,
u'm\xfcl': 222,
u'icy': 222,
u'jim': 221,
u'#sf': 221,
u'n\xf8k': 221,
u'ieo': 221,
u'taw': 221,
u'noe': 220,
u'j\xe6r': 220,
u'keu': 220,
u'jsk': 220,
u'fuk': 220,
u'h#n': 220,
u'way': 219,
u'#\xe6s': 219,
u'gpo': 219,
u'bai': 219,
u'ggi': 219,
u'ily': 219,
u'fbe': 218,
u'mhj': 218,
u'mcg': 218,
u'\xe5ns': 218,
u'gae': 217,
u'\xfcll': 217,
u'mm\xf8': 217,
u'jsp': 217,
u'yo#': 217,
u'vry': 217,
u'nnb': 217,
u'gbl': 216,
u'gei': 216,
u'ge\xe5': 216,
u'vtr': 216,
u'csy': 216,
u'vka': 215,
u'fif': 215,
u'gsj': 215,
u'fij': 215,
u'kij': 215,
u'x#e': 215,
u'oki': 215,
u'l\xe9s': 215,
u'm\xf8j': 215,
u'rmk': 215,
u'usf': 215,
u'aan': 214,
u'zlo': 214,
u'twe': 214,
u'koc': 214,
u'aea': 213,
u'kuh': 213,
u'\xfcss': 213,
u'rd\xe6': 213,
u'\xe5fa': 213,
u'usq': 213,
u'tfl': 212,
u'you': 212,
u'z#t': 212,
u'jlb': 212,
u'oob': 212,
u'dae': 212,
u'tvv': 211,
u'ij#': 211,
u'evs': 211,
u'cec': 211,
u'sbj': 211,
u'oyo': 211,
u'jlf': 211,
u'tyg': 211,
u'moc': 211,
u'boa': 210,
u'i\xf8j': 210,
u'ub\xf8': 210,
u'alh': 210,
u'w#z': 210,
u'ikh': 210,
u'rmh': 210,
u'a#\xe9': 209,
u'v\xe5d': 209,
u'#+#': 209,
u'dhi': 209,
u'ui#': 209,
u'gky': 209,
u'\xe6pa': 208,
u'lol': 208,
u'cpr': 208,
u'ktm': 208,
u'tda': 208,
u'uty': 208,
u'b\xe6l': 208,
u'arq': 208,
u'rgy': 207,
u'jef': 207,
u'umh': 207,
u'v\xe6d': 207,
u'lmy': 207,
u'\xe6s#': 207,
u'ji#': 206,
u'eik': 206,
u'gbp': 206,
u'lt\xe6': 206,
u'uez': 206,
u'ut\xf8': 206,
u'uci': 205,
u'olh': 205,
u'rdh': 205,
u'tdo': 205,
u'hda': 205,
u'bj\xf8': 204,
u'azz': 204,
u'c#b': 204,
u'ntg': 204,
u'z#v': 204,
u'h#p': 203,
u'nyo': 203,
u'ird': 202,
u'rzo': 202,
u'\xf8m\xe5': 202,
u'ym\xe6': 202,
u'mm\xe5': 202,
u'tc#': 202,
u'mpn': 202,
u'utp': 202,
u'vro': 202,
u'llh': 201,
u'kh#': 201,
u'kth': 201,
u'ecs': 201,
u'ugf': 200,
u'ozl': 200,
u'eyb': 200,
u'ulb': 200,
u'okb': 200,
u'ikf': 200,
u'skk': 200,
u'tvt': 199,
u'elc': 199,
u'tki': 199,
u'nah': 199,
u'py#': 199,
u'voa': 198,
u'dvu': 198,
u'g#z': 198,
u's\xf8#': 198,
u'\xe6ru': 198,
u'\xe5g\xe5': 198,
u'moh': 198,
u'ksm': 198,
u'\xe6ga': 198,
u'ivu': 197,
u'ahl': 197,
u'ahn': 197,
u'itp': 197,
u'ghd': 197,
u'\xe6bl': 196,
u'ymo': 196,
u'myl': 196,
u'r\xf6n': 196,
u'sdg': 196,
u'zbe': 196,
u'lvu': 196,
u'\xe6am': 195,
u'k#y': 195,
u'k\xf8k': 195,
u'tu#': 195,
u'#dn': 195,
u'vr\xf8': 195,
u'vr\xe6': 195,
u'fee': 195,
u'x#i': 194,
u'am\xe5': 194,
u'\xf8an': 194,
u'gbo': 194,
u'tj\xe5': 194,
u'ovd': 194,
u'dku': 194,
u'dkn': 194,
u'vf\xe6': 194,
u'ypl': 194,
u'nec': 194,
u'ikd': 194,
u'j\xe5l': 194,
u'jrf': 193,
u'lsg': 193,
u'two': 193,
u'ufn': 193,
u'ieg': 193,
u'l\xf8v': 193,
u'\xe9le': 193,
u'\xf6ld': 193,
u'pim': 193,
u'fau': 193,
u'mec': 192,
u'eyr': 192,
u'euc': 192,
u'bsg': 192,
u'\xf6ne': 192,
u'aou': 192,
u'\xf8sg': 192,
u'oot': 192,
u'#dh': 192,
u'nja': 192,
u'rmy': 192,
u'gr\xf6': 192,
u'fe+': 192,
u'obm': 191,
u'guv': 191,
u'ur\xe6': 191,
u'c#v': 191,
u'zal': 191,
u'b#j': 191,
u'pez': 191,
u'cob': 191,
u'eyl': 191,
u'cgu': 190,
u'utd': 190,
u'uef': 190,
u'ne\xe6': 190,
u'h#k': 190,
u'\xf8te': 189,
u'jia': 189,
u'ezo': 189,
u'gy#': 189,
u'lsn': 189,
u'eyc': 189,
u'crg': 189,
u'voj': 189,
u'etz': 189,
u'bey': 189,
u'oc#': 189,
u'tii': 189,
u'jud': 189,
u'cai': 188,
u'\xf6rl': 188,
u'v\xe9#': 188,
u'h\xfcs': 188,
u'gps': 188,
u'msd': 188,
u'dut': 188,
u'oef': 187,
u'ch\xfc': 187,
u'\xf8kk': 187,
u'ddo': 187,
u'vfi': 187,
u'hm#': 187,
u'kaj': 186,
u'c#t': 186,
u'tvk': 186,
u'rkh': 186,
u'\xe9#p': 186,
u'veh': 186,
u'ug\xe6': 185,
u'pss': 185,
u'\xf8rm': 185,
u'dsz': 185,
u'ddh': 185,
u'dty': 185,
u'i\xf6l': 185,
u'hi\xf6': 185,
u's\xf6d': 184,
u'aia': 184,
u'gu\xe9': 184,
u'fbi': 184,
u'cds': 184,
u'kty': 184,
u'ktk': 184,
u'il\xf8': 184,
u'ecy': 184,
u'moo': 184,
u'coa': 184,
u'fio': 183,
u'pht': 183,
u'liu': 183,
u'nae': 183,
u'nug': 183,
u'auv': 182,
u'iim': 182,
u'l\xf3p': 182,
u'\xe5do': 182,
u'iee': 182,
u'\xf3pe': 182,
u'zak': 181,
u'mbn': 181,
u'#\xe6b': 181,
u'#ie': 181,
u'v#y': 181,
u'yt\xe5': 181,
u'tuo': 181,
u'b\xe6k': 181,
u'ewi': 181,
u'ajn': 181,
u'tah': 181,
u'ybh': 181,
u'yma': 180,
u'adw': 180,
u'\xe5lf': 180,
u'pkk': 180,
u'\xf8vt': 180,
u'utz': 180,
u'sd\xf8': 180,
u'h#l': 180,
u'gl\xf8': 179,
u'niq': 179,
u'pai': 179,
u'lyf': 179,
u'dfj': 178,
u'jad': 178,
u'fop': 178,
u'sr#': 178,
u'wic': 178,
u'fnp': 178,
u'pow': 178,
u'#mg': 178,
u'atv': 178,
u'zi#': 178,
u'#y#': 178,
u'pm#': 178,
u'uva': 177,
u'ubj': 177,
u'nd\xe5': 177,
u'\xf8rb': 177,
u'u\xe9l': 177,
u'pme': 177,
u'yno': 177,
u'avb': 177,
u'cul': 176,
u'cmi': 176,
u'brd': 176,
u'y#w': 176,
u'roh': 176,
u'ehl': 176,
u'apk': 176,
u'uem': 176,
u'ckm': 176,
u'aop': 176,
u'pua': 176,
u'j#j': 175,
u'zof': 175,
u'\xf8as': 175,
u'j\xf8\xf8': 175,
u'pt\xe6': 175,
u'sut': 175,
u'tih': 175,
u'an\xe7': 175,
u'eou': 174,
u'pee': 174,
u'iok': 174,
u'ths': 174,
u'fay': 174,
u'irp': 173,
u'wab': 173,
u'ngy': 173,
u'tbu': 173,
u'hns': 173,
u'\xf6ns': 173,
u'ilc': 173,
u'b#\xf8': 173,
u'oo#': 173,
u'd#x': 173,
u'j\xf6n': 172,
u'wad': 172,
u'v\xf8#': 172,
u'm\xe5b': 172,
u'u\xe6r': 172,
u'idk': 172,
u'rfy': 171,
u'noi': 171,
u'eep': 171,
u'ury': 171,
u'j\xf8g': 171,
u'x#b': 171,
u'idh': 171,
u'm\xe6t': 171,
u'cna': 171,
u'onh': 170,
u'kac': 170,
u'om\xe5': 170,
u'rdc': 170,
u'\xe5pa': 170,
u'ntt': 170,
u'uof': 169,
u'lce': 169,
u'wim': 169,
u'oei': 169,
u'mip': 169,
u'ouz': 169,
u'lm\xe5': 169,
u'rew': 169,
u'#ms': 169,
u'hew': 168,
u'f#z': 168,
u'pp#': 168,
u'vty': 168,
u'ggl': 168,
u'ud\xe6': 168,
u'dji': 167,
u'ovv': 167,
u'e#q': 167,
u'#b\xe9': 167,
u'tk\xf8': 167,
u'ojo': 167,
u'hod': 167,
u'x#d': 167,
u'ytm': 167,
u'idf': 167,
u'mcn': 167,
u'm\xf8o': 167,
u'\xf6tt': 167,
u'ybi': 167,
u'j#l': 166,
u'pfu': 166,
u'ofk': 166,
u'\xe7a#': 166,
u'#\xe1l': 166,
u'ahu': 166,
u'#pm': 166,
u'ph#': 166,
u'fd\xf8': 166,
u'rls': 166,
u'r\xe9#': 166,
u'msh': 166,
u'fva': 165,
u'ibb': 165,
u'eie': 165,
u'lk\xf8': 165,
u'dr\xe9': 165,
u'lsc': 165,
u'rg\xf8': 165,
u'iao': 165,
u'fc#': 165,
u'mto': 165,
u'pex': 165,
u'peb': 165,
u'usj': 165,
u'pum': 164,
u'veo': 164,
u'yaf': 164,
u'eyn': 164,
u'ch\xf6': 164,
u'\xe1lv': 164,
u'ndj': 164,
u'npi': 164,
u'fno': 164,
u'mkv': 164,
u'igj': 164,
u'#g\xfc': 163,
u'vop': 163,
u'rju': 163,
u'gym': 163,
u'#bb': 163,
u'dh#': 163,
u'sp+': 163,
u'\xe5gr': 163,
u'hyb': 163,
u'eji': 163,
u'joi': 163,
u'y#\xf8': 163,
u'zza': 163,
u'osr': 162,
u'nry': 162,
u'p\xf6t': 162,
u'w#s': 162,
u'gee': 162,
u'iia': 162,
u'#eo': 162,
u'hta': 162,
u'r\xe5o': 162,
u'l\xe5t': 162,
u'\xe6sa': 162,
u'paf': 162,
u'jva': 162,
u'bya': 162,
u'yba': 162,
u'\xfchl': 161,
u'ofy': 161,
u'km\xf8': 161,
u'v\xf8s': 161,
u'adc': 161,
u'ehy': 161,
u'yad': 161,
u'kpa': 161,
u'lya': 161,
u'adz': 160,
u'\xf6rf': 160,
u'mm\xe6': 160,
u'd\xf6r': 160,
u'#\xe5l': 160,
u'\xe5ol': 160,
u'zes': 160,
u'sld': 160,
u'ocu': 160,
u'jih': 159,
u'ivp': 159,
u'jab': 159,
u'wak': 159,
u'fbu': 159,
u'ffy': 159,
u'bc#': 159,
u'lsz': 159,
u'ld\xf6': 159,
u'mub': 159,
u'n\xf8s': 159,
u'#pc': 159,
u'ilu': 159,
u'ogv': 159,
u'zzi': 159,
u'fso': 158,
u'h\xf6r': 158,
u'rtd': 158,
u'u#\xe5': 158,
u'yts': 158,
u'\xf8ss': 158,
u'r\xeda': 158,
u'ab\xf8': 158,
u'gou': 158,
u'usn': 158,
u'byu': 158,
u'avk': 158,
u'j#r': 157,
u'vei': 157,
u'i\xe9r': 157,
u'tbl': 157,
u'\xfcle': 157,
u'oyd': 157,
u'\xe1cs': 157,
u'ewe': 157,
u'ray': 157,
u'efh': 157,
u'mfs': 157,
u'hap': 156,
u'#ff': 156,
u'ugr': 156,
u'jvi': 156,
u'lk\xe6': 156,
u'yah': 156,
u'fiu': 156,
u'mi\xe8': 156,
u'vtl': 156,
u'ttn': 156,
u'd\xe5n': 156,
u'\xe5ng': 156,
u'aal': 155,
u'adb': 155,
u'vpl': 155,
u'nir': 155,
u'd\xf8g': 155,
u'nnk': 155,
u'\xe9rr': 154,
u'\xe6ka': 154,
u'#cf': 154,
u'gnm': 154,
u'ncy': 154,
u'dcy': 154,
u'ptp': 154,
u'tky': 154,
u'aoi': 154,
u'ejv': 154,
u'aze': 153,
u'heb': 153,
u'lgb': 153,
u'sip': 153,
u'olc': 153,
u'zia': 153,
u'eof': 153,
u'cok': 153,
u'cde': 153,
u'jfo': 153,
u'fep': 153,
u'ti\xe9': 153,
u'lja': 153,
u'hah': 152,
u'cma': 152,
u'hrt': 152,
u'jvo': 152,
u'sr\xe6': 152,
u'e+#': 152,
u'hfo': 152,
u'mim': 152,
u'r\xfch': 152,
u'jom': 152,
u'ftb': 152,
u'odw': 152,
u'la\xdf': 152,
u'cow': 152,
u'z#p': 152,
u'ulu': 152,
u'biz': 152,
u'doc': 152,
u'uwa': 152,
u'cfo': 152,
u'ojv': 151,
u'uct': 151,
u'uc#': 151,
u'y#\xe6': 151,
u'cac': 151,
u'auz': 151,
u'\xf8jg': 151,
u'oil': 151,
u'vl\xf8': 151,
u'll\xe9': 151,
u'uye': 151,
u'cda': 151,
u'\xe6rj': 151,
u'tze': 150,
u'vtj': 150,
u'iu#': 150,
u'ufm': 150,
u'r\xf8k': 150,
u'mts': 150,
u'fdr': 150,
u'laj': 150,
u'#xx': 150,
u'tax': 150,
u'bbc': 150,
u'stc': 149,
u'gpl': 149,
u'kdi': 149,
u'#qa': 149,
u'shv': 149,
u'gry': 149,
u'\xe6te': 148,
u'aqu': 148,
u'nct': 148,
u'f\xfcl': 148,
u'ol\xe1': 148,
u'pt\xf8': 148,
u'h##': 148,
u'\xe6gb': 148,
u's\xf2l': 147,
u'\xf2li': 147,
u'lg\xe5': 147,
u'#aa': 147,
u'iya': 147,
u'eap': 147,
u'kmi': 147,
u'syp': 147,
u'#f\xfc': 147,
u'as\xf2': 147,
u'd#z': 147,
u'bic': 147,
u'irn': 146,
u'unm': 146,
u'ufe': 146,
u'llp': 146,
u'ouy': 146,
u'uim': 146,
u'ihl': 146,
u'uam': 146,
u'vdr': 146,
u'bik': 146,
u'mvs': 145,
u'st\xe9': 145,
u'lkv': 145,
u'ur\xf8': 145,
u'#\xb0c': 145,
u'c#c': 145,
u'ovk': 145,
u'oit': 145,
u'iir': 145,
u'\xe6vl': 145,
u'dv\xe5': 145,
u'ieh': 145,
u'wul': 145,
u'uep': 145,
u'zym': 145,
u'kk#': 145,
u'r#q': 145,
u'uon': 144,
u'mna': 144,
u'guc': 144,
u'uz\xe8': 144,
u'ym#': 144,
u'z\xe8s': 144,
u'\xf8ne': 144,
u'a\u0144s': 144,
u'ad\xe6': 144,
u'rmv': 144,
u'uak': 144,
u'hlo': 144,
u'ysp': 144,
u'udy': 143,
u'rnh': 143,
u'tms': 143,
u'ozn': 143,
u'jn#': 143,
u'gm\xe6': 143,
u'rky': 143,
u'\xfcnt': 143,
u'cko': 143,
u'lfm': 143,
u'njo': 143,
u'eb\xe5': 143,
u'dki': 143,
u'rmb': 143,
u'igf': 143,
u'\xe9ve': 142,
u'#ft': 142,
u'vsn': 142,
u'erj': 142,
u'ifl': 142,
u'\xb0c#': 142,
u'jtu': 142,
u'to\u010d': 142,
u'kde': 142,
u'g\xfcn': 142,
u't\xf8b': 142,
u'ghu': 142,
u'rh\xe6': 142,
u'cst': 142,
u'ikb': 142,
u'#pf': 142,
u'rv\xf8': 142,
u'\xf8#t': 141,
u's\xe6g': 141,
u'chf': 141,
u'kuw': 141,
u'ml#': 141,
u'asd': 141,
u'lqu': 141,
u'n\xe6#': 141,
u'zfe': 141,
u'ejg': 141,
u'pv\xe6': 141,
u'sk\xe1': 141,
u'tzf': 140,
u'w#i': 140,
u'ofm': 140,
u'\u010dni': 140,
u'ltj': 140,
u'#ll': 140,
u'acr': 140,
u'm\xe4k': 140,
u'\xe9#v': 140,
u'l#z': 140,
u'#h\xfc': 140,
u'o\u010dn': 140,
u'\xe4ki': 140,
u'gm\xe5': 140,
u'dbi': 139,
u'yr\xe5': 139,
u'aim': 139,
u'yv\xe5': 139,
u'j\xe6g': 139,
u'ov\xe9': 139,
u'zwo': 139,
u'hwi': 139,
u'ntw': 139,
u'nyi': 139,
u'icr': 139,
u't\xe9s': 138,
u'cr\xe9': 138,
u'szw': 138,
u'ch\xe1': 138,
u'sss': 138,
u'dd\xf8': 138,
u'xen': 138,
u'x#m': 138,
u'lir': 138,
u'lui': 138,
u'cof': 138,
u'vbu': 138,
u'#ml': 138,
u'bb#': 138,
u'#oi': 137,
u'irv': 137,
u'fr\xe6': 137,
u'n\xe7o': 137,
u'h\xe1v': 137,
u'laa': 137,
u'cfc': 137,
u'd\xeda': 137,
u'rwe': 136,
u'nc#': 136,
u'hna': 136,
u'aym': 136,
u'vku': 136,
u'\xf8na': 136,
u'iit': 136,
u'tsj': 136,
u'a\xdf#': 136,
u'pog': 136,
u'jsf': 136,
u'jge': 136,
u'\xe1ve': 136,
u'z#\xe1': 136,
u'ryu': 136,
u'\xe7oi': 136,
u'efj': 136,
u'\xe5zo': 135,
u'\xedaz': 135,
u'h#g': 135,
u'\xf8vu': 135,
u'vm#': 135,
u'r\xe5z': 135,
u'#l\xf3': 135,
u'\xe4rm': 135,
u'occ': 135,
u'\xedam': 134,
u'jkm': 134,
u'f\xe4r': 134,
u'uas': 134,
u'ruc': 134,
u'bep': 134,
u'\xe9gu': 134,
u't\xe9v': 133,
u'ijv': 133,
u'cm#': 133,
u'yep': 133,
u'vd#': 133,
u'#n+': 133,
u'#f\xe9': 133,
u'#vv': 133,
u'joa': 133,
u'\xf8kv': 133,
u'l\xe1n': 133,
u'b\xf8s': 133,
u'bpr': 133,
u'\xdfle': 133,
u'hdi': 133,
u'hym': 133,
u'tdr': 133,
u'd\xe9n': 133,
u'taa': 133,
u'vce': 133,
u'#oa': 132,
u'tzo': 132,
u'\xf8#v': 132,
u'oa#': 132,
u'dp#': 132,
u'#\xe5g': 132,
u'zpr': 132,
u'yop': 132,
u'dtp': 132,
u'utc': 132,
u'nay': 132,
u'r\xe5t': 132,
u'nyf': 132,
u'ra\xe7': 132,
u'##\xb0': 132,
u'cei': 131,
u'#j#': 131,
u'fni': 131,
u'#ey': 131,
u'su#': 131,
u'a\xe7a': 131,
u'\xfcbn': 131,
u'aln': 131,
u'\xe5g#': 131,
u'liz': 131,
u'h\xfcb': 131,
u'jly': 131,
u'sdn': 131,
u'hmo': 131,
u'owm': 131,
u'hej': 130,
u'wag': 130,
u'hni': 130,
u'iun': 130,
u'\xe9#b': 130,
u'mib': 130,
u'#r\xe9': 130,
u'##y': 130,
u'khm': 130,
u'\xe6mo': 130,
u'be\xe6': 130,
u'buj': 130,
u'ybk': 130,
u'faf': 130,
u'ijn': 129,
u'#fs': 129,
u'oug': 129,
u'ftu': 129,
u'akm': 129,
u'ctu': 129,
u'b\xe9g': 129,
u'sh\xf8': 129,
u'opj': 129,
u'vab': 129,
u'dn\xf8': 128,
u'rnl': 128,
u'#bg': 128,
u'rgp': 128,
u'\xf8vi': 128,
u'gr\xe4': 128,
u'ryn': 128,
u'ysg': 128,
u'arx': 128,
u'duy': 128,
u'pje': 127,
u'mry': 127,
u'fna': 127,
u'ayl': 127,
u'ld\xe6': 127,
u'tvr': 127,
u'clu': 127,
u'kyr': 127,
u'um\xe9': 127,
u'dd\xe6': 127,
u'ktv': 127,
u'zav': 127,
u'isj': 127,
u'rya': 127,
u'#dm': 127,
u'lfk': 127,
u'raa': 127,
u'rr#': 127,
u'\xe6g\xe6': 127,
u'h#c': 127,
u'cz#': 126,
u'haz': 126,
u'kj\xe6': 126,
u'azp': 126,
u'\xf8#b': 126,
u'bkj': 126,
u'syt': 126,
u'ldy': 126,
u'\xe5dr': 126,
u'\xe6et': 126,
u'vt\xe6': 126,
u'tp#': 126,
u'doe': 126,
u'zae': 126,
u'utf': 126,
u'shn': 126,
u'dmo': 126,
u'dmu': 126,
u'ivm': 125,
u'nkd': 125,
u'\xf8#u': 125,
u'o\xe5r': 125,
u'\xf3ni': 125,
u'\xf8nu': 125,
u'o#z': 125,
u'pyn': 125,
u'cts': 125,
u'ap\xe5': 125,
u'i\u0107#': 125,
u'ssf': 125,
u'uiz': 125,
u'exp': 125,
u'#h\xe9': 125,
u'ygl': 125,
u'jhe': 125,
u'\u0103u#': 125,
u'ic\xf2': 125,
u'ezb': 124,
u'xch': 124,
u'sry': 124,
u'kyp': 124,
u's\xe5b': 124,
u'ydv': 124,
u'ktj': 124,
u'opy': 124,
u'pcc': 123,
u'sfs': 123,
u'nhj': 123,
u'gtg': 123,
u'odl': 123,
u'z#n': 123,
u'b\xe6n': 123,
u'\xf6hm': 123,
u'esz': 123,
u'\xf8dg': 123,
u'unr': 123,
u'pvo': 123,
u'b\xf6h': 123,
u'cua': 122,
u'\xe5s\xe6': 122,
u'kiz': 122,
u'\xe5lv': 122,
u'to\xe5': 122,
u'bsh': 122,
u'\xe5dn': 122,
u'vi\u010d': 122,
u'phy': 122,
u'd\xf8j': 122,
u'r\xe9d': 122,
u'pfl': 121,
u'xxi': 121,
u'#fp': 121,
u'bdu': 121,
u'#vs': 121,
u'rph': 121,
u'npe': 121,
u'unu': 121,
u'x##': 121,
u'z##': 121,
u'l\xe9o': 121,
u'n\xe6e': 121,
u'f\xe9i': 121,
u'jah': 120,
u'nk\xf8': 120,
u'ezn': 120,
u'hv#': 120,
u'aux': 120,
u'cum': 120,
u'm\xe5f': 120,
u'tkl': 120,
u'e\xf8k': 120,
u'#iu': 120,
u'd\xe9s': 120,
u'u#\xe6': 120,
u'sps': 120,
u'woh': 120,
u'#hh': 120,
u'zne': 120,
u'abn': 120,
u'ybf': 120,
u'\xe9ry': 119,
u'umy': 119,
u'srp': 119,
u'\xedez': 119,
u'iuf': 119,
u't\xe6v': 119,
u'oac': 119,
u'tow': 119,
u'ltf': 119,
u'hhv': 119,
u'egt': 119,
u'inw': 119,
u'fh\xf8': 119,
u'tm\xf8': 119,
u'd\xede': 119,
u'efm': 119,
u'ebn': 119,
u'\xe9in': 119,
u'm\xe9s': 118,
u'sms': 118,
u'ckt': 118,
u'ktd': 118,
u'n\xe6k': 118,
u'ksb': 118,
u'ocd': 118,
u'#ox': 118,
u'#dd': 118,
u'h#j': 118,
u'wea': 117,
u'lki': 117,
u'jbe': 117,
u'gdi': 117,
u'\xe9de': 117,
u'yce': 117,
u'yrb': 117,
u'jlp': 117,
u'#l#': 117,
u'#kf': 117,
u'sz#': 116,
u'rjo': 116,
u'vdi': 116,
u'rgk': 116,
u'g#x': 116,
u'ehi': 116,
u'lhi': 116,
u'xis': 116,
u'ttw': 116,
u'rlh': 116,
u'nyr': 116,
u'pao': 116,
u'usv': 116,
u'aol': 116,
u'\xe6pr': 115,
u'of\xf8': 115,
u'\xf8#k': 115,
u'lk\xe4': 115,
u'gai': 115,
u'#vm': 115,
u'ipt': 115,
u'\xe9si': 115,
u'bmu': 115,
u'mk\xf8': 115,
u'a\xf1e': 114,
u'k\xe4m': 114,
u'stz': 114,
u'r\xe3o': 114,
u'fmo': 114,
u'ady': 114,
u'gtd': 114,
u'jsv': 114,
u'ylv': 114,
u'cdo': 114,
u'\xe4mp': 114,
u'ccs': 114,
u'z#j': 114,
u'sng': 114,
u'nyb': 114,
u'#dc': 114,
u'ocp': 114,
u'i\u010d#': 114,
u't\xe5g': 113,
u'umg': 113,
u'noo': 113,
u'ffn': 113,
u'cud': 113,
u'mee': 113,
u'sb\xe5': 113,
u'iif': 113,
u'jsd': 113,
u'ouk': 113,
u'\xe5gi': 113,
u'\xe9#t': 113,
u'idy': 113,
u'h#u': 113,
u'hev': 112,
u'rff': 112,
u'#j\xe1': 112,
u'c#p': 112,
u'pko': 112,
u'vu#': 112,
u'yol': 112,
u'otn': 112,
u'\xf8\xf8d': 112,
u'fax': 112,
u'\xe9ra': 111,
u'w#m': 111,
u'vec': 111,
u'd\xe6d': 111,
u'doo': 111,
u'orw': 111,
u'mif': 111,
u'poz': 111,
u'#gc': 111,
u'bhu': 111,
u'btf': 111,
u'zab': 111,
u'ikn': 111,
u'\xe6gr': 111,
u'avu': 111,
u'sk\xe9': 111,
u'irc': 110,
u'ezi': 110,
u'qvi': 110,
u'gcc': 110,
u'\xf8#p': 110,
u'+re': 110,
u'kec': 110,
u'jib': 110,
u'nsj': 110,
u'oas': 110,
u'#ew': 110,
u'mup': 110,
u'\xe1l#': 110,
u'uum': 110,
u'\xf8r\xf8': 110,
u'ddg': 110,
u'aky': 110,
u'nby': 110,
u'zag': 110,
u'csu': 110,
u'nur': 110,
u'bf\xf8': 110,
u'dqu': 110,
u'h\xe9r': 109,
u'\xe9ne': 109,
u'db#': 109,
u'tby': 109,
u'#f\xe4': 109,
u'im\xe9': 109,
u'uya': 109,
u'uqu': 109,
u'\xf8kd': 109,
u'itg': 109,
u'b\xe1n': 109,
u'ktb': 109,
u'aob': 109,
u'x#v': 109,
u'ruh': 109,
u'mg#': 109,
u'pah': 109,
u'skb': 109,
u'hep': 108,
u'nvu': 108,
u'kr\xe5': 108,
u'c#k': 108,
u'dr\xed': 108,
u'jfe': 108,
u'\xf8nk': 108,
u'rcz': 108,
u'ahs': 108,
u'eht': 108,
u'ylk': 108,
u'uec': 108,
u'z#k': 108,
u'#\u0161e': 108,
u'okp': 108,
u'ujo': 108,
u'#mb': 108,
u'cut': 107,
u'w#f': 107,
u'\xedgu': 107,
u'muh': 107,
u'ouf': 107,
u'yul': 107,
u'utj': 107,
u'otf': 107,
u'r\xedg': 107,
u'daw': 107,
u'ssm': 107,
u'igk': 107,
u'riq': 107,
u'\xe6gh': 107,
u'mbs': 106,
u'#cm': 106,
u'scl': 106,
u'i\xe6t': 106,
u'aeu': 106,
u'fom': 106,
u'eig': 106,
u'gya': 106,
u'bov': 106,
u'iid': 106,
u'crs': 106,
u'ydf': 106,
u'mpd': 106,
u'wom': 106,
u'hla': 106,
u'zys': 106,
u'\xf8dm': 106,
u'vam': 106,
u'frs': 105,
u'k\xe1#': 105,
u'rfj': 105,
u'eio': 105,
u'ncs': 105,
u'g\xe6t': 105,
u'#fd': 105,
u'ohj': 105,
u'qat': 105,
u'xit': 105,
u'k\xf8#': 105,
u'b\xe5l': 105,
u'aov': 105,
u'oxe': 105,
u'\xe6ju': 105,
u'is\xf3': 105,
u'tyf': 105,
u'e\xe6n': 105,
u'lyu': 105,
u'pia': 105,
u'r\xe6j': 105,
u'efy': 105,
u'cza': 104,
u'rnm': 104,
u'eeb': 104,
u'fsf': 104,
u'pcb': 104,
u'ldc': 104,
u'm\xe5p': 104,
u'xia': 104,
u'idp': 104,
u'#tw': 104,
u'ssn': 104,
u'h#r': 104,
u'n+r': 104,
u'jeh': 103,
u'rb\xe1': 103,
u'nkg': 103,
u'k#w': 103,
u'kaz': 103,
u'zle': 103,
u'sft': 103,
u'#md': 103,
u'\xf8rf': 103,
u'ydp': 103,
u'\xe6mt': 103,
u'idg': 103,
u'rex': 103,
u'k\xe6p': 103,
u'm\xf8b': 103,
u'byb': 103,
u'bbr': 103,
u'kfi': 102,
u'ija': 102,
u'zco': 102,
u'wap': 102,
u'foc': 102,
u'ymt': 102,
u'dtf': 102,
u'boh': 102,
u'kmu': 102,
u'\xe5lo': 102,
u'uek': 102,
u'pvi': 102,
u'bao': 102,
u'hde': 102,
u'\xe5b\xf8': 102,
u'\xe9on': 102,
u'tay': 102,
u'ir\xf3': 101,
u'fm\xe6': 101,
u'k\xe9#': 101,
u'wib': 101,
u'eyt': 101,
u'ch\xe9': 101,
u'\xf8bl': 101,
u'ub\xe6': 101,
u'bsy': 101,
u'ehd': 101,
u'ylt': 101,
u'#g\xe1': 101,
u'#gf': 101,
u'xii': 101,
u'glu': 101,
u'uaa': 101,
u'pue': 101,
u'oxf': 101,
u'okv': 101,
u'r\xe9l': 101,
u'#x#': 101,
u'caf': 101,
u'czo': 100,
u'sm#': 100,
u'gfa': 100,
u'pfe': 100,
u'ofb': 100,
u'amd': 100,
u'am\xe6': 100,
u'g\xe6i': 100,
u'iuu': 100,
u'\xe7an': 100,
u'kuu': 100,
u'js\xe6': 100,
u's\xf8h': 100,
u'#mh': 100,
u'acy': 100,
u'eod': 100,
u'th\xe9': 100,
u'n#x': 100,
u'tym': 100,
u'fu#': 100,
u'piq': 100,
u'irw': 99,
u'ffu': 99,
u'ees': 99,
u'eee': 99,
u'ur\xe7': 99,
u'twi': 99,
u'bsc': 99,
u'r\xf8e': 99,
u'\xe5dl': 99,
u'\xe4\xdfl': 99,
u'exs': 99,
u'r\xe4\xdf': 99,
u'i#q': 99,
u't\xf3n': 99,
u'#yu': 99,
u'x#p': 99,
u'ms\xf8': 99,
u'ejo': 99,
u'gkl': 99,
u'oky': 99,
u'\xf8ma': 98,
u'\u0151ri': 98,
u'lgh': 98,
u'loi': 98,
u'r\xe7a': 98,
u'kaa': 98,
u'gy\u0151': 98,
u'fdi': 98,
u'h\xf6k': 98,
u'#\u0163i': 98,
u'\xf6rt': 98,
u'vib': 98,
u'vim': 98,
u'\xe9#k': 98,
u'ed\xe6': 98,
u'y\u0151r': 98,
u'\xf6km': 98,
u'yoe': 98,
u'\u0163ic': 98,
u'sj\xf8': 98,
u'\xe9l\xe9': 98,
u'nyu': 98,
u'moi': 98,
u'r\xe9u': 98,
u'xbe': 98,
u'p#j': 98,
u'lbl': 98,
u'vvr': 98,
u'hio': 98,
u'#sb': 97,
u'c\u0103u': 97,
u'clo': 97,
u'toh': 97,
u'pkt': 97,
u'ylb': 97,
u'ttg': 97,
u'exu': 97,
u'rdq': 97,
u'nt\xf3': 97,
u'ot\xf8': 97,
u'ic\u0103': 97,
u'r#\xf6': 97,
u'#oj': 96,
u'dju': 96,
u'yrp': 96,
u'aik': 96,
u'cay': 96,
u'lox': 96,
u'eej': 96,
u'wi#': 96,
u'fnt': 96,
u'\xe9un': 96,
u'g\xe1l': 96,
u'siu': 96,
u'ouv': 96,
u'voo': 96,
u'umv': 96,
u'ssr': 96,
u'tt\xf8': 96,
u'#lv': 96,
u'\xe6vt': 96,
u'kp\xe5': 96,
u'lij': 96,
u'fdg': 96,
u'gg#': 96,
u'lju': 96,
u'zol': 95,
u'p\xf6r': 95,
u'c\xf2#': 95,
u'uny': 95,
u'\xf8jb': 95,
u'ts\xf8': 95,
u'ohu': 95,
u'\xe5ly': 95,
u'gtu': 95,
u'\xe6vv': 95,
u'\xe6kh': 95,
u'fk\xf8': 95,
u'mdo': 95,
u'ctb': 95,
u'\xf8np': 95,
u'r\xe6i': 95,
u'kge': 95,
u'#tb': 95,
u'p\xe5n': 95,
u'onw': 94,
u'jtj': 94,
u'eip': 94,
u'gmi': 94,
u'\xe6f#': 94,
u'dcl': 94,
u'fkv': 94,
u'oln': 94,
u'u#x': 94,
u'n\xe5l': 94,
u'hly': 94,
u'mge': 94,
u'dib': 94,
u'u\xf8k': 94,
u'lyi': 94,
u't\xe0#': 94,
u'tzs': 93,
u'\xe6tb': 93,
u'sgy': 93,
u'm\xe6k': 93,
u'ps\xf8': 93,
u'sjt': 93,
u'l\xf8e': 93,
u't\xe6k': 93,
u'gay': 93,
u'\xe6um': 93,
u'tgi': 93,
u'iph': 93,
u'\xf8ku': 93,
u'akb': 93,
u'niz': 93,
u'\xf8gf': 93,
u'#ld': 93,
u'ulr': 93,
u'fud': 93,
u'an\xe6': 93,
u'rij': 93,
u'#kk': 93,
u'l\xe6u': 93,
u'h\xe9n': 92,
u'w#b': 92,
u'k\xe9e': 92,
u'mro': 92,
u'xfo': 92,
u'v\xe1#': 92,
u'al\xe0': 92,
u'cdu': 92,
u'ol\xe6': 92,
u'uev': 92,
u'sze': 92,
u'ruz': 92,
u'z#l': 92,
u'#l\xe9': 92,
u'vna': 92,
u'tdy': 92,
u'\xe1ne': 92,
u'nb\xf8': 92,
u't#x': 91,
u'gfi': 91,
u'cim': 91,
u'w#t': 91,
u'nkm': 91,
u'oy#': 91,
u's\xe5e': 91,
u'ttt': 91,
u'rdw': 91,
u'ckb': 91,
u'ktt': 91,
u'\xe5kl': 91,
u'nih': 91,
u'#h\xf6': 91,
u'\xe1bo': 91,
u'f\xe5s': 91,
u'goy': 91,
u'ssb': 91,
u'd\xe5s': 91,
u'jrh': 90,
u's\xe6k': 90,
u'oiv': 90,
u'nwa': 90,
u'icz': 90,
u'kdo': 90,
u'ptu': 90,
u'mci': 90,
u's\xf3n': 90,
u'da\u0144': 90,
u'nup': 90,
u'\xe5fi': 90,
u'#xv': 89,
u'\xe5#z': 89,
u'\xf8jp': 89,
u'\xe6#f': 89,
u'nh\xe5': 89,
u'\xf8ra': 89,
u'upf': 89,
u'z\xe1b': 89,
u'\xe9tr': 89,
u'cog': 89,
u'flp': 89,
u'vng': 89,
u'og\xe5': 89,
u'vnk': 89,
u'avv': 89,
u'#cb': 88,
u'g\xf6r': 88,
u'w#a': 88,
u'ca\xf1': 88,
u'rsj': 88,
u'yat': 88,
u'\xf1et': 88,
u'kdr': 88,
u'cte': 88,
u'zic': 88,
u'lix': 88,
u'z#z': 88,
u'dai': 88,
u'udu': 88,
u'rmd': 88,
u'efb': 88,
u'kbr': 88,
u'nzy': 87,
u'tvp': 87,
u'cib': 87,
u'evv': 87,
u'lzb': 87,
u'w#e': 87,
u'c#g': 87,
u'nwi': 87,
u'ppu': 87,
u'kda': 87,
u'#mr': 87,
u's#z': 87,
u'etw': 87,
u'akp': 87,
u'jpr': 87,
u'dp\xe5': 87,
u'cse': 87,
u'feo': 87,
u'ejf': 87,
u'p+#': 87,
u'rb\xe5': 86,
u'mva': 86,
u'c#r': 86,
u'ub\xe5': 86,
u'oau': 86,
u'ubp': 86,
u'kuj': 86,
u'shk': 86,
u'\xe5du': 86,
u'ouw': 86,
u'upi': 86,
u'\xe1cl': 86,
u'axi': 86,
u'ckp': 86,
u'bti': 86,
u'koi': 86,
u'hib': 86,
u'\xe6k\xf8': 85,
u'#oh': 85,
u'wro': 85,
u'ccc': 85,
u'wet': 85,
u'rng': 85,
u'azl': 85,
u'eey': 85,
u'i\xe9t': 85,
u'lwa': 85,
u'iua': 85,
u'#z\xe1': 85,
u'bs\xf8': 85,
u'tku': 85,
u'al\xe9': 85,
u'yev': 85,
u'ccp': 85,
u'\xe1s#': 85,
u'awe': 85,
u'shy': 85,
u'otc': 85,
u'd\xf8b': 85,
u'bma': 85,
u'l\xe9r': 85,
u'pm\xe5': 85,
u'pma': 85,
u'feh': 85,
u'owo': 85,
u'djo': 84,
u'\xf8mf': 84,
u'aad': 84,
u'j\xe1n': 84,
u'\xf6ms': 84,
u'erw': 84,
u'rgb': 84,
u'#fb': 84,
u'vld': 84,
u'\xf6re': 84,
u'\xe5lp': 84,
u'+##': 84,
u'ndq': 84,
u'um\xf8': 84,
u'rxi': 84,
u'i\xf3n': 84,
u'yos': 84,
u'goz': 84,
u'mox': 84,
u'vnb': 84,
u'fue': 84,
u'ewt': 84,
u'buf': 84,
u'piz': 84,
u'aib': 84,
u'oct': 84,
u'rr\xf3': 84,
u'lvp': 84,
u'irf': 83,
u'eza': 83,
u'fsm': 83,
u'v\xf8e': 83,
u'sb#': 83,
u'vly': 83,
u'eux': 83,
u'gtl': 83,
u'atj': 83,
u'n\xe9#': 83,
u'\xe6ms': 83,
u'xud': 83,
u'tl#': 83,
u'mpv': 83,
u'st\xe0': 83,
u'\u0142aw': 83,
u'ik\xe9': 83,
u'fut': 83,
u'pi\xe9': 83,
u'\xf8do': 83,
u'huh': 83,
u'kav': 83,
u'wnl': 83,
u'fea': 83,
u'avc': 83,
u'x#k': 82,
u'ojc': 82,
u's\xf6r': 82,
u'umd': 82,
u'nr\xf8': 82,
u'yab': 82,
u'eaz': 82,
u'hno': 82,
u'#n#': 82,
u'\u010dek': 82,
u'\xe5h\xe6': 82,
u'iaa': 82,
u'tk\xe6': 82,
u'sui': 82,
u'#v\xe1': 82,
u'dh\xe5': 82,
u'ftd': 82,
u'x#t': 82,
u'lu#': 82,
u'fum': 82,
u'ksw': 82,
u'dyk': 82,
u'vaf': 82,
u'jeu': 81,
u'nof': 81,
u'fpo': 81,
u'tb#': 81,
u'fsv': 81,
u'bof': 81,
u'rox': 81,
u'ltl': 81,
u'\xf8ro': 81,
u'acz': 81,
u'\xe5te': 81,
u'zir': 81,
u'cdl': 81,
u'#hn': 81,
u'nyk': 81,
u'd\xf8e': 81,
u'\xf8da': 81,
u'\u010dov': 81,
u'efk': 81,
u'\xe4l\xe4': 80,
u'k\xe5l': 80,
u'wuo': 80,
u'oid': 80,
u'vto': 80,
u'\u0161ef': 80,
u'uuf': 80,
u'j\xf8n': 80,
u'it\xf8': 80,
u'il\xe9': 80,
u'woj': 80,
u'\xe6s\xe6': 80,
u'pab': 80,
u'pip': 80,
u'bbi': 80,
u'#qi': 79,
u'ay\xe1': 79,
u'feu': 79,
u'suh': 79,
u'##w': 79,
u'f\u010do': 79,
u'atg': 79,
u'ttu': 79,
u'cp#': 79,
u'#yi': 79,
u'plp': 79,
u'b#c': 79,
u'ewo': 79,
u'ef\u010d': 79,
u'#tf': 79,
u'uol': 78,
u'gf\xe6': 78,
u'sz\xe1': 78,
u'gbi': 78,
u'oe#': 78,
u'adj': 78,
u'kuc': 78,
u'ppm': 78,
u'uus': 78,
u'oux': 78,
u'#gk': 78,
u'blj': 78,
u'mpf': 78,
u'u\xe9#': 78,
u'naa': 78,
u'ygu': 78,
u'tye': 78,
u'ejb': 78,
u'l\xe6e': 78,
u'vou': 77,
u'psf': 77,
u'fog': 77,
u'oig': 77,
u'gaw': 77,
u'lbo': 77,
u'ldd': 77,
u'ubk': 77,
u'rkk': 77,
u'xi#': 77,
u'ch\xe4': 77,
u'nao': 77,
u'cdr': 77,
u'pyd': 77,
u'#lj': 77,
u'mca': 77,
u'ik\xe6': 77,
u'\xf8dk': 77,
u'p#c': 77,
u'p#\xf8': 77,
u'rr\xf8': 77,
u'cru': 77,
u'dgo': 76,
u'ukv': 76,
u'\xe6ko': 76,
u'nrw': 76,
u'r\xf3#': 76,
u'umn': 76,
u'eif': 76,
u'ayi': 76,
u'\xe6#s': 76,
u'rcq': 76,
u'r\xfcm': 76,
u'cdm': 76,
u'lty': 76,
u'jkv': 76,
u'jci': 76,
u'la\xef': 76,
u'jlr': 76,
u'#xy': 76,
u'dno': 75,
u'#g\xe9': 75,
u'ofu': 75,
u'wem': 75,
u'dqv': 75,
u'adh': 75,
u'ke\xf8': 75,
u'c#l': 75,
u'\xe6fa': 75,
u'oim': 75,
u'oic': 75,
u'exm': 75,
u'\xe6kt': 75,
u'l\xe0#': 75,
u'ctr': 75,
u'ae#': 75,
u'ygs': 75,
u'b\xe6g': 75,
u'icl': 75,
u'pr\xfc': 75,
u'fah': 75,
u'#dt': 75,
u'urj': 75,
u'nzo': 74,
u'byc': 74,
u'zki': 74,
u'xvi': 74,
u'z\xe1j': 74,
u'ovc': 74,
u'ku#': 74,
u'im\xe5': 74,
u'poc': 74,
u'aa#': 74,
u'#rp': 74,
u'ixe': 74,
u'\xe5ko': 74,
u'fox': 74,
u'a\xef#': 74,
u'#aq': 74,
u'odh': 74,
u'tno': 74,
u'pev': 74,
u'\xe1je': 74,
u'pib': 74,
u'zbu': 74,
u'pmi': 74,
u'cbe': 74,
u'tew': 74,
u'nfu': 74,
u'tza': 73,
u'of\xe6': 73,
u'lca': 73,
u'ffl': 73,
u'zsi': 73,
u'yya': 73,
u'wye': 73,
u'y\xe1#': 73,
u'ubt': 73,
u'joe': 73,
u'vi\xe6': 73,
u'#pg': 73,
u'cty': 73,
u'p\xf8l': 73,
u'ups': 73,
u'is\xf8': 73,
u'n#q': 73,
u'okm': 73,
u'\xf8du': 73,
u'gow': 73,
u'\xf8ti': 73,
u'ar\xf8': 73,
u'hii': 73,
u'lmk': 72,
u'czy': 72,
u'yfa': 72,
u'yf\xf8': 72,
u'osw': 72,
u'ezz': 72,
u'rnk': 72,
u'#s\xf3': 72,
u'hse': 72,
u'cct': 72,
u'rpi': 72,
u've\xe5': 72,
u'ohi': 72,
u'pdo': 72,
u'npt': 72,
u'#yp': 72,
u'ntj': 72,
u'hba': 72,
u'\xe9ch': 72,
u'xyn': 72,
u'toy': 72,
u'ir\xe5': 71,
u'tr\xfc': 71,
u'lza': 71,
u'tj#': 71,
u'lo\u015f': 71,
u'\xf8#l': 71,
u'rwi': 71,
u'tft': 71,
u'#wr': 71,
u'omc': 71,
u'kic': 71,
u'boz': 71,
u'#cz': 71,
u'dci': 71,
u'vvd': 71,
u'r\xfcp': 71,
u'jec': 71,
u'uyg': 71,
u'h\xe6g': 71,
u'fty': 71,
u'hpr': 71,
u'mth': 71,
u'htf': 71,
u'#\xe0#': 71,
u'kgb': 71,
u'tup': 71,
u'\xfcpe': 71,
u'lci': 71,
u'o\u015f#': 71,
u'gvo': 71,
u'byk': 71,
u'vge': 70,
u'soa': 70,
u'\xe9vi': 70,
u'bj\xe6': 70,
u'w#h': 70,
u'o#w': 70,
u'adp': 70,
u'\xe5ln': 70,
u'imc': 70,
u'\xe9af': 70,
u'uh\xe5': 70,
u'\xe9##': 70,
u'si\xe8': 70,
u'sii': 70,
u'\xf8r\xe5': 70,
u'ol\xe5': 70,
u'\xf8sy': 70,
u'sez': 70,
u'phd': 70,
u'gsz': 70,
u'vba': 70,
u'vfu': 70,
u'mcd': 70,
u'gtc': 70,
u'mku': 70,
u'lyc': 70,
u'en\xe9': 70,
u'csa': 70,
u'\xe6gp': 70,
u'\xf3cr': 69,
u'vs\xf8': 69,
u'brn': 69,
u'wip': 69,
u'rs\xe5': 69,
u'#np': 69,
u'gec': 69,
u'eyh': 69,
u'h\xe6s': 69,
u'\xe9ma': 69,
u'\xe6#e': 69,
u'wl#': 69,
u'#m\xe1': 69,
u'khi': 69,
u'u#\u0163': 69,
u'jgo': 69,
u'\xe5ti': 69,
u'mho': 69,
u'mph': 69,
u'dtt': 69,
u'otv': 69,
u'oov': 69,
u's\xf3c': 69,
u'uji': 69,
u'#xe': 69,
u'ocy': 69,
u'eyk': 69,
u'jut': 69,
u'\xf3ka': 69,
u'vof': 68,
u'\xe5m\xe6': 68,
u'\xf8ef': 68,
u'i\xf1o': 68,
u'rnf': 68,
u'mr#': 68,
u'tfe': 68,
u'wik': 68,
u'ur\xe3': 68,
u'omy': 68,
u'na\u0144': 68,
u'ezc': 68,
u'chd': 68,
u'vda': 68,
u'\xe6#i': 68,
u'vpu': 68,
u'm\xe1n': 68,
u'uyt': 68,
u'it\u0117': 68,
u'ltk': 68,
u'\xf3#n': 68,
u'g\xe9r': 68,
u'\xe6r\xf8': 68,
u'pdy': 68,
u'xag': 68,
u'odm': 68,
u'r\xf3k': 68,
u'ihu': 68,
u'fnf': 68,
u'phr': 68,
u'xet': 68,
u'\xe1r\xf3': 68,
u'jtk': 68,
u'uwm': 68,
u'j\xe1r': 68,
u'kgr': 68,
u'\xf8la': 68,
u'dah': 68,
u'nji': 68,
u'\xeds#': 68,
u'pmo': 68,
u'eju': 68,
u'h\xe4f': 68,
u'av\xf8': 68,
u'lvc': 68,
u'#cv': 67,
u'df#': 67,
u'#zh': 67,
u'ls\xe5': 67,
u'oie': 67,
u'pc#': 67,
u'baz': 67,
u'lld': 67,
u'llb': 67,
u'jsa': 67,
u'ylu': 67,
u'myk': 67,
u'hoe': 67,
u'l\xf3#': 67,
u'xas': 67,
u'\xe8ne': 67,
u'laz': 67,
u'yka': 67,
u'spd': 67,
u'#uj': 67,
u'rye': 67,
u'\xe6in': 67,
u'\xe6gf': 67,
u'\xe9ri': 66,
u'bnf': 66,
u'\xe6ol': 66,
u'szy': 66,
u'psh': 66,
u'\xe4tt': 66,
u'#w#': 66,
u'xha': 66,
u'cq#': 66,
u'uj\xe6': 66,
u'pci': 66,
u't\xe6b': 66,
u'bwi': 66,
u'\xe5ll': 66,
u'pkv': 66,
u'g\xe5d': 66,
u'ehv': 66,
u'qim': 66,
u'pdi': 66,
u'zig': 66,
u'ihr': 66,
u'zei': 66,
u'fls': 66,
u'ik\xf8': 66,
u'ewm': 66,
u'byf': 66,
u'\xf1o#': 65,
u'\u0151ry': 65,
u'ciu': 65,
u'ogd': 65,
u'obt': 65,
u'\xe5ag': 65,
u'wue': 65,
u'ayy': 65,
u'sb\xe6': 65,
u'ad\xe9': 65,
u'\xe5he': 65,
u'uys': 65,
u'#r#': 65,
u'tgo': 65,
u'vme': 65,
u'lm\xf8': 65,
u'egs': 65,
u'jdi': 65,
u'afc': 65,
u'#xa': 65,
u'nuo': 65,
u'ejh': 65,
u'lna': 65,
u'#td': 65,
u'p\xe5a': 65,
u'us\xf8': 65,
u'tjo': 65,
u'xtr': 64,
u'scb': 64,
u'\xf8ms': 64,
u'zku': 64,
u'gb#': 64,
u'\xf8##': 64,
u'ozk': 64,
u'wid': 64,
u'#fm': 64,
u'\xf6rg': 64,
u'+#s': 64,
u'dih': 64,
u'\xe5h#': 64,
u'suy': 64,
u'l\xe4#': 64,
u'sa\xef': 64,
u'cbr': 64,
u'#uy': 64,
u'ruo': 64,
u'shd': 64,
u'\xf8s\xe6': 64,
u'\xf8\xf8k': 64,
u'diy': 64,
u'moa': 64,
u'afn': 64,
u'uhu': 64,
u'juv': 64,
u'\xf8ar': 63,
u'wed': 63,
u'oze': 63,
u'yam': 63,
u'auq': 63,
u'om\xe1': 63,
u'ayu': 63,
u'\xf8ji': 63,
u's\xe9v': 63,
u'a\u0144#': 63,
u'+#o': 63,
u'g#+': 63,
u'rt\xe5': 63,
u'\xe6ku': 63,
u'zit': 63,
u'zat': 63,
u'\xe9#u': 63,
u'naw': 63,
u'esj': 63,
u'k\xe6o': 63,
u'jua': 63,
u'#k\xe5': 63,
u'lru': 62,
u'sgl': 62,
u'nv\xe5': 62,
u'hrs': 62,
u'hrc': 62,
u'i\u015fi': 62,
u'rj\xe1': 62,
u'asq': 62,
u'ppc': 62,
u'dod': 62,
u'xfa': 62,
u'vdy': 62,
u'max': 62,
u'kmt': 62,
u'rgf': 62,
u'adk': 62,
u'dcp': 62,
u'hka': 62,
u'm\xe9a': 62,
u'\u015fin': 62,
u'ttv': 62,
u'l\xe1\u0161': 62,
u'exf': 62,
u'uau': 62,
u'ykb': 62,
u'tlu': 62,
u'wns': 62,
u'\xe6sl': 62,
u'fuj': 62,
u't\u0117#': 62,
u'\xe5ni': 62,
u'vvm': 62,
u'hi\u015f': 62,
u'rzy': 61,
u'yrv': 61,
u'lfy': 61,
u'waa': 61,
u'c#u': 61,
u'muj': 61,
u't\xe5h': 61,
u'ctp': 61,
u'vm\xe5': 61,
u'iho': 61,
u'gly': 61,
u'mtu': 61,
u'jd#': 61,
u'h#w': 61,
u'bno': 60,
u'haw': 60,
u'uk\xe6': 60,
u'jtp': 60,
u'lfg': 60,
u'vey': 60,
u'n\u0103u': 60,
u'fpm': 60,
u'rja': 60,
u'#j\xe4': 60,
u'doz': 60,
u'cup': 60,
u'rgn': 60,
u'uic': 60,
u'lhy': 60,
u'lpu': 60,
u'lph': 60,
u'u#q': 60,
u'bho': 60,
u'psd': 60,
u'kt\xe6': 60,
u'kxb': 60,
u'vbo': 60,
u'cdi': 60,
u'in\u0103': 60,
u'pyt': 60,
u'ygm': 60,
u'nyn': 60,
u'dex': 60,
u'af\xe6': 60,
u'bub': 60,
u'vv\xe6': 60,
u'gl\xe5': 60,
u'hua': 60,
u'nbh': 60,
u'\xe6gy': 60,
u'tr#': 59,
u'pfr': 59,
u'auc': 59,
u'yei': 59,
u'uj#': 59,
u'dg#': 59,
u'eqf': 59,
u'nh#': 59,
u'ehm': 59,
u'jsl': 59,
u'jsm': 59,
u'gtj': 59,
u'voc': 59,
u'acu': 59,
u'exe': 59,
u'th\xf8': 59,
u'asg': 59,
u'r\xe5p': 59,
u'upm': 59,
u'vfa': 59,
u'uav': 59,
u'e\xe6g': 59,
u'xbc': 59,
u'csh': 59,
u'#db': 59,
u'cf#': 59,
u'kfr': 58,
u'nzb': 58,
u'fr\xe5': 58,
u'#\xf3#': 58,
u'ci\xf3': 58,
u'szk': 58,
u'ucs': 58,
u'er+': 58,
u'eyp': 58,
u'bko': 58,
u'uf\xe6': 58,
u'ufs': 58,
u'#\xa3#': 58,
u'gm#': 58,
u'nhr': 58,
u'ltd': 58,
u'xiv': 58,
u'cdc': 58,
u'ipb': 58,
u'ptj': 58,
u'dbs': 58,
u'ekd': 58,
u'nm\xe4': 58,
u'j\xe4\xe4': 58,
u'xer': 58,
u'utk': 58,
u'hls': 58,
u'dm#': 58,
u'\xe6sb': 58,
u'jd\xe6': 58,
u'kow': 58,
u'sdu': 58,
u'n\xe6t': 58,
u'ysc': 58,
u'thj': 58,
u'lnu': 58,
u'\xe4\xe4t': 58,
u'jma': 57,
u'#cp': 57,
u'c#n': 57,
u'utl': 57,
u'iul': 57,
u'chk': 57,
u'ahy': 57,
u'\xe9#n': 57,
u'hga': 57,
u'veu': 57,
u'sae': 57,
u'mlo': 57,
u'g\xf8g': 57,
u's\xe5m': 57,
u'shg': 57,
u'nex': 57,
u'baa': 57,
u'tuv': 57,
u'x#n': 56,
u'j\xf6r': 56,
u'uob': 56,
u'#fg': 56,
u'cag': 56,
u'nod': 56,
u'#\u0151r': 56,
u'tb\xe5': 56,
u'om\xe4': 56,
u'muf': 56,
u'llv': 56,
u'iaz': 56,
u'm\xe1r': 56,
u'dhr': 56,
u'coi': 56,
u'l\xe9c': 56,
u'esq': 56,
u'nub': 56,
u'sc#': 55,
u'rfs': 55,
u'pf#': 55,
u'sgu': 55,
u'\xeffi': 55,
u'szl': 55,
u'ogy': 55,
u'aie': 55,
u'\xe0#l': 55,
u'qf#': 55,
u'elj': 55,
u'n\xf8e': 55,
u'hsy': 55,
u'\xe9e#': 55,
u'suf': 55,
u'\xe9#l': 55,
u'\xe9#g': 55,
u'##\xe9': 55,
u'ndc': 55,
u'a\xeff': 55,
u'it\xe4': 55,
u'rdd': 55,
u'#hc': 55,
u'i\xe3o': 55,
u'nqu': 55,
u'ysl': 55,
u'gce': 55,
u'thm': 55,
u'vv#': 55,
u'rnr': 55,
u's\xe3o': 55,
u'\xe1ri': 55,
u'hiq': 55,
u'yby': 55,
u'kfa': 54,
u'ivv': 54,
u'\xf8mr': 54,
u'aam': 54,
u'aep': 54,
u'rnv': 54,
u'obk': 54,
u'jrs': 54,
u'#s\xe9': 54,
u'gyi': 54,
u'kio': 54,
u'v\xf8r': 54,
u'\xe6#m': 54,
u'#v\xe9': 54,
u'a\xefr': 54,
u'ipf': 54,
u'ohe': 54,
u'kpl': 54,
u'zeg': 54,
u'l\xfcg': 54,
u'b#y': 54,
u'cgr': 54,
u'ruv': 54,
u'z#u': 54,
u'\xfcge': 54,
u'#l\xe1': 54,
u'l\xe1s': 54,
u'd\xf8t': 54,
u'n\xe6b': 54,
u'r\xfan': 54,
u'r#x': 54,
u'rrr': 54,
u'yfr': 53,
u'zos': 53,
u't#q': 53,
u'mnu': 53,
u'\u0161em': 53,
u'h\xe9e': 53,
u't\xe1#': 53,
u'dze': 53,
u'dth': 53,
u'mey': 53,
u'xmi': 53,
u'#bp': 53,
u'n\xfcr': 53,
u'\xe9et': 53,
u'\xf8rp': 53,
u'hoa': 53,
u'\xe4fe': 53,
u'le\xe5': 53,
u'zil': 53,
u'lax': 53,
u'kch': 53,
u'pvv': 53,
u'fl\xfc': 53,
u't\xe4l': 53,
u'afu': 53,
u'ks\xe5': 53,
u'fyt': 53,
u'ocb': 53,
u'm\xe3o': 53,
u'\xe1ny': 53,
u'ti\xe8': 53,
u'tij': 53,
u'#p\xf8': 53,
u'us\xed': 53,
u'ybo': 53,
u'on\xf8': 52,
u'\xe1sz': 52,
u'nkb': 52,
u'as\xe5': 52,
u'psr': 52,
u'\xe9di': 52,
u'eez': 52,
u'f#q': 52,
u'xsl': 52,
u'oia': 52,
u'\xe6lj': 52,
u'n\xf8l': 52,
u'g#q': 52,
u'\xf6ra': 52,
u'acf': 52,
u'th\xe5': 52,
u'zah': 52,
u'li\xe8': 52,
u'oxb': 52,
u'pyo': 52,
u'di\u0107': 52,
u'ysz': 52,
u'ocf': 52,
u'skg': 52,
u'lv\xe9': 52,
u'x#g': 51,
u'\xe6ph': 51,
u'#gg': 51,
u'cic': 51,
u'\xe1#o': 51,
u'mvp': 51,
u'#s\xe3': 51,
u'p\xe6t': 51,
u'fnl': 51,
u'lsj': 51,
u'zl\xf3': 51,
u'#bm': 51,
u'\xf8ni': 51,
u'zha': 51,
u'qu\xe9': 51,
u'mi\xe3': 51,
u'+#e': 51,
u'pof': 51,
u'#rc': 51,
u'\xf3#f': 51,
u'lpi': 51,
u'kl\xe5': 51,
u'itd': 51,
u'ueb': 51,
u'oxy': 51,
u'ma\u0144': 51,
u'yrh': 51,
u'swe': 51,
u'\xf8su': 51,
u'abc': 51,
u'r\xe9s': 51,
u'm\xf8g': 51,
u'\xe6gm': 51,
u'owb': 51,
u'lvl': 51,
u'fr\xe9': 50,
u'sm\xe3': 50,
u'fft': 50,
u'eia': 50,
u'psc': 50,
u'en\u0117': 50,
u'yea': 50,
u'ajf': 50,
u'#hl': 50,
u'pps': 50,
u'jsb': 50,
u'rtc': 50,
u'yun': 50,
u'jks': 50,
u'l\xe8n': 50,
u'mls': 50,
u'\xe5pl': 50,
u'ghs': 50,
u'asv': 50,
u'lih': 50,
u'+pr': 50,
u'fl\xe9': 50,
u'#dg': 50,
u'ocs': 50,
u'juk': 50,
u'faj': 50,
u'avg': 50,
u'cze': 49,
u'yth': 49,
u'kje': 49,
u'voy': 49,
u'a\xf1a': 49,
u'lng': 49,
u'n\xe7a': 49,
u'szc': 49,
u'\xe5rf': 49,
u'cmr': 49,
u'zcz': 49,
u'mrs': 49,
u'ffw': 49,
u'ncu': 49,
u'wig': 49,
u'#n\xfc': 49,
u'fwi': 49,
u'bkv': 49,
u'dgh': 49,
u'chp': 49,
u'o\xf1a': 49,
u'ce\xe5': 49,
u'foe': 49,
u'zma': 49,
u'unb': 49,
u'zej': 49,
u'nij': 49,
u'l\xe5g': 49,
u'ecl': 49,
u'i\u0144s': 49,
u'mg\xf8': 49,
u'otb': 49,
u'#ln': 49,
u'opc': 49,
u'zul': 49,
u'hcf': 49,
u'#pj': 49,
u'#ow': 49,
u'frf': 48,
u'mb\xf3': 48,
u'fm\xe5': 48,
u'ugd': 48,
u'\xe1#s': 48,
u'\xe9ns': 48,
u'gnt': 48,
u'aic': 48,
u'wah': 48,
u'ibn': 48,
u'jra': 48,
u'iye': 48,
u'jns': 48,
u'mez': 48,
u'\xf8jv': 48,
u'\xe1\u0161i': 48,
u'\u0148ka': 48,
u'rcs': 48,
u'a\u0148k': 48,
u'\xf1an': 48,
u'xpr': 48,
u'edw': 48,
u'hko': 48,
u'ltw': 48,
u'jok': 48,
u'mdg': 48,
u'cka': 48,
u'\xe1#f': 48,
u'ma\u0148': 48,
u'\u0161ik': 48,
u'mcz': 48,
u'ajk': 48,
u'p#\xe5': 48,
u'bbo': 48,
u'\xe6kf': 47,
u'hey': 47,
u'gb\xf8': 47,
u'mvu': 47,
u'gny': 47,
u'ob\xf8': 47,
u'wav': 47,
u'y#\xe5': 47,
u'uzk': 47,
u'uz#': 47,
u'\xf8#g': 47,
u'ffp': 47,
u'#ww': 47,
u'yak': 47,
u'drh': 47,
u'ayn': 47,
u'orz': 47,
u'k\xfd#': 47,
u'vlo': 47,
u'oaq': 47,
u's\xed#': 47,
u'\xfcrn': 47,
u'iac': 47,
u'fcm': 47,
u'yhu': 47,
u'ylw': 47,
u'f\xf8k': 47,
u'mdc': 47,
u'anw': 47,
u'\xf8vh': 47,
u'fp\xf6': 47,
u'jg\xe5': 47,
u'ihi': 47,
u'egb': 47,
u'li\xe6': 47,
u'paz': 47,
u'aif': 47,
u'aji': 47,
u'hue': 47,
u'rmg': 47,
u'cue': 46,
u'jic': 46,
u'gfc': 46,
u'jaa': 46,
u'gug': 46,
u'yia': 46,
u'loe': 46,
u'\xf8#n': 46,
u'tfy': 46,
u'do\u011f': 46,
u'gys': 46,
u'do\xf1': 46,
u'fdp': 46,
u'o\u011fa': 46,
u'tsw': 46,
u'mih': 46,
u'vys': 46,
u'm\xe9e': 46,
u'ohs': 46,
u'isz': 46,
u'rvt': 46,
u'\xf8ga': 46,
u'egp': 46,
u'bez': 46,
u'jlv': 46,
u'\u011fan': 46,
u'r\xe9m': 46,
u'f\xe5t': 46,
u'xbr': 46,
u'\xe5sa': 46,
u'epm': 46,
u'ti\xe6': 46,
u'tiz': 46,
u'#p#': 46,
u'cba': 46,
u'arw': 46,
u'duj': 46,
u'#kj': 46,
u'#kw': 46,
u'vau': 46,
u'jid': 45,
u'pbu': 45,
u'xon': 45,
u'zk\xfd': 45,
u'pja': 45,
u'bcs': 45,
u'b\xf3#': 45,
u'doa': 45,
u'ayb': 45,
u'\xf8bm': 45,
u'\xf8bo': 45,
u'\xe5ld': 45,
u'yqu': 45,
u'uyq': 45,
u'iey': 45,
u'w\xfcr': 45,
u'#mp': 45,
u'n\xe9r': 45,
u'olz': 45,
u'ftm': 45,
u'eoe': 45,
u'coy': 45,
u'nii': 45,
u'yog': 45,
u'#\xf8b': 45,
u'fow': 45,
u'zur': 45,
u'uln': 45,
u'hyt': 45,
u'lyl': 45,
u'lyh': 45,
u'epc': 45,
u'd\xe9b': 45,
u'd\xe5d': 45,
u'rix': 45,
u'\xe6go': 45,
u'ybs': 45,
u'r+#': 44,
u'hay': 44,
u'lr#': 44,
u'osj': 44,
u'\xe9va': 44,
u'\u0161a#': 44,
u'aac': 44,
u'vob': 44,
u'h\xe9l': 44,
u'ijo': 44,
u'\xf8ak': 44,
u'w#v': 44,
u'cv#': 44,
u'vee': 44,
u'ffb': 44,
u'axa': 44,
u'jfi': 44,
u'oib': 44,
u'goa': 44,
u't\xe6m': 44,
u'qu\xed': 44,
u'\xfcrt': 44,
u'iai': 44,
u'r\xf8l': 44,
u'hwo': 44,
u'tpi': 44,
u'dl#': 44,
u'yur': 44,
u'\xf8ov': 44,
u'b\xe5s': 44,
u'ih#': 44,
u'zec': 44,
u'\xf8gi': 44,
u'u\xedn': 44,
u'spb': 44,
u'uxl': 44,
u'ecp': 44,
u'mgl': 44,
u'c\u0142a': 44,
u'iwe': 44,
u'd#q': 44,
u'l\xe9v': 44,
u'af\xe9': 44,
u'\xf4ne': 44,
u'ocg': 44,
u'oc\u0142': 44,
u'eyf': 44,
u'csi': 44,
u'prk': 44,
u'ln\xf8': 44,
u'ss\xe9': 44,
u'jum': 44,
u'ri\xe8': 44,
u'\xe5by': 44,
u'mfy': 44,
u'dng': 43,
u'scf': 43,
u'a\xf1o': 43,
u'w#c': 43,
u'jaw': 43,
u'ofd': 43,
u'br\xfa': 43,
u'gnv': 43,
u'waz': 43,
u'jr\xf8': 43,
u'ur\xe5': 43,
u'fnu': 43,
u'fng': 43,
u'box': 43,
u'zsc': 43,
u'\xe6ln': 43,
u'zli': 43,
u'\xe8de': 43,
u'pcd': 43,
u'i\xf8k': 43,
u'dcs': 43,
u'\xf8fi': 43,
u'llr': 43,
u'kyh': 43,
u'iau': 43,
u'tb\xf8': 43,
u'yho': 43,
u'al\xed': 43,
u'ltg': 43,
u'tco': 43,
u'dhy': 43,
u'ttf': 43,
u'ziz': 43,
u'rh\xf4': 43,
u'jlm': 43,
u'vfe': 43,
u'h\xf4n': 43,
u'kkl': 43,
u'\xe6sp': 43,
u'jdo': 43,
u'n\xe6l': 43,
u'ksj': 43,
u'd\xe9m': 43,
u'pr\xed': 43,
u'h\xe4u': 43,
u'izz': 43,
u'efv': 43,
u'bfo': 43,
u'riy': 43,
u'i\u010di': 43,
u'tju': 43,
u'iry': 42,
u'tvd': 42,
u'\xe8ge': 42,
u'sg\xf8': 42,
u'a#q': 42,
u'aio': 42,
u'gn\xe8': 42,
u'guw': 42,
u'ffc': 42,
u'aq#': 42,
u'wir': 42,
u'n\xe8d': 42,
u'#nt': 42,
u'\xe6li': 42,
u'rx#': 42,
u'nw\xfc': 42,
u'\xe5lk': 42,
u'm\xe5k': 42,
u'#\xe5h': 42,
u'\xb4s#': 42,
u'y\u0144s': 42,
u'm\xe9d': 42,
u'tca': 42,
u'n\xe9n': 42,
u'ol\xf8': 42,
u'\xf8vs': 42,
u'say': 42,
u'uir': 42,
u't\xfcr': 42,
u'bta': 42,
u'fnb': 42,
u'\xe9l\xe8': 42,
u'zed': 42,
u'x#u': 42,
u'uwu': 42,
u'zy\u0144': 42,
u'mco': 42,
u'op\xe9': 42,
u'bmw': 42,
u'\xe5sp': 42,
u'daa': 42,
u'dao': 42,
u'#p\xe1': 42,
u'faz': 42,
u'yvu': 42,
u'r#\xf3': 42,
u'rmm': 42,
u'l\xe6t': 42,
u'\xe9ro': 41,
u'mb#': 41,
u'\xednc': 41,
u'jew': 41,
u'nrs': 41,
u'dzk': 41,
u'jae': 41,
u'\xe9by': 41,
u'eiw': 41,
u'amg': 41,
u'#ao': 41,
u'd\xe6n': 41,
u'eeg': 41,
u'gyr': 41,
u'orj': 41,
u'v\xf8k': 41,
u'#hf': 41,
u'euj': 41,
u'vip': 41,
u'vtu': 41,
u'uyk': 41,
u'yh\xf8': 41,
u'e\xf8j': 41,
u'n\u0161a': 41,
u'fks': 41,
u'tcy': 41,
u'vfj': 41,
u'zie': 41,
u'np\xe5': 41,
u'mpg': 41,
u'l#q': 41,
u'hlf': 41,
u'luh': 41,
u'luj': 41,
u'vfr': 41,
u'kks': 41,
u'd\xf8n': 41,
u'enj': 41,
u'an\u0161': 41,
u'pv\xe5': 41,
u'mfk': 41,
u'zov': 40,
u'\xe6p#': 40,
u'hex': 40,
u'\xf8me': 40,
u'amw': 40,
u'w#l': 40,
u'aip': 40,
u'noa': 40,
u'dvd': 40,
u'#j\xf3': 40,
u'eai': 40,
u'sj#': 40,
u'drz': 40,
u'wu#': 40,
u'#nk': 40,
u'dky': 40,
u'eya': 40,
u'cu\xe9': 40,
u'#zw': 40,
u'ppp': 40,
u'dps': 40,
u'nld': 40,
u'\xe5dd': 40,
u'i\xe8g': 40,
u'##+': 40,
u'\xe8sb': 40,
u'\xe5pr': 40,
u'#nd': 40,
u'spp': 40,
u'k\xf3s': 40,
u'lua': 40,
u'#l\xf6': 40,
u'bm\xe6': 40,
u'\xe0s#': 40,
u'mw#': 40,
u'gnb': 40,
u'ybu': 40,
u'gkm': 40,
u'uwe': 40,
u'ynh': 40,
u'#tz': 40,
u'vaj': 40,
u'dn#': 39,
u'gfs': 39,
u'gfu': 39,
u'jeo': 39,
u'm\xe6d': 39,
u'nkc': 39,
u'yv\xe6': 39,
u'mrl': 39,
u'v\xe1n': 39,
u'tf#': 39,
u'eea': 39,
u'oxa': 39,
u'ov\xf8': 39,
u'gyu': 39,
u'vdv': 39,
u'vdo': 39,
u'i\xf8e': 39,
u'iib': 39,
u'dcm': 39,
u'\xfcrk': 39,
u'r\xfcf': 39,
u'll\xe1': 39,
u'oys': 39,
u'\xe6av': 39,
u'bdi': 39,
u'\u0161ti': 39,
u'whe': 39,
u'wha': 39,
u'nd\xe9': 39,
u'dd#': 39,
u'cpa': 39,
u'pd#': 39,
u'\u017eda': 39,
u'ktn': 39,
u'#\u017ed': 39,
u'l#x': 39,
u'yga': 39,
u'zny': 39,
u'#dp': 39,
u'gok': 39,
u'\xf3s#': 39,
u'jgr': 39,
u'wbo': 39,
u'duh': 39,
u'dgm': 38,
u'gbu': 38,
u'eu\xf8': 38,
u'\xe0#f': 38,
u'asz': 38,
u'xho': 38,
u'boi': 38,
u'vho': 38,
u'hfc': 38,
u'mmm': 38,
u's\xe9r': 38,
u's\xe9e': 38,
u'sff': 38,
u'\xe9mo': 38,
u'#v\xe4': 38,
u'fce': 38,
u'#rt': 38,
u'ylm': 38,
u'apm': 38,
u'ltz': 38,
u'#v\u0103': 38,
u'ejc': 38,
u'anq': 38,
u'u#\xe9': 38,
u'v\u0103l': 38,
u'zee': 38,
u'x#r': 38,
u'#\xbac': 38,
u'byz': 38,
u'yrf': 38,
u'xla': 38,
u'mki': 38,
u'\u0103le': 38,
u'daq': 38,
u'piv': 38,
u'ywo': 38,
u'te\xe5': 38,
u'#k\xf3': 38,
u'vap': 38,
u'lvg': 38,
u'vs\xe6': 37,
u'brc': 37,
u'br\xfc': 37,
u'k\xe9s': 37,
u'i\xf1a': 37,
u'loh': 37,
u's\xe6b': 37,
u'jir': 37,
u'rgd': 37,
u'iiv': 37,
u'\xe6#t': 37,
u'poh': 37,
u'v\xe9r': 37,
u'apv': 37,
u'#mt': 37,
u'dds': 37,
u'evr': 37,
u'exh': 37,
u'ntz': 37,
u'zad': 37,
u'idm': 37,
u'vbi': 37,
u'hlq': 37,
u'shw': 37,
u'#lh': 37,
u'\xe4rn': 37,
u'www': 37,
u's\u0142a': 37,
u'tui': 37,
u'vno': 37,
u'rax': 37,
u'ocl': 37,
u'icp': 37,
u'te\xe6': 37,
u'r#\u0151': 37,
u'ync': 37,
u'lj#': 37,
u'kb\xe6': 37,
u'\xf8pl': 37,
u'\xfcff': 37,
u'\u0151k\xe9': 36,
u'jt\xe6': 36,
u'm\xf3n': 36,
u'#c\xe6': 36,
u'#ct': 36,
u'nrd': 36,
u'h\xe9#': 36,
u'w#p': 36,
u'w#u': 36,
u'soh': 36,
u'\xf3rd': 36,
u'pne': 36,
u'dzl': 36,
u'ifk': 36,
u'zse': 36,
u'eym': 36,
u'pge': 36,
u'dco': 36,
u'lll': 36,
u'xpo': 36,
u'\xf1ez': 36,
u'tcw': 36,
u'bce': 36,
u'hoi': 36,
u'\xf1ig': 36,
u'\xe6ma': 36,
u'vi\xf1': 36,
u'ftc': 36,
u'ueu': 36,
u'fnc': 36,
u'p\xf8s': 36,
u'\xe1r#': 36,
u'hl\xed': 36,
u'snc': 36,
u'rlg': 36,
u'#lr': 36,
u't\u0151k': 36,
u'ryc': 36,
u'vnf': 36,
u'#vh': 36,
u'+or': 36,
u'arj': 36,
u'h#\xf6': 36,
u'tzk': 35,
u'bn#': 35,
u'#c\xf3': 35,
u'aaa': 35,
u'c\xe6s': 35,
u'w##': 35,
u'w#g': 35,
u'w#k': 35,
u'\xe1il': 35,
u'v\xe4y': 35,
u'gnf': 35,
u'#aw': 35,
u'#\xe6t': 35,
u'oz#': 35,
u'ngw': 35,
u'yeb': 35,
u'boc': 35,
u'e#x': 35,
u'pce': 35,
u'j\xf3z': 35,
u'\xe6#d': 35,
u'llg': 35,
u'\xe4yr': 35,
u'\xe5d\xf8': 35,
u'hki': 35,
u'\u010di\u010d': 35,
u'fki': 35,
u'\xed\u010de': 35,
u'ipz': 35,
u'axh': 35,
u'ftg': 35,
u'mlr': 35,
u'rd\xe5': 35,
u'pts': 35,
u'\u012ble': 35,
u'idc': 35,
u'fdl': 35,
u'yry': 35,
u'tyl': 35,
u'lqv': 35,
u'ulh': 35,
u'l\xed\u010d': 35,
u'f\xe9e': 35,
u'k\xe6t': 35,
u'p#y': 35,
u'p#w': 35,
u'z\u012bl': 35,
u'ejp': 35,
u'e\xdfl': 35,
u'okn': 35,
u'c\xf3r': 35,
u'pvu': 35,
u'yfu': 34,
u'bna': 34,
u'\xf1ov': 34,
u'\xbac#': 34,
u'\xfcl#': 34,
u'jat': 34,
u'jai': 34,
u'soe': 34,
u'eix': 34,
u'tbt': 34,
u'ke\xdf': 34,
u'\u0117s#': 34,
u'vha': 34,
u'tvn': 34,
u'ahb': 34,
u'gih': 34,
u'kyk': 34,
u'pov': 34,
u'\xf1as': 34,
u'\xf1a#': 34,
u'e\xf8e': 34,
u'#ix': 34,
u'r\u010di': 34,
u'\u010di\u016b': 34,
u'vax': 34,
u'hoh': 34,
u'atw': 34,
u'tt\xe6': 34,
u'vi\u0107': 34,
u'i\u016bt': 34,
u'exb': 34,
u'\xdf#f': 34,
u'eoi': 34,
u'hp#': 34,
u'htu': 34,
u'zac': 34,
u'idj': 34,
u'na\xef': 34,
u'awr': 34,
u'wko': 34,
u'l\xed#': 34,
u'lyw': 34,
u'\xfcmt': 34,
u'epg': 34,
u'hui': 34,
u'gkv': 34,
u'\xe5fu': 34,
u'p\xe1l': 34,
u'ar\u010d': 34,
u'pzi': 34,
u'j#\xf8': 33,
u'jtl': 33,
u'fvo': 33,
u'ezs': 33,
u'\u0161ev': 33,
u'cry': 33,
u'ciz': 33,
u'\xf8ta': 33,
u'nky': 33,
u'gnp': 33,
u'ncz': 33,
u'ozc': 33,
u'#\xb0#': 33,
u'\xedao': 33,
u'#bt': 33,
u'#b\xfc': 33,
u'yza': 33,
u'gof': 33,
u'v\xf8d': 33,
u'\xf8nr': 33,
u't\xe6e': 33,
u'syf': 33,
u'\xe6#a': 33,
u'oag': 33,
u'kuk': 33,
u'#rf': 33,
u'siz': 33,
u'#vd': 33,
u'hog': 33,
u'yha': 33,
u'exo': 33,
u'exx': 33,
u'pdu': 33,
u'uee': 33,
u'ytj': 33,
u'h\xf8g': 33,
u'mpb': 33,
u'mp\xf8': 33,
u'#\u03b1#': 33,
u'tyu': 33,
u'#t\u0151': 33,
u'nyp': 33,
u'nyj': 33,
u'#\xad#': 33,
u'\xe5sn': 33,
u'ioj': 33,
u'msc': 33,
u'ylc': 33,
u'efn': 33,
u'#k\xe4': 33,
u'czm': 32,
u'rgj': 32,
u'tzh': 32,
u'tv\xe5': 32,
u'psm': 32,
u'\xe6ki': 32,
u'\xe9be': 32,
u'rnw': 32,
u'#ss': 32,
u'ymf': 32,
u'drs': 32,
u'zwi': 32,
u'm\xf6l': 32,
u'\u0103se': 32,
u'e#+': 32,
u'ge\u013e': 32,
u'vh\xe5': 32,
u'gao': 32,
u'ldw': 32,
u'pgj': 32,
u'k\xe4r': 32,
u'vp\xe5': 32,
u'xxo': 32,
u'drc': 32,
u'oue': 32,
u'yuk': 32,
u'\xf8ru': 32,
u'#a\xf1': 32,
u'dyl': 32,
u'ml\xe5': 32,
u'b\xf8h': 32,
u'\xe1k#': 32,
u'btr': 32,
u'mht': 32,
u'eoo': 32,
u'zet': 32,
u'ykv': 32,
u'#u#': 32,
u'#hm': 32,
u'ecm': 32,
u'\xf8hm': 32,
u'hdo': 32,
u'abm': 32,
u'l\xe9#': 32,
u'bih': 32,
u'peh': 32,
u'gch': 32,
u'pii': 32,
u'yj#': 32,
u'rm\xe0': 32,
u'guj': 32,
u'bbs': 32,
u'\u015foi': 31,
u'uot': 31,
u'mb\xf8': 31,
u'#q#': 31,
u'fim': 31,
u'g\xf6n': 31,
u'zko': 31,
u'p\xf6#': 31,
u'\xf3zs': 31,
u'cwk': 31,
u'cme': 31,
u'er\xf6': 31,
u'\xe0#o': 31,
u'obd': 31,
u'psn': 31,
u'dzo': 31,
u'\xe5nl': 31,
u'fpu': 31,
u'xsy': 31,
u'uky': 31,
u'\xf8un': 31,
u'cgo': 31,
u'vkl': 31,
u'\xfank': 31,
u'kmo': 31,
u'#b\xe1': 31,
u'pcf': 31,
u'u\u015fo': 31,
u'mmy': 31,
u'gak': 31,
u'zhn': 31,
u'\xf6nc': 31,
u'chy': 31,
u'dce': 31,
u'rcu': 31,
u'tox': 31,
u'aay': 31,
u'u\xe6g': 31,
u'bd#': 31,
u'ouh': 31,
u'viu': 31,
u'ndw': 31,
u'\xf8ks': 31,
u'uea': 31,
u'btl': 31,
u'laq': 31,
u'\xf3w#': 31,
u'mtj': 31,
u'u\xeds': 31,
u'lmm': 31,
u'lu\xed': 31,
u'r\xf6s': 31,
u'\xf8ln': 31,
u'e\u013e#': 31,
u'ys\xe5': 31,
u'bu\u015f': 31,
u'tm#': 31,
u'mwo': 31,
u'lbt': 31,
u'kwa': 31,
u'\xf3s\xe1': 31,
u'bf#': 31,
u'ebt': 31,
u'dgl': 30,
u'kfl': 30,
u'tzb': 30,
u'tvm': 30,
u'tvf': 30,
u'zky': 30,
u'evc': 30,
u'rje': 30,
u'cve': 30,
u'k#z': 30,
u'b\u0103s': 30,
u'yag': 30,
u'#z\u012b': 30,
u'#b\u0103': 30,
u'ra\xf1': 30,
u'#f\xe1': 30,
u'\xe6#v': 30,
u'ubn': 30,
u'\xf8fg': 30,
u'\xe1n\xe9': 30,
u'wli': 30,
u'\xe9#r': 30,
u'\xe5dv': 30,
u'r\xe4s': 30,
u'\xf3#o': 30,
u'#mk': 30,
u'at\xfc': 30,
u'blb': 30,
u'gdy': 30,
u'cps': 30,
u'b\xf8f': 30,
u'\u0161ko': 30,
u'mhz': 30,
u'ghl': 30,
u'ghj': 30,
u'vuk': 30,
u'tly': 30,
u'td#': 30,
u'dwe': 30,
u'otm': 30,
u'tnu': 30,
u'iwi': 30,
u'ewc': 30,
u'ysf': 30,
u'cjd': 30,
u'\xf8dy': 30,
u'goc': 30,
u'rmr': 30,
u'mm\xe9': 30,
u'dnj': 29,
u'\xe6kr': 29,
u'xv#': 29,
u'jis': 29,
u'fme': 29,
u'\xedns': 29,
u'uos': 29,
u'k\xe1s': 29,
u'e\xe5#': 29,
u'vgo': 29,
u'iju': 29,
u'rbj': 29,
u'br\xe9': 29,
u'y#z': 29,
u'fbf': 29,
u'ib\xe6': 29,
u'z\xf3n': 29,
u'pno': 29,
u'azf': 29,
u'psb': 29,
u'ysh': 29,
u'ifs': 29,
u'nc\xf8': 29,
u'bgb': 29,
u'eay': 29,
u'\xedas': 29,
u'\xf3ns': 29,
u'mii': 29,
u'\xf8ba': 29,
u'\xe6#h': 29,
u'rcd': 29,
u'zde': 29,
u'q#a': 29,
u'+#t': 29,
u'aws': 29,
u'gij': 29,
u'\xe5ha': 29,
u'wre': 29,
u'suo': 29,
u'\xf1ag': 29,
u'jlu': 29,
u'xpa': 29,
u'dhs': 29,
u'axe': 29,
u'u#\u017e': 29,
u'klm': 29,
u'se\xe6': 29,
u'\xf8og': 29,
u'kpi': 29,
u'c\xf8r': 29,
u'\xe1#h': 29,
u'b\xe1r': 29,
u'aon': 29,
u'wca': 29,
u'uxb': 29,
u'wo#': 29,
u'hli': 29,
u'm\xe8r': 29,
u'upn': 29,
u'bap': 29,
u'diz': 29,
u'afj': 29,
u'cnn': 29,
u'unj': 29,
u'ar\xe1': 29,
u'duo': 29,
u'dug': 29,
u'\xe6gc': 29,
u'#mw': 29,
u'irh': 28,
u'\xf8to': 28,
u't\xe9r': 28,
u'+#k': 28,
u'tr\xe3': 28,
u'jaz': 28,
u'l\xf6\xf6': 28,
u'hry': 28,
u'uuk': 28,
u'#j\u0119': 28,
u'j\u0119d': 28,
u'kii': 28,
u'yeu': 28,
u'\xf6\xf6w': 28,
u'fs\xf8': 28,
u'rgg': 28,
u'zho': 28,
u'\xe5l\xf8': 28,
u'\xfcri': 28,
u'd\u017ei': 28,
u'kyb': 28,
u'kyv': 28,
u'alw': 28,
u'xle': 28,
u'jsg': 28,
u'\xe0be': 28,
u'i#x': 28,
u'apf': 28,
u'lt\xf8': 28,
u'joo': 28,
u'xik': 28,
u'fco': 28,
u'ne\u0161': 28,
u'cpn': 28,
u'gwa': 28,
u'uxs': 28,
u'liy': 28,
u'b\xe9r': 28,
u'fhe': 28,
u'#lg': 28,
u'abh': 28,
u'gga': 28,
u'ryr': 28,
u'uhr': 28,
u'cfa': 28,
u'cfs': 28,
u'n\xe6n': 28,
u'req': 28,
u'\xe5ny': 28,
u'vco': 28,
u'cnd': 28,
u'an\xe9': 28,
u'\xe9gr': 28,
u'dye': 28,
u'\u0119dr': 28,
u'\xe5bo': 28,
u'kb\xf8': 28,
u'hmu': 28,
u'hms': 28,
u'\u0144#o': 27,
u'vlh': 27,
u'dfe': 27,
u'cik': 27,
u'aey': 27,
u'ofv': 27,
u'nkp': 27,
u'er\xf3': 27,
u'mvo': 27,
u'hru': 27,
u'wa\u0142': 27,
u'ib\xf8': 27,
u'#s\xe2': 27,
u's\xe2r': 27,
u'g\xe6e': 27,
u'\xe1ur': 27,
u'eah': 27,
u'iq#': 27,
u'uju': 27,
u'pco': 27,
u'\xf8ng': 27,
u'\xf8n\xe6': 27,
u'\u0161de': 27,
u'gac': 27,
u'pg#': 27,
u'sfm': 27,
u'ehp': 27,
u'js\xf8': 27,
u'rt\xe9': 27,
u'\xe4ub': 27,
u'\xf3#s': 27,
u'xac': 27,
u'#m\xf6': 27,
u'e\u0161d': 27,
u'ydg': 27,
u'sao': 27,
u'xaf': 27,
u'vmd': 27,
u'zeb': 27,
u'ktc': 27,
u'\xe2rb': 27,
u'htm': 27,
u'zaf': 27,
u'\xe1ns': 27,
u'td\xe6': 27,
u'hha': 27,
u'pys': 27,
u'\xf8s\xf8': 27,
u'mcs': 27,
u'hrf': 27,
u'\xf6lz': 27,
u'j\xe1u': 27,
u'aiz': 27,
u'pr\xe9': 27,
u'#pd': 27,
u'rvn': 27,
u'igc': 27,
u'\xe1rb': 27,
u'efc': 27,
u'hif': 27,
u'p+o': 27,
u'kby': 27,
u'mfp': 27,
u'x#l': 26,
u'iv\xe1': 26,
u't#\xf6': 26,
u'f\xe6t': 26,
u'bvi': 26,
u'evg': 26,
u'evp': 26,
u'jay': 26,
u'kr#': 26,
u'in\xe9': 26,
u'mvf': 26,
u'emc': 26,
u'gnu': 26,
u'yvo': 26,
u'mr\xf8': 26,
u'puk': 26,
u'fnh': 26,
u'cga': 26,
u'hbi': 26,
u'\xf8jm': 26,
u'yyn': 26,
u'#bc': 26,
u'\xfcma': 26,
u'miv': 26,
u'uib': 26,
u'\xf6vp': 26,
u'ftj': 26,
u's\xe5t': 26,
u'f\xf8t': 26,
u'tcl': 26,
u'tcd': 26,
u'vay': 26,
u'e\u0161t': 26,
u'khu': 26,
u'acl': 26,
u'oh#': 26,
u'le\u0161': 26,
u'ckf': 26,
u'g\xfcl': 26,
u'o\u0161e': 26,
u'spf': 26,
u'gbt': 26,
u'yta': 26,
u'uxh': 26,
u'b#\xe6': 26,
u'yob': 26,
u'rhy': 26,
u'yrg': 26,
u'awn': 26,
u'shb': 26,
u'iny': 26,
u'otj': 26,
u'jlk': 26,
u'\xf2s#': 26,
u'yl\xe6': 26,
u'ryy': 26,
u'ksc': 26,
u'\u0107#o': 26,
u'xfr': 26,
u'ocm': 26,
u'lcr': 26,
u'\xe5fo': 26,
u'hij': 26,
u'ri\xf1': 26,
u'xs#': 26,
u'a\u0142o': 25,
u'zre': 25,
u'\xf8mp': 25,
u'dbb': 25,
u'dbp': 25,
u'er\xe9': 25,
u'soi': 25,
u'yit': 25,
u'\xe5es': 25,
u'azu': 25,
u'lo\u0161': 25,
u'am\xe8': 25,
u'dzu': 25,
u'nnn': 25,
u'xpe': 25,
u'e#\xf6': 25,
u'#cj': 25,
u'kmy': 25,
u'z\xfcr': 25,
u'bkr': 25,
u'sbs': 25,
u'oea': 25,
u'#fv': 25,
u'n\xe4n': 25,
u'oap': 25,
u'kuv': 25,
u'#z\xfc': 25,
u'+#f': 25,
u'n\u0117#': 25,
u'llt': 25,
u'fcg': 25,
u'\xad#a': 25,
u'oym': 25,
u'\xf6zd': 25,
u'#rd': 25,
u'\xe9#j': 25,
u'ixa': 25,
u'\xe6kp': 25,
u'\xe5h\xf8': 25,
u'xt#': 25,
u'atx': 25,
u'lp\xe6': 25,
u'lpt': 25,
u'acp': 25,
u'cpp': 25,
u'jc#': 25,
u'rdg': 25,
u'kp#': 25,
u'djm': 25,
u'ckr': 25,
u'btp': 25,
u'\xe9li': 25,
u'\xf8g\xe5': 25,
u'm\xe0#': 25,
u'\xe6br': 25,
u'z#c': 25,
u'tkn': 25,
u'\u017ei\u0107': 25,
u'flg': 25,
u'\u0103us': 25,
u'hds': 25,
u'#tg': 25,
u'f\xe1i': 25,
u'#dz': 25,
u'n\xe6f': 25,
u'yso': 25,
u'ysy': 25,
u'ioo': 25,
u'ioi': 25,
u'pik': 25,
u'zba': 25,
u'cni': 25,
u'tiq': 25,
u'izh': 25,
u'yn\xe4': 25,
u'fbl': 25,
u'zvo': 25,
u'x#c': 24,
u'soy': 24,
u'\xe4ss': 24,
u'j#c': 24,
u'xko': 24,
u'jil': 24,
u'zoe': 24,
u'xop': 24,
u'hz#': 24,
u'\xe6ti': 24,
u'aeg': 24,
u'uco': 24,
u'cva': 24,
u'ofh': 24,
u'+fo': 24,
u'\xe0#h': 24,
u'f\xf3n': 24,
u'k#\xe9': 24,
u'uzi': 24,
u'caz': 24,
u'm#q': 24,
u'ncf': 24,
u'c\xf2s': 24,
u'm#x': 24,
u'e+p': 24,
u'fpl': 24,
u'ad\u017e': 24,
u'cym': 24,
u'ke\xe6': 24,
u'\u0119sa': 24,
u'wup': 24,
u'#bv': 24,
u'jby': 24,
u'ldj': 24,
u'ld\xe5': 24,
u'dca': 24,
u'rrh': 24,
u'#zv': 24,
u'nhy': 24,
u'+#v': 24,
u'hsd': 24,
u'llk': 24,
u's\xe1k': 24,
u'ahh': 24,
u'u\u010de': 24,
u'm\xe1s': 24,
u'fcs': 24,
u'tke': 24,
u'\u0142\u0119s': 24,
u'\xe9es': 24,
u'rkd': 24,
u'#rs': 24,
u'\xe5dh': 24,
u'fg#': 24,
u'ou\u010d': 24,
u'jo\xe3': 24,
u'jof': 24,
u'ipn': 24,
u'tt\xe0': 24,
u'k\xf8j': 24,
u'\xe9ta': 24,
u'r\xe8r': 24,
u'pdc': 24,
u'xav': 24,
u'npv': 24,
u'#yv': 24,
u'vma': 24,
u'fnd': 24,
u'v\xe6m': 24,
u'nt\xe9': 24,
u'\xefr#': 24,
u'th\xe6': 24,
u'vdd': 24,
u'zao': 24,
u'\xe5rg': 24,
u'as\xe1': 24,
u'pug': 24,
u'puc': 24,
u'\xe9d\xe9': 24,
u'fda': 24,
u'\u016bna': 24,
u'ot\xe1': 24,
u'zyk': 24,
u'kga': 24,
u'o\xe3o': 24,
u'tya': 24,
u'tyt': 24,
u'cs\xe1': 24,
u'fec': 24,
u'a\u0142\u0119': 24,
u'bi\u0144': 24,
u'n\xe6m': 24,
u'lfb': 24,
u'k\xe6g': 24,
u'nnp': 24,
u'lns': 24,
u'jui': 24,
u'd\xe5b': 24,
u'ef\xf3': 24,
u'rr\xe9': 24,
u'rr\xe8': 24,
u'hm\xf8': 24,
u'zve': 24,
u'\xe6kl': 23,
u'zri': 23,
u'ezv': 23,
u'psg': 23,
u'\xf8ek': 23,
u'yvt': 23,
u'ejm': 23,
u'am\xe4': 23,
u'am\xf3': 23,
u'\xf8#r': 23,
u'rj\xe6': 23,
u'\xf8#\xf8': 23,
u'jrm': 23,
u'ncr': 23,
u'eec': 23,
u'p\xe6k': 23,
u'ngc': 23,
u'#j\xfc': 23,
u'hny': 23,
u'me\xe5': 23,
u'zwa': 23,
u'bmo': 23,
u'\xed\xf1i': 23,
u'v#w': 23,
u'fsc': 23,
u'pwa': 23,
u'pch': 23,
u'ycy': 23,
u'jbo': 23,
u'zhi': 23,
u'#fk': 23,
u'\xe6us': 23,
u'dct': 23,
u'#zr': 23,
u'xx#': 23,
u'hsa': 23,
u'tgv': 23,
u'ixo': 23,
u'edc': 23,
u'hke': 23,
u'##\xba': 23,
u'##\xb5': 23,
u'lpg': 23,
u'prs': 23,
u'sjk': 23,
u'jki': 23,
u'ttp': 23,
u'\xad#o': 23,
u'le\xf8': 23,
u'zib': 23,
u'mhi': 23,
u'hpa': 23,
u'\xe9la': 23,
u'gls': 23,
u'ni\xf3': 23,
u'niy': 23,
u'thw': 23,
u'ekb': 23,
u'aoa': 23,
u'aok': 23,
u'za\xef': 23,
u'yty': 23,
u'pu#': 23,
u'st\xf3': 23,
u'\xe6f\xe6': 23,
u'#lp': 23,
u'd\xf8k': 23,
u'td\xf8': 23,
u'\xe6sg': 23,
u'ggs': 23,
u'csr': 23,
u'ysm': 23,
u'epw': 23,
u't\xf3w': 23,
u'm\xf8#': 23,
u'\xe5fy': 23,
u'wne': 23,
u'owu': 23,
u'gff': 22,
u'aap': 22,
u'zka': 22,
u'\xe6tp': 22,
u'#\xba#': 22,
u'sg#': 22,
u'w#n': 22,
u'sz\xe9': 22,
u'in\xf8': 22,
u'yib': 22,
u'uvr': 22,
u'urq': 22,
u'xsa': 22,
u'axf': 22,
u'ajp': 22,
u'toj': 22,
u's\xe9d': 22,
u'#\xf6z': 22,
u'#\xed\xf1': 22,
u'\xe6#p': 22,
u'rcr': 22,
u'm\xe5g': 22,
u'cl\xf8': 22,
u'\xfcrg': 22,
u'mua': 22,
u'muc': 22,
u'gt\xe6': 22,
u'shf': 22,
u'rkp': 22,
u'ya\xf1': 22,
u'wla': 22,
u'm\xedr': 22,
u'e\xe5n': 22,
u'xim': 22,
u'\xe4nd': 22,
u'\xf8v\xe6': 22,
u'ddt': 22,
u'xae': 22,
u'odc': 22,
u'\xdf#o': 22,
u'mhp': 22,
u'eo\xf8': 22,
u'nix': 22,
u'bpo': 22,
u'sph': 22,
u'aof': 22,
u'gwi': 22,
u'b#\xe5': 22,
u'hho': 22,
u'n+#': 22,
u'#ht': 22,
u'snp': 22,
u'#\xf8h': 22,
u'mgk': 22,
u'\xe9sa': 22,
u'is\xe5': 22,
u'mcv': 22,
u'wwf': 22,
u'hd#': 22,
u'de\xdf': 22,
u'okh': 22,
u'afy': 22,
u'vr\xe5': 22,
u'vci': 22,
u'k\xe6i': 22,
u'zbi': 22,
u'ms\xe5': 22,
u'e\xdf#': 22,
u'huj': 22,
u'ssg': 22,
u'rmc': 22,
u'duq': 22,
u'kbo': 22,
u'mfn': 22,
u'\xe9rg': 21,
u'#cg': 21,
u'a\xeds': 21,
u'\xe6km': 21,
u'ez\xf3': 21,
u'ezk': 21,
u'jei': 21,
u'rfm': 21,
u'#gw': 21,
u'\xe1#a': 21,
u'\xe1#d': 21,
u'szt': 21,
u'cvi': 21,
u'#\xf6v': 21,
u'ob\xe5': 21,
u'wam': 21,
u'ib\xe9': 21,
u'#\xb5g': 21,
u'j\xe6t': 21,
u'\xed#f': 21,
u'm#\xf6': 21,
u'yac': 21,
u'bcr': 21,
u'\xe6fo': 21,
u'ma\u0161': 21,
u'y\xf6s': 21,
u'bka': 21,
u'o#q': 21,
u'i\xf8v': 21,
u'ufb': 21,
u'm\xe5h': 21,
u'g#\u03b1': 21,
u'\xe1ng': 21,
u'wlf': 21,
u'aab': 21,
u'xpl': 21,
u'ep\xe5': 21,
u'siw': 21,
u'six': 21,
u'joy': 21,
u's#x': 21,
u'xte': 21,
u'oqu': 21,
u'\xe4us': 21,
u'lpd': 21,
u'n\xe9e': 21,
u'\xe9pr': 21,
u'jkb': 21,
u'ydb': 21,
u'bl#': 21,
u'\xf8kl': 21,
u'\xe6va': 21,
u'exk': 21,
u'ag\xf3': 21,
u'zef': 21,
u'zew': 21,
u'yeh': 21,
u'ni\xf1': 21,
u'gh\xf8': 21,
u'aoe': 21,
u'fsy': 21,
u'z#w': 21,
u'vb\xf8': 21,
u'l\xe5h': 21,
u'ecv': 21,
u'awk': 21,
u'jlo': 21,
u'#l\xfc': 21,
u'r\xe1n': 21,
u'tuf': 21,
u'mow': 21,
u'rym': 21,
u'uhj': 21,
u'sdp': 21,
u'g\xf3n': 21,
u'fyn': 21,
u'pif': 21,
u'wfo': 21,
u'jyl': 21,
u'izy': 21,
u'efg': 21,
u'ar\xe9': 21,
u'kbu': 21,
u'kfb': 20,
u'osz': 20,
u'osg': 20,
u'\xfctt': 20,
u'jtb': 20,
u'\xf8my': 20,
u'+#m': 20,
u'oj#': 20,
u'kn\xf6': 20,
u'bva': 20,
u'tdt': 20,
u'r\xf3s': 20,
u'\xe5rk': 20,
u'ayd': 20,
u'yr\xf8': 20,
u'\xe1in': 20,
u'hrb': 20,
u'foi': 20,
u'cak': 20,
u'#s\xfc': 20,
u'am\xe1': 20,
u'srs': 20,
u'loz': 20,
u'jrr': 20,
u'ifm': 20,
u'cys': 20,
u'cyc': 20,
u'doi': 20,
u'\xefre': 20,
u'zla': 20,
u'oz\xf3': 20,
u'syu': 20,
u'chh': 20,
u'ui\xe8': 20,
u'sfb': 20,
u'v\xe1r': 20,
u'dch': 20,
u'\xe6#b': 20,
u'\xf6rr': 20,
u'n\u0117s': 20,
u'hsh': 20,
u'ky\xf6': 20,
u'#\xe5s': 20,
u'eh\xe9': 20,
u'\xe6vs': 20,
u'imh': 20,
u'\xf2#f': 20,
u'\xfbte': 20,
u'\u010d#o': 20,
u'edz': 20,
u'#\u010de': 20,
u'ouq': 20,
u'\xf8vp': 20,
u'hch': 20,
u'xme': 20,
u'b\xfct': 20,
u'nph': 20,
u'kpe': 20,
u'ckn': 20,
u'fnn': 20,
u'yp#': 20,
u'co\xfb': 20,
u'htb': 20,
u'dbt': 20,
u'mpp': 20,
u'a#\u0163': 20,
u'jpo': 20,
u'naj': 20,
u'cdd': 20,
u'#hp': 20,
u'r\xe5e': 20,
u'jlh': 20,
u'#ls': 20,
u'vju': 20,
u'paw': 20,
u'pax': 20,
u'r\xe9t': 20,
u'o\xfbt': 20,
u'fui': 20,
u'jda': 20,
u'ej\xe6': 20,
u'an\u0163': 20,
u'dlr': 20,
u'bbl': 20,
u'fap': 20,
u'fae': 20,
u'yn\xe6': 20,
u'taz': 20,
u'#mn': 20,
u'n\xf6r': 20,
u'p\xe9#': 19,
u'fip': 19,
u'kfe': 19,
u'yfi': 19,
u'k\xf6n': 19,
u'osd': 19,
u'ji\xf8': 19,
u'#c\xe1': 19,
u'wri': 19,
u'tr\xe9': 19,
u'in\xe1': 19,
u'pju': 19,
u'obc': 19,
u'\xed#a': 19,
u'd\xe6s': 19,
u'svr': 19,
u'ozi': 19,
u'ozz': 19,
u'jna': 19,
u'yai': 19,
u'c#j': 19,
u'#$#': 19,
u'#nc': 19,
u'vd\xf8': 19,
u'#n\xe9': 19,
u'u\xb4s': 19,
u'fsr': 19,
u'i\xe1n': 19,
u'or\xe1': 19,
u'\xf8jo': 19,
u'lky': 19,
u'\xf6ni': 19,
u'#\xf6r': 19,
u'eu\xb4': 19,
u'miq': 19,
u'ubc': 19,
u'vp#': 19,
u'\xf6ry': 19,
u'#zd': 19,
u'\xfcr#': 19,
u'ur\xe9': 19,
u'+#i': 19,
u'el\xe9': 19,
u'#\xe1n': 19,
u'fca': 19,
u'#\xe5d': 19,
u'qas': 19,
u's\xf8p': 19,
u'apd': 19,
u'j\xfcr': 19,
u'ouj': 19,
u'n\xe9s': 19,
u'foa': 19,
u'khr': 19,
u'\xf8vd': 19,
u'fpe': 19,
u'\xe9tk': 19,
u'acd': 19,
u'hgr': 19,
u'se\xe5': 19,
u'nps': 19,
u'btv': 19,
u'mh#': 19,
u'la\xe2': 19,
u'\u03b1#a': 19,
u'mtb': 19,
u'htp': 19,
u'aog': 19,
u'ghb': 19,
u'o\xebl': 19,
u'as\xe6': 19,
u'b\xe9a': 19,
u'ycl': 19,
u'jlg': 19,
u'n#+': 19,
u'mc#': 19,
u'zui': 19,
u'qs#': 19,
u'\xe4in': 19,
u'\xe9ke': 19,
u'abj': 19,
u'b\xe6v': 19,
u'ikg': 19,
u'bi\xf8': 19,
u'daz': 19,
u'sih': 19,
u'#kd': 19,
u'\xe4ck': 19,
u'lbj': 19,
u'wf#': 19,
u'd\xe9g': 19,
u'igy': 19,
u'bfr': 19,
u'vny': 19,
u'wni': 19,
u'xre': 19,
u'p\xe9e': 18,
u'ir\xe3': 18,
u'\xf8tu': 18,
u'cui': 18,
u'sow': 18,
u't\xe9p': 18,
u'gwp': 18,
u'tvb': 18,
u'vlf': 18,
u'ugm': 18,
u'ijz': 18,
u'evl': 18,
u'r\xf3b': 18,
u'b\xad#': 18,
u'w#w': 18,
u'rb#': 18,
u'yrm': 18,
u'brt': 18,
u'\xf8el': 18,
u'wae': 18,
u'umc': 18,
u'tjm': 18,
u'pnu': 18,
u'j#w': 18,
u'uvl': 18,
u'uvo': 18,
u'\xf6ll': 18,
u'dza': 18,
u'\u0163a#': 18,
u'cew': 18,
u'd\xe6g': 18,
u'gms': 18,
u'wiw': 18,
u'\xe8le': 18,
u'#ns': 18,
u'#bk': 18,
u'\xf8nv': 18,
u'syo': 18,
u'eze': 18,
u'mi\u0144': 18,
u'\u0103ne': 18,
u'ah\xe6': 18,
u'urz': 18,
u'llf': 18,
u'll\xf3': 18,
u'll\xe5': 18,
u'iay': 18,
u'\xe1la': 18,
u'\xad##': 18,
u'r\xf8p': 18,
u'oyi': 18,
u'h\xe6k': 18,
u'ie\xf8': 18,
u'ixp': 18,
u'l\xe4n': 18,
u'z\xe9k': 18,
u'\xa3#m': 18,
u'bdo': 18,
u'oji': 18,
u'\xf3#a': 18,
u'\xf3#c': 18,
u'#vf': 18,
u'm\xe9m': 18,
u'#m\u0103': 18,
u'kdy': 18,
u'n\xed#': 18,
u'n\u0163a': 18,
u'xal': 18,
u'n\xe1r': 18,
u'\xe5ts': 18,
u'rdj': 18,
u'zif': 18,
u'kps': 18,
u'phj': 18,
u'unl': 18,
u'\xdf#b': 18,
u'i\xf3#': 18,
u'\xedto': 18,
u'co\xe6': 18,
u'ghv': 18,
u'lmf': 18,
u'jty': 18,
u'\xe5\xf8j': 18,
u'oxl': 18,
u's\xe4l': 18,
u'l\xe5\xf8': 18,
u'ul\xe1': 18,
u'\xe5k\xf8': 18,
u'\xfcck': 18,
u'l\xedv': 18,
u'tuc': 18,
u'pa\xed': 18,
u'css': 18,
u'\xfal#': 18,
u'\xfcti': 18,
u'af\xf8': 18,
u'cjo': 18,
u'es\xe4': 18,
u'\xfd#f': 18,
u'mwr': 18,
u'\u0107#s': 18,
u'ra\xfa': 18,
u'a\xfal': 18,
u'\xe1\u0161#': 18,
u'csp': 18,
u'gki': 18,
u'gkg': 18,
u'ssd': 18,
u'cbd': 18,
u'y#y': 18,
u'bfi': 18,
u'ljo': 18,
u'#t\xfc': 18,
u'vaa': 18,
u'dgw': 17,
u'os\xf8': 17,
u'oev': 17,
u'\xe4hr': 17,
u'zob': 17,
u'jtf': 17,
u'ivd': 17,
u'gfy': 17,
u'aav': 17,
u'rf\xe5': 17,
u'\xe6tr': 17,
u'\xf8tj': 17,
u'aem': 17,
u'krz': 17,
u'ayv': 17,
u'cm\xf8': 17,
u'mvv': 17,
u'gnd': 17,
u'\xe0#v': 17,
u'obf': 17,
u'\xe5in': 17,
u'fb#': 17,
u'tjs': 17,
u'no\xeb': 17,
u'no\u015b': 17,
u'loj': 17,
u'#sq': 17,
u'ce\xf8': 17,
u'wiz': 17,
u'ngg': 17,
u'djy': 17,
u'#j\xfa': 17,
u'c#w': 17,
u'gy\xf6': 17,
u'unp': 17,
u'\xe1\u010de': 17,
u'fm\xf8': 17,
u'q#o': 17,
u'uk\xf8': 17,
u'#nv': 17,
u'boj': 17,
u'vhi': 17,
u'\xdfbe': 17,
u'ajl': 17,
u'twc': 17,
u'\xe4#f': 17,
u'vb\xe6': 17,
u'mms': 17,
u't\xe6s': 17,
u'iil': 17,
u'mi\u015f': 17,
u'rct': 17,
u'\xe5lu': 17,
u'm\xe5i': 17,
u'v\xe5s': 17,
u'nn\xe9': 17,
u'\u010dar': 17,
u'hzb': 17,
u'ehs': 17,
u'ylf': 17,
u'i#\xf6': 17,
u'vcp': 17,
u'si\xf3': 17,
u'lt\xe5': 17,
u'jod': 17,
u'm\xe9l': 17,
u'tci': 17,
u'\xe2yo': 17,
u's\xfcd': 17,
u'#m\xe4': 17,
u'txe': 17,
u'txa': 17,
u'hox': 17,
u'xie': 17,
u'b\u0159e': 17,
u'ipd': 17,
u'xma': 17,
u'odz': 17,
u'pds': 17,
u'btw': 17,
u'vms': 17,
u'rvl': 17,
u'\xdf#h': 17,
u'u\xf1a': 17,
u'akd': 17,
u't\xe1l': 17,
u'ep\xf8': 17,
u'm\u0103n': 17,
u'spg': 17,
u'aev': 17,
u'lmd': 17,
u'\xfaca': 17,
u'zau': 17,
u'\xf6w#': 17,
u'jtt': 17,
u'vut': 17,
u'ccu': 17,
u'a\xe2y': 17,
u'as\xf8': 17,
u'liq': 17,
u'\xdf#s': 17,
u'a\xfcl': 17,
u'hbo': 17,
u'jpl': 17,
u'l#\xe1': 17,
u'z#\xf8': 17,
u'yck': 17,
u'd\xfcs': 17,
u'n#\xf6': 17,
u'zum': 17,
u'r\xed#': 17,
u'\xe6sm': 17,
u'\xedk#': 17,
u'r\xe9v': 17,
u'jze': 17,
u't\xe0s': 17,
u'zf#': 17,
u'nfp': 17,
u'bum': 17,
u'ajm': 17,
u'ra\xfc': 17,
u'zb\xe5': 17,
u'nuh': 17,
u'wfp': 17,
u'ti\xe1': 17,
u'\xe6pe': 17,
u'lmt': 17,
u'\xedr#': 17,
u'lno': 17,
u'#pb': 17,
u'\xedko': 17,
u'a\xdfb': 17,
u'ibf': 17,
u'us\xe5': 17,
u'vcs': 17,
u'\xf8pe': 17,
u'j\xfac': 17,
u'skd': 17,
u'\u015foa': 16,
u'fiy': 16,
u'tzl': 16,
u'hao': 16,
u'fmy': 16,
u'#c\xad': 16,
u'f\xe6e': 16,
u'bv#': 16,
u'ps\xe5': 16,
u'w#j': 16,
u'\xe9n\xe9': 16,
u'ofn': 16,
u'nv#': 16,
u'wex': 16,
u'a#\xf6': 16,
u'br#': 16,
u'h\xe1c': 16,
u'jv\xe6': 16,
u'fbo': 16,
u'sox': 16,
u'pn#': 16,
u'mrk': 16,
u'dz#': 16,
u'\xfcbe': 16,
u'aua': 16,
u'e\xf1o': 16,
u'#zl': 16,
u'ayk': 16,
u'\u0159ez': 16,
u'oiz': 16,
u'uje': 16,
u'pca': 16,
u'\xe9ir': 16,
u'ch\xe8': 16,
u'\xf8bu': 16,
u'\xf8b\xe6': 16,
u'\xe9me': 16,
u'v\xe1s': 16,
u'oah': 16,
u'zdr': 16,
u'ecf': 16,
u'kuy': 16,
u'r\xfcn': 16,
u'elx': 16,
u'llc': 16,
u'ro\u015f': 16,
u'm\xe1l': 16,
u'\xe1lk': 16,
u'lhu': 16,
u'upb': 16,
u'\xf2#h': 16,
u'exd': 16,
u'\u0161t\xe1': 16,
u'crd': 16,
u'##x': 16,
u'myc': 16,
u'f\xfcr': 16,
u'\u015fia': 16,
u'rm\xe9': 16,
u've\xe6': 16,
u'ipk': 16,
u'ipm': 16,
u'ttm': 16,
u'saj': 16,
u'wpr': 16,
u'oh\xe1': 16,
u'\xf8so': 16,
u'evh': 16,
u'exr': 16,
u'lx#': 16,
u'uai': 16,
u'zeu': 16,
u'ntc': 16,
u'ktz': 16,
u'ykf': 16,
u'wc#': 16,
u'ytu': 16,
u'\xf8ul': 16,
u'\u016bt\u0117': 16,
u'l\xfct': 16,
u'yok': 16,
u'mpc': 16,
u'mpk': 16,
u'egk': 16,
u'st\xed': 16,
u'wou': 16,
u'won': 16,
u'cgt': 16,
u'hlm': 16,
u'awo': 16,
u'fge': 16,
u'otz': 16,
u'i\u015fo': 16,
u'#l\xe4': 16,
u'rlb': 16,
u'r\xe1i': 16,
u'baf': 16,
u'wwe': 16,
u'uap': 16,
u'oof': 16,
u's\xf3#': 16,
u'bm#': 16,
u'\xf8lp': 16,
u'\xfas#': 16,
u'a\u0161t': 16,
u'ggy': 16,
u'c\xad#': 16,
u'#dk': 16,
u'ik\xe5': 16,
u'ewf': 16,
u'f#\xf6': 16,
u'byn': 16,
u'vch': 16,
u'\xf8d\xe6': 16,
u'p#\xe6': 16,
u'vvo': 16,
u'cn#': 16,
u'nnt': 16,
u'o\u015fi': 16,
u'd\xe9p': 16,
u'o\u015b\u0107': 16,
u'r#+': 16,
u'\xe5br': 16,
u'cfi': 16,
u'\xe6gi': 16,
u'ybv': 16,
u'o\xe6k': 16,
u'ebp': 16,
u'lvn': 16,
u'lmb': 15,
u'\u0161\xedk': 15,
u'\u0144#i': 15,
u'ufp': 15,
u'ukh': 15,
u'rzi': 15,
u'kja': 15,
u'srb': 15,
u'pf\xe6': 15,
u'\xe1#t': 15,
u'#k\xe1': 15,
u'\xedvi': 15,
u'sjn': 15,
u'mp\xe5': 15,
u'lc#': 15,
u'\xe0#d': 15,
u'\xe0#e': 15,
u'\xe0#s': 15,
u'rn\xe9': 15,
u'obp': 15,
u'ibm': 15,
u'yil': 15,
u'uvn': 15,
u'jrt': 15,
u'\xeatr': 15,
u'tfj': 15,
u'iyy': 15,
u'hjo': 15,
u'#wf': 15,
u'eae': 15,
u'bcp': 15,
u'me\xe6': 15,
u'ovy': 15,
u'fnv': 15,
u'y\xf6r': 15,
u'\u010d\xe1k': 15,
u'zs#': 15,
u'km\xe5': 15,
u'iur': 15,
u'o#x': 15,
u'sya': 15,
u'ld\xf8': 15,
u'iio': 15,
u'gmy': 15,
u'q#h': 15,
u'#z#': 15,
u's\xedn': 15,
u'img': 15,
u'im\xed': 15,
u'mu\xf1': 15,
u'r\xfcg': 15,
u'+#d': 15,
u'yua': 15,
u'iah': 15,
u'tkk': 15,
u'pob': 15,
u'\xe9el': 15,
u'oyc': 15,
u'xp#': 15,
u'#vw': 15,
u'joz': 15,
u'vtf': 15,
u'mdy': 15,
u'\xf1iz': 15,
u'nxp': 15,
u'gd\xe6': 15,
u'it\xeb': 15,
u'le\xf3': 15,
u'cku': 15,
u'cky': 15,
u'tl\xe9': 15,
u'unw': 15,
u'xed': 15,
u'u\xf1i': 15,
u'm\xe4h': 15,
u'spm': 15,
u'vup': 15,
u'uxa': 15,
u'ccl': 15,
u'jhu': 15,
u'xec': 15,
u'h\xe6t': 15,
u'l\xf8#': 15,
u'kca': 15,
u'cdp': 15,
u'yci': 15,
u'j\u010d\xe1': 15,
u'e\xf3n': 15,
u'zyn': 15,
u'jln': 15,
u'#l\xed': 15,
u'r\xe1\u010d': 15,
u'tyc': 15,
u'bae': 15,
u'\xedoc': 15,
u'd#\xf6': 15,
u'moj': 15,
u'hya': 15,
u'e\xe6k': 15,
u'okf': 15,
u'#d\xe1': 15,
u'r\xe9e': 15,
u'udw': 15,
u'zfa': 15,
u'ksg': 15,
u'ys\xe9': 15,
u'\u0107#i': 15,
u'ajj': 15,
u'aj\u010d': 15,
u'ocn': 15,
u'ywa': 15,
u'ryf': 15,
u'an\u0103': 15,
u'gke': 15,
u'dya': 15,
u'wbe': 15,
u'cbi': 15,
u'uzl': 15,
u'rrs': 15,
u'jm\xe5': 14,
u'cfr': 14,
u'frk': 14,
u'ezf': 14,
u'\xe9ju': 14,
u'gfe': 14,
u'#g\xf3': 14,
u'\u0161#d': 14,
u'\xe1#b': 14,
u'\xe1#m': 14,
u'jau': 14,
u'aei': 14,
u'lgg': 14,
u'e\xe1n': 14,
u'vs\xe5': 14,
u'wef': 14,
u'er\xed': 14,
u'mvr': 14,
u'mvt': 14,
u'wze': 14,
u'jve': 14,
u'uza': 14,
u'pnt': 14,
u't\u0117s': 14,
u'#\xe2#': 14,
u'#s\xe8': 14,
u'ffh': 14,
u'ncb': 14,
u'#\xe6v': 14,
u's\xe6i': 14,
u'd\xe6i': 14,
u'tbv': 14,
u'xst': 14,
u'#nx': 14,
u'fss': 14,
u'\xe6lb': 14,
u'\xf3be': 14,
u'#b\u0159': 14,
u'n\u0103#': 14,
u'jba': 14,
u'rgv': 14,
u'iiu': 14,
u'\xe1di': 14,
u'rfb': 14,
u'dcr': 14,
u'\xe6#k': 14,
u'\xe6##': 14,
u'xxx': 14,
u'q#s': 14,
u'q#d': 14,
u'nhi': 14,
u'r\xfcc': 14,
u'+#h': 14,
u'ppy': 14,
u'g#\xed': 14,
u'oyn': 14,
u'e\xf8r': 14,
u'\xe9#c': 14,
u'jsc': 14,
u'\xf2#o': 14,
u'tgu': 14,
u'tg\xe6': 14,
u'j\xf8\xad': 14,
u'#i+': 14,
u'tph': 14,
u'bda': 14,
u'apy': 14,
u'\xf3#k': 14,
u'\xf3#t': 14,
u'vav': 14,
u's#+': 14,
u'myd': 14,
u'\xe5ho': 14,
u'at\xe1': 14,
u'xil': 14,
u'\u015b\u0107#': 14,
u'jkr': 14,
u'fov': 14,
u'khk': 14,
u'\xf8ky': 14,
u'\xe9th': 14,
u'\xe9ts': 14,
u'wp#': 14,
u'ddl': 14,
u'oh\xe5': 14,
u'uip': 14,
u'\xfcne': 14,
u'\xe4fn': 14,
u'npp': 14,
u'vm\xf8': 14,
u'eoa': 14,
u'\xe9lo': 14,
u'\u0161ov': 14,
u'ypn': 14,
u'co\xf8': 14,
u'vd\xe6': 14,
u'x#j': 14,
u'lmv': 14,
u'tdu': 14,
u'r\u016bn': 14,
u'z#\xe6': 14,
u'yco': 14,
u'ulc': 14,
u'shc': 14,
u'\xe9sb': 14,
u'\xfcgg': 14,
u'ar\u016b': 14,
u'\xe3os': 14,
u'is\xed': 14,
u'r\xedo': 14,
u'akf': 14,
u'fln': 14,
u'#t\xe1': 14,
u'csd': 14,
u'okd': 14,
u'okg': 14,
u'h\xe8l': 14,
u'vnp': 14,
u'r\xe9j': 14,
u'ewz': 14,
u'mks': 14,
u'\xe9ot': 14,
u't\xe0b': 14,
u'es\xfa': 14,
u'mwe': 14,
u'f\xe9r': 14,
u'gnl': 14,
u'+ko': 14,
u'nng': 14,
u'd\xf8l': 14,
u'rm\xe1': 14,
u'cbc': 14,
u'nb#': 14,
u'ri\u0161': 14,
u'ta\u015f': 14,
u'fev': 14,
u'kbi': 14,
u'h#q': 14,
u's\xfas': 14,
u'xr\xe5': 14,
u'o\xe6n': 14,
u'tji': 14,
u'mff': 14,
u'lmh': 13,
u'\xf8\xad#': 13,
u'jme': 13,
u'\xe6kb': 13,
u'zok': 13,
u'zou': 13,
u'zow': 13,
u'a\xed#': 13,
u'ezr': 13,
u'mno': 13,
u'i\xe6n': 13,
u'crc': 13,
u'nrp': 13,
u'ijm': 13,
u'ev\xf8': 13,
u'ucu': 13,
u'ucr': 13,
u'ofg': 13,
u'\xe5m\xf8': 13,
u'kr\u0161': 13,
u'hrv': 13,
u'rnn': 13,
u'waf': 13,
u'yvp': 13,
u'cae': 13,
u'\xe8te': 13,
u'rwo': 13,
u'\xed#s': 13,
u'uv#': 13,
u'bgr': 13,
u'aqs': 13,
u'\xe6rc': 13,
u'fps': 13,
u'#wg': 13,
u'bco': 13,
u'bcv': 13,
u'dr\u010d': 13,
u'zwe': 13,
u'\xf8\xe6n': 13,
u'\xe5#q': 13,
u'fsb': 13,
u'zsa': 13,
u'gey': 13,
u'ge\xf8': 13,
u'\u0142ow': 13,
u'vh\xe6': 13,
u'bku': 13,
u'hfa': 13,
u'\u0148ov': 13,
u's\xe9g': 13,
u'qur': 13,
u'ohy': 13,
u't\xe2n': 13,
u'rcl': 13,
u'oae': 13,
u'i+#': 13,
u'q#f': 13,
u'#w\xf6': 13,
u'mui': 13,
u'\xe9an': 13,
u'#\xe1#': 13,
u'\xdf#m': 13,
u'uyu': 13,
u'g\xe5n': 13,
u'v\xe9s': 13,
u'rkf': 13,
u'\xa3##': 13,
u'r\u010da': 13,
u'apj': 13,
u'apb': 13,
u'\xf3#r': 13,
u'#vy': 13,
u'dll': 13,
u'jog': 13,
u'##\xec': 13,
u'm\xe9r': 13,
u'xua': 13,
u'\xe8me': 13,
u'j\xf8c': 13,
u'jk\xf8': 13,
u'ttc': 13,
u'v#\xe9': 13,
u'axm': 13,
u'\xebl#': 13,
u'\xebll': 13,
u'\xe9te': 13,
u't\xeb#': 13,
u'acb': 13,
u'uio': 13,
u'cpd': 13,
u'\u0117#m': 13,
u'se\xe1': 13,
u'ckk': 13,
u'xe#': 13,
u'ptt': 13,
u'akc': 13,
u't\xf8s': 13,
u'wg#': 13,
u'\xf6s#': 13,
u'nbj': 13,
u'thp': 13,
u'r#\u010d': 13,
u'ykr': 13,
u'xyp': 13,
u'\xf8go': 13,
u'gh\xe5': 13,
u'ghg': 13,
u'aee': 13,
u'\u015f#m': 13,
u'\u015f#f': 13,
u'h\xf8#': 13,
u'jpa': 13,
u'cdf': 13,
u'cdk': 13,
u'be\u0148': 13,
u'hlb': 13,
u'ecn': 13,
u'inx': 13,
u'ot\xe6': 13,
u'jld': 13,
u'mch': 13,
u'di\xf8': 13,
u'di\xf1': 13,
u'zno': 13,
u'bmt': 13,
u'e\xe6d': 13,
u'\xadgr': 13,
u'n#\xf3': 13,
u're\xe6': 13,
u'daj': 13,
u'io+': 13,
u'ys\xe6': 13,
u'#=#': 13,
u'e\u0148o': 13,
u'a\xe5r': 13,
u'thn': 13,
u'epd': 13,
u'lcu': 13,
u'cnu': 13,
u'r\u0161k': 13,
u'pmp': 13,
u'gk\xe6': 13,
u'#v\xed': 13,
u'ssh': 13,
u'cbf': 13,
u'te\xf8': 13,
u'r#\xe0': 13,
u'i\u0161t': 13,
u'm\xf8k': 13,
u'izs': 13,
u'#t\xe9': 13,
u'#tn': 13,
u'us\u0142': 13,
u'\xe6gd': 13,
u'vct': 13,
u'h#\xf8': 13,
u'hmb': 13,
u'lvj': 13,
u'p\xe9t': 12,
u'jmo': 12,
u'yfl': 12,
u'yf\xe6': 12,
u'rzw': 12,
u'rzs': 12,
u'\xfcls': 12,
u'uoe': 12,
u'scs': 12,
u'pb#': 12,
u'j\xf8\xe6': 12,
u'f\xe6k': 12,
u'n\xb4s': 12,
u'jey': 12,
u'p\xf6s': 12,
u'+be': 12,
u'evd': 12,
u'\xe1#i': 12,
u'k\xe5b': 12,
u'irz': 12,
u'rbs': 12,
u'\xf6mb': 12,
u'mvg': 12,
u'yvd': 12,
u'\xe6im': 12,
u'\u0107s#': 12,
u'\xe3es': 12,
u'\xed#m': 12,
u'li\u0107': 12,
u'#ax': 12,
u'ifj': 12,
u'gmp': 12,
u'eeh': 12,
u'i\xe9#': 12,
u'dvm': 12,
u'ozu': 12,
u'yaz': 12,
u'c#\xf8': 12,
u'hnv': 12,
u'auo': 12,
u'\xe2nc': 12,
u'\u0107#e': 12,
u'jja': 12,
u'\xe4m\xe4': 12,
u'\xe5#+': 12,
u'ayf': 12,
u'e#\xe0': 12,
u'u\xdf#': 12,
u'sv\xe1': 12,
u'jfl': 12,
u'jfr': 12,
u'prm': 12,
u'\xe4#h': 12,
u'f#x': 12,
u'#\xe9i': 12,
u'#\xe9l': 12,
u'jbr': 12,
u'miy': 12,
u'xdi': 12,
u'\xf8fu': 12,
u'kui': 12,
u'kua': 12,
u'q#e': 12,
u'cyl': 12,
u'mux': 12,
u'\xe9ar': 12,
u'roq': 12,
u'kya': 12,
u'poy': 12,
u'oyb': 12,
u'jee': 12,
u'#r\xe1': 12,
u'#i\xf1': 12,
u'ylg': 12,
u'ylh': 12,
u'l\xe4i': 12,
u'tps': 12,
u'f\xf8n': 12,
u'soo': 12,
u'm\xe9f': 12,
u'kdp': 12,
u'a\xf8e': 12,
u'lp\xe5': 12,
u'zt#': 12,
u'jkl': 12,
u'foz': 12,
u'kht': 12,
u'nx#': 12,
u'#a+': 12,
u'dym': 12,
u'\xf8ke': 12,
u'uiv': 12,
u'\u0117#f': 12,
u'evb': 12,
u'bh#': 12,
u'xar': 12,
u'ctv': 12,
u'ctf': 12,
u'le\xe6': 12,
u's\xe8t': 12,
u'btm': 12,
u'\xe1rm': 12,
u'zep': 12,
u'm\xe4l': 12,
u'bps': 12,
u'ykj': 12,
u'ykp': 12,
u'ccm': 12,
u'mpm': 12,
u'egh': 12,
u'st\xe2': 12,
u'oxh': 12,
u'a\xe6g': 12,
u'h\xfcr': 12,
u'#hk': 12,
u'ul\xe5': 12,
u'shp': 12,
u'zyp': 12,
u'vfy': 12,
u'#\xecg': 12,
u'u\xe1r': 12,
u'l\xe1#': 12,
u'n#\xe0': 12,
u'baw': 12,
u'gex': 12,
u'kkn': 12,
u'++#': 12,
u's\xf3l': 12,
u'bme': 12,
u'bmi': 12,
u'u\xf8s': 12,
u'dew': 12,
u'\u0142os': 12,
u'cfl': 12,
u'a\u015f#': 12,
u'\u015bwi': 12,
u'l\xe9g': 12,
u'ewl': 12,
u'biu': 12,
u'feg': 12,
u'mke': 12,
u'sds': 12,
u're\u0161': 12,
u'\xe5no': 12,
u'ysd': 12,
u'h#y': 12,
u'enq': 12,
u'wsi': 12,
u'aju': 12,
u'yja': 12,
u'd\xe9f': 12,
u'g\u0142o': 12,
u'\xe9gi': 12,
u'prz': 12,
u'h\xe4m': 12,
u'rv\xe9': 12,
u'm\xe9g': 12,
u'\xe5fe': 12,
u'ju\xe1': 12,
u'\xe1re': 12,
u'izu': 12,
u'p+s': 12,
u'zze': 12,
u'wse': 12,
u'ca\xed': 12,
u'bbb': 12,
u'lmr': 11,
u'jm#': 11,
u'r+p': 11,
u'kfs': 11,
u'ir\xe9': 11,
u'ir\xe6': 11,
u'vkv': 11,
u'k\xf6p': 11,
u'\xf1or': 11,
u'j#\xe5': 11,
u't\xe9l': 11,
u'#rr': 11,
u'rz#': 11,
u'zop': 11,
u'fiv': 11,
u'yzo': 11,
u'tv\xe1': 11,
u't#+': 11,
u'xot': 11,
u'wra': 11,
u'k\xe1r': 11,
u'ugj': 11,
u'#g\u0142': 11,
u'ijb': 11,
u'\xe6to': 11,
u'sgh': 11,
u'pfs': 11,
u'dbn': 11,
u'irj': 11,
u'aej': 11,
u'uc\xed': 11,
u'ofc': 11,
u'm\xe6h': 11,
u'a+#': 11,
u'z\xf3#': 11,
u'd\xeat': 11,
u'\xb5gm': 11,
u'fff': 11,
u'\xe9fo': 11,
u'#sg': 11,
u'ifp': 11,
u'tfs': 11,
u'iyd': 11,
u'bgi': 11,
u'pww': 11,
u'aqa': 11,
u'lkd': 11,
u'fpi': 11,
u'jno': 11,
u'#wz': 11,
u'ngj': 11,
u'ng+': 11,
u'c#+': 11,
u'auh': 11,
u'mew': 11,
u'kiy': 11,
u'yek': 11,
u'#nm': 11,
u'xli': 11,
u'cqs': 11,
u'oik': 11,
u'#\xdf#': 11,
u'#bd': 11,
u'\xef#o': 11,
u'pct': 11,
u'pcl': 11,
u'#\xe9r': 11,
u'#hb': 11,
u's\xe9m': 11,
u'xda': 11,
u'\xf8br': 11,
u'\xe9mi': 11,
u'gmb': 11,
u'\xe6#g': 11,
u'rcg': 11,
u'\xf8fj': 11,
u'nwo': 11,
u'#zs': 11,
u'm\xe5v': 11,
u'im\xf3': 11,
u'+#b': 11,
u'tvl': 11,
u'giy': 11,
u'ahj': 11,
u'tb\xe6': 11,
u'v\xe9d': 11,
u'v\xe9l': 11,
u'al\xe1': 11,
u'lh\xe3': 11,
u'ie\xe6': 11,
u'e\xf8v': 11,
u'jsh': 11,
u'ie\xe5': 11,
u'fgu': 11,
u'#ij': 11,
u'\u010d#f': 11,
u'hks': 11,
u'ltc': 11,
u'\xf3#h': 11,
u'##z': 11,
u'##\xe2': 11,
u'rpp': 11,
u'\u010die': 11,
u'\xf3me': 11,
u'l\xe0s': 11,
u'zti': 11,
u'i\u0107s': 11,
u'fcc': 11,
u'g\xe9#': 11,
u'n\xedk': 11,
u'\xe9ti': 11,
u'ddd': 11,
u'acm': 11,
u'uix': 11,
u'cpo': 11,
u'cpe': 11,
u'tpw': 11,
u'se\xf1': 11,
u'sew': 11,
u'xap': 11,
u'xam': 11,
u'vmp': 11,
u'\xf8ce': 11,
u'#\u010dr': 11,
u'\xb0#c': 11,
u'g\xf8e': 11,
u'\xf8gg': 11,
u'ytb': 11,
u'uxi': 11,
u'yof': 11,
u'puf': 11,
u'dtm': 11,
u'\xe6fi': 11,
u'idz': 11,
u'vb#': 11,
u'l\xe5m': 11,
u'bex': 11,
u'dpf': 11,
u'otw': 11,
u'fhy': 11,
u'tyb': 11,
u'\u0142as': 11,
u'\xedo#': 11,
u'kkk': 11,
u'kk\xe6': 11,
u'\xe6sh': 11,
u'pa\xf1': 11,
u'#d\xea': 11,
u'r\xe9n': 11,
u'pe\xad': 11,
u'g\xf3m': 11,
u'f\xe5m': 11,
u'\xebs#': 11,
u'zff': 11,
u'mwa': 11,
u'\xe7\xe3o': 11,
u'\xe9co': 11,
u'raq': 11,
u'i\xf1i': 11,
u'ggo': 11,
u'cny': 11,
u'\xe1no': 11,
u'w\xf6g': 11,
u'pms': 11,
u'pmg': 11,
u'd\xe9j': 11,
u'\xe9ga': 11,
u'prg': 11,
u'prv': 11,
u'gv#': 11,
u'faa': 11,
u'gr#': 11,
u'ibg': 11,
u'fez': 11,
u'\xfcdd': 11,
u'wsp': 11,
u'ebd': 11,
u'\xf8\xe5r': 10,
u'\xe6kd': 10,
u'lrs': 10,
u't\xe9m': 10,
u'on\xe7': 10,
u'#c\xe9': 10,
u'heh': 10,
u'pbt': 10,
u'pbi': 10,
u'aai': 10,
u'wzn': 10,
u'lzo': 10,
u'\xe1#n': 10,
u'dbm': 10,
u'aeo': 10,
u'cmo': 10,
u'cmm': 10,
u'nkw': 10,
u'erq': 10,
u'aiu': 10,
u'vw#': 10,
u'l\xf2#': 10,
u'pna': 10,
u'\xb5g#': 10,
u'srm': 10,
u'rj#': 10,
u'\xf8#c': 10,
u'\xed#h': 10,
u'wmo': 10,
u'jru': 10,
u'ffm': 10,
u'dtk': 10,
u'#++': 10,
u'fju': 10,
u'yav': 10,
u'#jh': 10,
u'#js': 10,
u'mzo': 10,
u'eaa': 10,
u'bca': 10,
u'me\xf8': 10,
u'au\u015f': 10,
u'\u03b1\u03c4\u03bf': 10,
u'\u0105sk': 10,
u'\xf6rs': 10,
u'axb': 10,
u'sn#': 10,
u'c\xe1d': 10,
u'zso': 10,
u'dk#': 10,
u'\u010dr#': 10,
u'jfa': 10,
u'\u0142o#': 10,
u'\xf3ba': 10,
u'#bf': 10,
u'hf#': 10,
u'u\u015fe': 10,
u'mmp': 10,
u'o#\xe9': 10,
u't\xe6#': 10,
u'vbl': 10,
u'nsw': 10,
u'iie': 10,
u'tsz': 10,
u'bwa': 10,
u'nwr': 10,
u'sct': 10,
u'q#i': 10,
u'vpi': 10,
u'cl#': 10,
u'\u010des': 10,
u'\xe6dc': 10,
u'\xe6dk': 10,
u'r\xe6o': 10,
u'vts': 10,
u'vt\xf8': 10,
u'yu#': 10,
u'+mo': 10,
u'\xe1ls': 10,
u'fci': 10,
u'l\u0105s': 10,
u'r\xf8a': 10,
u'\xe9ep': 10,
u'suz': 10,
u'#r\xed': 10,
u'jsy': 10,
u'\xf2#r': 10,
u'qab': 10,
u'ahd': 10,
u'i#+': 10,
u'\xf3#b': 10,
u'vih': 10,
u'cr\xe8': 10,
u'tcs': 10,
u'tcm': 10,
u's#\xe1': 10,
u'#m\xed': 10,
u'xa#': 10,
u'fhu': 10,
u'zto': 10,
u'prt': 10,
u'dhf': 10,
u'dhv': 10,
u'jkn': 10,
u'kh\xe6': 10,
u'k\u016bn': 10,
u'a\xefd': 10,
u'g\xe9n': 10,
u'\xf8va': 10,
u'#\u03b2#': 10,
u'bln': 10,
u'\u015fes': 10,
u'gdf': 10,
u'dd\xe5': 10,
u'oh\xf8': 10,
u'zmi': 10,
u'r\xe8m': 10,
u'it\xe0': 10,
u'\xe1gn': 10,
u'exl': 10,
u'tpp': 10,
u'\xefd#': 10,
u'\xe9z#': 10,
u'jcb': 10,
u'od\xe6': 10,
u'zip': 10,
u'p\xf8b': 10,
u'ihv': 10,
u'ihd': 10,
u'\xe1ks': 10,
u'btj': 10,
u'etx': 10,
u'\xe6hr': 10,
u'hpv': 10,
u'\u0111el': 10,
u'ptd': 10,
u'rk\u016b': 10,
u'glb': 10,
u'\u010d#h': 10,
u'yps': 10,
u'ni\xe9': 10,
u'ni\xe5': 10,
u'xye': 10,
u'ek\xe5': 10,
u'htr': 10,
u'ekp': 10,
u'aod': 10,
u'ghz': 10,
u'#\u015bl': 10,
u'h\xe3e': 10,
u'vuo': 10,
u'uxr': 10,
u'u\xe9b': 10,
u'\xe9dr': 10,
u'\xdf#i': 10,
u'ma\u0142': 10,
u'ut\xe9': 10,
u'#hz': 10,
u'r\xe5h': 10,
u'#\u03bd\u03b1': 10,
u'pyw': 10,
u'mgj': 10,
u'otd': 10,
u'dmp': 10,
u'ne\xe5': 10,
u'r\xe1s': 10,
u'#\xfcb': 10,
u'\u015bl\u0105': 10,
u'd#\u0161': 10,
u'xne': 10,
u'de\xe5': 10,
u'ko\u0161': 10,
u'uht': 10,
u'\xf6hl': 10,
u'ta\u0142': 10,
u'l\xe9e': 10,
u'mk#': 10,
u'gc#': 10,
u'siy': 10,
u'\xe5si': 10,
u'og\u0142': 10,
u'fcp': 10,
u'ixs': 10,
u'#k\xfc': 10,
u'\xe5n\xf8': 10,
u'fy#': 10,
u'esw': 10,
u'buo': 10,
u'\u0107#h': 10,
u'aii': 10,
u'lb\xe5': 10,
u'kwh': 10,
u'lcs': 10,
u'g\u0142a': 10,
u'ej\xf8': 10,
u'\xe9go': 10,
u'hux': 10,
u'mmn': 10,
u'r#\xe1': 10,
u'r\xe6u': 10,
u'r\xe6h': 10,
u'bfl': 10,
u'#df': 10,
u'gr\xf3': 10,
u'gr\xe9': 10,
u'\u03bd\u03b1\u03c4': 10,
u'#tt': 10,
u'#tl': 10,
u'rr\xe1': 10,
u'rr\xe0': 10,
u'p\xe5e': 10,
u'd\xe1i': 10,
u'yb\xe1': 10,
u'l\xe6#': 10,
u'\xe9rb': 9,
u'x#w': 9,
u'p\xe9c': 9,
u'\xefs#': 9,
u'ir\xfa': 9,
u'cuk': 9,
u'mbd': 9,
u'mb\xe5': 9,
u'mb\xe6': 9,
u'os\u0142': 9,
u'on\xe9': 9,
u'on\xe5': 9,
u'zom': 9,
u'#ck': 9,
u'a\xedt': 9,
u'bja': 9,
u'\xf8mo': 9,
u't\xe5n': 9,
u'\xe1#e': 9,
u'\xe1#v': 9,
u'euw': 9,
u'rbm': 9,
u'\xfail': 9,
u'nvc': 9,
u'm\xe6c': 9,
u'm\xe6u': 9,
u'a#\xe0': 9,
u'a#\xa3': 9,
u'm\xfct': 9,
u'mvd': 9,
u'udc': 9,
u'gnk': 9,
u'\xe0#m': 9,
u'\xe0#a': 9,
u'obv': 9,
u'k#x': 9,
u'wac': 9,
u'i\u0161o': 9,
u'azy': 9,
u'eiv': 9,
u'\xe9fr': 9,
u'v\xe8n': 9,
u'\xf8#j': 9,
u'\xed#e': 9,
u'jrl': 9,
u'cej': 9,
u'ifb': 9,
u'ifc': 9,
u'uv\xe5': 9,
u'y\xf8e': 9,
u'd\xe6e': 9,
u'iys': 9,
u'l\u010dy': 9,
u'lkh': 9,
u'ozs': 9,
u'e+f': 9,
u'#ws': 9,
u'yau': 9,
u'yae': 9,
u'\u0142\xf3d': 9,
u's\xe4c': 9,
u'vyt': 9,
u'vlr': 9,
u'\xe5#\xba': 9,
u'#nj': 9,
u'axw': 9,
u'zs\xe4': 9,
u'gew': 9,
u'jfn': 9,
u'cly': 9,
u'oix': 9,
u'\xef#s': 9,
u'iuc': 9,
u'\xe1\u0161k': 9,
u'hfl': 9,
u'\xe4#o': 9,
u'pcp': 9,
u'\xe1\xf1e': 9,
u'oeh': 9,
u'nsz': 9,
u'b\xe4c': 9,
u'ezh': 9,
u's\xe9p': 9,
u'vlu': 9,
u'shl': 9,
u'#\xf6s': 9,
u'uia': 9,
u'l\xe5b': 9,
u'y\xe1\xf1': 9,
u'gmf': 9,
u'u\u0161n': 9,
u'\xe6#n': 9,
u'uby': 9,
u'+in': 9,
u'imn': 9,
u'm\xe5j': 9,
u'\xf6#o': 9,
u'+#a': 9,
u'xxv': 9,
u'+#u': 9,
u'+#p': 9,
u'\xf8fr': 9,
u'\xf8f\xf8': 9,
u'pyl': 9,
u'awf': 9,
u'ah\xe1': 9,
u'kyf': 9,
u'nl\xe5': 9,
u'\u0117ni': 9,
u'\xf8zo': 9,
u'r\xf8o': 9,
u'\xe9j\xe0': 9,
u'#\xe5#': 9,
u'al\u010d': 9,
u'o\u017eu': 9,
u'jsu': 9,
u'iez': 9,
u's\xf8b': 9,
u'\u010d#s': 9,
u'apg': 9,
u'fk\xe6': 9,
u'#+f': 9,
u'a\xf8k': 9,
u'r\xe0s': 9,
u'l\xe0b': 9,
u'j\xf8z': 9,
u'md#': 9,
u'xib': 9,
u'\xe9ph': 9,
u'sjv': 9,
u'ol\xf2': 9,
u'qi#': 9,
u'yd\xe5': 9,
u'ip\xe6': 9,
u'k\xf8s': 9,
u'sax': 9,
u'b\xfcl': 9,
u'acn': 9,
u'ohd': 9,
u'cpi': 9,
u'cpm': 9,
u'\xf8sf': 9,
u'\xf8sm': 9,
u'hg#': 9,
u'pd\xe6': 9,
u'xak': 9,
u'mly': 9,
u'b\xf8v': 9,
u'npm': 9,
u'npd': 9,
u'npb': 9,
u'\u03b5\u03bc\u03b1': 9,
u'kpp': 9,
u'a\xadp': 9,
u'a\xad#': 9,
u'\xdf#a': 9,
u'ak\xf8': 9,
u'\xe3ob': 9,
u'\u010dyt': 9,
u'\xe5po': 9,
u'\u010d#m': 9,
u'o\u0161\xed': 9,
u'yph': 9,
u'b\xe1\u0161': 9,
u'ni\xe8': 9,
u'ni\xe6': 9,
u'th\xfc': 9,
u'epk': 9,
u'yku': 9,
u'plc': 9,
u'plm': 9,
u'xyd': 9,
u'#\u0111e': 9,
u'htd': 9,
u'htl': 9,
u'spn': 9,
u'#\u015bw': 9,
u'gbs': 9,
u'x#y': 9,
u'\u015f#e': 9,
u'vus': 9,
u'uxp': 9,
u'eg\xe9': 9,
u'\u017eu\u0161': 9,
u'xei': 9,
u'gp#': 9,
u'b\xe9c': 9,
u'jp#': 9,
u'l#\xb0': 9,
u'ru\xf1': 9,
u'ma\xf1': 9,
u'cdb': 9,
u'j\xe0#': 9,
u'hld': 9,
u'ecq': 9,
u'mgm': 9,
u'\xfds#': 9,
u'wki': 9,
u'ne\xf8': 9,
u'#lm': 9,
u'r\xe1d': 9,
u'xu#': 9,
u'tyi': 9,
u'mcy': 9,
u'b\xf6r': 9,
u'ul\u0117': 9,
u'ooc': 9,
u'\u010d#d': 9,
u'l\u0117n': 9,
u'$##': 9,
u'a\u0161e': 9,
u'i\xfai': 9,
u'o\u0161#': 9,
u'csl': 9,
u'csc': 9,
u'ko\u017e': 9,
u'v\xedt': 9,
u'r\xe9g': 9,
u'l\xe9m': 9,
u'l\xe9t': 9,
u'bij': 9,
u'bii': 9,
u'dax': 9,
u'm\xfch': 9,
u'cw#': 9,
u'#xu': 9,
u't\u0117m': 9,
u'ys\xf8': 9,
u'nfc': 9,
u'#kc': 9,
u'#ks': 9,
u'\u0107#d': 9,
u'\u0161n\xed': 9,
u'lb\xe6': 9,
u'\xf6pi': 9,
u'icb': 9,
u'cne': 9,
u'ti\xfa': 9,
u'nnh': 9,
u'\xf3sa': 9,
u'\u0117mi': 9,
u'd\xe9l': 9,
u'\u03bc\u03b1s': 9,
u'hfu': 9,
u'lnp': 9,
u'nb\xe5': 9,
u'tez': 9,
u'pvt': 9,
u'\xf3d\u017a': 9,
u'#ds': 9,
u'#tk': 9,
u'ibp': 9,
u'+sy': 9,
u'vcj': 9,
u'#kp': 9,
u'ebc': 9,
u'sk\xfd': 9,
u'owc': 9,
u'owp': 9,
u'czw': 8,
u'\xedbe': 8,
u'tzm': 8,
u'k\xf6h': 8,
u'j#\xe6': 8,
u'sc\xf4': 8,
u'uxk': 8,
u'\xe5v\xe6': 8,
u'\xf3go': 8,
u'h\xe9i': 8,
u'amz': 8,
u'w\xe9#': 8,
u'\xe1#r': 8,
u'w#r': 8,
u'jax': 8,
u'jaq': 8,
u'gbm': 8,
u'lgj': 8,
u'zge': 8,
u'a#\u0151': 8,
u'wey': 8,
u'krs': 8,
u'yr\xe9': 8,
u'cms': 8,
u'cm\xe5': 8,
u'\xf8em': 8,
u'mvk': 8,
u'\xe9ba': 8,
u'ai\xf8': 8,
u'h\xe1z': 8,
u'uzn': 8,
u'no\xf8': 8,
u'yim': 8,
u'pnv': 8,
u'\xf8yd': 8,
u'azr': 8,
u'hv\xe6': 8,
u'am\xed': 8,
u'dzh': 8,
u'kzl': 8,
u'#s\xed': 8,
u'ify': 8,
u'\xe2#a': 8,
u'ymu': 8,
u'ym\xe5': 8,
u'gmm': 8,
u'pi\xf1': 8,
u'yaw': 8,
u'bcj': 8,
u'auw': 8,
u'dr\xe1': 8,
u'au\xdf': 8,
u'fn\xb4': 8,
u'\u03c4\u03bf#': 8,
u'wda': 8,
u'yeo': 8,
u's\xeds': 8,
u'v#z': 8,
u'fsw': 8,
u'e#\u03b9': 8,
u'ma\xe5': 8,
u'orq': 8,
u'a\u0163i': 8,
u'ge\xe6': 8,
u'dkk': 8,
u'e\u015f#': 8,
u'\xef#h': 8,
u'\xfcmb': 8,
u'\xe7ai': 8,
u'k\xfds': 8,
u'g+#': 8,
u'hfi': 8,
u'pcs': 8,
u'mmt': 8,
u'o#+': 8,
u'cux': 8,
u'ld\xe9': 8,
u'lb#': 8,
u's\xe9a': 8,
u's\xe9k': 8,
u'\xe9m\xf8': 8,
u'g#\xf6': 8,
u'\xe6#u': 8,
u'\xe6#l': 8,
u'hll': 8,
u'\xf8fe': 8,
u'kuz': 8,
u'kuo': 8,
u'vpe': 8,
u'#zb': 8,
u'bsj': 8,
u'pki': 8,
u'tvh': 8,
u'elw': 8,
u'\xe9at': 8,
u'#\xe1g': 8,
u'pyk': 8,
u'ppg': 8,
u'#+k': 8,
u'j\xf8\xe5': 8,
u'k\u0142o': 8,
u'\xe6vi': 8,
u'd\u017a#': 8,
u'#rb': 8,
u'm\xedl': 8,
u'\xf2#s': 8,
u'tg#': 8,
u'qad': 8,
u's\xf8u': 8,
u'a\xe7i': 8,
u'g#\xe1': 8,
u'ho\u0148': 8,
u'j\xf8y': 8,
u'mds': 8,
u'smk': 8,
u'\xe9pa': 8,
u'jku': 8,
u'ol\xed': 8,
u'a\xefs': 8,
u'ydu': 8,
u'#a\xad': 8,
u'\xe1ci': 8,
u'sa\xed': 8,
u'\u0103ia': 8,
u'cpl': 8,
u'cpb': 8,
u'viz': 8,
u'#ez': 8,
u'\u0117#o': 8,
u'pvd': 8,
u'e\xadg': 8,
u'pdl': 8,
u'ev\xe8': 8,
u'b\xf8t': 8,
u'is\u0142': 8,
u'agj': 8,
u'agw': 8,
u'\u0161ka': 8,
u'od\xe9': 8,
u'rdz': 8,
u'zik': 8,
u'npn': 8,
u'p\xf8n': 8,
u'btt': 8,
u'vmy': 8,
u'gl\xfc': 8,
u'la\u0161': 8,
u'\xe1ra': 8,
u'\xecg#': 8,
u'uae': 8,
u'r#\xba': 8,
u'\xb0#\xb0': 8,
u'thk': 8,
u'bpa': 8,
u'\xf8gb': 8,
u'xyg': 8,
u'ghk': 8,
u'rl\xe5': 8,
u'\u015f#o': 8,
u'\u015f#a': 8,
u'\xf6wb': 8,
u'kxs': 8,
u'jto': 8,
u'gwe': 8,
u'ccw': 8,
u'ccb': 8,
u'b#z': 8,
u'#\u20ac#': 8,
u'yoy': 8,
u'cvm': 8,
u'n\xfd#': 8,
u'as\xf3': 8,
u'cv\xe5': 8,
u'\xdf#v': 8,
u'oxu': 8,
u'xey': 8,
u'jpe': 8,
u'\xe6na': 8,
u'#hg': 8,
u'l\xe5f': 8,
u'lu\xf1': 8,
u'luv': 8,
u'luw': 8,
u'luz': 8,
u'dp\xf8': 8,
u'jlc': 8,
u'\xf3ly': 8,
u'up\xe9': 8,
u'ygo': 8,
u'vf#': 8,
u'tyo': 8,
u'mcj': 8,
u'slm': 8,
u'ro\xeb': 8,
u'\xadpa': 8,
u'\xf4le': 8,
u's\xf3g': 8,
u'tuw': 8,
u'qsa': 8,
u'bmk': 8,
u'hdg': 8,
u'abf': 8,
u'abg': 8,
u'a\u0161\xed': 8,
u'pa\xad': 8,
u'\u0142op': 8,
u'\u0142od': 8,
u'ikc': 8,
u'fuo': 8,
u'pe\xf8': 8,
u'\xfctz': 8,
u'lm\xfc': 8,
u'#\u0142\xf3': 8,
u'ixd': 8,
u'byh': 8,
u'f\xe9#': 8,
u'a\u0161k': 8,
u'thc': 8,
u'p#\xe9': 8,
u'nux': 8,
u'icm': 8,
u'msz': 8,
u'yje': 8,
u'cso': 8,
u'huy': 8,
u'dyp': 8,
u'\u015bci': 8,
u'cbp': 8,
u'\xe1rq': 8,
u'nb\xe6': 8,
u'r#\xb0': 8,
u'\xf3wr': 8,
u'td\xe9': 8,
u'ynp': 8,
u'duv': 8,
u'p+m': 8,
u'vnm': 8,
u'\xe5bi': 8,
u'ta\xed': 8,
u'vca': 8,
u'#kz': 8,
u'vah': 8,
u'ybb': 8,
u'ebj': 8,
u'bbf': 8,
u'sk\u0142': 8,
u'\xe9r\xe8': 7,
u'xtj': 7,
u'dn\xed': 7,
u'fiz': 7,
u'fi\xf8': 7,
u'\xe1zy': 7,
u'vg\xe5': 7,
u'vgl': 7,
u'ir\xf8': 7,
u'ir\xed': 7,
u'yfe': 7,
u'\xe9vo': 7,
u'gza': 7,
u'jit': 7,
u'jiz': 7,
u'zot': 7,
u'vk\xf8': 7,
u'a\xedo': 7,
u'fix': 7,
u'iv\xe5': 7,
u'gwo': 7,
u't#\xe1': 7,
u'smm': 7,
u'i\xe6#': 7,
u'#fc': 7,
u'k\xe1b': 7,
u'e\xe5b': 7,
u'nrf': 7,
u'kn#': 7,
u'd\u0103i': 7,
u'ci\xf8': 7,
u'\xf8ad': 7,
u'\xe1#u': 7,
u'\xe1##': 7,
u't\xe1r': 7,
u'\xe9no': 7,
u'\xe9nb': 7,
u'szi': 7,
u'dbf': 7,
u'szp': 7,
u'jaj': 7,
u'aec': 7,
u'gb\xe5': 7,
u'ae\xf8': 7,
u'ucp': 7,
u'ucn': 7,
u'ucm': 7,
u'wez': 7,
u'\xedva': 7,
u'lff': 7,
u'\xe5mo': 7,
u'\u0163i#': 7,
u'\xe1is': 7,
u'\xe1ir': 7,
u'brz': 7,
u'brl': 7,
u'da\xf8': 7,
u'tny': 7,
u'tn\xfd': 7,
u'hrp': 7,
u'hrm': 7,
u'hr\xe9': 7,
u'ty\u0144': 7,
u'i\xf1e': 7,
u'\xe0#i': 7,
u'rnc': 7,
u'ob\xe6': 7,
u'\xe0##': 7,
u'obg': 7,
u'\xeet#': 7,
u'yvm': 7,
u'uzo': 7,
u'uzh': 7,
u'\xf3ri': 7,
u'yio': 7,
u'j\xe6f': 7,
u'vtg': 7,
u'\xe1ti': 7,
u'gji': 7,
u'am\xe9': 7,
u'am\xfc': 7,
u'srh': 7,
u'\xed#o': 7,
u'\u0159ic': 7,
u'pup': 7,
u'ffv': 7,
u'uvs': 7,
u'uuu': 7,
u'm#+': 7,
u'ym\xf8': 7,
u'eew': 7,
u'eei': 7,
u'bga': 7,
u'\xf8vb': 7,
u'svm': 7,
u'fpp': 7,
u'p\xe6s': 7,
u'cyd': 7,
u'fja': 7,
u'yay': 7,
u'mza': 7,
u'auj': 7,
u'lwi': 7,
u'o+#': 7,
u'xhe': 7,
u'\xe6op': 7,
u'un\xe5': 7,
u'\xefrs': 7,
u'#n\xed': 7,
u'xwe': 7,
u'fsu': 7,
u'ayp': 7,
u'ma\xf8': 7,
u'bo\u0161': 7,
u'\xf8j\xe6': 7,
u'zst': 7,
u'e#\xa3': 7,
u'\u015bni': 7,
u'uj\u015b': 7,
u'\xe6lf': 7,
u'\xfans': 7,
u'h\xf6n': 7,
u'kms': 7,
u'\xfd#o': 7,
u'n\xe5n': 7,
u'hfr': 7,
u'\xe4#s': 7,
u'pcg': 7,
u'j\xf3n': 7,
u'o#\xa3': 7,
u'ufy': 7,
u'zhu': 7,
u'\xe4si': 7,
u'ch\xe6': 7,
u'\xe1d#': 7,
u'pgs': 7,
u'gmc': 7,
u'oai': 7,
u'oav': 7,
u'q#m': 7,
u'\xe5l\xe5': 7,
u'vpd': 7,
u'vpg': 7,
u'imk': 7,
u'\xe7i#': 7,
u'\xfcrc': 7,
u'\xfcre': 7,
u'muo': 7,
u'\u0117#s': 7,
u'\xe9ad': 7,
u'v\xe5n': 7,
u'ah\xf8': 7,
u'#\xe1r': 7,
u'ro\u0161': 7,
u'\xfaba': 7,
u'ke\xe5': 7,
u'tbf': 7,
u'tkm': 7,
u'e\xf8n': 7,
u'wlo': 7,
u'#rm': 7,
u'\xe5dm': 7,
u'\xe5dy': 7,
u'iex': 7,
u'\xe6ar': 7,
u'\xf2#d': 7,
u'qan': 7,
u'#iw': 7,
u'\u010d#a': 7,
u'tpd': 7,
u'\xa3#i': 7,
u'hkr': 7,
u'hkl': 7,
u'euz': 7,
u'sij': 7,
u'yuz': 7,
u'\xf1er': 7,
u'wh#': 7,
u't\xf3r': 7,
u'why': 7,
u'##\xa3': 7,
u'm\xe9t': 7,
u'\xe6ek': 7,
u'kd\xf8': 7,
u'vaz': 7,
u's#\xe0': 7,
u'#m\xf3': 7,
u'pr\xe4': 7,
u'e\u0161e': 7,
u'at\xe5': 7,
u'n\xe9m': 7,
u'cdn': 7,
u'\xe1li': 7,
u'olj': 7,
u'fof': 7,
u'#tm': 7,
u'ipy': 7,
u'ttk': 7,
u'axo': 7,
u'cpc': 7,
u'\u0117#h': 7,
u'\xfcna': 7,
u'vi\xe8': 7,
u'xue': 7,
u'=##': 7,
u'evk': 7,
u'trn': 7,
u'e\xadd': 7,
u'bhf': 7,
u'k\xfcn': 7,
u'exj': 7,
u'seq': 7,
u'mlu': 7,
u'vzo': 7,
u'sxb': 7,
u'jca': 7,
u'agc': 7,
u'gvt': 7,
u'ueo': 7,
u'ueh': 7,
u'ctl': 7,
u'vmf': 7,
u'\xadde': 7,
u'ckw': 7,
u'bts': 7,
u'cbo': 7,
u'eoc': 7,
u'ptf': 7,
u'la\u0163': 7,
u'uaz': 7,
u'thf': 7,
u'ekm': 7,
u'spc': 7,
u'lm\xf6': 7,
u'et\xfa': 7,
u'\u015f#s': 7,
u'wco': 7,
u'ytv': 7,
u'+la': 7,
u'l\xfcb': 7,
u'yoo': 7,
u'u\xe9r': 7,
u'st\u0159': 7,
u'#\u03b9\u03b9': 7,
u'stw': 7,
u'st\xf6': 7,
u'wov': 7,
u'woe': 7,
u'wod': 7,
u'#uz': 7,
u'l#\xf6': 7,
u'ruj': 7,
u'fd#': 7,
u'ecz': 7,
u'awy': 7,
u't\u0159e': 7,
u'\xe9sy': 7,
u'dmk': 7,
u'fhd': 7,
u'p\u0119k': 7,
u'r\xe1#': 7,
u'n#\xe1': 7,
u'l\xe1z': 7,
u'mcl': 7,
u'xju': 7,
u'op\xe5': 7,
u'zuz': 7,
u'\u0159ed': 7,
u'tnt': 7,
u'dix': 7,
u'\xedos': 7,
u'zni': 7,
u'kkt': 7,
u'kk\xf8': 7,
u'\u010d#v': 7,
u'g\xe8n': 7,
u'\xe1ba': 7,
u'\xe1be': 7,
u'l\xeds': 7,
u'l\xedn': 7,
u'\xe6sf': 7,
u'pa\u0161': 7,
u'\u0103ub': 7,
u'hdn': 7,
u'moy': 7,
u'\xf8l\xe5': 7,
u'\xf8ed': 7,
u'#\u03b5\u03bc': 7,
u'\xeas#': 7,
u't\xe4t': 7,
u'wsb': 7,
u'hyh': 7,
u'wsy': 7,
u'\xe5bs': 7,
u'abp': 7,
u'ggc': 7,
u'akj': 7,
u'de\xe6': 7,
u'\u0119k#': 7,
u'ok\xf8': 7,
u'ok\xe6': 7,
u'rrn': 7,
u'r\xe9c': 7,
u'l\xe9n': 7,
u'fua': 7,
u'ewr': 7,
u'ewp': 7,
u'pe\xe6': 7,
u'apc': 7,
u'bi\u0161': 7,
u'sdm': 7,
u'xbu': 7,
u'\xe5sy': 7,
u'ud\xe5': 7,
u'og\xe6': 7,
u'ysr': 7,
u'tm\xe9': 7,
u'buy': 7,
u'buh': 7,
u'vcb': 7,
u'l#+': 7,
u'thd': 7,
u'nuu': 7,
u'kwe': 7,
u'kwd': 7,
u'icf': 7,
u'nnm': 7,
u'eb\xe4': 7,
u'msj': 7,
u't\xfab': 7,
u'xym': 7,
u'gk\xf8': 7,
u'hup': 7,
u'prn': 7,
u'lny': 7,
u'j\u015bc': 7,
u'\u03b1s#': 7,
u'wbu': 7,
u'rv\xe1': 7,
u'jup': 7,
u'igz': 7,
u'gu#': 7,
u'k\u0161i': 7,
u'mml': 7,
u'r#\u0111': 7,
u'izj': 7,
u'hih': 7,
u'ljs': 7,
u'rrb': 7,
u'+st': 7,
u'dpd': 7,
u'ebf': 7,
u'ebg': 7,
u'ebm': 7,
u'a\xebl': 7,
u'bbu': 7,
u'mfm': 7,
u'\xe9rv': 6,
u'\u015fov': 6,
u'\xe9r\xe9': 6,
u'dns': 6,
u'xti': 6,
u'jm\xf8': 6,
u'\xe1za': 6,
u'kfw': 6,
u'kfj': 6,
u'nzh': 6,
u'yff': 6,
u'tzu': 6,
u'tzt': 6,
u'tzg': 6,
u'k\xf6s': 6,
u'\xf5ko': 6,
u'mbh': 6,
u'mbp': 6,
u'j#z': 6,
u'\xf1ol': 6,
u'lry': 6,
u'zra': 6,
u'jig': 6,
u'\u010ds#': 6,
u'jta': 6,
u'vke': 6,
u'bjp': 6,
u't#\xe0': 6,
u'smt': 6,
u'scv': 6,
u'\xf8m\xf8': 6,
u'f\xe6j': 6,
u'\xe5\xe6n': 6,
u'xo#': 6,
u'\xe9jo': 6,
u'wry': 6,
u'\u0161ek': 6,
u'ugc': 6,
u'y\u0144#': 6,
u'nrr': 6,
u'\xfchw': 6,
u'\u0161\xf5k': 6,
u'vgy': 6,
u'\xe6tm': 6,
u'cii': 6,
u'\u0159\xed#': 6,
u'evf': 6,
u'trs': 6,
u'tr\xed': 6,
u'tr\xf4': 6,
u'sgp': 6,
u'\xf8av': 6,
u'gci': 6,
u'gco': 6,
u'n\xe7\xe3': 6,
u'lzm': 6,
u'\xe1#c': 6,
u'\xe1#p': 6,
u'otk': 6,
u'ucy': 6,
u'ucf': 6,
u'p\xfab': 6,
u'a#+': 6,
u'l\xf6v': 6,
u'\xf8eg': 6,
u'xgr': 6,
u'mvb': 6,
u'mvl': 6,
u'mvn': 6,
u'\xe7u#': 6,
u'hrl': 6,
u'hrd': 6,
u'aix': 6,
u'\xe0#p': 6,
u'\xe0#t': 6,
u'zca': 6,
u't\xfar': 6,
u'wax': 6,
u'kvu': 6,
u'uzu': 6,
u'ibh': 6,
u'no\xee': 6,
u'azh': 6,
u'#xb': 6,
u'eih': 6,
u'ei\xdf': 6,
u'gje': 6,
u'\xed#\u010d': 6,
u'\xed#p': 6,
u'wme': 6,
u'jrp': 6,
u'a++': 6,
u'ffk': 6,
u'uv\xf8': 6,
u'ioz': 6,
u'\xe2#f': 6,
u'bzo': 6,
u'g\xe6k': 6,
u'u\xe7k': 6,
u'\u0165as': 6,
u'eeo': 6,
u'y\xe1s': 6,
u'lkk': 6,
u'lkc': 6,
u'lkp': 6,
u'c\xe9a': 6,
u's\xe6e': 6,
u'#wc': 6,
u'i\u0159\xed': 6,
u'ur\xf3': 6,
u'ur\xfa': 6,
u'rs\xed': 6,
u't\u0161\xf5': 6,
u'#jk': 6,
u'eao': 6,
u'bci': 6,
u'hnp': 6,
u'hnu': 6,
u'lwe': 6,
u'wus': 6,
u'ji\u0159': 6,
u'do\xf8': 6,
u'vyc': 6,
u'wd#': 6,
u'vyn': 6,
u'm\xf6#': 6,
u'un\xe7': 6,
u'unv': 6,
u'unz': 6,
u'\xedah': 6,
u'kih': 6,
u'yee': 6,
u'\xf8uv': 6,
u'sn\xe5': 6,
u'sns': 6,
u'e#\xad': 6,
u'\xe9ud': 6,
u'vki': 6,
u'cyt': 6,
u'jf\xf8': 6,
u'oif': 6,
u'kmh': 6,
u'\xef#a': 6,
u'\xef#f': 6,
u'e\u015fb': 6,
u'iuk': 6,
u'fw#': 6,
u'eyo': 6,
u'ey\xf8': 6,
u'a#x': 6,
u'r\xf4l': 6,
u'#\xe9c': 6,
u'#\xe9d': 6,
u'#\xe9v': 6,
u't\xe6f': 6,
u'#\xe9#': 6,
u'syj': 6,
u'vby': 6,
u'rgc': 6,
u'\u0161\u0165a': 6,
u'rg\xe8': 6,
u'ufj': 6,
u's\xe9j': 6,
u'iic': 6,
u'pk#': 6,
u'eu\xad': 6,
u'mij': 6,
u'miz': 6,
u'xde': 6,
u'k\xe4i': 6,
u'\u015fbo': 6,
u'\xe9m#': 6,
u'dc\xf8': 6,
u'dcf': 6,
u'oab': 6,
u'q#v': 6,
u'q#b': 6,
u'q#k': 6,
u'\u010dec': 6,
u'\xe7il': 6,
u'm\xe5\xe6': 6,
u'nhs': 6,
u'eq#': 6,
u'to\xf8': 6,
u'muz': 6,
u'+#r': 6,
u'\xf8fs': 6,
u'+#\xe5': 6,
u'hsp': 6,
u'ahw': 6,
u'giz': 6,
u'y\xe5r': 6,
u'gi\xe6': 6,
u'icn': 6,
u'hsk': 6,
u'\xfabl': 6,
u'vtk': 6,
u'\xf6vi': 6,
u'#+m': 6,
u'\xe1ll': 6,
u'l\xf3s': 6,
u'uyp': 6,
u'fch': 6,
u'r\xe8#': 6,
u'mqi': 6,
u'\xf8z#': 6,
u'ehf': 6,
u'#\xe5e': 6,
u'ptb': 6,
u'al\xe5': 6,
u'oyu': 6,
u'e\xf8z': 6,
u'o\u017e#': 6,
u'a\u015fo': 6,
u'rtw': 6,
u'\xf2#m': 6,
u'tgy': 6,
u'qar': 6,
u'ahg': 6,
u'ixi': 6,
u'l\xe4s': 6,
u'bdy': 6,
u'si\xfa': 6,
u'i\xe8s': 6,
u'\xf3#m': 6,
u'\xf3#i': 6,
u'\xf3#g': 6,
u'r\xfaa': 6,
u'#vk': 6,
u'##q': 6,
u'g#\xad': 6,
u'm\xe9p': 6,
u'\xe6en': 6,
u'kd\xe6': 6,
u'bch': 6,
u'at\xe9': 6,
u'\xe6ja': 6,
u'mdf': 6,
u'xix': 6,
u'huc': 6,
u'wt#': 6,
u'\xe9pu': 6,
u'\xf3li': 6,
u'umq': 6,
u'foy': 6,
u'um=': 6,
u'khy': 6,
u'khl': 6,
u'ydw': 6,
u'ttd': 6,
u'hc#': 6,
u'vfl': 6,
u'saw': 6,
u'zpa': 6,
u'\xfari': 6,
u'n\xedo': 6,
u'gdu': 6,
u'vij': 6,
u'\u0117#d': 6,
u'bhe': 6,
u'pdm': 6,
u'ra\u015f': 6,
u'#\xe8#': 6,
u'uej': 6,
u'ctj': 6,
u't\xfcv': 6,
u'zi\xe8': 6,
u'ckv': 6,
u'\u03b9\u03b9#': 6,
u'qtr': 6,
u'tls': 6,
u'\xdf#e': 6,
u'c\xf8p': 6,
u'hpl': 6,
u'ptk': 6,
u'akg': 6,
u'zez': 6,
u'\u03b1#f': 6,
u'pt\xe9': 6,
u'nt\u0161': 6,
u'ilw': 6,
u'epn': 6,
u'epf': 6,
u'pl\xe1': 6,
u'g\xf8t': 6,
u'pln': 6,
u'#nh': 6,
u'\xf8gm': 6,
u'xyl': 6,
u'htn': 6,
u'htv': 6,
u'szu': 6,
u'aeb': 6,
u'zaa': 6,
u'\u015f#v': 6,
u'\xe6ba': 6,
u'uxo': 6,
u'tld': 6,
u'ccf': 6,
u'l\xfch': 6,
u'\u0161i\u0107': 6,
u'nmd': 6,
u'yov': 6,
u'mpz': 6,
u'il\xe5': 6,
u'n+o': 6,
u'n+p': 6,
u'd\xe5l': 6,
u'\xe9du': 6,
u'\xe9do': 6,
u'#\u015fe': 6,
u'rh\xe1': 6,
u'rhk': 6,
u'st\xfa': 6,
u'oxr': 6,
u'oxs': 6,
u'\xdf#n': 6,
u'b\xe9#': 6,
u'b\xe9l': 6,
u'b\xe9b': 6,
u'cg\xe6': 6,
u'ru\xf8': 6,
u'\xe6fl': 6,
u'flm': 6,
u'#hd': 6,
u'yc#': 6,
u'be\u015f': 6,
u'k\xf3w': 6,
u'wk#': 6,
u'zyd': 6,
u'\xe9st': 6,
u'\xadtr': 6,
u'upk': 6,
u'upd': 6,
u'ygk': 6,
u'ygv': 6,
u'#lc': 6,
u'isw': 6,
u'ba\xf1': 6,
u'zus': 6,
u'tuu': 6,
u'd#+': 6,
u'\xf8ld': 6,
u'wsz': 6,
u'ggg': 6,
u'#d\u0103': 6,
u'zja': 6,
u'##\xdf': 6,
u'ik\xe4': 6,
u'biy': 6,
u'pey': 6,
u'few': 6,
u'\xe5ss': 6,
u're\u015f': 6,
u'\xef#d': 6,
u'#xr': 6,
u'=s#': 6,
u'fyo': 6,
u'tmc': 6,
u'nfy': 6,
u'vcl': 6,
u'\u0107#m': 6,
u'\u0107#v': 6,
u'\u0161#\u0161': 6,
u'p#x': 6,
u'ocz': 6,
u'\xe9b\xe9': 6,
u'kwt': 6,
u'\xf6pf': 6,
u'lcy': 6,
u'lck': 6,
u'eyj': 6,
u'\xe1nb': 6,
u'i\xfan': 6,
u'nn\xf8': 6,
u'nnd': 6,
u'tiy': 6,
u'o\xeet': 6,
u'd\xe9a': 6,
u'd\xe9u': 6,
u'ej\xf3': 6,
u'\xe9gl': 6,
u'lnt': 6,
u'rvy': 6,
u'r#\xa3': 6,
u'pzo': 6,
u'guo': 6,
u'izm': 6,
u'#\xe7i': 6,
u'ri\xe6': 6,
u'ri\xe5': 6,
u'vnd': 6,
u'#dl': 6,
u're\xf8': 6,
u'vnh': 6,
u'\xe4s#': 6,
u'vox': 6,
u'ta\xf1': 6,
u'us\xe9': 6,
u'wss': 6,
u'ju#': 6,
u'\xe9is': 6,
u'\xe9il': 6,
u'avy': 6,
u'mfj': 6,
u'owr': 6,
u't\xed#': 5,
u'p\xe9b': 5,
u'lmc': 5,
u'dnr': 5,
u'czk': 5,
u'uou': 5,
u'vgj': 5,
u'tzc': 5,
u'j\xf6l': 5,
u'\u0144#d': 5,
u'xk\xf8': 5,
u'xki': 5,
u'az\xf3': 5,
u'ufg': 5,
u'ukf': 5,
u'\xednb': 5,
u'rz\xf3': 5,
u'\xe6kj': 5,
u'fii': 5,
u'ivc': 5,
u'ivz': 5,
u'r\xeat': 5,
u'#\xf3s': 5,
u'scm': 5,
u'mn#': 5,
u'smd': 5,
u'\xf6#i': 5,
u'pbo': 5,
u'\xf8mh': 5,
u'\xf8mb': 5,
u'\xe6bi': 5,
u'\xfapa': 5,
u'vlg': 5,
u'hzd': 5,
u'wrc': 5,
u'k\xe1n': 5,
u'dfp': 5,
u'dfs': 5,
u'crv': 5,
u'nrh': 5,
u'nrc': 5,
u'ij\xf8': 5,
u'p\xf6m': 5,
u'ijd': 5,
u'ijl': 5,
u'\xe6tv': 5,
u'ci\xe9': 5,
u'\u013e#o': 5,
u'tr\xe1': 5,
u'tr\xf3': 5,
u'pfc': 5,
u'c\xe6n': 5,
u'ci+': 5,
u'\xe9na': 5,
u'dbk': 5,
u'\xf8ty': 5,
u'szr': 5,
u'\xf8tn': 5,
u'cvp': 5,
u'\xe4ge': 5,
u'e\xe1l': 5,
u'weo': 5,
u'wev': 5,
u'\xf6me': 5,
u'jz#': 5,
u'm\xe6e': 5,
u'inq': 5,
u'cmt': 5,
u'cmp': 5,
u'l\xf6s': 5,
u'brs': 5,
u'tnn': 5,
u'pj#': 5,
u'pjo': 5,
u'\xf8ev': 5,
u'mvc': 5,
u'udz': 5,
u'hrr': 5,
u'hrg': 5,
u'emz': 5,
u'hr\xfa': 5,
u'\u03bd\u03b9#': 5,
u'gn\xe6': 5,
u'\xe0#k': 5,
u't\xfa#': 5,
u'\xedre': 5,
u'h\xe1i': 5,
u'kvd': 5,
u'j\xf6k': 5,
u'y#\u015b': 5,
u'ibk': 5,
u'cah': 5,
u'z\xf3s': 5,
u'ib\xe5': 5,
u'noj': 5,
u'\xf3ra': 5,
u'lft': 5,
u'azt': 5,
u'hvb': 5,
u'hvd': 5,
u'mb\xe9': 5,
u'qia': 5,
u'gju': 5,
u'\xed#i': 5,
u'\xf6gl': 5,
u'\xe3le': 5,
u'ifn': 5,
u'iy#': 5,
u'm#\u015f': 5,
u'iyo': 5,
u'gmt': 5,
u'u\xe7a': 5,
u'#a\u015f': 5,
u'aqm': 5,
u'dv#': 5,
u'lkm': 5,
u'c\xe9d': 5,
u'c\xe9r': 5,
u'e+b': 5,
u'\xe5ak': 5,
u'rs\xe9': 5,
u'ng\xf6': 5,
u'yap': 5,
u'yao': 5,
u'#jc': 5,
u'#jy': 5,
u'uk\u0161': 5,
u'#jp': 5,
u'bcm': 5,
u'fiq': 5,
u'dr\xad': 5,
u'ov\xe5': 5,
u'b\xf3s': 5,
u'gyb': 5,
u'dox': 5,
u's$#': 5,
u'vdc': 5,
u'\xe5#x': 5,
u'vd\xe5': 5,
u'#n\xe1': 5,
u'axu': 5,
u'ajb': 5,
u'\xf6rn': 5,
u'ruu': 5,
u'ma\xe7': 5,
u'maw': 5,
u'bo\u015f': 5,
u'n\xfa#': 5,
u'or\xe9': 5,
u'zsv': 5,
u'e#\xb0': 5,
u'gez': 5,
u'ujd': 5,
u'\xfant': 5,
u'h\xf6g': 5,
u'yyi': 5,
u'\xfcm#': 5,
u'#b\xe4': 5,
u'ajg': 5,
u'\xe7as': 5,
u'\xe7am': 5,
u'fwa': 5,
u'eyi': 5,
u'twh': 5,
u'eyu': 5,
u'hf\xe6': 5,
u'\u015fen': 5,
u'eqs': 5,
u'#\xe9m': 5,
u'jbl': 5,
u'oeg': 5,
u'\u0144ko': 5,
u'a\u0144k': 5,
u'ld\u0159': 5,
u'\xf6nf': 5,
u'\u0117be': 5,
u'chv': 5,
u'iih': 5,
u'\u03b5\u03b4\u03c1': 5,
u'r\xadg': 5,
u'\xe9m\xe6': 5,
u'ohc': 5,
u'dck': 5,
u'ubm': 5,
u'zds': 5,
u's\xedk': 5,
u'#zy': 5,
u'm\xe5a': 5,
u'\u010dez': 5,
u'm\xe5\xf8': 5,
u'aw\xe9': 5,
u'pku': 5,
u'ur\xed': 5,
u'r\xfcs': 5,
u'+#l': 5,
u'\xe7#a': 5,
u'hso': 5,
u'pp\xe9': 5,
u'icd': 5,
u'ro\u017e': 5,
u'g#\xa3': 5,
u'#+p': 5,
u'\xe5hi': 5,
u'iaj': 5,
u'fc\xf4': 5,
u'\xe1lb': 5,
u'uyo': 5,
u'uyn': 5,
u'poa': 5,
u'nnf': 5,
u'\xad#t': 5,
u'r\xf8i': 5,
u'r\xf8h': 5,
u'r\xf8t': 5,
u'eu\xe5': 5,
u'\xe9ej': 5,
u'su\xf8': 5,
u'lh\xe5': 5,
u'rk\xf6': 5,
u'r\xfap': 5,
u'#rv': 5,
u'#r\xf3': 5,
u'#r\xf6': 5,
u'm\xedt': 5,
u'rt\xe1': 5,
u'rt\xe0': 5,
u'uur': 5,
u'\xe6af': 5,
u'tgl': 5,
u'i#\u015b': 5,
u'g\xe1b': 5,
u'a\xe7\xe3': 5,
u'i#\xfc': 5,
u'd\u0159i': 5,
u'ojz': 5,
u'v\xedc': 5,
u'hk#': 5,
u'j\xfcn': 5,
u'i\xe8v': 5,
u'\u0161te': 5,
u'ou\xe9': 5,
u'f\xf8g': 5,
u'\xf3#j': 5,
u'#vt': 5,
u'#vc': 5,
u'dln': 5,
u'#v\xe3': 5,
u'vth': 5,
u'xur': 5,
u'rpf': 5,
u'\xe5h\xe5': 5,
u'gyt': 5,
u'\xe1ta': 5,
u's#\u0161': 5,
u'\xf6ce': 5,
u'##\u03b2': 5,
u'\xf8rj': 5,
u'xta': 5,
u'hoz': 5,
u'e\u0161o': 5,
u'mdu': 5,
u'smp': 5,
u'xio': 5,
u'lp\xf8': 5,
u'zte': 5,
u'n\xe9v': 5,
u'\xe9po': 5,
u'xys': 5,
u'yd\xe6': 5,
u'ydc': 5,
u'ipv': 5,
u'fp\xe5': 5,
u'zkr': 5,
u'#lf': 5,
u'u#\xe1': 5,
u'\xe9t\xe9': 5,
u'\xe9tp': 5,
u'gdp': 5,
u'gdx': 5,
u'uik': 5,
u'\xedli': 5,
u'\xf6ku': 5,
u'\xfcns': 5,
u'n#\u010d': 5,
u'exg': 5,
u'pd\xfc': 5,
u'xat': 5,
u'\xf8ol': 5,
u'ag\xe6': 5,
u'\xe5tk': 5,
u'uey': 5,
u'#y\xe1': 5,
u'bto': 5,
u'v\xe9b': 5,
u'phs': 5,
u'\xf8ca': 5,
u'\xdf#d': 5,
u'eoh': 5,
u'\xe5pi': 5,
u'la\u03b2': 5,
u'a\u03b2#': 5,
u't\xf8e': 5,
u'o\u0161i': 5,
u'ypv': 5,
u'kt\xf3': 5,
u'kt\xe5': 5,
u'+ha': 5,
u'ni\u0161': 5,
u'\u03ba\u03b5#': 5,
u'\u03b9\u03bd\u03b9': 5,
u'vds': 5,
u'bpl': 5,
u'ykh': 5,
u'dbj': 5,
u'spk': 5,
u'ghy': 5,
u'lmg': 5,
u'\u0161ic': 5,
u'\u015fti': 5,
u'#qt': 5,
u'm\xe0b': 5,
u'#\u017ee': 5,
u'nm\xfc': 5,
u'nmy': 5,
u'nmf': 5,
u'\u0117#a': 5,
u'd\xe4u': 5,
u'mpj': 5,
u'o\xebs': 5,
u'ojt': 5,
u'eg\xf3': 5,
u'egc': 5,
u'#\u03b9\u03bd': 5,
u'asw': 5,
u'dtd': 5,
u'rh#': 5,
u'rb\xe9': 5,
u'st\xe4': 5,
u'oxt': 5,
u'oxp': 5,
u'oxn': 5,
u'oxo': 5,
u'oxd': 5,
u'wot': 5,
u'wos': 5,
u'#u\xe7': 5,
u'l#\xdf': 5,
u'l#\xba': 5,
u'p\xe4r': 5,
u'ruy': 5,
u'z#\xe5': 5,
u'ma\xef': 5,
u'h\xfct': 5,
u'kce': 5,
u'ma\xdf': 5,
u'flf': 5,
u'l\xe5l': 5,
u'hlu': 5,
u'#\xf8c': 5,
u'#\xf8f': 5,
u'pyg': 5,
u'y\xfcr': 5,
u'\xe9se': 5,
u'dmb': 5,
u'\xf3#v': 5,
u'\xedct': 5,
u'li\u010d': 5,
u'kgo': 5,
u'r\xe1t': 5,
u'ak\xf3': 5,
u'xup': 5,
u'isq': 5,
u'l\xe1c': 5,
u'n#\xad': 5,
u'\xe6ho': 5,
u'glv': 5,
u'op\xf3': 5,
u'zub': 5,
u'zua': 5,
u'zuk': 5,
u'b\xf6l': 5,
u'di\u010d': 5,
u'nyg': 5,
u'#\u0161\u0165': 5,
u'#\u0161t': 5,
u'tuy': 5,
u'bmm': 5,
u'd#\xb0': 5,
u'hdp': 5,
u'jft': 5,
u'r\xf6l': 5,
u'i\xe7#': 5,
u'\xf8l\xf8': 5,
u'\xf8lf': 5,
u'\xf8lb': 5,
u'c\xe3u': 5,
u'd#\xa3': 5,
u'ab\xe5': 5,
u'ab\xe6': 5,
u'abz': 5,
u'deq': 5,
u'ryi': 5,
u'de\xf8': 5,
u'zjn': 5,
u'rrl': 5,
u'#d\xe4': 5,
u'vn\xf8': 5,
u'ujn': 5,
u'a\u015f\xe7': 5,
u'ikw': 5,
u'iky': 5,
u'fuz': 5,
u'\u03ba\u03ba\u03b5': 5,
u'ewb': 5,
u'ewu': 5,
u'a\u0142#': 5,
u'=pl': 5,
u'pe=': 5,
u'\xfcte': 5,
u'v\xe3l': 5,
u'\xe5su': 5,
u'u\xf8e': 5,
u'vrd': 5,
u'\xef#v': 5,
u'coz': 5,
u'es\xad': 5,
u'es\u0142': 5,
u'#kb': 5,
u'gnh': 5,
u'k\xe6k': 5,
u'enx': 5,
u'aj\xf6': 5,
u'wsh': 5,
u'\xe4ch': 5,
u'nuj': 5,
u'kwo': 5,
u'ujk': 5,
u'\xe5jo': 5,
u'ic\xe3': 5,
u'jys': 5,
u'cnt': 5,
u'\xe1nr': 5,
u'\xe7k#': 5,
u'o\u015ft': 5,
u'\xf3sc': 5,
u'jdm': 5,
u'imd': 5,
u'd\xe9c': 5,
u'an\xed': 5,
u'pr\xf6': 5,
u'huf': 5,
u'prb': 5,
u'prp': 5,
u'f\xf6r': 5,
u'\xf9n#': 5,
u'#p\xfa': 5,
u'h\xe4t': 5,
u'p\xe1t': 5,
u'cbu': 5,
u'\xe1rd': 5,
u'\xe1ro': 5,
u'yvs': 5,
u'ynm': 5,
u'izr': 5,
u'guz': 5,
u'bfa': 5,
u'hiu': 5,
u'hiy': 5,
u'ri\u017e': 5,
u'bf\xe6': 5,
u'pv#': 5,
u'p+b': 5,
u'p+l': 5,
u'#t\xf3': 5,
u'wnb': 5,
u'i\u010ds': 5,
u'\xe5b\xe5': 5,
u'rrk': 5,
u'cfp': 5,
u'caa': 5,
u'us$': 5,
u'vcf': 5,
u'xra': 5,
u'ebk': 5,
u'av\xe5': 5,
u'hm\xe5': 5,
u'tjy': 5,
u't\xe5t': 5,
u'dnn': 4,
u'=co': 4,
u't\xedo': 4,
u'czb': 4,
u'jm\xe6': 4,
u'uok': 4,
u'\xe6k\xe6': 4,
u'\xe1zs': 4,
u'#o\xf9': 4,
u'kf#': 4,
u'nzu': 4,
u'kf\xf8': 4,
u'kf\xe6': 4,
u'e\u0144s': 4,
u'a\xe9r': 4,
u'rgw': 4,
u'cuh': 4,
u'tzp': 4,
u'haq': 4,
u'ha\xe7': 4,
u'\u0144#s': 4,
u'azm': 4,
u'\xf1os': 4,
u'\xf1oa': 4,
u'f\xe2n': 4,
u'lr\xf8': 4,
u'rz\u0105': 4,
u'jiu': 4,
u'o\xb4e': 4,
u'm\xf3r': 4,
u'\xbacm': 4,
u'zo\xe6': 4,
u'ukb': 4,
u'rzu': 4,
u'ytl': 4,
u'#c\xb0': 4,
u'kj\xf6': 4,
u'vky': 4,
u'uod': 4,
u'\xe6pl': 4,
u'\xe6pi': 4,
u't#\u015f': 4,
u'ezj': 4,
u'he\xe1': 4,
u't#\xea': 4,
u'f\xe6b': 4,
u'xod': 4,
u'xob': 4,
u'xom': 4,
u'f\xe6#': 4,
u'gfp': 4,
u'gfj': 4,
u'gfm': 4,
u'aao': 4,
u'aau': 4,
u'ugp': 4,
u'crf': 4,
u'cr\xea': 4,
u'#g\xe8': 4,
u'nrg': 4,
u'h\xe9a': 4,
u'knn': 4,
u'+bu': 4,
u'\xe1d\xe1': 4,
u'cij': 4,
u'\xf3ze': 4,
u'ci\xe8': 4,
u'trz': 4,
u'bv\xe6': 4,
u'tr\xe4': 4,
u'\u013e#s': 4,
u'sgs': 4,
u'pfp': 4,
u'\xf8am': 4,
u'r\xf3i': 4,
u'ps\u012b': 4,
u'\xe1de': 4,
u'\xe1#l': 4,
u'k\xe5d': 4,
u'\u015fcu': 4,
u'xor': 4,
u'dbv': 4,
u'jao': 4,
u'\xe5r\xe5': 4,
u'euq': 4,
u'le\u0144': 4,
u'zgo': 4,
u'tmp': 4,
u'ofw': 4,
u'wep': 4,
u'a#\u017e': 4,
u'vsj': 4,
u'\xadbe': 4,
u'a#$': 4,
u'a#\xe1': 4,
u'\xe5mu': 4,
u'l\xf6g': 4,
u'l\xf6n': 4,
u'brr': 4,
u'tn#': 4,
u'nkj': 4,
u'br\xe5': 4,
u'nk\xe5': 4,
u'er\xfa': 4,
u'\xf8e#': 4,
u'#zm': 4,
u'ogc': 4,
u'\xe0#j': 4,
u'\xe0#n': 4,
u'\xe0#c': 4,
u'zch': 4,
u'zci': 4,
u'lc\xe1': 4,
u'kv#': 4,
u'vwl': 4,
u'#t\xfa': 4,
u'h\xe1t': 4,
u'kvm': 4,
u'i\u0161k': 4,
u'gu\xea': 4,
u'\xeat#': 4,
u'uzb': 4,
u'z\xf3b': 4,
u'\xf3r#': 4,
u'ha\u0142': 4,
u's\u012bt': 4,
u'yip': 4,
u'pnb': 4,
u'mr\xe6': 4,
u'j\xe6p': 4,
u'mrf': 4,
u'uvf': 4,
u'am\xad': 4,
u'loq': 4,
u'\xed#d': 4,
u'\xed#b': 4,
u'\xed#v': 4,
u'\xfaa#': 4,
u't\xf6m': 4,
u'jrb': 4,
u'\xe5er': 4,
u'ffj': 4,
u'nc\xe9': 4,
u'h\xfa#': 4,
u'qbe': 4,
u'dtn': 4,
u'gml': 4,
u'o\xe5n': 4,
u'\xe4t#': 4,
u'lkb': 4,
u'e+i': 4,
u'jny': 4,
u'\xeape': 4,
u'e\xefl': 4,
u'cye': 4,
u'cyo': 4,
u'ur\xe8': 4,
u'ym\xe1': 4,
u'\xfa#a': 4,
u'ng=': 4,
u'kez': 4,
u'kew': 4,
u'#jv': 4,
u'mze': 4,
u'c#y': 4,
u'xse': 4,
u'#\u03bf#': 4,
u'bcn': 4,
u'bck': 4,
u'hnk': 4,
u'bcg': 4,
u'o\u0148#': 4,
u'e\u010da': 4,
u'dr\xe8': 4,
u'drt': 4,
u'drm': 4,
u'ov\xed': 4,
u'xhi': 4,
u'lw\xf3': 4,
u'c\xb0#': 4,
u'wun': 4,
u'fih': 4,
u'\xe9qu': 4,
u'jju': 4,
u'gye': 4,
u'gy\xfc': 4,
u'q##': 4,
u'xfi': 4,
u'\xe6h#': 4,
u'kiw': 4,
u'\xe5#\xb1': 4,
u'#nf': 4,
u'axp': 4,
u'\xe6fn': 4,
u'xwo': 4,
u'axk': 4,
u'\xf3\u0142y': 4,
u'#\xfei': 4,
u'hbr': 4,
u'ayc': 4,
u'ayw': 4,
u'\xfeic': 4,
u'c\xe1c': 4,
u'e#\xe1': 4,
u'\xe4is': 4,
u'clc': 4,
u'cqb': 4,
u'zln': 4,
u'\xfanr': 4,
u'h\xf6p': 4,
u'kmz': 4,
u'ru\u0161': 4,
u'zl\xed': 4,
u'#bq': 4,
u'vhf': 4,
u'\xfd#h': 4,
u'\xfd#d': 4,
u'vh\xf8': 4,
u'ajr': 4,
u'iut': 4,
u'o\xf9#': 4,
u'\xe7ab': 4,
u'bkl': 4,
u'bki': 4,
u'\xe3u#': 4,
u'pck': 4,
u'm\xadt': 4,
u'r\xeap': 4,
u'o#=': 4,
u'\xe9id': 4,
u'\xe6po': 4,
u'gaj': 4,
u'syb': 4,
u'cuz': 4,
u'cug': 4,
u'ld\xed': 4,
u'rg\xe9': 4,
u'ezl': 4,
u'ezd': 4,
u'h\xed#': 4,
u's\xe9b': 4,
u'vlk': 4,
u'vl\xe5': 4,
u'#f\xf3': 4,
u'chg': 4,
u'd\u0103#': 4,
u'eu=': 4,
u'ts\xe1': 4,
u'sf\xe2': 4,
u'hl\xfc': 4,
u'\xf8by': 4,
u'wy#': 4,
u'adq': 4,
u'ad\xf3': 4,
u'\u0137is': 4,
u'rcb': 4,
u'u\u0161\u0137': 4,
u'ku\xe7': 4,
u'zda': 4,
u's\xedb': 4,
u'vph': 4,
u'ku\u017a': 4,
u'imv': 4,
u'cl\xe9': 4,
u'cyr': 4,
u'to\xe6': 4,
u'gpi': 4,
u'\xfcrr': 4,
u'+#n': 4,
u'h\xe9s': 4,
u'n\u0117b': 4,
u'\xe9al': 4,
u'el\xe1': 4,
u'hsc': 4,
u'hsm': 4,
u'hss': 4,
u'ppk': 4,
u'd\u017ea': 4,
u'ro\xe6': 4,
u'\xeele': 4,
u'\u0131r#': 4,
u's\xe1g': 4,
u'm\xe1\u0161': 4,
u'#+l': 4,
u'dpp': 4,
u'ppt': 4,
u'gt\xf8': 4,
u'shh': 4,
u'dpm': 4,
u'yue': 4,
u'yug': 4,
u'm\xe1i': 4,
u'\u010das': 4,
u'\u010dan': 4,
u'l\xf3n': 4,
u'fcb': 4,
u'nlh': 4,
u'y\xe5b': 4,
u'g\xe5p': 4,
u'u\xeas': 4,
u'pty': 4,
u'su\xe9': 4,
u'#jl': 4,
u'al\xe8': 4,
u'al\xf2': 4,
u'rkc': 4,
u'oyz': 4,
u'oyt': 4,
u'll\xe8': 4,
u'h\xe6c': 4,
u'd\u01d0m': 4,
u'#r\xe4': 4,
u'rt\xe7': 4,
u'\xf2#a': 4,
u'dmr': 4,
u'\u0144cz': 4,
u'qai': 4,
u'm\xfag': 4,
u'yln': 4,
u'\u010d#n': 4,
u'\u010d#t': 4,
u'r\xe4d': 4,
u'=do': 4,
u'ed\xe1': 4,
u'pxb': 4,
u'\xba#c': 4,
u'\xf1ed': 4,
u'lt\xfc': 4,
u'c\xe8s': 4,
u'\xf3#e': 4,
u'\xf3#d': 4,
u'#vp': 4,
u'##\xad': 4,
u'jop': 4,
u'jot': 4,
u'jow': 4,
u'rpy': 4,
u'rp\xe6': 4,
u'zk#': 4,
u'g#\xe0': 4,
u'\xe1ts': 4,
u'#+s': 4,
u'\xe8#f': 4,
u's#\xf6': 4,
u'=re': 4,
u'myo': 4,
u'#m\xe8': 4,
u'#m\xfa': 4,
u'nd\u017e': 4,
u'\xdfe#': 4,
u'ho\u0142': 4,
u'hoj': 4,
u'\xefla': 4,
u'\xf1in': 4,
u'xiu': 4,
u'lpp': 4,
u'lpk': 4,
u'n\xe9b': 4,
u'n\xe9g': 4,
u'k\u0131r': 4,
u'\u01d0ma': 4,
u'prf': 4,
u'dhm': 4,
u'dhl': 4,
u'drd': 4,
u'ol\xe9': 4,
u'\u0161\u0137i': 4,
u'olw': 4,
u'kh\xe1': 4,
u'\u017ami': 4,
u'tmb': 4,
u'qin': 4,
u'#a\xe9': 4,
u'g\xe9a': 4,
u'\xe6ry': 4,
u'bl\xe9': 4,
u'\xe1ce': 4,
u'tt\xe5': 4,
u'blv': 4,
u'\xe2nt': 4,
u'kg\xe6': 4,
u'fza': 4,
u'#lb': 4,
u'lnd': 4,
u'axn': 4,
u'r\xe1b': 4,
u'xmo': 4,
u'\xf8kn': 4,
u'i\xe0#': 4,
u'u#\xfe': 4,
u'u#\xe7': 4,
u'zp#': 4,
u't\xebs': 4,
u'wpo': 4,
u'wpg': 4,
u'b\xfcr': 4,
u'gdb': 4,
u'jgl': 4,
u'ui\xf1': 4,
u'cp\xf8': 4,
u'ohm': 4,
u'cpk': 4,
u'qma': 4,
u'dfh': 4,
u'\xedle': 4,
u'\xfcng': 4,
u'#e+': 4,
u'\xe6ch': 4,
u'kl\xf3': 4,
u'trl': 4,
u'rd\xf3': 4,
u'o\xf8v': 4,
u'rd\xe8': 4,
u'o\xf8e': 4,
u'bhk': 4,
u'pdf': 4,
u'#p\xe4': 4,
u'tr\xe8': 4,
u'ml\xe1': 4,
u't\xe7u': 4,
u'\xe9hi': 4,
u'ag\xe5': 4,
u'jcs': 4,
u'zi\u0144': 4,
u'ue\xf8': 4,
u'le\xef': 4,
u'\u20ac##': 4,
u'zih': 4,
u'npk': 4,
u'\xb1##': 4,
u'od\u0103': 4,
u'vmb': 4,
u'ih\xf8': 4,
u'ckd': 4,
u'et\xe9': 4,
u'bty': 4,
u'btk': 4,
u'cbn': 4,
u'cbl': 4,
u'g\xfcr': 4,
u'\xdf#k': 4,
u'xep': 4,
u'n\xe5h': 4,
u'hph': 4,
u'\u03b9\u03b9\u03b9': 4,
u'ptm': 4,
u'ak\xe5': 4,
u'h#z': 4,
u'ak\u0131': 4,
u'\xe5p\xe5': 4,
u'zeo': 4,
u'zev': 4,
u'\xfagi': 4,
u'\xedtr': 4,
u'\xedti': 4,
u'\xf8hy': 4,
u'il\xe4': 4,
u'\xb0#n': 4,
u'th\xfa': 4,
u'epy': 4,
u'vdu': 4,
u'bpt': 4,
u'g\xf8k': 4,
u'vdm': 4,
u'\xf8g\xe6': 4,
u'mt\xe9': 4,
u'lh\xe4': 4,
u'ekh': 4,
u'sp\xf6': 4,
u'sp\xe9': 4,
u'dxx': 4,
u'spt': 4,
u'aoc': 4,
u'aoh': 4,
u'ghp': 4,
u'x#\xe6': 4,
u'x#\xe5': 4,
u'x#\xf8': 4,
u'zaj': 4,
u'\u015f#i': 4,
u'\u015f#b': 4,
u'\u015f#d': 4,
u'wcz': 4,
u'\u016btf': 4,
u'uxv': 4,
u'uxy': 4,
u'tlb': 4,
u'\xe1sq': 4,
u'l\xfcm': 4,
u'l\xfcs': 4,
u'b#w': 4,
u'\xf3th': 4,
u'\xf3ti': 4,
u'w\xf3w': 4,
u'h\xf8e': 4,
u'\xe5\xf8e': 4,
u'u\xe9s': 4,
u'\xe8vr': 4,
u'#rz': 4,
u'gpu': 4,
u'rhr': 4,
u'st\xe1': 4,
u'oxk': 4,
u'oxg': 4,
u'h\xe6u': 4,
u'b\xe9k': 4,
u'\xf3\u0142#': 4,
u'hby': 4,
u'nft': 4,
u'p\xe4i': 4,
u'cdg': 4,
u'i\u017en': 4,
u'o\xe7a': 4,
u'hl\xf6': 4,
u'#\xf8t': 4,
u'k\xf3\u0142': 4,
u'awm': 4,
u'mgo': 4,
u'fgl': 4,
u'dp\xe6': 4,
u'\xe6an': 4,
u'\xe6al': 4,
u'm\xe6i': 4,
u'zyb': 4,
u'zyc': 4,
u'zya': 4,
u'zyl': 4,
u'foh': 4,
u'z\u0105d': 4,
u'dmm': 4,
u'a\xf1i': 4,
u'uph': 4,
u'upv': 4,
u'rlf': 4,
u'kgu': 4,
u'vjo': 4,
u'ne\u010d': 4,
u'#lw': 4,
u'd\xfcr': 4,
u'u\xe1n': 4,
u'l\xe1l': 4,
u'l\xe1i': 4,
u'l\xe1t': 4,
u'ak\xe6': 4,
u'op\xe4': 4,
u'zug': 4,
u'\xadpr': 4,
u'\u012bti': 4,
u'ny\xe5': 4,
u'ooe': 4,
u'zn\xe1': 4,
u'fld': 4,
u'a\xeet': 4,
u'kkm': 4,
u'r\xeds': 4,
u'#\u0161k': 4,
u'iwc': 4,
u'iwo': 4,
u'tu\xf8': 4,
u'l\xedk': 4,
u'tuh': 4,
u'd#\xe1': 4,
u'bmf': 4,
u'hdu': 4,
u'hdz': 4,
u'mo\xe7': 4,
u'xnu': 4,
u'u\xf8v': 4,
u'hyi': 4,
u'hyn': 4,
u'ggu': 4,
u'jdl': 4,
u'u=s': 4,
u'ko\u0144': 4,
u'#d\u01d0': 4,
u'ok\xf3': 4,
u'koj': 4,
u'a\u015fc': 4,
u'r\xe9p': 4,
u'r\xe9f': 4,
u'r\xe9b': 4,
u'r\xe9a': 4,
u'u\u0142a': 4,
u'\u0142#k': 4,
u'hca': 4,
u'fuc': 4,
u'ewd': 4,
u'ewy': 4,
u'bim': 4,
u'g\xf3\u0142': 4,
u'pew': 4,
u'ly\xf8': 4,
u'sdd': 4,
u'sdc': 4,
u'mk\xe5': 4,
u'sd\xe5': 4,
u'bcb': 4,
u'n\xe6h': 4,
u'gcp': 4,
u'gcs': 4,
u'\u017eak': 4,
u'\xe5sv': 4,
u'dwo': 4,
u'lfh': 4,
u'f#+': 4,
u'd\xf3#': 4,
u'ogj': 4,
u'\u0105dk': 4,
u'#xh': 4,
u'#xm': 4,
u'\u03bf##': 4,
u'ysu': 4,
u'cja': 4,
u'ioh': 4,
u'm\u011b\u0159': 4,
u'\xe7on': 4,
u'fyg': 4,
u'#\xb1#': 4,
u'pih': 4,
u'o\u0142o': 4,
u'\u0107#f': 4,
u'f\xe9l': 4,
u'gnn': 4,
u'd\xedv': 4,
u'en\xf6': 4,
u'k\xe6s': 4,
u'a\xe5n': 4,
u'\xe9ca': 4,
u'wsa': 4,
u'go\xb4': 4,
u'ra\xed': 4,
u'ocv': 4,
u'zb#': 4,
u'oc\xe9': 4,
u'oc\xe6': 4,
u'kwi': 4,
u'kw#': 4,
u'wfu': 4,
u'lcz': 4,
u'vvu': 4,
u'u\u017am': 4,
u'ic\xe9': 4,
u'\xe5ju': 4,
u'cng': 4,
u'twt': 4,
u'\xe1n\xed': 4,
u'i\xfa#': 4,
u'csm': 4,
u'd\xe9k': 4,
u'tp\xe6': 4,
u'h\xe1\xed': 4,
u'\xe9ge': 4,
u'\xe8se': 4,
u'huo': 4,
u'gkh': 4,
u'dls': 4,
u'lnb': 4,
u'wbr': 4,
u'jue': 4,
u'cbm': 4,
u'rvv': 4,
u'#p\u0119': 4,
u'l\xfd#': 4,
u'r#\xed': 4,
u'r#\u017e': 4,
u'a\xb4s': 4,
u'gub': 4,
u'ynj': 4,
u'ynu': 4,
u'izc': 4,
u'izk': 4,
u'p+k': 4,
u'zz#': 4,
u'#t\xe2': 4,
u'wnt': 4,
u'grz': 4,
u'ku\u0161': 4,
u'ibt': 4,
u'ta\xf8': 4,
u'ta\xe6': 4,
u'fe\xf8': 4,
u'kbl': 4,
u'nf#': 4,
u'nfk': 4,
u'nfj': 4,
u'by\xe5': 4,
u'h#\xe9': 4,
u'wso': 4,
u'ja\u0142': 4,
u'\xf8pi': 4,
u'd\xe1n': 4,
u'l\xe6m': 4,
u'l\xe6o': 4,
u'wst': 4,
u'o\xe6i': 4,
u'\xb4er': 4,
u'rl\xe9': 4,
u'\u017ena': 4,
u'owd': 4,
u'\u0142y#': 4,
u'crm': 4,
u'zvi': 4,
u'\xe9rs': 3,
u'\xe9r\xea': 3,
u'lmw': 3,
u'lmn': 3,
u'dnd': 3,
u'\xe4lj': 3,
u'dnp': 3,
u'czi': 3,
u'czn': 3,
u'dgk': 3,
u'dgu': 3,
u'#o\xe6': 3,
u'nzp': 3,
u'nzw': 3,
u'nzt': 3,
u'nzj': 3,
u'nzl': 3,
u'xv\xe6': 3,
u'frp': 3,
u'cuo': 3,
u'tzr': 3,
u'tzw': 3,
u'fr\xed': 3,
u'vkn': 3,
u'bnk': 3,
u'ha\xeb': 3,
u'\u0144#m': 3,
u'\u0144#h': 3,
u'\u0144#e': 3,
u'\u0144#b': 3,
u'mbb': 3,
u'soz': 3,
u'ha\u0161': 3,
u'\xe4st': 3,
u'azc': 3,
u'azd': 3,
u'az\xe1': 3,
u'j#\xe9': 3,
u'lrr': 3,
u'os\xe5': 3,
u'+me': 3,
u'oej': 3,
u'\xe9vy': 3,
u't\xe9n': 3,
u't\xe9i': 3,
u't\u0103n': 3,
u't\u0103l': 3,
u'jip': 3,
u'o\xb4s': 3,
u'm\xf3#': 3,
u'onq': 3,
u'rzl': 3,
u'\u03c0\u03bf\u03c7': 3,
u'fm#': 3,
u'$#m': 3,
u'$#p': 3,
u'#c\xf8': 3,
u'#c\xe8': 3,
u'#rk': 3,
u'kj\xf8': 3,
u'yz#': 3,
u'uoc': 3,
u'yzn': 3,
u'r\xeas': 3,
u'#c\u0103': 3,
u'ezt': 3,
u't#\xb0': 3,
u'bj\xf6': 3,
u't#\xad': 3,
u't#\xa3': 3,
u't#\xfc': 3,
u'\xf6#b': 3,
u'\xf6#d': 3,
u'pbd': 3,
u'pbb': 3,
u'\xebn#': 3,
u'#fh': 3,
u'wrz': 3,
u'k\xe1d': 3,
u'je\xe6': 3,
u'ojb': 3,
u'crb': 3,
u'\xf3g#': 3,
u'vl\xe6': 3,
u'cr\xe1': 3,
u'#gv': 3,
u'h\xe9o': 3,
u'\xadfo': 3,
u'fcr': 3,
u'\u0131k#': 3,
u'ijf': 3,
u'ci\xe0': 3,
u'trk': 3,
u'evm': 3,
u'trp': 3,
u'r\xf3\u017c': 3,
u'\u013e#f': 3,
u'xcl': 3,
u'xcu': 3,
u'r\xf3w': 3,
u'mjo': 3,
u'\xebr#': 3,
u'lzs': 3,
u'\xe1#k': 3,
u'\xe9nv': 3,
u't\xe1b': 3,
u'#\xeat': 3,
u'gbf': 3,
u'aew': 3,
u'sz\xf6': 3,
u'dd+': 3,
u'ja\xe6': 3,
u'ucd': 3,
u'\u0119ci': 3,
u'rbt': 3,
u'zgr': 3,
u'\xfain': 3,
u'\xe4gu': 3,
u'wec': 3,
u'wek': 3,
u'a#\u010d': 3,
u'krr': 3,
u'\u03ad\u03c1#': 3,
u'eu\xe6': 3,
u'cwo': 3,
u'cm\xe6': 3,
u'brb': 3,
u'tn\xf8': 3,
u'er\xea': 3,
u'dhd': 3,
u'xge': 3,
u'\u014fng': 3,
u'hrw': 3,
u'hrn': 3,
u'\xe9bo': 3,
u'em\xe9': 3,
u'em\xf6': 3,
u'gnr': 3,
u'gn\xe9': 3,
u'\xe0#r': 3,
u'lcl': 3,
u'waj': 3,
u'wau': 3,
u'\xeetr': 3,
u'kvn': 3,
u'guf': 3,
u'#\u017ce': 3,
u'uzg': 3,
u'uzs': 3,
u'mbj': 3,
u'noz': 3,
u'yic': 3,
u'yiv': 3,
u'pny': 3,
u'pnd': 3,
u'j\xe6s': 3,
u'mrv': 3,
u'j#y': 3,
u'ce=': 3,
u'eii': 3,
u'\xe9fe': 3,
u'ps+': 3,
u'nr\xe1': 3,
u'gjf': 3,
u'srn': 3,
u'gj\xe6': 3,
u'\xf8#y': 3,
u'\xed#n': 3,
u'\xed#l': 3,
u'\xed#j': 3,
u'lo\xe5': 3,
u'\xed##': 3,
u'k\xe4#': 3,
u't\xf6p': 3,
u't\xf6h': 3,
u'puj': 3,
u'jro': 3,
u'jrk': 3,
u'\u015f\xe7\u0131': 3,
u'ip\u0117': 3,
u'rw\xe4': 3,
u'ce\xe1': 3,
u'ka\u0144': 3,
u'nc\xf9': 3,
u'kaw': 3,
u'\xeeme': 3,
u'ymy': 3,
u'\u03b1\u03c0\u03bf': 3,
u'r\xe7u': 3,
u'u\xe7o': 3,
u'c\xf2b': 3,
u'nc\xf3': 3,
u'bgu': 3,
u'aqi': 3,
u'pwi': 3,
u'fp\xf8': 3,
u'lkt': 3,
u'wi\u0119': 3,
u'\u03c3\u03c4\u03b9': 3,
u'c\xe9#': 3,
u'ozf': 3,
u'c\xe9s': 3,
u'c\xe9u': 3,
u'wix': 3,
u'fpb': 3,
u'#wp': 3,
u'#w\xfc': 3,
u'#w\xe4': 3,
u'fj\xe6': 3,
u'tbj': 3,
u'djs': 3,
u'qfn': 3,
u'yaa': 3,
u'es\u014f': 3,
u'#j\xe9': 3,
u'c#\xe6': 3,
u'eaw': 3,
u'hn\xfa': 3,
u'sjg': 3,
u'o+k': 3,
u'lwo': 3,
u'nfs': 3,
u'o\u0148s': 3,
u'wuj': 3,
u'b\xf3n': 3,
u'\u03b1\u03c4\u03ac': 3,
u'fmr': 3,
u'fms': 3,
u'\u0107#b': 3,
u'fmd': 3,
u'c\u0103t': 3,
u'doj': 3,
u'sa\xb4': 3,
u'om\xed': 3,
u'\u0107##': 3,
u'omj': 3,
u'omw': 3,
u'\xe6ha': 3,
u'\xf3nb': 3,
u'\xf3no': 3,
u'\xedna': 3,
u'wdr': 3,
u'#\xb4#': 3,
u'om\u011b': 3,
u'\xedng': 3,
u'kiu': 3,
u'yef': 3,
u'\xe5#\xa3': 3,
u'\xefrb': 3,
u'\xb0cm': 3,
u'#n\xfa': 3,
u'axs': 3,
u'\xe6fs': 3,
u'axd': 3,
u'\u03c2##': 3,
u'cgl': 3,
u'e#\u03b5': 3,
u'gvr': 3,
u'ayg': 3,
u'ma\xed': 3,
u'bo\u017e': 3,
u'or\xed': 3,
u'\xf8ju': 3,
u'c\xe1z': 3,
u'c\xe1n': 3,
u'\xe1\xedt': 3,
u'e#\xed': 3,
u'or\u0119': 3,
u'\xe9us': 3,
u'ge=': 3,
u'a\xedn': 3,
u'oiu': 3,
u'\xfana': 3,
u'\xfanf': 3,
u'\xfanm': 3,
u'\xfanp': 3,
u'yye': 3,
u'zpi': 3,
u's\xf5n': 3,
u'#bz': 3,
u'\xef#j': 3,
u'\xef#e': 3,
u'\xfcme': 3,
u'+af': 3,
u'+ak': 3,
u'\xfd#n': 3,
u'\xfd#m': 3,
u'p\xf3p': 3,
u'iug': 3,
u'eyg': 3,
u'eyv': 3,
u'vhp': 3,
u'\xe4#m': 3,
u'c\u01ceu': 3,
u'i\xe5n': 3,
u'\u013e#h': 3,
u'eqa': 3,
u'o#\xe8': 3,
u'ga\u013e': 3,
u'yca': 3,
u'#\xe9g': 3,
u'#\xe9p': 3,
u'\xe5ur': 3,
u'jbu': 3,
u'i\xf8#': 3,
u'ldz': 3,
u'\u03c5\u03c0\u03ad': 3,
u'\u03c0\u03ad\u03c1': 3,
u'\u0142ka': 3,
u'ufh': 3,
u'nsx': 3,
u'\u03b5\u03b9#': 3,
u'zhk': 3,
u'jga': 3,
u'\xf6n#': 3,
u's\xe9t': 3,
u'\xf6nn': 3,
u'#f\xf6': 3,
u'iig': 3,
u'ch\xe2': 3,
u'\xe1dr': 3,
u'ch\xf3': 3,
u'ch\xf8': 3,
u'qu\xea': 3,
u'bw#': 3,
u'#\xf6k': 3,
u'pgt': 3,
u'\xfcv#': 3,
u'mi\xe6': 3,
u'mi\xf1': 3,
u'sfc': 3,
u'xdv': 3,
u'xds': 3,
u'c\xf9n': 3,
u'r\u0119b': 3,
u'#\xedn': 3,
u'wyz': 3,
u'wyd': 3,
u'wyb': 3,
u'oa\xe7': 3,
u'\xf8fk': 3,
u'\xe6#\xe6': 3,
u'\u0103t\u0103': 3,
u'\xf8fd': 3,
u'ubf': 3,
u'scn': 3,
u'+#x': 3,
u'mnt': 3,
u'\xe5al': 3,
u'mnc': 3,
u'q#p': 3,
u'q#l': 3,
u's\xedo': 3,
u'\xf6ri': 3,
u'\xe5lh': 3,
u'to\u0161': 3,
u'm\xe5m': 3,
u'nhf': 3,
u'nht': 3,
u'toq': 3,
u'cyf': 3,
u'\xfcrh': 3,
u'\xfcrm': 3,
u'muy': 3,
u'\u043d\u0430#': 3,
u'+#g': 3,
u'hsl': 3,
u'awp': 3,
u'ppb': 3,
u'ahp': 3,
u'ah\xe9': 3,
u'llj': 3,
u'#\xe1d': 3,
u'll\xed': 3,
u'ro\u0144': 3,
u'g#\u03b2': 3,
u'\u03b4\u03c1\u03af': 3,
u'\u03b9#\u03b1': 3,
u'\xadmy': 3,
u'ahc': 3,
u'ahf': 3,
u'gtz': 3,
u'ia\u010d': 3,
u'a\xf8r': 3,
u'#+h': 3,
u'#+r': 3,
u'a\xf8j': 3,
u'h\xe2t': 3,
u'vt\xe5': 3,
u'g#=': 3,
u'uyv': 3,
u'fck': 3,
u'+m\xe5': 3,
u'+m\xf8': 3,
u'nln': 3,
u'mqu': 3,
u'p\u0117d': 3,
u's+#': 3,
u'ocq': 3,
u'\xad#m': 3,
u'\xad#e': 3,
u'g\xe5g': 3,
u'\xad#s': 3,
u'yhi': 3,
u'\xfd#e': 3,
u'\u01ceu#': 3,
u'ehw': 3,
u'u\xeat': 3,
u'#\xe5t': 3,
u'hwy': 3,
u'ptv': 3,
u'al+': 3,
u'llw': 3,
u'oyf': 3,
u'\u01cene': 3,
u'e\xf8#': 3,
u'\u03b1\u03c3\u03b7': 3,
u'wlr': 3,
u'\xf6zg': 3,
u'gpb': 3,
u'#rn': 3,
u'jej': 3,
u'gp\xe5': 3,
u'jsj': 3,
u'rt\xe4': 3,
u'ie\xdf': 3,
u'\xe6a#': 3,
u'\xf2##': 3,
u'uuo': 3,
u'c#z': 3,
u'\xf2#e': 3,
u'\u0161en': 3,
u'\u03c4\u03ac#': 3,
u'qal': 3,
u's\xf8a': 3,
u'hge': 3,
u'i#\u017e': 3,
u'ixl': 3,
u'ixt': 3,
u'r\xe4t': 3,
u'ed\xe9': 3,
u'v\xeda': 3,
u'bds': 3,
u'si\xe6': 3,
u'#\u010di': 3,
u'\xba#s': 3,
u'\u0161tu': 3,
u'\xf3##': 3,
u'joj': 3,
u'g+k': 3,
u'rpg': 3,
u'rpn': 3,
u'm\xe9h': 3,
u'vtz': 3,
u'rp\xf8': 3,
u'\xe6em': 3,
u'm\xe9\xe5': 3,
u'\u0148s#': 3,
u'tcp': 3,
u'tcc': 3,
u'\xe1tk': 3,
u'\xe1th': 3,
u'nd\xf3': 3,
u'#m\u01ce': 3,
u's\xfcb': 3,
u'\u0123ir': 3,
u's#\xf3': 3,
u'myi': 3,
u'bcc': 3,
u'mye': 3,
u'\xf8rc': 3,
u'\xf8r\xe6': 3,
u'\xf5n#': 3,
u'hoy': 3,
u'yub': 3,
u'pr\xe8': 3,
u'upg': 3,
u'mdl': 3,
u'#\u03c5\u03c0': 3,
u'f\xfch': 3,
u'\u0119re': 3,
u'\xe9p\xf4': 3,
u'\u03af\u03b1\u03c3': 3,
u'\xe9pe': 3,
u'\xf3\xf1e': 3,
u'\xe1lo': 3,
u'dh\xe9': 3,
u'i\u0161#': 3,
u'\xe1lf': 3,
u'prh': 3,
u'\xeame': 3,
u'ol\xe8': 3,
u'y\u0142a': 3,
u'g#\u015b': 3,
u'qic': 3,
u'khd': 3,
u'nxd': 3,
u'nxb': 3,
u'ydj': 3,
u'ydy': 3,
u'ipg': 3,
u'\xf8vf': 3,
u'fpf': 3,
u'dyi': 3,
u'k\xf8t': 3,
u'\u0151fi': 3,
u'vff': 3,
u'vbs': 3,
u'axl': 3,
u'xm\xe5': 3,
u'\xfarg': 3,
u'\xe9to': 3,
u'\xe9tu': 3,
u'wpp': 3,
u'u#\u0161': 3,
u'gdv': 3,
u'ac\xe9': 3,
u'ddp': 3,
u'ddu': 3,
u'ddf': 3,
u'acg': 3,
u'uif': 3,
u'zms': 3,
u'\u0117#t': 3,
u'\u0117#j': 3,
u'\xfcnd': 3,
u'\xe9\xe5n': 3,
u'vi\xe3': 3,
u'vi\xf3': 3,
u'p\xf4t': 3,
u'=#b': 3,
u'ft\xf8': 3,
u'it+': 3,
u'trr': 3,
u'l\xe8#': 3,
u'\xe1g#': 3,
u'l\xe8g': 3,
u'hgh': 3,
u'o\u0144#': 3,
u'\xf5es': 3,
u'se\xad': 3,
u'\xf8of': 3,
u'mlc': 3,
u'n\xe1c': 3,
u'n\xe1i': 3,
u'n\xe1#': 3,
u'\xe9ho': 3,
u'\xe8ze': 3,
u'\xe4fi': 3,
u'jcc': 3,
u'od\xf8': 3,
u'od\xe5': 3,
u'ctn': 3,
u't\xfcb': 3,
u'npy': 3,
u'vmu': 3,
u'p\xf8v': 3,
u'p\xf8#': 3,
u's\xe8v': 3,
u's\xe8n': 3,
u'ihy': 3,
u'ihf': 3,
u'ck\xfd': 3,
u'ckg': 3,
u'vmk': 3,
u'\xe8nc': 3,
u'rvb': 3,
u't\u0173t': 3,
u'cbh': 3,
u'cbt': 3,
u'g\xfce': 3,
u'et\u0151': 3,
u'phv': 3,
u'qiy': 3,
u'xek': 3,
u'xes': 3,
u'\xdf##': 3,
u'mhg': 3,
u'mhv': 3,
u'hps': 3,
u'hpo': 3,
u'\xe1rs': 3,
u'\xe9ly': 3,
u'\xe4be': 3,
u'la\xe6': 3,
u'i\xf3t': 3,
u'uah': 3,
u'\xecgl': 3,
u'\xf2be': 3,
u'c\xe8#': 3,
u't\xe1i': 3,
u'o\u0161t': 3,
u'\xf6sz': 3,
u'\xf6si': 3,
u'nbw': 3,
u's\xec#': 3,
u'd\xf3n': 3,
u'ilq': 3,
u'\xf3pr': 3,
u'th\xe1': 3,
u'epb': 3,
u'bpu': 3,
u'ykm': 3,
u'#nb': 3,
u'm\u01cen': 3,
u'\xf8gv': 3,
u'xy#': 3,
u'mtt': 3,
u'mtf': 3,
u'aow': 3,
u'ghw': 3,
u'ghf': 3,
u'ghm': 3,
u'rl\xa3': 3,
u'gbc': 3,
u'\xf2n#': 3,
u'lmz': 3,
u'zaz': 3,
u'zay': 3,
u'\u015f#t': 3,
u'\u015f#h': 3,
u'\u015f#n': 3,
u'wcs': 3,
u'\xf6ws': 3,
u'jt\xf8': 3,
u'\u016bto': 3,
u'gw#': 3,
u'\u03ba\u03b1\u03c4': 3,
u'm\xe0s': 3,
u'gb\xe6': 3,
u'uxf': 3,
u'\xe6bf': 3,
u'uxm': 3,
u'cck': 3,
u'b#\xe9': 3,
u'xve': 3,
u'\u017a#i': 3,
u'izp': 3,
u'mpy': 3,
u'o\xebn': 3,
u'#\u03b9p': 3,
u'hhr': 3,
u'n+b': 3,
u'#\u015f\u0131': 3,
u'st\u0173': 3,
u'jho': 3,
u'li\u0161': 3,
u'rb\xf3': 3,
u'liw': 3,
u'dt\xe5': 3,
u'st\xfc': 3,
u'\u0430\u044f#': 3,
u'n\u010di': 3,
u'b\xe9h': 3,
u'\u0103#o': 3,
u'\u03bf\u03c7\u03ae': 3,
u'hbl': 3,
u'jpu': 3,
u'gsw': 3,
u'vyd': 3,
u'id\xe6': 3,
u'cgv': 3,
u'na\xf8': 3,
u'l\xf8m': 3,
u'kct': 3,
u'\xe4ts': 3,
u'#\u03ba\u03b1': 3,
u'u\xe5n': 3,
u'u\xe5r': 3,
u'l\xe5a': 3,
u'l\xe5k': 3,
u'hlr': 3,
u'ecj': 3,
u'd+c': 3,
u'pya': 3,
u'lu\xe7': 3,
u'\u0173tt': 3,
u'wkl': 3,
u'\xe9sz': 3,
u'\xe9su': 3,
u'\xe9so': 3,
u'jll': 3,
u'+to': 3,
u'#\u0123i': 3,
u'rq#': 3,
u'dm\xe4': 3,
u'p+a': 3,
u'\u015f\u0131k': 3,
u'n#\u03b1': 3,
u'\u0142es': 3,
u'upw': 3,
u'#\u03b1\u03c0': 3,
u'#l\xe8': 3,
u'is\xf5': 3,
u'is\xe8': 3,
u'n#\u015b': 3,
u'n#\u0151': 3,
u'xun': 3,
u'n#\xb0': 3,
u'a\u010do': 3,
u'a\u010da': 3,
u'baq': 3,
u'\u03b9pp': 3,
u'\xe9u#': 3,
u'sls': 3,
u'slc': 3,
u'l\xf6d': 3,
u'la\u015f': 3,
u'\xf8ho': 3,
u'sl\xe4': 3,
u'n\xeem': 3,
u'\xe2te': 3,
u'wwa': 3,
u'wwo': 3,
u'op\xf8': 3,
u'tnp': 3,
u'dii': 3,
u'\xf1sk': 3,
u'di\xe9': 3,
u'di\xe1': 3,
u'l+#': 3,
u'ul\xf9': 3,
u'fl\xf6': 3,
u'a\u015bn': 3,
u'g=p': 3,
u'\u010d#i': 3,
u'vja': 3,
u'\u010d#e': 3,
u'g\xe8s': 3,
u'\xe1bh': 3,
u'\xe6su': 3,
u'\xe6sv': 3,
u'\xe6sy': 3,
u'r\xe4f': 3,
u'd#\u0151': 3,
u'd#\xe0': 3,
u'd#\xf3': 3,
u'd#\xba': 3,
u'hdt': 3,
u'paj': 3,
u'f\xe1t': 3,
u'\xeb#f': 3,
u'pa\xe5': 3,
u'\xa3#a': 3,
u't\u0151f': 3,
u'i\u010de': 3,
u'\u03c5\u03bd\u03b5': 3,
u'i\u010dk': 3,
u'a\u0161a': 3,
u'hyu': 3,
u'wsw': 3,
u'abv': 3,
u'ggt': 3,
u't\xf8#': 3,
u'jdk': 3,
u'jdj': 3,
u'ryj': 3,
u'csk': 3,
u'csg': 3,
u'de\xe9': 3,
u'l\xe9i': 3,
u'uhf': 3,
u'b\xe6s': 3,
u'zjo': 3,
u'zje': 3,
u'h\xe8r': 3,
u'koy': 3,
u'ujv': 3,
u'#d+': 3,
u'ujs': 3,
u'rrt': 3,
u'ko\xf8': 3,
u'fgs': 3,
u'\xf6hr': 3,
u'ikz': 3,
u'l\xe9z': 3,
u'fuv': 3,
u'=pr': 3,
u'pe\xe5': 3,
u'\u0117da': 3,
u'bi\xe8': 3,
u'r\xf2n': 3,
u'sdl': 3,
u'l\xa3#': 3,
u'e=p': 3,
u'\u0103li': 3,
u'p\xe9s': 3,
u'\xe9or': 3,
u'\xe9op': 3,
u'\xe9ol': 3,
u'\xf8lk': 3,
u're\xf1': 3,
u'lfj': 3,
u'zfo': 3,
u'd\xf3\xf1': 3,
u'fct': 3,
u'vr#': 3,
u'\xfck#': 3,
u'#xs': 3,
u'vrt': 3,
u'\xe5nb': 3,
u'\xe5nu': 3,
u'ysb': 3,
u'fyd': 3,
u'ka\xf8': 3,
u'njs': 3,
u'nj#': 3,
u'#b\xf3': 3,
u'pi\xe8': 3,
u'i\xdfe': 3,
u'i\xdf#': 3,
u'\xfcdt': 3,
u'en\xed': 3,
u'en\xad': 3,
u'\xe9ci': 3,
u'\xe9cl': 3,
u'goh': 3,
u'goj': 3,
u'ra\u010d': 3,
u'ra\xf8': 3,
u'p#z': 3,
u'thh': 3,
u'\u03c1\u03af\u03b1': 3,
u'\xedso': 3,
u'\u043a\u0440\u0430': 3,
u'#g+': 3,
u'lct': 3,
u'vvs': 3,
u'lcd': 3,
u'+kr': 3,
u'jyr': 3,
u'ywe': 3,
u'ti\u0161': 3,
u'\u03c4\u03b9\u03c2': 3,
u'nn\xed': 3,
u'\xe1ni': 3,
u'nnw': 3,
u'nnr': 3,
u'j\xe9g': 3,
u'#vx': 3,
u'\u03bd\u03b5\u03b4': 3,
u'##$': 3,
u'yju': 3,
u'r\xfa#': 3,
u'r\xfar': 3,
u'csf': 3,
u's\u014fn': 3,
u'ejj': 3,
u'a\u0142e': 3,
u'an\xb4': 3,
u'\xe9g#': 3,
u'n\xf6#': 3,
u'ej\u010d': 3,
u'an+': 3,
u'#v\xf6': 3,
u'\u03c7\u03ae#': 3,
u'uhv': 3,
u'j\xf3s': 3,
u'wba': 3,
u'gvu': 3,
u'p\xe1d': 3,
u'juh': 3,
u'cbb': 3,
u'cbk': 3,
u'rvm': 3,
u'rvp': 3,
u'\u010dor': 3,
u'teq': 3,
u'r#\xad': 3,
u'jvf': 3,
u'\xadni': 3,
u'fke': 3,
u'd\xe5#': 3,
u'c\xf3g': 3,
u'ef\xe5': 3,
u'ef\xad': 3,
u'ar\xf2': 3,
u'bfe': 3,
u'bfu': 3,
u'kd\xe5': 3,
u'hiw': 3,
u'hi\xe7': 3,
u'ri\xf3': 3,
u'duz': 3,
u'p+p': 3,
u'p+i': 3,
u'ljb': 3,
u'uzy': 3,
u'#t\u0103': 3,
u'lj\xe6': 3,
u'#t\xf6': 3,
u'wny': 3,
u'#tp': 3,
u'\u0119ba': 3,
u'+cv': 3,
u'ibc': 3,
u'cfe': 3,
u'fey': 3,
u'\xe6gv': 3,
u'\xe8#p': 3,
u'l\xf9#': 3,
u'ic\u01ce': 3,
u'\xe8#o': 3,
u'vcv': 3,
u'\xfcde': 3,
u'nf\xe9': 3,
u'h#\xe5': 3,
u'\u03c3\u03c5\u03bd': 3,
u'd\xe1#': 3,
u'd\xe1r': 3,
u'eb\xe1': 3,
u'eb\xe9': 3,
u'#\u03c3\u03c5': 3,
u'#\u03c3\u03c4': 3,
u't+#': 3,
u'##\u03bf': 3,
u'mfb': 3,
u'mfv': 3,
u'skw': 3,
u'sk\xad': 3,
u'\u03b9\u03c2#': 3,
u'\xe9rk': 2,
u'\xe9rm': 2,
u'\xe7\u0131#': 2,
u'rlm': 2,
u'p\xe9n': 2,
u'p\xe9z': 2,
u'\xe4lk': 2,
u'jmu': 2,
u'xto': 2,
u'jmi': 2,
u'\u03bf\u03c2#': 2,
u'uoa': 2,
u'uog': 2,
u'\xedba': 2,
u'#\u043d\u0430': 2,
u'\xe1zl': 2,
u'\xe1zq': 2,
u'kfy': 2,
u'zn\xfd': 2,
u'#\xb5m': 2,
u'nzs': 2,
u'nzk': 2,
u'\u0161\xedn': 2,
u'\u0161\xedc': 2,
u'e\u0144#': 2,
u'nz\xe9': 2,
u'cuu': 2,
u'f\u0173r': 2,
u'cuf': 2,
u'frm': 2,
u'cuc': 2,
u'tzn': 2,
u'a\u0142g': 2,
u'bnh': 2,
u'k\xf6v': 2,
u'k\xf6y': 2,
u'\xefna': 2,
u'ha\xe5': 2,
u'ha\xe6': 2,
u'ha\xfc': 2,
u'\u0144#v': 2,
u'azk': 2,
u'\xf1of': 2,
u'xka': 2,
u'#\u0432#': 2,
u'zr\xf8': 2,
u'lrt': 2,
u'os\xec': 2,
u'lr\xe6': 2,
u't\xe9t': 2,
u't\xe9b': 2,
u'voz': 2,
u'flk': 2,
u'jie': 2,
u'jiy': 2,
u'on\u010d': 2,
u'ukm': 2,
u'rzg': 2,
u'rzj': 2,
u'on\xf3': 2,
u'\xednd': 2,
u'zoa': 2,
u'zoc': 2,
u'zoh': 2,
u'zoi': 2,
u'zoj': 2,
u'$#o': 2,
u'e\xe9r': 2,
u'c\u0153u': 2,
u'#c\xe0': 2,
u'vk\xe6': 2,
u'zhv': 2,
u't#\u03bd': 2,
u't#\u03b9': 2,
u't#\u03b5': 2,
u'\xfcld': 2,
u'a\xedl': 2,
u'a\xeda': 2,
u'iv\xe9': 2,
u'vuf': 2,
u'yzi': 2,
u'ivj': 2,
u'ivy': 2,
u'\u04d5tt': 2,
u'\xf4t\xe9': 2,
u't#\u010d': 2,
u'fvu': 2,
u'ezm': 2,
u'\xf4t#': 2,
u'bjd': 2,
u'\xe8ce': 2,
u'\xe8ch': 2,
u't#\xba': 2,
u'#\xf3n': 2,
u'dsw': 2,
u'scc': 2,
u'smn': 2,
u'g\xf6d': 2,
u'smf': 2,
u'\xf6#k': 2,
u'smb': 2,
u'\xf6#s': 2,
u'\xf6#t': 2,
u'\xf6#u': 2,
u'f\xe6h': 2,
u'mn\xe6': 2,
u'xol': 2,
u'\u0119to': 2,
u'\u0161pe': 2,
u'\xebns': 2,
u'vlj': 2,
u'vln': 2,
u'vlb': 2,
u'\xe9ja': 2,
u'hzf': 2,
u'hzo': 2,
u'wrs': 2,
u'\u013eub': 2,
u'#=r': 2,
u'\u03bfc#': 2,
u'aae': 2,
u'aaj': 2,
u'aaz': 2,
u'#\u013a\xf1': 2,
u'\u013ea#': 2,
u'\u0163oc': 2,
u'i\xf9n': 2,
u'e\xefd': 2,
u'\xe0pi': 2,
u'smc': 2,
u'crt': 2,
u'rfk': 2,
u'\xf3gi': 2,
u'zke': 2,
u'\xf6n\xf6': 2,
u'cr\xfa': 2,
u'rf\xe9': 2,
u'nrk': 2,
u'nrm': 2,
u'nrl': 2,
u'#g\xa2': 2,
u'a\xf1#': 2,
u'a\xf1s': 2,
u'nr\xe6': 2,
u'h\xe9b': 2,
u'h\xe9f': 2,
u'\u0105g#': 2,
u'knk': 2,
u'knv': 2,
u'p\xf6d': 2,
u'p\xf6l': 2,
u'\u0161#g': 2,
u'\u0161#o': 2,
u'\u0161#z': 2,
u's\xf6t': 2,
u'ijr': 2,
u'\xe6ty': 2,
u'\xe6ta': 2,
u'\xe6tf': 2,
u'\xe6tk': 2,
u'ciw': 2,
u'\u0159\xedm': 2,
u'ci\xe1': 2,
u'u\xadp': 2,
u'\xe8gr': 2,
u'trj': 2,
u'trc': 2,
u'bv\xe5': 2,
u'\u013e#i': 2,
u'\u013e#j': 2,
u'\u013e#m': 2,
u'\xf8al': 2,
u'r\xf3d': 2,
u'r\xf3v': 2,
u'r\xf3p': 2,
u'mji': 2,
u'lzi': 2,
u'\xe1dl': 2,
u'\xe1#g': 2,
u'\xe1#j': 2,
u'\xe9nt': 2,
u't\xe1z': 2,
u'\xe9nm': 2,
u'\xe9ni': 2,
u'#\xea\xe1': 2,
u'rjy': 2,
u'\xe1#\u013e': 2,
u'dbd': 2,
u'sz\xf3': 2,
u'db\xf6': 2,
u'f#\xed': 2,
u'#\u013eu': 2,
u'ucg': 2,
u'zg\xfc': 2,
u'lgv': 2,
u'e\xe1#': 2,
u'n\xadb': 2,
u'n\xadh': 2,
u'ofj': 2,
u'f#\xa3': 2,
u'cv\xe6': 2,
u'a#\u0142': 2,
u'a#\u017c': 2,
u'nvd': 2,
u'nvb': 2,
u'nvn': 2,
u'kr\xfc': 2,
u'kr\xe1': 2,
u'kr\xe4': 2,
u'vsw': 2,
u'vsz': 2,
u'in\u015b': 2,
u'we\u0142': 2,
u'p\xfaz': 2,
u'in\xe4': 2,
u'in+': 2,
u'a#\xad': 2,
u'\u017e#e': 2,
u'cmb': 2,
u'cmy': 2,
u'\xe1im': 2,
u'\xe1id': 2,
u'br\xed': 2,
u'\xe4ng': 2,
u'nk\xf6': 2,
u'er\xe1': 2,
u'dhb': 2,
u'\xeb#e': 2,
u'\xeb#h': 2,
u'\xf8eu': 2,
u'\xeb#v': 2,
u'pj\xe6': 2,
u'#zn': 2,
u'mvm': 2,
u'o\xed#': 2,
u'\xed\xe1\xf4': 2,
u'\xe7uk': 2,
u'\xf9#e': 2,
u'k\xe9h': 2,
u'k\xe9b': 2,
u'em\xe4': 2,
u'v\xe4s': 2,
u'\u0117#e': 2,
u'em\xfc': 2,
u'#\xeel': 2,
u'v\xe4l': 2,
u'\xe8ov': 2,
u'#\u0142\u0105': 2,
u'hr\u0161': 2,
u'y\xe6n': 2,
u'ai\xfa': 2,
u'gn\xf8': 2,
u'gn\xe5': 2,
u'wa\u015b': 2,
u'f\xf3s': 2,
u'r\u0161e': 2,
u'lc\xf3': 2,
u'k#q': 2,
u'wao': 2,
u'\xedri': 2,
u'dlk': 2,
u'h\xe1n': 2,
u'h\xe1r': 2,
u'umw': 2,
u'j\xf6v': 2,
u'i\u0161e': 2,
u'gu\xeb': 2,
u'gu\xed': 2,
u'fbs': 2,
u'caj': 2,
u'fb\xe5': 2,
u'\xe1ma': 2,
u'z\xf3w': 2,
u'z\xf3l': 2,
u'ib\xf3': 2,
u'yie': 2,
u'yid': 2,
u'j\xe6k': 2,
u'j\xe6d': 2,
u'pn\xe6': 2,
u'mrt': 2,
u'mrp': 2,
u'mrd': 2,
u'mrg': 2,
u'mrb': 2,
u'\u011b\u0159i': 2,
u'vtm': 2,
u'u\xebt': 2,
u'u\xebm': 2,
u'\xa3se': 2,
u'azv': 2,
u'qil': 2,
u'khv': 2,
u'\xe9f\xe9': 2,
u'\u0153ur': 2,
u'a\u0107#': 2,
u'rwc': 2,
u'khf': 2,
u'\xe9fa': 2,
u'\xe9fi': 2,
u'#v\xee': 2,
u'srr': 2,
u't#\u20ac': 2,
u'\xed#z': 2,
u'\xed#t': 2,
u'\xfaas': 2,
u'\xed#q': 2,
u'\xf8#\xe6': 2,
u'p\u0159e': 2,
u'#s\u0142': 2,
u'v\xee\xed': 2,
u'kzo': 2,
u'+ne': 2,
u'h\xf3i': 2,
u'ifz': 2,
u'\u0163at': 2,
u'uv\xe9': 2,
u'ff\xe9': 2,
u'ce\xe6': 2,
u'ka\u0161': 2,
u'ga\xeb': 2,
u'ncm': 2,
u'ncn': 2,
u'ncc': 2,
u'ncd': 2,
u'\xf3ve': 2,
u'h\xfai': 2,
u'ncp': 2,
u'tfg': 2,
u'\u015f\xe7i': 2,
u'bzp': 2,
u'\xf3v\xe1': 2,
u'qba': 2,
u'g\xe9e': 2,
u'tfp': 2,
u'm#\xe1': 2,
u'\u0173#n': 2,
u'\xdfna': 2,
u'm#\u0163': 2,
u'n\xf3r': 2,
u'n\xf3t': 2,
u'c\xf2f': 2,
u'i\u0146\u0161': 2,
u'#\u03bb\u03ae': 2,
u'#\xe6o': 2,
u'bgo': 2,
u'nc\xed': 2,
u'\u0161uj': 2,
u'aqb': 2,
u'\xf5ry': 2,
u'pu\u0142': 2,
u'i\xe9s': 2,
u'i\xe9e': 2,
u'\u0173re': 2,
u'\xe4te': 2,
u'tmk': 2,
u'tmm': 2,
u'\u0146\u0161#': 2,
u'g\xe6d': 2,
u'ozm': 2,
u'ozh': 2,
u'n\u0103s': 2,
u'wia': 2,
u'wio': 2,
u'e+m': 2,
u'e+a': 2,
u'fpd': 2,
u'fpg': 2,
u'#wm': 2,
u'#wl': 2,
u'\xf6yk': 2,
u'#wb': 2,
u'#wd': 2,
u'#w\xeb': 2,
u'cyg': 2,
u'rsw': 2,
u'rsq': 2,
u'm\xeam': 2,
u'\u0142go': 2,
u'\xfa#g': 2,
u'\xfa#l': 2,
u'd#\xad': 2,
u'\xfa#o': 2,
u'tbk': 2,
u'tbm': 2,
u'\xfces': 2,
u'ng\xf3': 2,
u'jiv': 2,
u'#jn': 2,
u'\xf6be': 2,
u'#jr': 2,
u'djn': 2,
u'r\xe3e': 2,
u'rc\xf3': 2,
u'xsp': 2,
u'xsc': 2,
u'xsk': 2,
u'#\u03bfc': 2,
u'l\xe7a': 2,
u'l\xe7u': 2,
u'l=p': 2,
u'k\xf5i': 2,
u'\xe8l#': 2,
u'w\xe4g': 2,
u'w\xe4h': 2,
u'w\xe4r': 2,
u'\u043e\u0439#': 2,
u'hnn': 2,
u'bcu': 2,
u'hce': 2,
u'e\u010di': 2,
u'dr\xf3': 2,
u'dr\xf4': 2,
u'\xfeu#': 2,
u'drp': 2,
u'drk': 2,
u'drn': 2,
u'ov\u0148': 2,
u'lwy': 2,
u'e\xf1\xf3': 2,
u'n\xe8s': 2,
u'me\u010d': 2,
u'##\u03b1': 2,
u'dyf': 2,
u'b\xf3r': 2,
u'b\xf3b': 2,
u'fmu': 2,
u'fmc': 2,
u'c\u0103#': 2,
u'd\xb4e': 2,
u'p\xeac': 2,
u'\u03c2#\u03c3': 2,
u'xfl': 2,
u'\xe6hi': 2,
u'\xe6he': 2,
u'\xf8dz': 2,
u'\xf3nt': 2,
u'\xf3nr': 2,
u'l\u0153i': 2,
u'e\xb4s': 2,
u'yez': 2,
u'yew': 2,
u'l#\xad': 2,
u'\xe5#$': 2,
u'#n\xee': 2,
u'\u013a\xf1i': 2,
u'\xe6fu': 2,
u'fs\xe5': 2,
u'vpv': 2,
u'na\u0161': 2,
u'\xf6rb': 2,
u'fsj': 2,
u'\xf6rd': 2,
u'vpm': 2,
u'bo\xf8': 2,
u'cgi': 2,
u'e#\u03c5': 2,
u'\xf3\u0142k': 2,
u'cge': 2,
u'ma\u017e': 2,
u'cgp': 2,
u'ma\xad': 2,
u'i\xe1m': 2,
u'i\xe1l': 2,
u'maq': 2,
u'e#\u0161': 2,
u'\xebme': 2,
u'e#\u0151': 2,
u'or\xeb': 2,
u'tbs': 2,
u'c\xe1i': 2,
u'zsu': 2,
u'zsp': 2,
u'zsy': 2,
u'e#\xfc': 2,
u'v\u0119r': 2,
u'e#\xba': 2,
u'\u0159er': 2,
u'y\xe9s': 2,
u'jfk': 2,
u'jff': 2,
u'oip': 2,
u'\xe6lu': 2,
u'#\xdf\xf8': 2,
u'\xedm#': 2,
u'l#\u0142': 2,
u'\xedma': 2,
u'\xfand': 2,
u'h\xf6h': 2,
u'\xef#i': 2,
u'\xef#l': 2,
u'\xef#n': 2,
u'\xef#p': 2,
u'e\u015fl': 2,
u'r\xebs': 2,
u'\xdfba': 2,
u'\xe1hy': 2,
u'\xfd#i': 2,
u'\xe7is': 2,
u'svp': 2,
u'ajv': 2,
u'iuh': 2,
u'iuo': 2,
u'iub': 2,
u'ajc': 2,
u'goq': 2,
u'#h\xa2': 2,
u'eyw': 2,
u'#c\u0153': 2,
u'bk\xf8': 2,
u'wi\u015b': 2,
u'qq#': 2,
u'bk#': 2,
u'ey\xe6': 2,
u'wb#': 2,
u'hft': 2,
u'hfb': 2,
u'hfe': 2,
u'\xe4#j': 2,
u'\xe4#d': 2,
u'\xe4#a': 2,
u'd=d': 2,
u'\xe4#p': 2,
u'pcv': 2,
u'pcn': 2,
u'sb\xfc': 2,
u'j\xf3v': 2,
u'vb\xe5': 2,
u'r\xf4m': 2,
u'sbp': 2,
u'sbt': 2,
u'\u010d\xedk': 2,
u'\u03c1#p': 2,
u'o#\xe1': 2,
u'o#\xe0': 2,
u'\xf8nj': 2,
u'v\u0148a': 2,
u'\u013e#e': 2,
u'\xee\xede': 2,
u'dgb': 2,
u'syw': 2,
u'dgx': 2,
u'cuv': 2,
u'rg\xfc': 2,
u'ufk': 2,
u'ufc': 2,
u'ufd': 2,
u'h\xe1#': 2,
u'oee': 2,
u'a\u0144c': 2,
u'c\xf8l': 2,
u'b\xe4r': 2,
u'\u017adz': 2,
u'zhb': 2,
u'tvj': 2,
u's\xe9s': 2,
u's\xe9c': 2,
u's\xe9i': 2,
u's\xe9l': 2,
u'\xf6nt': 2,
u'vlv': 2,
u'e\u015bn': 2,
u'+er': 2,
u'\u03c3\u03b7#': 2,
u'chc': 2,
u'd\xf6l': 2,
u'f\xadt': 2,
u'd\xf6b': 2,
u'jg#': 2,
u'ch\xfa': 2,
u'r=d': 2,
u't#=': 2,
u'#\xf6t': 2,
u'#\xf6n': 2,
u'#\xf6g': 2,
u'#\xa3s': 2,
u'pgo': 2,
u'mi\xe9': 2,
u'\xefa#': 2,
u'\xe2la': 2,
u'\xfcvo': 2,
u'pg\xe6': 2,
u'\xdfun': 2,
u'xd#': 2,
u'\u0103n\u0103': 2,
u'xdo': 2,
u'##\u20ac': 2,
u'l\xad#': 2,
u'\xf8b\xe5': 2,
u'#\xed\xe1': 2,
u'n\xe4t': 2,
u'ohw': 2,
u'v\xe1x': 2,
u'a\u013ea': 2,
u't\xe2r': 2,
u'u\u0161u': 2,
u'u\u0161k': 2,
u'u\u0161a': 2,
u'ad\xe1': 2,
u'\xe6#r': 2,
u'rfh': 2,
u'#\u0434\u0435': 2,
u'#\u0434\u043e': 2,
u'rcm': 2,
u'ub\xe9': 2,
u'rxb': 2,
u'e\xe0p': 2,
u'oax': 2,
u'ubh': 2,
u'nwh': 2,
u'#qo': 2,
u'scd': 2,
u'mnl': 2,
u'q#r': 2,
u'q#t': 2,
u'\u03c4\u03bfs': 2,
u'zd\xed': 2,
u'q#j': 2,
u'q#n': 2,
u'vps': 2,
u'g\xf6s': 2,
u'ku\u0142': 2,
u'\xe5lz': 2,
u'\xe5lw': 2,
u'vp\xe6': 2,
u'#z\xef': 2,
u'cl=': 2,
u'cl\xf3': 2,
u'\xf6pe': 2,
u'toz': 2,
u'eqi': 2,
u'\xf4me': 2,
u'\u0430\u0441\u043d': 2,
u'to+': 2,
u'cyu': 2,
u'to\xf1': 2,
u'to\xb4': 2,
u'pke': 2,
u'pkn': 2,
u'xoc': 2,
u'\xfcrs': 2,
u'\xfcra': 2,
u'muq': 2,
u'i+i': 2,
u'\u0151#g': 2,
u'a\xf3#': 2,
u'n\xf8n': 2,
u'\xe9ag': 2,
u'el\xe7': 2,
u'v\xe5l': 2,
u'awu': 2,
u'hsb': 2,
u'awt': 2,
u'hsi': 2,
u'ppd': 2,
u'ahk': 2,
u'ahv': 2,
u'el\u0153': 2,
u'h\xe9c': 2,
u'pp\xe5': 2,
u'\xe6du': 2,
u'\u03c0\u03c1\u03cc': 2,
u'\xf1\xf3n': 2,
u'#\xe1f': 2,
u'ro\xed': 2,
u'p\xf4l': 2,
u'll\u0151': 2,
u'kyc': 2,
u'\xedef': 2,
u'\u03b9#\u03c3': 2,
u'\xfa#i': 2,
u'\xfa#r': 2,
u'ppf': 2,
u'vtb': 2,
u'\xf6ve': 2,
u'\xf6va': 2,
u'#+b': 2,
u'#+u': 2,
u'#+t': 2,
u'a\xf8d': 2,
u'g#$': 2,
u'\xf6v\xe9': 2,
u'yut': 2,
u'm\xe1#': 2,
u'iax': 2,
u'm\xe1t': 2,
u'uy\xe8': 2,
u'l\xf3g': 2,
u'ia\xf8': 2,
u'g#\u013a': 2,
u'fcy': 2,
u'uyb': 2,
u'nlt': 2,
u'nlg': 2,
u'nl\xfc': 2,
u'yh\xe6': 2,
u'yh\xe5': 2,
u'\xad#k': 2,
u'\xad#i': 2,
u'\xad#f': 2,
u'\xad#c': 2,
u'\xad#p': 2,
u'wyj': 2,
u'w\xf6r': 2,
u'ehb': 2,
u's\u04d5t': 2,
u'#\xe5u': 2,
u'#\xe5p': 2,
u'tqu': 2,
u'vyb': 2,
u'su\xe1': 2,
u'vyj': 2,
u'#jt': 2,
u'rkw': 2,
u'\xf1ap': 2,
u'oy\xe9': 2,
u'\xf1al': 2,
u'\xf1ak': 2,
u'lh\xe9': 2,
u'o\xf8s': 2,
u'oyh': 2,
u'\u015flu': 2,
u'lhv': 2,
u'lhr': 2,
u'o\u0144s': 2,
u'e\xf8l': 2,
u'a\xf8s': 2,
u'wlp': 2,
u'wlu': 2,
u'vxn': 2,
u'vx#': 2,
u'#r\xe0': 2,
u'\xdf\xf8s': 2,
u'\xadii': 2,
u'vmt': 2,
u'\xe9#\xe0': 2,
u'\xe9#\xe5': 2,
u'\xe9#\xf8': 2,
u'rtq': 2,
u'a\u0161p': 2,
u'ro\xdf': 2,
u'fgm': 2,
u'\xf2#v': 2,
u'\xf2#n': 2,
u'\xf3is': 2,
u'\xf3ir': 2,
u'\u0151rs': 2,
u'\xf3id': 2,
u'qa#': 2,
u'\xfa\xf1e': 2,
u'#iq': 2,
u'i#\u0142': 2,
u'i#\u0163': 2,
u'i#\u0161': 2,
u'ctt': 2,
u'+++': 2,
u'xpi': 2,
u'ixb': 2,
u'\u010d#b': 2,
u'ixw': 2,
u'i#\xe0': 2,
u'r\xe4g': 2,
u'i#\xf3': 2,
u'l\xe4c': 2,
u'l\xe4b': 2,
u'exv': 2,
u'z\xe9c': 2,
u'z\xe9n': 2,
u'\xa3#t': 2,
u'ojs': 2,
u'bd\xe6': 2,
u'hk\xf8': 2,
u'bdk': 2,
u'bdr': 2,
u'hkp': 2,
u'si\u0107': 2,
u'hkm': 2,
u'ga\xf1': 2,
u'#\u010da': 2,
u'i\xe8c': 2,
u'siq': 2,
u'\xe4u#': 2,
u'ou\xeb': 2,
u'f\xf8p': 2,
u'f\xf8s': 2,
u'lt\xad': 2,
u'lt\xe1': 2,
u'\xf3#l': 2,
u'\xfazc': 2,
u'\xf3#p': 2,
u'#\xe1v': 2,
u'utg': 2,
u'whm': 2,
u's#\u03b9': 2,
u'dl\xe1': 2,
u'#vn': 2,
u'#vb': 2,
u'jo\xeb': 2,
u'\xe7\xfck': 2,
u'dlp': 2,
u'jox': 2,
u'joc': 2,
u'g+t': 2,
u'##\xe8': 2,
u'cxb': 2,
u'rpm': 2,
u'm\xe9i': 2,
u'm\xe9o': 2,
u'vtt': 2,
u'\u010dil': 2,
u'#v\u0119': 2,
u'\u010dia': 2,
u'cx#': 2,
u'\u03b9#f': 2,
u'fkn': 2,
u'\xe6es': 2,
u'fku': 2,
u'\xeddi': 2,
u'tcr': 2,
u'tck': 2,
u'a\xf8v': 2,
u'nd\xed': 2,
u'\xe1tc': 2,
u'\xe1tr': 2,
u'kdu': 2,
u'h\xe9z': 2,
u'vae': 2,
u'\xfcfu': 2,
u'\u0131#e': 2,
u'myu': 2,
u'##\u03bc': 2,
u'#m\xea': 2,
u'#m\xe3': 2,
u'i\u0173#': 2,
u'r\xe0#': 2,
u'xtb': 2,
u'r\xe0d': 2,
u'l\xe0i': 2,
u'auu': 2,
u'\xefle': 2,
u'e\u0161#': 2,
u'md\xe6': 2,
u'\xefl#': 2,
u'ne\xad': 2,
u'ds\xad': 2,
u'mdh': 2,
u'nr\xf3': 2,
u'xir': 2,
u'oqq': 2,
u'\xe8sl': 2,
u'lpm': 2,
u'zta': 2,
u'n\xe9l': 2,
u'\u015fi#': 2,
u'wtr': 2,
u'wth': 2,
u'gxb': 2,
u'jk\xe6': 2,
u'cdt': 2,
u'dhk': 2,
u'\xe4nn': 2,
u'=al': 2,
u'jkw': 2,
u'ol\xf3': 2,
u'um\xe1': 2,
u'fcf': 2,
u'uyf': 2,
u'\xf3as': 2,
u'kh\xe5': 2,
u'\u03c1\u03cc\u03b5': 2,
u'\xe1xd': 2,
u'khp': 2,
u'qid': 2,
u'khj': 2,
u'qiu': 2,
u'qis': 2,
u'nxt': 2,
u'ov\xe0': 2,
u'a\xefc': 2,
u'a\xefa': 2,
u'a\xefn': 2,
u'h\xf3r': 2,
u'e\u0142#': 2,
u'#a\xef': 2,
u'g\xe9m': 2,
u'#a\xe7': 2,
u'ip\xfa': 2,
u'ip\xf8': 2,
u'z\xe1r': 2,
u'dyo': 2,
u'\xe1c#': 2,
u'bl\xfc': 2,
u'sa\u0161': 2,
u'blm': 2,
u'hco': 2,
u'\xe2ne': 2,
u'k\xf8v': 2,
u'\xf5ik': 2,
u'v#\xad': 2,
u'lnm': 2,
u'saz': 2,
u'saq': 2,
u'v#+': 2,
u'bl\u0105': 2,
u'xml': 2,
u'c\xe0#': 2,
u'zpo': 2,
u'n\xede': 2,
u'n\xedb': 2,
u'n\xeda': 2,
u'wpe': 2,
u'b\xfcn': 2,
u'=in': 2,
u'ddn': 2,
u'ddb': 2,
u'acj': 2,
u'\xe6m\xe6': 2,
u'bn\xf8': 2,
u'y\xe8r': 2,
u'\xe6mr': 2,
u'\xe6me': 2,
u'\xe0n#': 2,
u'cpf': 2,
u'cpt': 2,
u'\u0153il': 2,
u'\xedl#': 2,
u'zm\xe3': 2,
u'\u0117#v': 2,
u'\u0117#n': 2,
u'\xfcni': 2,
u'\xf6kn': 2,
u'pvb': 2,
u'kls': 2,
u't\xadn': 2,
u'ty\u0142': 2,
u'yx#': 2,
u'n\u0163o': 2,
u'\xf8sd': 2,
u'dfc': 2,
u'itw': 2,
u'ft\xe5': 2,
u'ft\xe6': 2,
u'r\xe8z': 2,
u'it\xed': 2,
u'it\xe3': 2,
u'trg': 2,
u'#+e': 2,
u'g\xe5l': 2,
u'k\xfc\xe7': 2,
u'\xe1gy': 2,
u'\xe1gu': 2,
u'bhi': 2,
u'bhl': 2,
u'hgo': 2,
u'\xe8ek': 2,
u'o\u0144c': 2,
u'\xefda': 2,
u'pdp': 2,
u'mlp': 2,
u'\u013e#d': 2,
u'ctc': 2,
u'cth': 2,
u'd\xebr': 2,
u'\xe9ha': 2,
u'\xe9he': 2,
u'\u015fah': 2,
u'\u03b2#t': 2,
u'n#$': 2,
u'\u03b2#a': 2,
u'ag\xf8': 2,
u'jce': 2,
u'sx#': 2,
u'gvf': 2,
u'od\xeb': 2,
u'zi\u0107': 2,
u'\xe5ty': 2,
u'ctg': 2,
u'\xf2fo': 2,
u'ziv': 2,
u'zii': 2,
u'np\xe6': 2,
u't\xfcm': 2,
u'\u20ac#i': 2,
u't\xfcc': 2,
u'npc': 2,
u'vmg': 2,
u'#yl': 2,
u'kpc': 2,
u'kpn': 2,
u's\xe8l': 2,
u'ihs': 2,
u'ihh': 2,
u'ihn': 2,
u'ig\xe6': 2,
u'ck\xe9': 2,
u'\xe5op': 2,
u'w\xebn': 2,
u'et\xe0': 2,
u'btd': 2,
u'qta': 2,
u'bth': 2,
u'#ym': 2,
u'et+': 2,
u'a\xadm': 2,
u'a\xadt': 2,
u'\u03b7\u03c2#': 2,
u'g\xfct': 2,
u'\xf8co': 2,
u'mhn': 2,
u'mhl': 2,
u'dvb': 2,
u'c\xf8e': 2,
u'\xdf#\xe6': 2,
u'\xb5m#': 2,
u'mh\xed': 2,
u'\xebtb': 2,
u'\xfc\xe7\xfc': 2,
u'\xe1\xf4\xef': 2,
u't\xe3o': 2,
u'hpp': 2,
u'hpe': 2,
u'pth': 2,
u'ptg': 2,
u'ptc': 2,
u'\xe1rn': 2,
u'glm': 2,
u'la\xfe': 2,
u'\xe5ps': 2,
u'uab': 2,
u'zey': 2,
u'nt\xfa': 2,
u'rr\xea': 2,
u'nt\xed': 2,
u't\xf8l': 2,
u'uoi': 2,
u'ze\u015b': 2,
u'i\u0163a': 2,
u'al\xf3': 2,
u'\u03bf#o': 2,
u'\u03bf#e': 2,
u'm\xe4c': 2,
u'm\xe4r': 2,
u'd\xf3a': 2,
u'il\xe1': 2,
u'il\xf6': 2,
u'm\xe4\xdf': 2,
u'\xf3po': 2,
u'\xb0#s': 2,
u'\xb0#v': 2,
u'\xb0#i': 2,
u'th\xe4': 2,
u'\xe2rg': 2,
u'\xe5#\xb0': 2,
u'\u0435#h': 2,
u'yk\xf6': 2,
u'\xfcsl': 2,
u'pl\xfc': 2,
u'bpc': 2,
u'bph': 2,
u'gng': 2,
u'g\xf8v': 2,
u'g\xf8s': 2,
u'ykg': 2,
u'plr': 2,
u'plh': 2,
u'\xf8gp': 2,
u'd\xe8c': 2,
u'd\xe8s': 2,
u'd\xe8r': 2,
u'mtm': 2,
u'mtg': 2,
u'szm': 2,
u'z\xeel': 2,
u'hty': 2,
u'cwb': 2,
u'\xe8r#': 2,
u'sp\xf3': 2,
u'\xeach': 2,
u'gh\xe6': 2,
u'spv': 2,
u'spj': 2,
u'aoj': 2,
u'ghn': 2,
u'gbb': 2,
u'\u03b5#e': 2,
u'x#q': 2,
u'rlw': 2,
u'wcp': 2,
u'wcr': 2,
u'wch': 2,
u'wck': 2,
u'b\xedo': 2,
u'\u03b9#\u03c4': 2,
u'\u016bts': 2,
u'\u016bth': 2,
u'ytk': 2,
u'vua': 2,
u'vui': 2,
u'vuz': 2,
u'\xe6b\xe6': 2,
u'+li': 2,
u'\xe5go': 2,
u'uxu': 2,
u'\xe6by': 2,
u'uxn': 2,
u'k\xads': 2,
u'ccd': 2,
u'l\xfcc': 2,
u'l\xfcd': 2,
u'\u0161i\u010d': 2,
u'nm#': 2,
u'nmm': 2,
u'o\u010da': 2,
u'a\xe2l': 2,
u'h\xf8t': 2,
u'hnh': 2,
u'yod': 2,
u'\u0148a#': 2,
u'j\xe4g': 2,
u'j\xe4r': 2,
u'mp\xe6': 2,
u'mp\xe9': 2,
u'u\xe9m': 2,
u'u\xe9n': 2,
u'hhe': 2,
u'as\xfa': 2,
u'#\xe4r': 2,
u'p\u012bk': 2,
u'as\xed': 2,
u'#\u015fa': 2,
u'#\u015fi': 2,
u'li\xe7': 2,
u'dtw': 2,
u'st\xe8': 2,
u'tpc': 2,
u'oxv': 2,
u'wow': 2,
u'woi': 2,
u'\u0103#v': 2,
u'\u0103#s': 2,
u'\u0103#m': 2,
u'\u0103#i': 2,
u'\u0103#b': 2,
u'#u\u0161': 2,
u'gs\xad': 2,
u'l#\u03bd': 2,
u'#u\xe5': 2,
u'\u03b7#\u03bb': 2,
u'jpi': 2,
u'=xm': 2,
u'jpc': 2,
u'\xadhe': 2,
u'#uw': 2,
u'#ux': 2,
u'+pa': 2,
u'l#\xa3': 2,
u'id\xe5': 2,
u'cg#': 2,
u'id=': 2,
u'idw': 2,
u'\xf6mm': 2,
u'u\xdfb': 2,
u'na\xee': 2,
u'na\xf1': 2,
u'l\xf8p': 2,
u'z#\xf6': 2,
u'\u0434\u043e\u0431': 2,
u'fdd': 2,
u'l#\u0163': 2,
u'z#y': 2,
u'id\u017e': 2,
u'na+': 2,
u'a\xe6k': 2,
u'kcf': 2,
u'kcj': 2,
u'kcu': 2,
u'kcx': 2,
u'#h\xe1': 2,
u'flv': 2,
u'ycs': 2,
u'du\u0161': 2,
u'r\xe5f': 2,
u'n\u015bw': 2,
u'ki\u0173': 2,
u'#\u03c0\u03c1': 2,
u'u\xe5b': 2,
u'hl\xe5': 2,
u'snt': 2,
u'\xfei#': 2,
u'#\xf8u': 2,
u'd+m': 2,
u'#\xf8\xe5': 2,
u'pye': 2,
u'be\xef': 2,
u'lu\xdf': 2,
u'\u017eio': 2,
u'dpk': 2,
u'\u0105ck': 2,
u'\u03b1\u03b9#': 2,
u'uul': 2,
u'uua': 2,
u'wkk': 2,
u'wka': 2,
u'wkf': 2,
u'ot\xe9': 2,
u'ot\xe0': 2,
u'\u011b#d': 2,
u'zyz': 2,
u'jl\xf8': 2,
u'jl\xe6': 2,
u'ri\xfa': 2,
u'xlo': 2,
u'\xe9sm': 2,
u'e#\u010d': 2,
u'ie\u0142': 2,
u'ie\u0144': 2,
u'\xadta': 2,
u'\xadte': 2,
u'i\u015f#': 2,
u'+tr': 2,
u'dmj': 2,
u't\xec\u0161': 2,
u'm\xe8z': 2,
u'fh#': 2,
u'#\u03c4\u03b7': 2,
u'n#\u03b2': 2,
u'l\u0151#': 2,
u'fhl': 2,
u'upj': 2,
u'neq': 2,
u'\xedce': 2,
u'\xfcgl': 2,
u'\u017ceg': 2,
u'\u017cel': 2,
u'gpm': 2,
u'gph': 2,
u'\u03cc\u03b5\u03b4': 2,
u'r\xe1c': 2,
u'r\xe1l': 2,
u'r\xe1h': 2,
u'r\xe1r': 2,
u'l\xe1\u010d': 2,
u'is\xe9': 2,
u'ak\xf6': 2,
u'n#\u0159': 2,
u'n#\xba': 2,
u'l\xe1h': 2,
u'n#\xa3': 2,
u'l\xe1r': 2,
u'i\u0119t': 2,
u'\xe8n#': 2,
u'v\xf6l': 2,
u'a\u017ad': 2,
u'\xe4r#': 2,
u'mcb': 2,
u'mcr': 2,
u'slv': 2,
u'\xe4rt': 2,
u'\xe4rk': 2,
u'\xe4rb': 2,
u'\xe4re': 2,
u'lqi': 2,
u'sm\xe9': 2,
u'glp': 2,
u'\u0142\u0105c': 2,
u'sl\xe0': 2,
u'\xf4\xef#': 2,
u'wws': 2,
u'wwd': 2,
u'yl\xf8': 2,
u'wwb': 2,
u'wwk': 2,
u'ww#': 2,
u'zun': 2,
u's\u0142u': 2,
u'diw': 2,
u'g\u0117#': 2,
u'sm\xe4': 2,
u'jh\xe6': 2,
u'\u0142a#': 2,
u'ooa': 2,
u'ooo': 2,
u'ooh': 2,
u'\u0142ak': 2,
u'flr': 2,
u'flb': 2,
u'nyx': 2,
u'#\u043e\u0442': 2,
u'\xedor': 2,
u'\xedon': 2,
u'ul\xe9': 2,
u'fl\xe4': 2,
u'\xe4dd': 2,
u'kky': 2,
u'kkv': 2,
u'\u017ca#': 2,
u'\xf6d\xf6': 2,
u'ixh': 2,
u'kk\xe4': 2,
u'ixm': 2,
u'i\u0119#': 2,
u'r\xedb': 2,
u'r\xedd': 2,
u'g\xe8r': 2,
u'#t\xea': 2,
u'tn\xe4': 2,
u'\xe1br': 2,
u'k\u0151#': 2,
u'l\xedb': 2,
u'\xe6sd': 2,
u'd#\u017e': 2,
u'pa\u0142': 2,
u'pa\u017a': 2,
u'pa\xf8': 2,
u'\xe2o#': 2,
u'bmg': 2,
u'bmd': 2,
u'bms': 2,
u'a\u017ee': 2,
u'hdm': 2,
u'hdk': 2,
u'grp': 2,
u'g\xa2r': 2,
u'moq': 2,
u'#\u03b5\u03c5': 2,
u'\xa3#s': 2,
u'\xa3#f': 2,
u'\xe5ba': 2,
u'ab\xe9': 2,
u't\xe4n': 2,
u'\xe9ku': 2,
u'\xe9ka': 2,
u'i\u010do': 2,
u'hye': 2,
u'hyo': 2,
u'ggb': 2,
u'\xe9k\xe9': 2,
u'ggn': 2,
u's\xadi': 2,
u's\xadc': 2,
u'cs\xe9': 2,
u'de=': 2,
u'jdz': 2,
u'jdp': 2,
u'jdr': 2,
u'de\xf1': 2,
u'de\xe1': 2,
u'uh#': 2,
u'e\xe6s': 2,
u'e\xe6l': 2,
u'okw': 2,
u'uhl': 2,
u'uhh': 2,
u'\xedks': 2,
u'#d\xb4': 2,
u'rrd': 2,
u'#d\xf6': 2,
u'a\u015fb': 2,
u'#d\xf3': 2,
u'#d\xfa': 2,
u'vnr': 2,
u'r\xe9r': 2,
u'ikj': 2,
u'\u0142#z': 2,
u'l\xe9d': 2,
u'l\xe9a': 2,
u'\xe1fa': 2,
u'l\xe9k': 2,
u'fub': 2,
u'ewk': 2,
u'ik\u0161': 2,
u'ik\u0151': 2,
u'pe\xf1': 2,
u'\xe0di': 2,
u'ly\xe5': 2,
u'usw': 2,
u'mkn': 2,
u'mkt': 2,
u'sdj': 2,
u'g\xf3r': 2,
u'jzi': 2,
u'lyz': 2,
u'\u0148#m': 2,
u'\u017eer': 2,
u'\u017eel': 2,
u'\u017eei': 2,
u'xba': 2,
u'\xe6gk': 2,
u'e=r': 2,
u'e=x': 2,
u'e=a': 2,
u'e=c': 2,
u'e=o': 2,
u'n\xe6i': 2,
u'af\xed': 2,
u'h\xa2j': 2,
u'\xba#n': 2,
u'\xe9od': 2,
u'p\xf3\u0142': 2,
u'gca': 2,
u'gcr': 2,
u'afz': 2,
u'dgt': 2,
u'lf\xf6': 2,
u'f#\xb0': 2,
u'ar\xe7': 2,
u'l\u0105g': 2,
u're\xad': 2,
u'lfd': 2,
u'm\xfcs': 2,
u're+': 2,
u'f#\u0163': 2,
u'og\xf8': 2,
u'nq#': 2,
u'zfr': 2,
u'\xfcmi': 2,
u'ys\u0142': 2,
u'ka\xef': 2,
u'\xfcmd': 2,
u'\xadca': 2,
u'#xt': 2,
u'vru': 2,
u'#xk': 2,
u'\xe5k\xe6': 2,
u'ysv': 2,
u'cje': 2,
u'cju': 2,
u'ioq': 2,
u'cj#': 2,
u'z\xf6n': 2,
u'fya': 2,
u'fyf': 2,
u'\xfd#t': 2,
u'r\xeb#': 2,
u'\xf3\u017ca': 2,
u'bua': 2,
u'#\u0159\xed': 2,
u'vfb': 2,
u'vcr': 2,
u'mwi': 2,
u'\xf8d\xe5': 2,
u'\u0107#g': 2,
u'\u0107#k': 2,
u'\u0107#p': 2,
u'\u0107#r': 2,
u'\u0107#u': 2,
u'\u0107#t': 2,
u'gnc': 2,
u'f\xe9b': 2,
u'f\xe9d': 2,
u'aih': 2,
u'n\xfa\xf1': 2,
u'aj\xf3': 2,
u'\xe9cs': 2,
u'\xe9cr': 2,
u'\xe9ct': 2,
u'l#$': 2,
u'wsl': 2,
u'wsn': 2,
u'ajt': 2,
u'a\xe7o': 2,
u'xai': 2,
u'ra\xee': 2,
u'ra\xeb': 2,
u'lbd': 2,
u'\xec\u0161\xed': 2,
u'e\xfeu': 2,
u'nu\xdf': 2,
u'ocj': 2,
u'\u0142ub': 2,
u'ocw': 2,
u'oc\xe8': 2,
u'\xedsa': 2,
u'kwf': 2,
u'wfi': 2,
u'wfa': 2,
u'wfs': 2,
u's\xadp': 2,
u'm=#': 2,
u'cn\xe6': 2,
u'ic\xf3': 2,
u'\xfcch': 2,
u'm=i': 2,
u'm=c': 2,
u't\xf3t': 2,
u'ywh': 2,
u'ti\u0107': 2,
u'nn\xe8': 2,
u'\xe1nu': 2,
u'nnj': 2,
u'thg': 2,
u'bq#': 2,
u'pmv': 2,
u'pmf': 2,
u'pmb': 2,
u'pmc': 2,
u'pmm': 2,
u'pmk': 2,
u'jdy': 2,
u'lmq': 2,
u'f\xeds': 2,
u'f\xeda': 2,
u'd\xe9o': 2,
u'd\xe9h': 2,
u'd\xe9z': 2,
u'z\xefl': 2,
u'ryv': 2,
u'ryw': 2,
u'e\xdfu': 2,
u'pr\u0119': 2,
u'an\xe1': 2,
u'pr\xfa': 2,
u'\xe8sa': 2,
u'huz': 2,
u'#v\xf8': 2,
u'rm\xe4': 2,
u'ssx': 2,
u'rmw': 2,
u'f\xf6l': 2,
u'ss\xe8': 2,
u'ss\xe5': 2,
u'=en': 2,
u'\u0119de': 2,
u'\u03b1so': 2,
u'\xf6t\xe9': 2,
u'#p\xf4': 2,
u'wbl': 2,
u'\u03bfs#': 2,
u'gvs': 2,
u'ig\u0117': 2,
u'h\xe4r': 2,
u'h\xe4k': 2,
u'a\xfei': 2,
u'\xe0in': 2,
u'rv\xe4': 2,
u'\u012bks': 2,
u'p\xe1r': 2,
u'ig\xe9': 2,
u'\xadko': 2,
u'\xe1rr': 2,
u'te\xe0': 2,
u'fa+': 2,
u'\xe6cu': 2,
u'nbs': 2,
u'\xf3wo': 2,
u'uw\xe9': 2,
u'r#\xe2': 2,
u'sbn': 2,
u'r#\u015f': 2,
u'jvc': 2,
u'\xfala': 2,
u'\xe6eu': 2,
u'ynf': 2,
u'ynv': 2,
u'izd': 2,
u'c\xf3n': 2,
u'ef\xfc': 2,
u'c\xf3#': 2,
u'r#\u03b5': 2,
u'ar\xe3': 2,
u'ar\xe6': 2,
u'ar\xe8': 2,
u'bfk': 2,
u'bfj': 2,
u'bfn': 2,
u'ri\u0146': 2,
u'ar=': 2,
u'a\xdfn': 2,
u'i\xeat': 2,
u'ri\xe9': 2,
u'duu': 2,
u'p+f': 2,
u'p+h': 2,
u'ac\u0103': 2,
u'riw': 2,
u'uzm': 2,
u'lj\xf6': 2,
u're\xfe': 2,
u'#t\xf5': 2,
u'gr\xfc': 2,
u'gr\xe1': 2,
u'#t\xec': 2,
u'#t\xed': 2,
u'gr\xe8': 2,
u'wnf': 2,
u'wnd': 2,
u'wnh': 2,
u'wnm': 2,
u'wnr': 2,
u'wnv': 2,
u'rr\xed': 2,
u'rr\xeb': 2,
u'rr\xfc': 2,
u'rrm': 2,
u'rrg': 2,
u'ta\u0144': 2,
u'+s#': 2,
u'\u010dka': 2,
u'\u010dko': 2,
u'ibv': 2,
u'ibw': 2,
u'ta\xe5': 2,
u'\xe1vi': 2,
u'\xe1va': 2,
u'\xe8#r': 2,
u'ta\xad': 2,
u'kbc': 2,
u'#k\xf5': 2,
u'kb#': 2,
u'kb\xe5': 2,
u'vcg': 2,
u's\xfa#': 2,
u'\xfcdb': 2,
u'h#\xe6': 2,
u'ybj': 2,
u'ybl': 2,
u'\xf8pu': 2,
u'j\u010de': 2,
u'd\xe1t': 2,
u'#k\u0119': 2,
u'l\xe6f': 2,
u'l\xe6a': 2,
u'l\xe6p': 2,
u'e\xadt': 2,
u'n\xf6m': 2,
u'av\u010d': 2,
u'bb\xe9': 2,
u'mft': 2,
u'owk': 2,
u'cfm': 2,
u'lv\xe1': 2,
})
|
dmort27/pylid
|
pylid/langs/da.py
|
Python
|
mit
| 309,318
|
[
"ADF",
"ASE",
"BWA",
"CDK",
"EPW",
"Elk",
"MOE",
"VMD",
"VTK",
"xTB"
] |
89d7577d7d94a151f654b4b9df086dcaaf18d2447fdaef69bcf4c25e756f660b
|
# Author: Ivan E. Cao-Berg (icaoberg@scs.cmu.edu)
#
# Copyright (C) 2011-2014 Murphy Lab
# Lane Center for Computational Biology
# School of Computer Science
# Carnegie Mellon University
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
# For additional information visit http://murphylab.web.cmu.edu or
# send email to murphy@cmu.edu
import os
from setuptools import setup
# Load the current version number of the package: the VERSION file is
# executed in this namespace and is expected to define ``__version__``,
# which is passed to setup() below.  A ``with`` block guarantees the file
# handle is closed (the original left it to the garbage collector).
with open('VERSION') as _version_file:
    exec(compile(_version_file.read(), 'VERSION', 'exec'))
def read(fname):
    """Return the contents of *fname* resolved relative to this file's directory.

    Used by setup() to load README as the long description.  The ``with``
    block guarantees the file handle is closed even if reading raises
    (the original implementation never closed the handle).
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as handle:
        return handle.read()
# Register the package with setuptools.  The metadata below is what is
# published on the package index; ``install_requires`` pins the minimum
# scientific stack the image-search code needs.
setup(name = 'ricerca',
      # __version__ is defined by executing the VERSION file above.
      version = __version__,
      description = ('Content-based image search'),
      long_description=read('README'),
      author = 'Ivan Cao-Berg',
      author_email = 'icaoberg@andrew.cmu.edu',
      install_requires = [
          'numpy>=1.4.1',
          'scipy>=0.7.2'
      ],
      url = 'http://murphylab.web.cmu.edu/software/ricerca',
      classifiers=[
          'Programming Language :: Python',
          'Intended Audience :: Science/Research',
          'Intended Audience :: Developers',
          'Topic :: Scientific/Engineering',
          'Topic :: Database',
          'License :: OSI Approved :: GNU General Public License (GPL)',
          'Operating System :: OS Independent',
          'Programming Language :: Python'],
      # Only the content module is shipped; no full package directory.
      py_modules=['ricerca.content'])
|
icaoberg/ricerca
|
setup.py
|
Python
|
gpl-3.0
| 2,023
|
[
"VisIt"
] |
c76b126929fcce71bccb1265852a51f976f9a375b06939edd092239c913b54dd
|
# TODO: Determine which tests are valid for GLSAR, and under what conditions
# TODO: Fix issue with constant and GLS
# TODO: GLS: add options Iterative GLS, for iterative fgls if sigma is None
# TODO: GLS: default if sigma is none should be two-step GLS
# TODO: Check nesting when performing model based tests, lr, wald, lm
"""
This module implements standard regression models:
Generalized Least Squares (GLS)
Ordinary Least Squares (OLS)
Weighted Least Squares (WLS)
Generalized Least Squares with autoregressive error terms GLSAR(p)
Models are specified with an endogenous response variable and an
exogenous design matrix and are fit using their `fit` method.
Subclasses that have more complicated covariance matrices
should write over the 'whiten' method as the fit method
prewhitens the response by calling 'whiten'.
General reference for regression models:
D. C. Montgomery and E.A. Peck. "Introduction to Linear Regression
Analysis." 2nd. Ed., Wiley, 1992.
Econometrics references for regression models:
R. Davidson and J.G. MacKinnon. "Econometric Theory and Methods," Oxford,
2004.
W. Green. "Econometric Analysis," 5th ed., Pearson, 2003.
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, lzip, range
__docformat__ = 'restructuredtext en'
__all__ = ['GLS', 'WLS', 'OLS', 'GLSAR']
import numpy as np
from scipy.linalg import toeplitz
from scipy import stats
from scipy import optimize
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.tools.tools import add_constant, chain_dot, pinv_extended
from statsmodels.tools.decorators import (resettable_cache,
cache_readonly,
cache_writable)
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.emplike.elregress import _ELRegOpts
import warnings
from statsmodels.tools.sm_exceptions import InvalidTestWarning
# need import in module instead of lazily to copy `__doc__`
from . import _prediction as pred
def _get_sigma(sigma, nobs):
    """
    Normalize the GLS error-covariance specification.

    Accepts ``None``, a scalar, a length-``nobs`` vector, or an
    ``nobs`` x ``nobs`` matrix and returns the pair
    ``(sigma, cholsigmainv)`` where ``cholsigmainv`` whitens the data
    (the transposed Cholesky factor of the pseudo-inverse of ``sigma``
    in the 2d case, elementwise ``1/sqrt(sigma)`` in the 1d case).

    Parameters
    ----------
    sigma : None, scalar or array-like
        Error covariance specification.
    nobs : int
        Number of observations; used for expansion and shape validation.

    Returns
    -------
    (sigma, cholsigmainv) : tuple
        Both ``None`` when ``sigma`` is ``None``.

    Raises
    ------
    ValueError
        If ``sigma`` is neither scalar, length-``nobs``, nor
        ``nobs`` x ``nobs``.
    """
    if sigma is None:
        return None, None

    # One message serves both shape checks below.
    bad_shape_msg = ("Sigma must be a scalar, 1d of length %s or a 2d "
                     "array of shape %s x %s" % (nobs, nobs, nobs))

    sigma = np.asarray(sigma).squeeze()
    # A scalar means a homoskedastic diagonal: expand it to a vector.
    if sigma.ndim == 0:
        sigma = np.repeat(sigma, nobs)

    if sigma.ndim == 1:
        if sigma.shape != (nobs,):
            raise ValueError(bad_shape_msg)
        # Diagonal covariance: whitening weights are elementwise.
        cholsigmainv = 1 / np.sqrt(sigma)
    else:
        if sigma.shape != (nobs, nobs):
            raise ValueError(bad_shape_msg)
        cholsigmainv = np.linalg.cholesky(np.linalg.pinv(sigma)).T

    return sigma, cholsigmainv
class RegressionModel(base.LikelihoodModel):
    """
    Base class for linear regression models. Should not be directly called.
    Intended for subclassing.
    """
    def __init__(self, endog, exog, **kwargs):
        super(RegressionModel, self).__init__(endog, exog, **kwargs)
        # These arrays are derived from the data and must be treated as
        # data attributes (e.g. dropped when results are pickled).
        self._data_attr.extend(['pinv_wexog', 'wendog', 'wexog', 'weights'])

    def initialize(self):
        # Whiten the design and response; `whiten` is defined by subclasses.
        self.wexog = self.whiten(self.exog)
        self.wendog = self.whiten(self.endog)
        # overwrite nobs from class Model:
        self.nobs = float(self.wexog.shape[0])
        # Computed lazily by the df_model/df_resid properties below.
        self._df_model = None
        self._df_resid = None
        self.rank = None

    @property
    def df_model(self):
        """
        The model degree of freedom, defined as the rank of the regressor
        matrix minus 1 if a constant is included.
        """
        if self._df_model is None:
            if self.rank is None:
                self.rank = np_matrix_rank(self.exog)
            self._df_model = float(self.rank - self.k_constant)
        return self._df_model

    @df_model.setter
    def df_model(self, value):
        self._df_model = value

    @property
    def df_resid(self):
        """
        The residual degree of freedom, defined as the number of observations
        minus the rank of the regressor matrix.
        """
        if self._df_resid is None:
            if self.rank is None:
                self.rank = np_matrix_rank(self.exog)
            self._df_resid = self.nobs - self.rank
        return self._df_resid

    @df_resid.setter
    def df_resid(self, value):
        self._df_resid = value

    def whiten(self, X):
        # Subclass hook: transform X so that OLS on the whitened data is
        # equivalent to the subclass's estimator on the raw data.
        raise NotImplementedError("Subclasses should implement.")

    def fit(self, method="pinv", cov_type='nonrobust', cov_kwds=None,
            use_t=None, **kwargs):
        """
        Full fit of the model.

        The results include an estimate of covariance matrix, (whitened)
        residuals and an estimate of scale.

        Parameters
        ----------
        method : str, optional
            Can be "pinv", "qr". "pinv" uses the Moore-Penrose pseudoinverse
            to solve the least squares problem. "qr" uses the QR
            factorization.
        cov_type : str, optional
            See `regression.linear_model.RegressionResults` for a description
            of the available covariance estimators
        cov_kwds : list or None, optional
            See `linear_model.RegressionResults.get_robustcov_results` for a
            description required keywords for alternative covariance estimators
        use_t : bool, optional
            Flag indicating to use the Student's t distribution when computing
            p-values. Default behavior depends on cov_type. See
            `linear_model.RegressionResults.get_robustcov_results` for
            implementation details.

        Returns
        -------
        A RegressionResults class instance.

        See Also
        ---------
        regression.linear_model.RegressionResults
        regression.linear_model.RegressionResults.get_robustcov_results

        Notes
        -----
        The fit method uses the pseudoinverse of the design/exogenous variables
        to solve the least squares minimization.
        """
        if method == "pinv":
            # Expensive decomposition products are cached on the model so a
            # refit with different cov options does not recompute them.
            if ((not hasattr(self, 'pinv_wexog')) or
                    (not hasattr(self, 'normalized_cov_params')) or
                    (not hasattr(self, 'rank'))):
                self.pinv_wexog, singular_values = pinv_extended(self.wexog)
                self.normalized_cov_params = np.dot(self.pinv_wexog,
                                                    np.transpose(self.pinv_wexog))
                # Cache these singular values for use later.
                self.wexog_singular_values = singular_values
                self.rank = np_matrix_rank(np.diag(singular_values))
            beta = np.dot(self.pinv_wexog, self.wendog)
        elif method == "qr":
            if ((not hasattr(self, 'exog_Q')) or
                    (not hasattr(self, 'exog_R')) or
                    (not hasattr(self, 'normalized_cov_params')) or
                    (getattr(self, 'rank', None) is None)):
                Q, R = np.linalg.qr(self.wexog)
                self.exog_Q, self.exog_R = Q, R
                self.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))
                # Cache singular values from R.
                self.wexog_singular_values = np.linalg.svd(R, 0, 0)
                self.rank = np_matrix_rank(R)
            else:
                Q, R = self.exog_Q, self.exog_R
            # used in ANOVA
            self.effects = effects = np.dot(Q.T, self.wendog)
            beta = np.linalg.solve(R, effects)

        if self._df_model is None:
            self._df_model = float(self.rank - self.k_constant)
        if self._df_resid is None:
            self.df_resid = self.nobs - self.rank

        # OLS gets its specialized results class; everything else shares
        # the generic RegressionResults.
        if isinstance(self, OLS):
            lfit = OLSResults(self, beta,
                              normalized_cov_params=self.normalized_cov_params,
                              cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t)
        else:
            lfit = RegressionResults(self, beta,
                                     normalized_cov_params=self.normalized_cov_params,
                                     cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t)
        return RegressionResultsWrapper(lfit)

    def fit_regularized(self, method="coord_descent", maxiter=1000,
                        alpha=0., L1_wt=1., start_params=None,
                        cnvrg_tol=1e-8, zero_tol=1e-8, **kwargs):
        """
        Return a regularized fit to a linear regression model.

        Parameters
        ----------
        method : string
            Only the coordinate descent algorithm is implemented.
        maxiter : integer
            The maximum number of iteration cycles (an iteration cycle
            involves running coordinate descent on all variables).
        alpha : scalar or array-like
            The penalty weight. If a scalar, the same penalty weight
            applies to all variables in the model. If a vector, it
            must have the same length as `params`, and contains a
            penalty weight for each coefficient.
        L1_wt : scalar
            The fraction of the penalty given to the L1 penalty term.
            Must be between 0 and 1 (inclusive). If 0, the fit is
            ridge regression. If 1, the fit is the lasso.
        start_params : array-like
            Starting values for ``params``.
        cnvrg_tol : scalar
            If ``params`` changes by less than this amount (in sup-norm)
            in once iteration cycle, the algorithm terminates with
            convergence.
        zero_tol : scalar
            Any estimated coefficient smaller than this value is
            replaced with zero.

        Returns
        -------
        A RegressionResults object, of the same type returned by
        ``fit``.

        Notes
        -----
        The approach closely follows that implemented in the glmnet
        package in R. The penalty is the "elastic net" penalty, which
        is a convex combination of L1 and L2 penalties.

        The function that is minimized is: ..math::

            0.5*RSS/n + alpha*((1-L1_wt)*|params|_2^2/2 + L1_wt*|params|_1)

        where RSS is the usual regression sum of squares, n is the
        sample size, and :math:`|*|_1` and :math:`|*|_2` are the L1 and L2
        norms.

        Post-estimation results are based on the same data used to
        select variables, hence may be subject to overfitting biases.

        References
        ----------
        Friedman, Hastie, Tibshirani (2008). Regularization paths for
        generalized linear models via coordinate descent. Journal of
        Statistical Software 33(1), 1-22 Feb 2010.
        """
        k_exog = self.wexog.shape[1]

        # Broadcast a scalar penalty to one weight per coefficient.
        if np.isscalar(alpha):
            alpha = alpha * np.ones(k_exog, dtype=np.float64)

        # Below we work with RSS + penalty, so we need to rescale.
        alpha *= 2 * self.wexog.shape[0]

        if start_params is None:
            params = np.zeros(k_exog, dtype=np.float64)
        else:
            params = start_params.copy()

        converged = False
        # Per-coefficient curvature 2*sum(x_k^2); constant across cycles.
        xxprod = 2*(self.wexog**2).sum(0)

        # Coordinate descent
        for itr in range(maxiter):
            params_save = params.copy()
            for k in range(self.wexog.shape[1]):
                # Partial residual with coefficient k removed.
                params[k] = 0.
                wendog_adj = self.wendog - np.dot(self.wexog, params)
                xyprod = 2*np.dot(self.wexog[:,k], wendog_adj)
                den = xxprod[k] + alpha[k] * (1 - L1_wt)
                a = alpha[k] * L1_wt
                # Soft-threshold update for the elastic net.
                if a >= np.abs(xyprod):
                    params[k] = 0.
                elif xyprod > 0:
                    params[k] = (xyprod - a) / den
                else:
                    params[k] = (xyprod + a) / den

            # Check for convergence
            pchange = np.max(np.abs(params - params_save))
            if pchange < cnvrg_tol:
                converged = True
                break

        # Set approximate zero coefficients to be exactly zero
        params *= np.abs(params) >= zero_tol

        # Fit the reduced model to get standard errors and other
        # post-estimation results.
        ii = np.flatnonzero(params)
        cov = np.zeros((k_exog, k_exog), dtype=np.float64)
        if len(ii) > 0:
            model = self.__class__(self.wendog, self.wexog[:,ii])
            rslt = model.fit()
            cov[np.ix_(ii, ii)] = rslt.normalized_cov_params

        lfit = RegressionResults(self, params,
                                 normalized_cov_params=cov)
        lfit.converged = converged
        return RegressionResultsWrapper(lfit)

    def predict(self, params, exog=None):
        """
        Return linear predicted values from a design matrix.

        Parameters
        ----------
        params : array-like
            Parameters of a linear model
        exog : array-like, optional.
            Design / exogenous data. Model exog is used if None.

        Returns
        -------
        An array of fitted values

        Notes
        -----
        If the model has not yet been fit, params is not optional.
        """
        #JP: this doesn't look correct for GLMAR
        #SS: it needs its own predict method
        if exog is None:
            exog = self.exog
        return np.dot(exog, params)
class GLS(RegressionModel):
__doc__ = """
Generalized least squares model with a general covariance structure.
%(params)s
sigma : scalar or array
`sigma` is the weighting matrix of the covariance.
The default is None for no scaling. If `sigma` is a scalar, it is
assumed that `sigma` is an n x n diagonal matrix with the given
scalar, `sigma` as the value of each diagonal element. If `sigma`
is an n-length vector, then `sigma` is assumed to be a diagonal
matrix with the given `sigma` on the diagonal. This should be the
same as WLS.
%(extra_params)s
**Attributes**
pinv_wexog : array
`pinv_wexog` is the p x n Moore-Penrose pseudoinverse of `wexog`.
cholsimgainv : array
The transpose of the Cholesky decomposition of the pseudoinverse.
df_model : float
p - 1, where p is the number of regressors including the intercept.
of freedom.
df_resid : float
Number of observations n less the number of parameters p.
llf : float
The value of the likelihood function of the fitted model.
nobs : float
The number of observations n.
normalized_cov_params : array
p x p array :math:`(X^{T}\Sigma^{-1}X)^{-1}`
results : RegressionResults instance
A property that returns the RegressionResults class if fit.
sigma : array
`sigma` is the n x n covariance structure of the error terms.
wexog : array
Design matrix whitened by `cholsigmainv`
wendog : array
Response variable whitened by `cholsigmainv`
Notes
-----
If sigma is a function of the data making one of the regressors
a constant, then the current postestimation statistics will not be correct.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> ols_resid = sm.OLS(data.endog, data.exog).fit().resid
>>> res_fit = sm.OLS(ols_resid[1:], ols_resid[:-1]).fit()
>>> rho = res_fit.params
`rho` is a consistent estimator of the correlation of the residuals from
an OLS fit of the longley data. It is assumed that this is the true rho
of the AR process data.
>>> from scipy.linalg import toeplitz
>>> order = toeplitz(np.arange(16))
>>> sigma = rho**order
`sigma` is an n x n matrix of the autocorrelation structure of the
data.
>>> gls_model = sm.GLS(data.endog, data.exog, sigma=sigma)
>>> gls_results = gls_model.fit()
>>> print(gls_results.summary()))
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog, sigma=None, missing='none', hasconst=None,
**kwargs):
#TODO: add options igls, for iterative fgls if sigma is None
#TODO: default if sigma is none should be two-step GLS
sigma, cholsigmainv = _get_sigma(sigma, len(endog))
super(GLS, self).__init__(endog, exog, missing=missing,
hasconst=hasconst, sigma=sigma,
cholsigmainv=cholsigmainv, **kwargs)
#store attribute names for data arrays
self._data_attr.extend(['sigma', 'cholsigmainv'])
def whiten(self, X):
"""
GLS whiten method.
Parameters
-----------
X : array-like
Data to be whitened.
Returns
-------
np.dot(cholsigmainv,X)
See Also
--------
regression.GLS
"""
X = np.asarray(X)
if self.sigma is None or self.sigma.shape == ():
return X
elif self.sigma.ndim == 1:
if X.ndim == 1:
return X * self.cholsigmainv
else:
return X * self.cholsigmainv[:, None]
else:
return np.dot(self.cholsigmainv, X)
def loglike(self, params):
    """
    Returns the value of the Gaussian log-likelihood function at params.

    Given the whitened design matrix, the log-likelihood is evaluated
    at the parameter vector `params` for the dependent variable `endog`.

    Parameters
    ----------
    params : array-like
        The parameter estimates

    Returns
    -------
    loglike : float
        The value of the log-likelihood function for a GLS Model.

    Notes
    -----
    The log-likelihood function for the normal distribution is

    .. math:: -\\frac{n}{2}\\log\\left(\\left(Y-\\hat{Y}\\right)^{\\prime}\\left(Y-\\hat{Y}\\right)\\right)-\\frac{n}{2}\\left(1+\\log\\left(\\frac{2\\pi}{n}\\right)\\right)-\\frac{1}{2}\\log\\left(\\left|\\Sigma\\right|\\right)

    Y and Y-hat are whitened.
    """
    #TODO: combine this with OLS/WLS loglike and add _det_sigma argument
    nobs2 = self.nobs / 2.0
    # sum of squared whitened residuals
    SSR = np.sum((self.wendog - np.dot(self.wexog, params))**2, axis=0)
    llf = -np.log(SSR) * nobs2      # concentrated likelihood
    llf -= (1+np.log(np.pi/nobs2))*nobs2  # with likelihood constant
    # When sigma is present, add the log-determinant correction for the
    # error covariance; slogdet avoids overflow for large matrices.
    if np.any(self.sigma):
        #FIXME: robust-enough check? unneeded if _det_sigma gets defined
        if self.sigma.ndim==2:
            det = np.linalg.slogdet(self.sigma)
            llf -= .5*det[1]
        else:
            # diagonal sigma stored as a 1-d array
            llf -= 0.5*np.sum(np.log(self.sigma))
        # with error covariance matrix
    return llf
class WLS(RegressionModel):
    __doc__ = """
    A regression model with diagonal but non-identity covariance structure.

    The weights are presumed to be (proportional to) the inverse of the
    variance of the observations.  That is, if the variables are to be
    transformed by 1/sqrt(W) you must supply weights = 1/W.

    %(params)s
    weights : array-like, optional
        1d array of weights.  If you supply 1/W then the variables are pre-
        multiplied by 1/sqrt(W).  If no weights are supplied the default value
        is 1 and WLS results are the same as OLS.
    %(extra_params)s

    Attributes
    ----------
    weights : array
        The stored weights supplied as an argument.

    See regression.GLS

    Examples
    ---------
    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> Y = [1,3,4,5,2,3,4]
    >>> X = range(1,8)
    >>> X = sm.add_constant(X)
    >>> wls_model = sm.WLS(Y,X, weights=list(range(1,8)))
    >>> results = wls_model.fit()
    >>> results.params
    array([ 2.91666667,  0.0952381 ])
    >>> results.tvalues
    array([ 2.0652652 ,  0.35684428])
    >>> print(results.t_test([1, 0]))
    <T test: effect=array([ 2.91666667]), sd=array([[ 1.41224801]]), t=array([[ 2.0652652]]), p=array([[ 0.04690139]]), df_denom=5>
    >>> print(results.f_test([0, 1]))
    <F test: F=array([[ 0.12733784]]), p=[[ 0.73577409]], df_denom=5, df_num=1>

    Notes
    -----
    If the weights are a function of the data, then the post estimation
    statistics such as fvalue and mse_model might not be correct, as the
    package does not yet support no-constant regression.
    """ % {'params' : base._model_params_doc,
           'extra_params' : base._missing_param_doc + base._extra_param_doc}

    def __init__(self, endog, exog, weights=1., missing='none', hasconst=None,
                 **kwargs):
        """Validate/broadcast `weights` and delegate to RegressionModel."""
        weights = np.array(weights)
        if weights.shape == ():
            # Scalar weight: broadcast to the (possibly patsy-truncated)
            # length of endog.
            if (missing == 'drop' and 'missing_idx' in kwargs and
                    kwargs['missing_idx'] is not None):
                # patsy may have truncated endog
                weights = np.repeat(weights, len(kwargs['missing_idx']))
            else:
                weights = np.repeat(weights, len(endog))
        # handle case that endog might be of len == 1
        if len(weights) == 1:
            weights = np.array([weights.squeeze()])
        else:
            weights = weights.squeeze()
        super(WLS, self).__init__(endog, exog, missing=missing,
                                  weights=weights, hasconst=hasconst, **kwargs)
        nobs = self.exog.shape[0]
        weights = self.weights
        # Experimental normalization of weights.
        # NOTE(review): this normalized value is local only and is never
        # stored back on self; it feeds only the shape check below -- confirm
        # whether storing it was intended.
        weights = weights / np.sum(weights) * nobs
        if weights.size != nobs and weights.shape[0] != nobs:
            raise ValueError('Weights must be scalar or same length as design')

    def whiten(self, X):
        """
        Whitener for WLS model, multiplies each column by sqrt(self.weights)

        Parameters
        ----------
        X : array-like
            Data to be whitened

        Returns
        -------
        sqrt(weights)*X

        Raises
        ------
        ValueError
            If `X` has more than two dimensions.
        """
        X = np.asarray(X)
        if X.ndim == 1:
            return X * np.sqrt(self.weights)
        elif X.ndim == 2:
            return np.sqrt(self.weights)[:, None]*X
        else:
            # Previously this fell through and silently returned None,
            # producing confusing downstream failures.
            raise ValueError('X must be 1d or 2d')

    def loglike(self, params):
        """
        Returns the value of the gaussian log-likelihood function at params.

        Given the whitened design matrix, the log-likelihood is evaluated
        at the parameter vector `params` for the dependent variable `Y`.

        Parameters
        ----------
        params : array-like
            The parameter estimates.

        Returns
        -------
        llf : float
            The value of the log-likelihood function for a WLS Model.

        Notes
        --------
        .. math:: -\\frac{n}{2}\\log\\left(\\left(Y-\\hat{Y}\\right)^{\\prime}\\left(Y-\\hat{Y}\\right)\\right)-\\frac{n}{2}\\left(1+\\log\\left(\\frac{2\\pi}{n}\\right)\\right)+\\frac{1}{2}\\log\\left(\\left|W\\right|\\right)

        where :math:`W` is a diagonal weight matrix.  The sign of the
        weight term matches the code below, which *adds*
        ``0.5 * sum(log(weights))``.
        """
        nobs2 = self.nobs / 2.0
        SSR = np.sum((self.wendog - np.dot(self.wexog, params))**2, axis=0)
        llf = -np.log(SSR) * nobs2      # concentrated likelihood
        llf -= (1+np.log(np.pi/nobs2))*nobs2  # with constant
        llf += 0.5 * np.sum(np.log(self.weights))
        return llf
class OLS(WLS):
    __doc__ = """
    A simple ordinary least squares model.

    %(params)s
    %(extra_params)s

    Attributes
    ----------
    weights : scalar
        Has an attribute weights = array(1.0) due to inheritance from WLS.

    See Also
    --------
    GLS

    Examples
    --------
    >>> import numpy as np
    >>>
    >>> import statsmodels.api as sm
    >>>
    >>> Y = [1,3,4,5,2,3,4]
    >>> X = range(1,8)
    >>> X = sm.add_constant(X)
    >>>
    >>> model = sm.OLS(Y,X)
    >>> results = model.fit()
    >>> results.params
    array([ 2.14285714,  0.25      ])
    >>> results.tvalues
    array([ 1.87867287,  0.98019606])
    >>> print(results.t_test([1, 0]))
    <T test: effect=array([ 2.14285714]), sd=array([[ 1.14062282]]), t=array([[ 1.87867287]]), p=array([[ 0.05953974]]), df_denom=5>
    >>> print(results.f_test(np.identity(2)))
    <F test: F=array([[ 19.46078431]]), p=[[ 0.00437251]], df_denom=5, df_num=2>

    Notes
    -----
    No constant is added by the model unless you are using formulas.
    """ % {'params' : base._model_params_doc,
           'extra_params' : base._missing_param_doc + base._extra_param_doc}
    #TODO: change example to use datasets. This was the point of datasets!

    def __init__(self, endog, exog=None, missing='none', hasconst=None,
                 **kwargs):
        """OLS is WLS with unit weights; drop 'weights' from init keys."""
        super(OLS, self).__init__(endog, exog, missing=missing,
                                  hasconst=hasconst, **kwargs)
        if "weights" in self._init_keys:
            self._init_keys.remove("weights")

    def loglike(self, params):
        """
        The likelihood function for the classical OLS model.

        Parameters
        ----------
        params : array-like
            The coefficients with which to estimate the log-likelihood.

        Returns
        -------
        The concentrated likelihood function evaluated at params.
        """
        nobs2 = self.nobs / 2.0
        resid = self.endog - np.dot(self.exog, params)
        # Concentrated (profile) log-likelihood with sigma^2 = SSR / n;
        # note 2*nobs2 == nobs.
        return (-nobs2 * np.log(2 * np.pi)
                - nobs2 * np.log(np.dot(resid, resid) / (2 * nobs2))
                - nobs2)

    def whiten(self, Y):
        """
        OLS model whitener does nothing: returns Y.
        """
        return Y
class GLSAR(GLS):
    __doc__ = """
    A regression model with an AR(p) covariance structure.

    %(params)s
    rho : int
        Order of the autoregressive covariance
    %(extra_params)s

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> X = range(1,8)
    >>> X = sm.add_constant(X)
    >>> Y = [1,3,4,5,8,10,9]
    >>> model = sm.GLSAR(Y, X, rho=2)
    >>> for i in range(6):
    ...     results = model.fit()
    ...     print("AR coefficients: {0}".format(model.rho))
    ...     rho, sigma = sm.regression.yule_walker(results.resid,
    ...                                            order=model.order)
    ...     model = sm.GLSAR(Y, X, rho)
    ...
    AR coefficients: [ 0.  0.]
    AR coefficients: [-0.52571491 -0.84496178]
    AR coefficients: [-0.6104153  -0.86656458]
    AR coefficients: [-0.60439494 -0.857867  ]
    AR coefficients: [-0.6048218  -0.85846157]
    AR coefficients: [-0.60479146 -0.85841922]
    >>> results.params
    array([-0.66661205,  1.60850853])
    >>> results.tvalues
    array([ -2.10304127,  21.8047269 ])
    >>> print(results.t_test([1, 0]))
    <T test: effect=array([-0.66661205]), sd=array([[ 0.31697526]]), t=array([[-2.10304127]]), p=array([[ 0.06309969]]), df_denom=3>
    >>> print(results.f_test(np.identity(2)))
    <F test: F=array([[ 1815.23061844]]), p=[[ 0.00002372]], df_denom=3, df_num=2>

    Or, equivalently

    >>> model2 = sm.GLSAR(Y, X, rho=2)
    >>> res = model2.iterative_fit(maxiter=6)
    >>> model2.rho
    array([-0.60479146, -0.85841922])

    Notes
    -----
    GLSAR is considered to be experimental.
    The linear autoregressive process of order p--AR(p)--is defined as:
    TODO
    """ % {'params' : base._model_params_doc,
           'extra_params' : base._missing_param_doc + base._extra_param_doc}

    def __init__(self, endog, exog=None, rho=1, missing='none', **kwargs):
        """Interpret `rho` as the AR order (if integer) or the AR params."""
        # this looks strange, interpreting rho as order if it is int.
        # NOTE: `np.int` (an alias of builtin int) was removed in NumPy
        # >= 1.24; check builtin int and numpy integer scalars instead,
        # which is backward compatible and also accepts e.g. np.int64.
        if isinstance(rho, (int, np.integer)):
            self.order = rho
            self.rho = np.zeros(self.order, np.float64)
        else:
            self.rho = np.squeeze(np.asarray(rho))
            if len(self.rho.shape) not in [0,1]:
                raise ValueError("AR parameters must be a scalar or a vector")
            if self.rho.shape == ():
                self.rho.shape = (1,)
            self.order = self.rho.shape[0]
        if exog is None:
            #JP this looks wrong, should be a regression on constant
            #results for rho estimate now identical to yule-walker on y
            #super(AR, self).__init__(endog, add_constant(endog))
            super(GLSAR, self).__init__(endog, np.ones((endog.shape[0],1)),
                                        missing=missing, **kwargs)
        else:
            super(GLSAR, self).__init__(endog, exog, missing=missing,
                                        **kwargs)

    def iterative_fit(self, maxiter=3):
        """
        Perform an iterative two-stage procedure to estimate a GLS model.

        The model is assumed to have AR(p) errors, AR(p) parameters and
        regression coefficients are estimated iteratively.

        Parameters
        ----------
        maxiter : integer, optional
            the number of iterations

        Returns
        -------
        results : RegressionResults
            The results of the final fit.
        """
        #TODO: update this after going through example.
        for i in range(maxiter-1):
            # re-whiten with the current rho before each interim fit
            if hasattr(self, 'pinv_wexog'):
                del self.pinv_wexog
            self.initialize()
            results = self.fit()
            self.rho, _ = yule_walker(results.resid,
                                      order=self.order, df=None)
        #why not another call to self.initialize
        if hasattr(self, 'pinv_wexog'):
            del self.pinv_wexog
        self.initialize()
        results = self.fit()  # final estimate
        return results  # add missing return

    def whiten(self, X):
        """
        Whiten a series of columns according to an AR(p)
        covariance structure.  This drops the initial p observations.

        Parameters
        ----------
        X : array-like
            The data to be whitened,

        Returns
        -------
        whitened array
        """
        #TODO: notation for AR process
        X = np.asarray(X, np.float64)
        _X = X.copy()
        #the following loops over the first axis, works for 1d and nd
        for i in range(self.order):
            _X[(i+1):] = _X[(i+1):] - self.rho[i] * X[0:-(i+1)]
        return _X[self.order:]
def yule_walker(X, order=1, method="unbiased", df=None, inv=False, demean=True):
    """
    Estimate AR(p) parameters from a sequence X using Yule-Walker equation.

    Unbiased or maximum-likelihood estimator (mle)

    See, for example:
    http://en.wikipedia.org/wiki/Autoregressive_moving_average_model

    Parameters
    ----------
    X : array-like
        1d array
    order : integer, optional
        The order of the autoregressive process.  Default is 1.
    method : string, optional
        Method can be "unbiased" or "mle" and this determines denominator in
        estimate of autocorrelation function (ACF) at lag k. If "mle", the
        denominator is n=X.shape[0], if "unbiased" the denominator is n-k.
        The default is unbiased.
    df : integer, optional
        Specifies the degrees of freedom. If `df` is supplied, then it is
        assumed the X has `df` degrees of freedom rather than `n`. Default is
        None.
    inv : bool
        If inv is True the inverse of R is also returned.  Default is False.
    demean : bool
        True, the mean is subtracted from `X` before estimation.

    Returns
    -------
    rho : ndarray
        The autoregressive coefficients
    sigma : float
        The estimated residual standard deviation.
    inv_R : ndarray, optional
        Inverse of the Toeplitz ACF matrix, only returned when ``inv=True``.

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> from statsmodels.datasets.sunspots import load
    >>> data = load()
    >>> rho, sigma = sm.regression.yule_walker(data.endog,
    ...                                        order=4, method="mle")
    >>> rho
    array([ 1.28310031, -0.45240924, -0.20770299,  0.04794365])
    >>> sigma
    16.808022730464351
    """
    #TODO: define R better, look back at notes and technical notes on YW.
    #First link here is useful
    #http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm
    method = str(method).lower()
    if method not in ["unbiased", "mle"]:
        raise ValueError("ACF estimation method must be 'unbiased' or 'MLE'")
    X = np.array(X, dtype=np.float64)
    if demean:
        X -= X.mean()  # automatically demean's X
    # NOTE: a falsy df (None or 0) falls back to the sample size
    n = df or X.shape[0]
    if method == "unbiased":
        # unbiased ACF denominator: df_resid, i.e. n - k
        def denom(k):
            return n - k
    else:
        def denom(k):
            return n
    if X.ndim > 1 and X.shape[1] != 1:
        raise ValueError("expecting a vector to estimate AR parameters")
    # sample autocovariances r[0..order]
    r = np.zeros(order+1, np.float64)
    r[0] = (X**2).sum() / denom(0)
    for k in range(1, order+1):
        r[k] = (X[0:-k]*X[k:]).sum() / denom(k)
    # solve the Yule-Walker system R rho = r[1:]
    R = toeplitz(r[:-1])
    rho = np.linalg.solve(R, r[1:])
    sigmasq = r[0] - (r[1:]*rho).sum()
    # BUGFIX: was `if inv == True`, which is False for any truthy
    # non-bool value of `inv` (e.g. a nonzero int); plain truth test
    # matches the documented bool semantics.
    if inv:
        return rho, np.sqrt(sigmasq), np.linalg.inv(R)
    else:
        return rho, np.sqrt(sigmasq)
class RegressionResults(base.LikelihoodModelResults):
"""
This class summarizes the fit of a linear regression model.
It handles the output of contrasts, estimates of covariance, etc.
Returns
-------
**Attributes**
aic
Akaike's information criterion. For a model with a constant
:math:`-2llf + 2(df_model + 1)`. For a model without a constant
:math:`-2llf + 2(df_model)`.
bic
Bayes' information criterion. For a model with a constant
:math:`-2llf + \log(n)(df_model+1)`. For a model without a constant
:math:`-2llf + \log(n)(df_model)`
bse
The standard errors of the parameter estimates.
pinv_wexog
See specific model class docstring
centered_tss
The total (weighted) sum of squares centered about the mean.
cov_HC0
Heteroscedasticity robust covariance matrix. See HC0_se below.
cov_HC1
Heteroscedasticity robust covariance matrix. See HC1_se below.
cov_HC2
Heteroscedasticity robust covariance matrix. See HC2_se below.
cov_HC3
Heteroscedasticity robust covariance matrix. See HC3_se below.
cov_type
Parameter covariance estimator used for standard errors and t-stats
df_model
Model degrees of freedom. The number of regressors `p`. Does not
include the constant if one is present
df_resid
Residual degrees of freedom. `n - p - 1`, if a constant is present.
`n - p` if a constant is not included.
ess
Explained sum of squares. If a constant is present, the centered
total sum of squares minus the sum of squared residuals. If there is
no constant, the uncentered total sum of squares is used.
fvalue
F-statistic of the fully specified model. Calculated as the mean
squared error of the model divided by the mean squared error of the
residuals.
f_pvalue
p-value of the F-statistic
fittedvalues
The predicted the values for the original (unwhitened) design.
het_scale
adjusted squared residuals for heteroscedasticity robust standard
errors. Is only available after `HC#_se` or `cov_HC#` is called.
See HC#_se for more information.
HC0_se
White's (1980) heteroskedasticity robust standard errors.
Defined as sqrt(diag(X.T X)^(-1)X.T diag(e_i^(2)) X(X.T X)^(-1)
where e_i = resid[i]
HC0_se is a cached property.
When HC0_se or cov_HC0 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is just
resid**2.
HC1_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as sqrt(diag(n/(n-p)*HC_0)
HC1_see is a cached property.
When HC1_se or cov_HC1 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is
n/(n-p)*resid**2.
HC2_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)) X(X.T X)^(-1)
where h_ii = x_i(X.T X)^(-1)x_i.T
HC2_see is a cached property.
When HC2_se or cov_HC2 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is
resid^(2)/(1-h_ii).
HC3_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)^(2)) X(X.T X)^(-1)
where h_ii = x_i(X.T X)^(-1)x_i.T
HC3_see is a cached property.
When HC3_se or cov_HC3 is called the RegressionResults instance will
then have another attribute `het_scale`, which is in this case is
resid^(2)/(1-h_ii)^(2).
model
A pointer to the model instance that called fit() or results.
mse_model
Mean squared error the model. This is the explained sum of squares
divided by the model degrees of freedom.
mse_resid
Mean squared error of the residuals. The sum of squared residuals
divided by the residual degrees of freedom.
mse_total
Total mean squared error. Defined as the uncentered total sum of
squares divided by n the number of observations.
nobs
Number of observations n.
normalized_cov_params
See specific model class docstring
params
The linear coefficients that minimize the least squares criterion. This
is usually called Beta for the classical linear model.
pvalues
The two-tailed p values for the t-stats of the params.
resid
The residuals of the model.
resid_pearson
`wresid` normalized to have unit variance.
rsquared
R-squared of a model with an intercept. This is defined here as
1 - `ssr`/`centered_tss` if the constant is included in the model and
1 - `ssr`/`uncentered_tss` if the constant is omitted.
rsquared_adj
Adjusted R-squared. This is defined here as
1 - (`nobs`-1)/`df_resid` * (1-`rsquared`) if a constant is included
and 1 - `nobs`/`df_resid` * (1-`rsquared`) if no constant is included.
scale
A scale factor for the covariance matrix.
Default value is ssr/(n-p). Note that the square root of `scale` is
often called the standard error of the regression.
ssr
Sum of squared (whitened) residuals.
uncentered_tss
Uncentered sum of squares. Sum of the squared values of the
(whitened) endogenous response variable.
wresid
The residuals of the transformed/whitened regressand and regressor(s)
"""
_cache = {} # needs to be a class attribute for scale setter?
def __init__(self, model, params, normalized_cov_params=None, scale=1.,
             cov_type='nonrobust', cov_kwds=None, use_t=None):
    """Store the fit and configure the parameter covariance estimator."""
    super(RegressionResults, self).__init__(model, params,
                                            normalized_cov_params,
                                            scale)

    self._cache = resettable_cache()
    # singular values of wexog, if the model computed them; used by
    # `eigenvals` to avoid re-decomposing the design
    if hasattr(model, 'wexog_singular_values'):
        self._wexog_singular_values = model.wexog_singular_values
    else:
        self._wexog_singular_values = None

    self.df_model = model.df_model
    self.df_resid = model.df_resid

    if cov_type == 'nonrobust':
        self.cov_type = 'nonrobust'
        self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
                         'covariance matrix of the errors is correctly ' +
                         'specified.'}
        if use_t is None:
            self.use_t = True    # TODO: class default
    else:
        # robust covariance requested: delegate to get_robustcov_results,
        # which mutates self in place (use_self=True)
        if cov_kwds is None:
            cov_kwds = {}
        if 'use_t' in cov_kwds:
            # TODO: we want to get rid of 'use_t' in cov_kwds
            use_t_2 = cov_kwds.pop('use_t')
            if use_t is None:
                use_t = use_t_2
            # TODO: warn or not?
        self.get_robustcov_results(cov_type=cov_type, use_self=True,
                                   use_t=use_t, **cov_kwds)
def __str__(self):
    """Return the text of the regression summary table."""
    # BUGFIX: previously this called summary() but discarded the result,
    # so __str__ returned None and str(results) raised a TypeError.
    return str(self.summary())
def conf_int(self, alpha=.05, cols=None):
    """
    Returns the confidence interval of the fitted parameters.

    Parameters
    ----------
    alpha : float, optional
        The `alpha` level for the confidence interval.
        ie., The default `alpha` = .05 returns a 95% confidence interval.
    cols : array-like, optional
        `cols` specifies which confidence intervals to return

    Returns
    -------
    array-like
        Lower and upper bounds, as produced by the base-class method.

    Notes
    -----
    The confidence interval is based on Student's t-distribution.
    """
    # keep method for docstring for now
    ci = super(RegressionResults, self).conf_int(alpha=alpha, cols=cols)
    return ci
@cache_readonly
def nobs(self):
    """Number of observations, taken from the whitened design's rows."""
    return float(self.model.wexog.shape[0])
@cache_readonly
def fittedvalues(self):
    """Predicted values for the original (unwhitened) design."""
    return self.model.predict(self.params, self.model.exog)
@cache_readonly
def wresid(self):
    """Residuals of the whitened regressand against the whitened design."""
    return self.model.wendog - self.model.predict(self.params,
                                                  self.model.wexog)
@cache_readonly
def resid(self):
    """Residuals on the original (unwhitened) scale."""
    return self.model.endog - self.model.predict(self.params,
                                                 self.model.exog)
#TODO: fix writable example
@cache_writable()
def scale(self):
    """Estimated error variance, ssr / df_resid (writable cache)."""
    wresid = self.wresid
    return np.dot(wresid, wresid) / self.df_resid
@cache_readonly
def ssr(self):
    """Sum of squared (whitened) residuals."""
    wresid = self.wresid
    return np.dot(wresid, wresid)
@cache_readonly
def centered_tss(self):
    """Total (weighted) sum of squares centered about the mean.

    Uses the weighted mean when the model carries weights (WLS);
    otherwise centers the whitened endog.
    """
    model = self.model
    weights = getattr(model, 'weights', None)
    if weights is not None:
        return np.sum(weights*(model.endog - np.average(model.endog,
                                                        weights=weights))**2)
    else:  # this is probably broken for GLS
        centered_endog = model.wendog - model.wendog.mean()
        return np.dot(centered_endog, centered_endog)
@cache_readonly
def uncentered_tss(self):
    """Uncentered sum of squares of the (whitened) endogenous variable."""
    wendog = self.model.wendog
    return np.dot(wendog, wendog)
@cache_readonly
def ess(self):
    """Explained sum of squares (centered iff a constant is present)."""
    if self.k_constant:
        return self.centered_tss - self.ssr
    else:
        return self.uncentered_tss - self.ssr
@cache_readonly
def rsquared(self):
    """R-squared: 1 - ssr/tss (centered tss iff a constant is present)."""
    if self.k_constant:
        return 1 - self.ssr/self.centered_tss
    else:
        return 1 - self.ssr/self.uncentered_tss
@cache_readonly
def rsquared_adj(self):
    """Adjusted R-squared, penalizing for the number of regressors."""
    return 1 - np.divide(self.nobs - self.k_constant, self.df_resid) * (1 - self.rsquared)
@cache_readonly
def mse_model(self):
    """Mean squared error of the model: ess / df_model."""
    return self.ess/self.df_model
@cache_readonly
def mse_resid(self):
    """Mean squared error of the residuals: ssr / df_resid."""
    return self.ssr/self.df_resid
@cache_readonly
def mse_total(self):
    """Total mean squared error: tss / (df_resid + df_model)."""
    if self.k_constant:
        return self.centered_tss / (self.df_resid + self.df_model)
    else:
        return self.uncentered_tss / (self.df_resid + self.df_model)
@cache_readonly
def fvalue(self):
    """F-statistic for joint significance of the non-constant parameters.

    Under a robust cov_type this is a Wald `f_test` on all parameters
    except the constant (and also caches `f_pvalue` as a side effect);
    under the nonrobust default it is mse_model / mse_resid.
    """
    if hasattr(self, 'cov_type') and self.cov_type != 'nonrobust':
        # with heteroscedasticity or correlation robustness
        k_params = self.normalized_cov_params.shape[0]
        mat = np.eye(k_params)
        const_idx = self.model.data.const_idx
        # TODO: What if model includes implcit constant, e.g. all dummies but no constant regressor?
        # TODO: Restats as LM test by projecting orthogonalizing to constant?
        if self.model.data.k_constant == 1:
            # if constant is implicit, return nan see #2444
            if const_idx is None:
                return np.nan
            idx = lrange(k_params)
            idx.pop(const_idx)
            mat = mat[idx]  # remove constant
        ft = self.f_test(mat)
        # using backdoor to set another attribute that we already have
        self._cache['f_pvalue'] = ft.pvalue
        return ft.fvalue
    else:
        # for standard homoscedastic case
        return self.mse_model/self.mse_resid
@cache_readonly
def f_pvalue(self):
    """p-value of the F-statistic (may be pre-cached by `fvalue`)."""
    return stats.f.sf(self.fvalue, self.df_model, self.df_resid)
@cache_readonly
def bse(self):
    """Standard errors of the parameter estimates."""
    return np.sqrt(np.diag(self.cov_params()))
@cache_readonly
def aic(self):
    """Akaike information criterion: -2*llf + 2*(df_model + k_constant)."""
    return -2 * self.llf + 2 * (self.df_model + self.k_constant)
@cache_readonly
def bic(self):
    """Bayes information criterion: -2*llf + log(n)*(df_model + k_constant)."""
    return (-2 * self.llf + np.log(self.nobs) * (self.df_model +
                                                 self.k_constant))
@cache_readonly
def eigenvals(self):
    """
    Return eigenvalues sorted in decreasing order.

    Uses the squared singular values of the whitened design when the
    model stored them; otherwise decomposes wexog'wexog directly.
    """
    if self._wexog_singular_values is not None:
        eigvals = self._wexog_singular_values ** 2
    else:
        eigvals = np.linalg.linalg.eigvalsh(np.dot(self.model.wexog.T, self.model.wexog))
    return np.sort(eigvals)[::-1]
@cache_readonly
def condition_number(self):
    """
    Return condition number of exogenous matrix.

    Calculated as ratio of largest to smallest eigenvalue
    (square-rooted, i.e. in singular-value terms).
    """
    eigvals = self.eigenvals
    return np.sqrt(eigvals[0]/eigvals[-1])
#TODO: make these properties reset bse
def _HCCM(self, scale):
    """Sandwich covariance pinv(wX) @ diag(scale) @ pinv(wX).T."""
    H = np.dot(self.model.pinv_wexog,
               scale[:,None]*self.model.pinv_wexog.T)
    return H
@cache_readonly
def cov_HC0(self):
    """
    White's (1980) heteroskedasticity-robust covariance.

    Side effect: stores ``het_scale = wresid**2`` on the instance.
    """
    self.het_scale = self.wresid**2
    cov_HC0 = self._HCCM(self.het_scale)
    return cov_HC0
@cache_readonly
def cov_HC1(self):
    """
    HC1 robust covariance: HC0 scaled by n/(n - p).

    Side effect: stores ``het_scale`` on the instance.
    """
    self.het_scale = self.nobs/(self.df_resid)*(self.wresid**2)
    cov_HC1 = self._HCCM(self.het_scale)
    return cov_HC1
@cache_readonly
def cov_HC2(self):
    """
    HC2 robust covariance: residuals scaled by leverage, e_i^2/(1 - h_ii).

    Side effect: stores ``het_scale`` on the instance.
    """
    # probably could be optimized
    # h is the vector of leverages (diagonal of the hat matrix)
    h = np.diag(chain_dot(self.model.wexog,
                          self.normalized_cov_params,
                          self.model.wexog.T))
    self.het_scale = self.wresid**2/(1-h)
    cov_HC2 = self._HCCM(self.het_scale)
    return cov_HC2
@cache_readonly
def cov_HC3(self):
    """
    HC3 robust covariance: (e_i/(1 - h_ii))**2 scaling.

    Side effect: stores ``het_scale`` on the instance.
    """
    # h is the vector of leverages (diagonal of the hat matrix)
    h = np.diag(chain_dot(self.model.wexog,
                          self.normalized_cov_params,
                          self.model.wexog.T))
    self.het_scale=(self.wresid/(1-h))**2
    cov_HC3 = self._HCCM(self.het_scale)
    return cov_HC3
@cache_readonly
def HC0_se(self):
    """White (HC0) heteroskedasticity-robust standard errors."""
    return np.sqrt(np.diag(self.cov_HC0))
@cache_readonly
def HC1_se(self):
    """HC1 heteroskedasticity-robust standard errors."""
    return np.sqrt(np.diag(self.cov_HC1))
@cache_readonly
def HC2_se(self):
    """HC2 heteroskedasticity-robust standard errors."""
    return np.sqrt(np.diag(self.cov_HC2))
@cache_readonly
def HC3_se(self):
    """HC3 heteroskedasticity-robust standard errors."""
    return np.sqrt(np.diag(self.cov_HC3))
@cache_readonly
def resid_pearson(self):
    """
    Residuals, normalized to have unit variance.

    Returns
    -------
    An array wresid/sqrt(scale)
    """
    if not hasattr(self, 'resid'):
        raise ValueError('Method requires residuals.')
    eps = np.finfo(self.wresid.dtype).eps
    if np.sqrt(self.scale) < 10 * eps * self.model.endog.mean():
        # don't divide if scale is zero close to numerical precision
        from warnings import warn
        warn("All residuals are 0, cannot compute normed residuals.",
             RuntimeWarning)
        return self.wresid
    else:
        return self.wresid / np.sqrt(self.scale)
def _is_nested(self, restricted):
    """
    Check whether `restricted` is nested within the current model.

    Parameters
    ----------
    restricted : Result instance
        The restricted model is assumed to be nested in the current
        model.  The result instance of the restricted model is required
        to have two attributes, residual sum of squares, `ssr`, residual
        degrees of freedom, `df_resid`.

    Returns
    -------
    nested : bool
        True if nested, otherwise false

    Notes
    -----
    One model nests another if the regressors in the smaller model are
    spanned by the regressors in the larger model and the regressand is
    identical.  Spanning is tested by checking that the restricted
    regressors are (numerically) orthogonal to the full model's
    residuals.
    """
    if self.model.nobs != restricted.model.nobs:
        return False

    full_rank = self.model.rank
    restricted_rank = restricted.model.rank
    if full_rank <= restricted_rank:
        return False

    restricted_exog = restricted.model.wexog
    full_wresid = self.wresid

    scores = restricted_exog * full_wresid[:,None]
    score_l2 = np.sqrt(np.mean(scores.mean(0) ** 2))
    # TODO: Could be improved, and may fail depending on scale of regressors
    return np.allclose(score_l2,0)
def compare_lm_test(self, restricted, demean=True, use_lr=False):
    """Use Lagrange Multiplier test to test whether restricted model is correct.

    Parameters
    ----------
    restricted : Result instance
        The restricted model is assumed to be nested in the current
        model. The result instance of the restricted model is required to
        have two attributes, residual sum of squares, `ssr`, residual
        degrees of freedom, `df_resid`.
    demean : bool
        Flag indicating whether the demean the scores based on the residuals
        from the restricted model. If True, the covariance of the scores
        are used and the LM test is identical to the large sample version
        of the LR test.
    use_lr : bool
        If True, the scores are built from the full model's residuals
        (LR-type variant) and `demean` is forced off.

    Returns
    -------
    lm_value : float
        test statistic, chi2 distributed
    p_value : float
        p-value of the test statistic
    df_diff : int
        degrees of freedom of the restriction, i.e. difference in df between
        models

    Notes
    -----
    The score covariance estimator follows this results instance's
    `cov_type` (nonrobust, HC0-HC3, HAC, or cluster).
    """
    import statsmodels.stats.sandwich_covariance as sw
    from numpy.linalg import inv

    if not self._is_nested(restricted):
        raise ValueError("Restricted model is not nested by full model.")

    wresid = restricted.wresid
    wexog = self.model.wexog
    scores = wexog * wresid[:, None]

    n = self.nobs
    df_full = self.df_resid
    df_restr = restricted.df_resid
    df_diff = (df_restr - df_full)

    s = scores.mean(axis=0)
    if use_lr:
        scores = wexog * self.wresid[:, None]
        demean = False

    if demean:
        scores = scores - scores.mean(0)[None, :]
    # Form matters here. If homoskedastic can be sigma^2 (X'X)^-1.
    # If heteroskedastic then the form below is fine.
    # If HAC then need to use HAC; if cluster, should use cluster.
    cov_type = getattr(self, 'cov_type', 'nonrobust')
    if cov_type == 'nonrobust':
        sigma2 = np.mean(wresid**2)
        XpX = np.dot(wexog.T, wexog) / n
        Sinv = inv(sigma2 * XpX)
    elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):
        Sinv = inv(np.dot(scores.T, scores) / n)
    elif cov_type == 'HAC':
        # BUGFIX: removed a stray debug print("HAC") that polluted stdout
        maxlags = self.cov_kwds['maxlags']
        Sinv = inv(sw.S_hac_simple(scores, maxlags) / n)
    elif cov_type == 'cluster':
        # cluster robust standard errors
        groups = self.cov_kwds['groups']
        # TODO: Might need demean option in S_crosssection by group?
        Sinv = inv(sw.S_crosssection(scores, groups))
    else:
        raise ValueError('Only nonrobust, HC, HAC and cluster are ' +
                         'currently connected')

    lm_value = n * chain_dot(s, Sinv, s.T)
    p_value = stats.chi2.sf(lm_value, df_diff)
    return lm_value, p_value, df_diff
def compare_f_test(self, restricted):
    """use F test to test whether restricted model is correct

    Parameters
    ----------
    restricted : Result instance
        The restricted model is assumed to be nested in the current
        model. The result instance of the restricted model is required to
        have two attributes, residual sum of squares, `ssr`, residual
        degrees of freedom, `df_resid`.

    Returns
    -------
    f_value : float
        test statistic, F distributed
    p_value : float
        p-value of the test statistic
    df_diff : int
        degrees of freedom of the restriction, i.e. difference in df between
        models

    Notes
    -----
    This test compares the residual sum of squares of the two models.
    This is not a valid test, if there is unspecified heteroscedasticity
    or correlation. This method will issue a warning if this is detected
    but still return the results under the assumption of homoscedasticity
    and no autocorrelation (sphericity).
    """
    # warn (but proceed) when either side was fit with a robust covariance
    has_robust1 = getattr(self, 'cov_type', 'nonrobust') != 'nonrobust'
    has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=
                   'nonrobust')

    if has_robust1 or has_robust2:
        warnings.warn('F test for comparison is likely invalid with ' +
                      'robust covariance, proceeding anyway',
                      InvalidTestWarning)

    ssr_full = self.ssr
    ssr_restr = restricted.ssr
    df_full = self.df_resid
    df_restr = restricted.df_resid

    df_diff = (df_restr - df_full)
    # classical F statistic: ((SSR_r - SSR_f)/q) / (SSR_f/df_f)
    f_value = (ssr_restr - ssr_full) / df_diff / ssr_full * df_full
    p_value = stats.f.sf(f_value, df_diff, df_full)
    return f_value, p_value, df_diff
def compare_lr_test(self, restricted, large_sample=False):
    """
    Likelihood ratio test to test whether restricted model is correct

    Parameters
    ----------
    restricted : Result instance
        The restricted model is assumed to be nested in the current model.
        The result instance of the restricted model is required to have two
        attributes, residual sum of squares, `ssr`, residual degrees of
        freedom, `df_resid`.
    large_sample : bool
        Flag indicating whether to use a heteroskedasticity robust version
        of the LR test, which is a modified LM test.

    Returns
    -------
    lr_stat : float
        likelihood ratio, chisquare distributed with df_diff degrees of
        freedom
    p_value : float
        p-value of the test statistic
    df_diff : int
        degrees of freedom of the restriction, i.e. difference in df between
        models

    Notes
    -----
    The exact likelihood ratio is valid for homoskedastic data, and is
    defined as

    .. math:: D=-2\\log\\left(\\frac{\\mathcal{L}_{null}}
       {\\mathcal{L}_{alternative}}\\right)

    where :math:`\\mathcal{L}` is the likelihood of the model. With :math:`D`
    distributed as chisquare with df equal to difference in number of
    parameters or equivalently difference in residual degrees of freedom.

    The large sample version of the likelihood ratio is defined as

    .. math:: D=n s^{\\prime}S^{-1}s

    where :math:`s=n^{-1}\\sum_{i=1}^{n} s_{i}`

    .. math:: s_{i} = x_{i,alternative} \\epsilon_{i,null}

    is the average score of the model evaluated using the residuals from
    null model and the regressors from the alternative model and :math:`S`
    is the covariance of the scores, :math:`s_{i}`.  The covariance of the
    scores is estimated using the same estimator as in the alternative
    model.

    This test compares the loglikelihood of the two models.
    This may not be a valid test, if there is unspecified heteroscedasticity
    or correlation. This method will issue a warning if this is detected
    but still return the results without taking unspecified
    heteroscedasticity or correlation into account.

    TODO: put into separate function, needs tests
    """
    # See mailing list discussion October 17,
    # delegate to the score-based LM test for the robust large-sample form
    if large_sample:
        return self.compare_lm_test(restricted, use_lr=True)

    has_robust1 = (getattr(self, 'cov_type', 'nonrobust') != 'nonrobust')
    has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=
                   'nonrobust')

    if has_robust1 or has_robust2:
        warnings.warn('Likelihood Ratio test is likely invalid with ' +
                      'robust covariance, proceeding anyway',
                      InvalidTestWarning)

    llf_full = self.llf
    llf_restr = restricted.llf
    df_full = self.df_resid
    df_restr = restricted.df_resid

    lrdf = (df_restr - df_full)
    lrstat = -2*(llf_restr - llf_full)
    lr_pvalue = stats.chi2.sf(lrstat, lrdf)

    return lrstat, lr_pvalue, lrdf
    def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwds):
        """create new results instance with robust covariance as default

        Parameters
        ----------
        cov_type : string
            the type of robust sandwich estimator to use. see Notes below
        use_t : bool
            If true, then the t distribution is used for inference.
            If false, then the normal distribution is used.
            If `use_t` is None, then an appropriate default is used, which is
            `true` if the cov_type is nonrobust, and `false` in all other cases.
        kwds : depends on cov_type
            Required or optional arguments for robust covariance calculation.
            see Notes below

        Returns
        -------
        results : results instance
            This method creates a new results instance with the requested
            robust covariance as the default covariance of the parameters.
            Inferential statistics like p-values and hypothesis tests will be
            based on this covariance matrix.

        Notes
        -----
        The following covariance types and required or optional arguments are
        currently available:

        - 'fixed scale' and optional keyword argument 'scale' which uses
          a predefined scale estimate with default equal to one.
        - 'HC0', 'HC1', 'HC2', 'HC3' and no keyword arguments:
          heteroscedasticity robust covariance
        - 'HAC' and keywords

          - `maxlag` integer (required) : number of lags to use
          - `kernel` string (optional) : kernel, default is Bartlett
          - `use_correction` bool (optional) : If true, use small sample
            correction

        - 'cluster' and required keyword `groups`, integer group indicator

          - `groups` array_like, integer (required) :
            index of clusters or groups
          - `use_correction` bool (optional) :
            If True the sandwich covariance is calulated with a small
            sample correction.
            If False the the sandwich covariance is calulated without
            small sample correction.
          - `df_correction` bool (optional)
            If True (default), then the degrees of freedom for the
            inferential statistics and hypothesis tests, such as
            pvalues, f_pvalue, conf_int, and t_test and f_test, are
            based on the number of groups minus one instead of the
            total number of observations minus the number of explanatory
            variables. `df_resid` of the results instance is adjusted.
            If False, then `df_resid` of the results instance is not
            adjusted.

        - 'hac-groupsum' Driscoll and Kraay, heteroscedasticity and
          autocorrelation robust standard errors in panel data
          keywords

          - `time` array_like (required) : index of time periods
          - `maxlag` integer (required) : number of lags to use
          - `kernel` string (optional) : kernel, default is Bartlett
          - `use_correction` False or string in ['hac', 'cluster'] (optional) :
            If False the the sandwich covariance is calulated without
            small sample correction.
            If `use_correction = 'cluster'` (default), then the same
            small sample correction as in the case of 'covtype='cluster''
            is used.
          - `df_correction` bool (optional)
            adjustment to df_resid, see cov_type 'cluster' above
            # TODO: we need more options here

        - 'hac-panel' heteroscedasticity and autocorrelation robust standard
          errors in panel data.
          The data needs to be sorted in this case, the time series for
          each panel unit or cluster need to be stacked.
          keywords

          - `time` array_like (required) : index of time periods
          - `maxlag` integer (required) : number of lags to use
          - `kernel` string (optional) : kernel, default is Bartlett
          - `use_correction` False or string in ['hac', 'cluster'] (optional) :
            If False the the sandwich covariance is calulated without
            small sample correction.
          - `df_correction` bool (optional)
            adjustment to df_resid, see cov_type 'cluster' above
            # TODO: we need more options here

        Reminder: `use_correction` in "nw-groupsum" and "nw-panel" is not
        bool, needs to be in [False, 'hac', 'cluster']

        TODO: Currently there is no check for extra or misspelled keywords,
        except in the case of cov_type `HCx`
        """
        import statsmodels.stats.sandwich_covariance as sw
        # TODO: make separate function that returns a robust cov plus info
        # Either modify this instance in place (use_self=True) or build a
        # fresh results instance on the same model/params and attach the
        # robust covariance to that copy.
        use_self = kwds.pop('use_self', False)
        if use_self:
            res = self
        else:
            res = self.__class__(self.model, self.params,
                       normalized_cov_params=self.normalized_cov_params,
                       scale=self.scale)
        res.cov_type = cov_type
        # use_t might already be defined by the class, and already set
        if use_t is None:
            use_t = self.use_t
        res.cov_kwds = {'use_t':use_t}  # store for information
        res.use_t = use_t
        adjust_df = False
        # Only the cluster/panel families can switch inference df from
        # df_resid to (number of groups - 1); see docstring `df_correction`.
        if cov_type in ['cluster', 'nw-panel', 'nw-groupsum']:
            df_correction = kwds.get('df_correction', None)
            # TODO: check also use_correction, do I need all combinations?
            if df_correction is not False: # i.e. in [None, True]:
                # user didn't explicitely set it to False
                adjust_df = True
        res.cov_kwds['adjust_df'] = adjust_df
        # verify and set kwds, and calculate cov
        # TODO: this should be outsourced in a function so we can reuse it in
        # other models
        # TODO: make it DRYer repeated code for checking kwds
        if cov_type in ['fixed scale', 'fixed_scale']:
            # Known-scale case: just rescale the normalized covariance.
            res.cov_kwds['description'] = ('Standard Errors are based on ' +
                                           'fixed scale')
            res.cov_kwds['scale'] = scale = kwds.get('scale', 1.)
            res.cov_params_default = scale * res.normalized_cov_params
        elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):
            if kwds:
                raise ValueError('heteroscedasticity robust covarians ' +
                                 'does not use keywords')
            res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' +
                                           'robust ' + '(' + cov_type + ')')
            # TODO cannot access cov without calling se first
            # Accessing the `_se` attribute triggers the lazy computation of
            # the corresponding `cov_HCx` attribute used on the next line.
            getattr(self, cov_type.upper() + '_se')
            res.cov_params_default = getattr(self, 'cov_' + cov_type.upper())
        elif cov_type == 'HAC':
            maxlags = kwds['maxlags']   # required?, default in cov_hac_simple
            res.cov_kwds['maxlags'] = maxlags
            use_correction = kwds.get('use_correction', False)
            res.cov_kwds['use_correction'] = use_correction
            res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' +
                 'and autocorrelation robust (HAC) using %d lags and %s small ' +
                 'sample correction') % (maxlags, ['without', 'with'][use_correction])
            res.cov_params_default = sw.cov_hac_simple(self, nlags=maxlags,
                                                 use_correction=use_correction)
        elif cov_type == 'cluster':
            #cluster robust standard errors, one- or two-way
            groups = kwds['groups']
            if not hasattr(groups, 'shape'):
                groups = np.asarray(groups).T
            if groups.ndim >= 2:
                groups = groups.squeeze()
            res.cov_kwds['groups'] = groups
            use_correction = kwds.get('use_correction', True)
            res.cov_kwds['use_correction'] = use_correction
            if groups.ndim == 1:
                # one-way clustering
                if adjust_df:
                    # need to find number of groups
                    # duplicate work
                    self.n_groups = n_groups = len(np.unique(groups))
                res.cov_params_default = sw.cov_cluster(self, groups,
                                                 use_correction=use_correction)
            elif groups.ndim == 2:
                # two-way clustering; one group indicator per column
                if hasattr(groups, 'values'):
                    groups = groups.values
                if adjust_df:
                    # need to find number of groups
                    # duplicate work
                    n_groups0 = len(np.unique(groups[:,0]))
                    n_groups1 = len(np.unique(groups[:, 1]))
                    self.n_groups = (n_groups0, n_groups1)
                    n_groups = min(n_groups0, n_groups1) # use for adjust_df
                # Note: sw.cov_cluster_2groups has 3 returns
                res.cov_params_default = sw.cov_cluster_2groups(self, groups,
                                             use_correction=use_correction)[0]
            else:
                raise ValueError('only two groups are supported')
            res.cov_kwds['description'] = ('Standard Errors are robust to' +
                                'cluster correlation ' + '(' + cov_type + ')')
        elif cov_type == 'nw-panel':
            #cluster robust standard errors
            res.cov_kwds['time'] = time = kwds['time']
            #TODO: nlags is currently required
            #nlags = kwds.get('nlags', True)
            #res.cov_kwds['nlags'] = nlags
            #TODO: `nlags` or `maxlags`
            res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
            use_correction = kwds.get('use_correction', 'hac')
            res.cov_kwds['use_correction'] = use_correction
            weights_func = kwds.get('weights_func', sw.weights_bartlett)
            res.cov_kwds['weights_func'] = weights_func
            # TODO: clumsy time index in cov_nw_panel
            # A decrease in the (stacked, per-unit sorted) time index marks
            # the start of a new panel unit; build (start, stop) index pairs.
            tt = (np.nonzero(np.diff(time) < 0)[0] + 1).tolist()
            groupidx = lzip([0] + tt, tt + [len(time)])
            self.n_groups = n_groups = len(groupidx)
            res.cov_params_default = sw.cov_nw_panel(self, maxlags, groupidx,
                                            weights_func=weights_func,
                                            use_correction=use_correction)
            res.cov_kwds['description'] = ('Standard Errors are robust to' +
                                'cluster correlation ' + '(' + cov_type + ')')
        elif cov_type == 'nw-groupsum':
            # Driscoll-Kraay standard errors
            res.cov_kwds['time'] = time = kwds['time']
            #TODO: nlags is currently required
            #nlags = kwds.get('nlags', True)
            #res.cov_kwds['nlags'] = nlags
            #TODO: `nlags` or `maxlags`
            res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
            use_correction = kwds.get('use_correction', 'cluster')
            res.cov_kwds['use_correction'] = use_correction
            weights_func = kwds.get('weights_func', sw.weights_bartlett)
            res.cov_kwds['weights_func'] = weights_func
            if adjust_df:
                # need to find number of groups
                # number of groups = number of time-index decreases + 1
                tt = (np.nonzero(np.diff(time) < 0)[0] + 1)
                self.n_groups = n_groups = len(tt) + 1
            res.cov_params_default = sw.cov_nw_groupsum(self, maxlags, time,
                                            weights_func=weights_func,
                                            use_correction=use_correction)
            res.cov_kwds['description'] = (
                        'Driscoll and Kraay Standard Errors are robust to ' +
                        'cluster correlation ' + '(' + cov_type + ')')
        else:
            raise ValueError('cov_type not recognized. See docstring for ' +
                             'available options and spelling')
        if adjust_df:
            # Note: df_resid is used for scale and others, add new attribute
            res.df_resid_inference = n_groups - 1
        return res
def get_prediction(self, exog=None, transform=True, weights=None,
row_labels=None, **kwds):
return pred.get_prediction(self, exog=exog, transform=transform,
weights=weights, row_labels=row_labels, **kwds)
get_prediction.__doc__ = pred.get_prediction.__doc__
    def summary(self, yname=None, xname=None, title=None, alpha=.05):
        """Summarize the Regression Results

        Parameters
        ----------
        yname : string, optional
            Default is `y`
        xname : list of strings, optional
            Default is `var_##` for ## in p the number of regressors
        title : string, optional
            Title for the top table. If not None, then this replaces the
            default title
        alpha : float
            significance level for the confidence intervals

        Returns
        -------
        smry : Summary instance
            this holds the summary tables and text, which can be printed or
            converted to various output formats.

        See Also
        --------
        statsmodels.iolib.summary.Summary : class to hold summary
            results
        """
        #TODO: import where we need it (for now), add as cached attributes
        from statsmodels.stats.stattools import (jarque_bera,
                                                 omni_normtest, durbin_watson)
        # Residual-based normality diagnostics shown in the bottom table.
        jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
        omni, omnipv = omni_normtest(self.wresid)
        eigvals = self.eigenvals
        condno = self.condition_number
        # Cache the diagnostics on the instance for later programmatic access.
        # eigvals[-1] is treated as the smallest eigenvalue here (see the
        # singularity warning below, which formats it as such).
        self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,
                          omni=omni, omnipv=omnipv, condno=condno,
                          mineigval=eigvals[-1])
        #TODO not used yet
        #diagn_left_header = ['Models stats']
        #diagn_right_header = ['Residual stats']
        #TODO: requiring list/iterable is a bit annoying
        #need more control over formatting
        #TODO: default don't work if it's not identically spelled
        # Entries with value None are filled in by the Summary machinery.
        top_left = [('Dep. Variable:', None),
                    ('Model:', None),
                    ('Method:', ['Least Squares']),
                    ('Date:', None),
                    ('Time:', None),
                    ('No. Observations:', None),
                    ('Df Residuals:', None), #[self.df_resid]), #TODO: spelling
                    ('Df Model:', None), #[self.df_model])
                    ]
        if hasattr(self, 'cov_type'):
            # Only present when get_robustcov_results (or fit with cov_type)
            # attached a robust covariance.
            top_left.append(('Covariance Type:', [self.cov_type]))
        top_right = [('R-squared:', ["%#8.3f" % self.rsquared]),
                     ('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]),
                     ('F-statistic:', ["%#8.4g" % self.fvalue] ),
                     ('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
                     ('Log-Likelihood:', None), #["%#6.4g" % self.llf]),
                     ('AIC:', ["%#8.4g" % self.aic]),
                     ('BIC:', ["%#8.4g" % self.bic])
                     ]
        diagn_left = [('Omnibus:', ["%#6.3f" % omni]),
                      ('Prob(Omnibus):', ["%#6.3f" % omnipv]),
                      ('Skew:', ["%#6.3f" % skew]),
                      ('Kurtosis:', ["%#6.3f" % kurtosis])
                      ]
        diagn_right = [('Durbin-Watson:', ["%#8.3f" % durbin_watson(self.wresid)]),
                       ('Jarque-Bera (JB):', ["%#8.3f" % jb]),
                       ('Prob(JB):', ["%#8.3g" % jbpv]),
                       ('Cond. No.', ["%#8.3g" % condno])
                       ]
        if title is None:
            title = self.model.__class__.__name__ + ' ' + "Regression Results"
        #create summary table instance
        from statsmodels.iolib.summary import Summary
        smry = Summary()
        smry.add_table_2cols(self, gleft=top_left, gright=top_right,
                          yname=yname, xname=xname, title=title)
        smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
                             use_t=self.use_t)
        smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
                          yname=yname, xname=xname,
                          title="")
        #add warnings/notes, added to text format only
        etext =[]
        if hasattr(self, 'cov_type'):
            etext.append(self.cov_kwds['description'])
        if self.model.exog.shape[0] < self.model.exog.shape[1]:
            wstr = "The input rank is higher than the number of observations."
            etext.append(wstr)
        if eigvals[-1] < 1e-10:
            # Near-singular design matrix: warn with the smallest eigenvalue.
            wstr = "The smallest eigenvalue is %6.3g. This might indicate "
            wstr += "that there are\n"
            wstr += "strong multicollinearity problems or that the design "
            wstr += "matrix is singular."
            wstr = wstr % eigvals[-1]
            etext.append(wstr)
        elif condno > 1000: #TODO: what is recommended
            wstr = "The condition number is large, %6.3g. This might "
            wstr += "indicate that there are\n"
            wstr += "strong multicollinearity or other numerical "
            wstr += "problems."
            wstr = wstr % condno
            etext.append(wstr)
        if etext:
            # Number the notes [1], [2], ... and prepend a header line.
            etext = ["[{0}] {1}".format(i + 1, text) for i, text in enumerate(etext)]
            etext.insert(0, "Warnings:")
            smry.add_extra_txt(etext)
        return smry
        # Old experimental building blocks, kept commented out for reference:
        #top = summary_top(self, gleft=topleft, gright=diagn_left, #[],
        #                  yname=yname, xname=xname,
        #                  title=self.model.__class__.__name__ + ' ' +
        #                  "Regression Results")
        #par = summary_params(self, yname=yname, xname=xname, alpha=.05,
        #                     use_t=False)
        #
        #diagn = summary_top(self, gleft=diagn_left, gright=diagn_right,
        #                    yname=yname, xname=xname,
        #                    title="Linear Model")
        #
        #return summary_return([top, par, diagn], return_fmt=return_fmt)
    def summary2(self, yname=None, xname=None, title=None, alpha=.05,
                 float_format="%.4f"):
        """Experimental summary function to summarize the regression results

        Parameters
        ----------
        xname : List of strings of length equal to the number of parameters
            Names of the independent variables (optional)
        yname : string
            Name of the dependent variable (optional)
        title : string, optional
            Title for the top table. If not None, then this replaces the
            default title
        alpha : float
            significance level for the confidence intervals
        float_format: string
            print format for floats in parameters summary

        Returns
        -------
        smry : Summary instance
            this holds the summary tables and text, which can be printed or
            converted to various output formats.

        See Also
        --------
        statsmodels.iolib.summary.Summary : class to hold summary
            results
        """
        # Diagnostics
        from statsmodels.stats.stattools import (jarque_bera,
                                                 omni_normtest,
                                                 durbin_watson)
        from statsmodels.compat.collections import OrderedDict
        jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
        omni, omnipv = omni_normtest(self.wresid)
        dw = durbin_watson(self.wresid)
        eigvals = self.eigenvals
        condno = self.condition_number
        eigvals = np.sort(eigvals) #in increasing order
        # OrderedDict preserves the display order of the diagnostics table.
        diagnostic = OrderedDict([
            ('Omnibus:', "%.3f" % omni),
            ('Prob(Omnibus):', "%.3f" % omnipv),
            ('Skew:', "%.3f" % skew),
            ('Kurtosis:', "%.3f" % kurtosis),
            ('Durbin-Watson:', "%.3f" % dw),
            ('Jarque-Bera (JB):', "%.3f" % jb),
            ('Prob(JB):', "%.3f" % jbpv),
            ('Condition No.:', "%.0f" % condno)
            ])
        # Summary
        from statsmodels.iolib import summary2
        smry = summary2.Summary()
        smry.add_base(results=self, alpha=alpha, float_format=float_format,
                      xname=xname, yname=yname, title=title)
        smry.add_dict(diagnostic)
        # Warnings
        # NOTE: after np.sort above, eigvals[-1] is the LARGEST eigenvalue,
        # so this differs from summary() which warns on the smallest one —
        # kept as-is here since this is a documentation-only pass.
        if eigvals[-1] < 1e-10:
            warn = "The smallest eigenvalue is %6.3g. This might indicate that\
            there are strong multicollinearity problems or that the design\
            matrix is singular." % eigvals[-1]
            smry.add_text(warn)
        if condno > 1000:
            warn = "* The condition number is large (%.g). This might indicate \
            strong multicollinearity or other numerical problems." % condno
            smry.add_text(warn)
        return smry
class OLSResults(RegressionResults):
    """
    Results class for for an OLS model.

    Most of the methods and attributes are inherited from RegressionResults.
    The special methods that are only available for OLS are:

    - get_influence
    - outlier_test
    - el_test
    - conf_int_el

    See Also
    --------
    RegressionResults
    """

    def get_influence(self):
        """
        get an instance of Influence with influence and outlier measures

        Returns
        -------
        infl : Influence instance
            the instance has methods to calculate the main influence and
            outlier measures for the OLS regression

        See also
        --------
        :class:`statsmodels.stats.outliers_influence.OLSInfluence`
        """
        from statsmodels.stats.outliers_influence import OLSInfluence
        return OLSInfluence(self)

    def outlier_test(self, method='bonf', alpha=.05):
        """
        Test observations for outliers according to method

        Parameters
        ----------
        method : str
            - `bonferroni` : one-step correction
            - `sidak` : one-step correction
            - `holm-sidak` :
            - `holm` :
            - `simes-hochberg` :
            - `hommel` :
            - `fdr_bh` : Benjamini/Hochberg
            - `fdr_by` : Benjamini/Yekutieli
            See `statsmodels.stats.multitest.multipletests` for details.
        alpha : float
            familywise error rate

        Returns
        -------
        table : ndarray or DataFrame
            Returns either an ndarray or a DataFrame if labels is not None.
            Will attempt to get labels from model_results if available. The
            columns are the Studentized residuals, the unadjusted p-value,
            and the corrected p-value according to method.

        Notes
        -----
        The unadjusted p-value is stats.t.sf(abs(resid), df) where
        df = df_resid - 1.
        """
        from statsmodels.stats.outliers_influence import outlier_test
        return outlier_test(self, method, alpha)

    def el_test(self, b0_vals, param_nums, return_weights=0,
                ret_params=0, method='nm',
                stochastic_exog=1, return_params=0):
        """
        Tests single or joint hypotheses of the regression parameters using
        Empirical Likelihood.

        Parameters
        ----------
        b0_vals : 1darray
            The hypothesized value of the parameter to be tested
        param_nums : 1darray
            The parameter number to be tested
        return_weights : bool
            If true, returns the weights that optimize the likelihood
            ratio at b0_vals. Default is False
        ret_params : bool
            If true, returns the parameter vector that maximizes the likelihood
            ratio at b0_vals. Also returns the weights. Default is False
        method : string
            Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
            optimization method that optimizes over nuisance parameters.
            Default is 'nm'
        stochastic_exog : bool
            When TRUE, the exogenous variables are assumed to be stochastic.
            When the regressors are nonstochastic, moment conditions are
            placed on the exogenous variables. Confidence intervals for
            stochastic regressors are at least as large as non-stochastic
            regressors. Default = TRUE
        return_params : bool
            Accepted for backward compatibility but not referenced in the
            body of this method; use `ret_params` instead.

        Returns
        -------
        res : tuple
            The p-value and -2 times the log-likelihood ratio for the
            hypothesized values.

        Examples
        --------
        >>> import statsmodels.api as sm
        >>> data = sm.datasets.stackloss.load()
        >>> endog = data.endog
        >>> exog = sm.add_constant(data.exog)
        >>> model = sm.OLS(endog, exog)
        >>> fitted = model.fit()
        >>> fitted.params
        >>> array([-39.91967442,   0.7156402 ,   1.29528612,  -0.15212252])
        >>> fitted.rsquared
        >>> 0.91357690446068196
        >>> # Test that the slope on the first variable is 0
        >>> fitted.test_beta([0], [1])
        >>> (1.7894660442330235e-07, 27.248146353709153)
        """
        params = np.copy(self.params)
        opt_fun_inst = _ELRegOpts() # to store weights
        # If every parameter is hypothesized there are no nuisance parameters
        # to profile out, so the likelihood ratio is evaluated directly.
        if len(param_nums) == len(params):
            llr = opt_fun_inst._opt_nuis_regress([],
                                    param_nums=param_nums,
                                    endog=self.model.endog,
                                    exog=self.model.exog,
                                    nobs=self.model.nobs,
                                    nvar=self.model.exog.shape[1],
                                    params=params,
                                    b0_vals=b0_vals,
                                    stochastic_exog=stochastic_exog)
            pval = 1 - stats.chi2.cdf(llr, len(param_nums))
            if return_weights:
                return llr, pval, opt_fun_inst.new_weights
            else:
                return llr, pval
        # Otherwise minimize the log-likelihood ratio over the nuisance
        # parameters (those not listed in param_nums), starting from the
        # fitted values of the free parameters.
        x0 = np.delete(params, param_nums)
        args = (param_nums, self.model.endog, self.model.exog,
                self.model.nobs, self.model.exog.shape[1], params,
                b0_vals, stochastic_exog)
        if method == 'nm':
            llr = optimize.fmin(opt_fun_inst._opt_nuis_regress, x0, maxfun=10000,
                                 maxiter=10000, full_output=1, disp=0,
                                 args=args)[1]
        if method == 'powell':
            llr = optimize.fmin_powell(opt_fun_inst._opt_nuis_regress, x0,
                                 full_output=1, disp=0,
                                 args=args)[1]
        # llr is asymptotically chi-squared with df = number of tested params.
        pval = 1 - stats.chi2.cdf(llr, len(param_nums))
        if ret_params:
            return llr, pval, opt_fun_inst.new_weights, opt_fun_inst.new_params
        elif return_weights:
            return llr, pval, opt_fun_inst.new_weights
        else:
            return llr, pval

    def conf_int_el(self, param_num, sig=.05, upper_bound=None, lower_bound=None,
                    method='nm', stochastic_exog=1):
        """
        Computes the confidence interval for the parameter given by param_num
        using Empirical Likelihood

        Parameters
        ----------
        param_num : float
            The parameter for which the confidence interval is desired
        sig : float
            The significance level.  Default is .05
        upper_bound : float
            The maximum value the upper limit can be.  Default is the
            99.9% confidence value under OLS assumptions.
        lower_bound : float
            The minimum value the lower limit can be.  Default is the 99.9%
            confidence value under OLS assumptions.
        method : string
            Can either be 'nm' for Nelder-Mead or 'powell' for Powell.  The
            optimization method that optimizes over nuisance parameters.
            Default is 'nm'

        Returns
        -------
        ci : tuple
            The confidence interval

        See Also
        --------
        el_test

        Notes
        -----
        This function uses brentq to find the value of beta where
        test_beta([beta], param_num)[1] is equal to the critical
        value.

        The function returns the results of each iteration of brentq at
        each value of beta.

        The current function value of the last printed optimization
        should be the critical value at the desired significance level.
        For alpha=.05, the value is 3.841459.

        To ensure optimization terminated successfully, it is suggested to
        do el_test([lower_limit], [param_num])

        If the optimization does not terminate successfully, consider switching
        optimization algorithms.

        If optimization is still not successful, try changing the values of
        start_int_params.  If the current function value repeatedly jumps
        from a number between 0 and the critical value and a very large number
        (>50), the starting parameters of the interior minimization need
        to be changed.
        """
        # Chi-squared critical value with 1 degree of freedom.
        r0 = stats.chi2.ppf(1 - sig, 1)
        if upper_bound is None:
            upper_bound = self.conf_int(.01)[param_num][1]
        if lower_bound is None:
            lower_bound = self.conf_int(.01)[param_num][0]
        # f crosses zero where the EL statistic equals the critical value;
        # root-find on each side of the point estimate for the two limits.
        f = lambda b0: self.el_test(np.array([b0]), np.array([param_num]),
                                    method=method,
                                    stochastic_exog=stochastic_exog)[0]-r0
        lowerl = optimize.brenth(f, lower_bound,
                                 self.params[param_num])
        upperl = optimize.brenth(f, self.params[param_num],
                                 upper_bound)
        # ^ Seems to be faster than brentq in most cases
        return (lowerl, upperl)
class RegressionResultsWrapper(wrap.ResultsWrapper):
    """Wrapper for RegressionResults that delegates attribute/method access
    through the generic results-wrapping machinery in ``wrap``."""
    # Mapping of result-attribute name -> wrapping tag consumed by
    # wrap.ResultsWrapper.  Based on the names: 'rows' entries are
    # per-observation arrays, 'columns' are per-parameter arrays, and
    # 'cov' are parameter covariance matrices — confirm against
    # statsmodels.base.wrapper if relying on this.
    _attrs = {
        'chisq' : 'columns',
        'sresid' : 'rows',
        'weights' : 'rows',
        'wresid' : 'rows',
        'bcov_unscaled' : 'cov',
        'bcov_scaled' : 'cov',
        'HC0_se' : 'columns',
        'HC1_se' : 'columns',
        'HC2_se' : 'columns',
        'HC3_se' : 'columns',
        'norm_resid' : 'rows',
    }
    # Combine the regression-specific attributes with those wrapped for all
    # likelihood-based results.
    _wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._attrs,
                                   _attrs)
    _methods = {}
    _wrap_methods = wrap.union_dicts(
                        base.LikelihoodResultsWrapper._wrap_methods,
                        _methods)
# Register the wrapper class for RegressionResults with the wrapping
# machinery.
wrap.populate_wrapper(RegressionResultsWrapper,
                      RegressionResults)
if __name__ == "__main__":
    # Smoke test / demo: fit OLS and GLS on the classic Longley dataset and
    # print the OLS summary table.
    import statsmodels.api as sm
    data = sm.datasets.longley.load()
    data.exog = add_constant(data.exog, prepend=False)
    ols_results = OLS(data.endog, data.exog).fit()  # results
    gls_results = GLS(data.endog, data.exog).fit()  # results
    print(ols_results.summary())
    # NOTE: the previous ``ols_results.summary(returns='tables')`` and
    # ``ols_results.summary(returns='csv')`` calls were removed: summary()
    # takes only (yname, xname, title, alpha) and raised TypeError on the
    # unknown ``returns`` keyword.
"""
Summary of Regression Results
=======================================
| Dependent Variable: ['y']|
| Model: OLS|
| Method: Least Squares|
| Date: Tue, 29 Jun 2010|
| Time: 22:32:21|
| # obs: 16.0|
| Df residuals: 9.0|
| Df model: 6.0|
===========================================================================
| coefficient std. error t-statistic prob.|
---------------------------------------------------------------------------
| x1 15.0619 84.9149 0.1774 0.8631|
| x2 -0.0358 0.0335 -1.0695 0.3127|
| x3 -2.0202 0.4884 -4.1364 0.002535|
| x4 -1.0332 0.2143 -4.8220 0.0009444|
| x5 -0.0511 0.2261 -0.2261 0.8262|
| x6 1829.1515 455.4785 4.0159 0.003037|
| const -3482258.6346 890420.3836 -3.9108 0.003560|
===========================================================================
| Models stats Residual stats |
---------------------------------------------------------------------------
| R-squared: 0.995479 Durbin-Watson: 2.55949 |
| Adjusted R-squared: 0.992465 Omnibus: 0.748615 |
| F-statistic: 330.285 Prob(Omnibus): 0.687765 |
| Prob (F-statistic): 4.98403e-10 JB: 0.352773 |
| Log likelihood: -109.617 Prob(JB): 0.838294 |
| AIC criterion: 233.235 Skew: 0.419984 |
| BIC criterion: 238.643 Kurtosis: 2.43373 |
---------------------------------------------------------------------------
"""
|
wwf5067/statsmodels
|
statsmodels/regression/linear_model.py
|
Python
|
bsd-3-clause
| 92,203
|
[
"Gaussian"
] |
076110aa09b581b1d6d47c186e988ca8d8292a5e261cc8aca1aacbf3306b66d4
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import map, range, zip
from itertools import cycle
import warnings
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Polygon, Rectangle
import six
from skbio.util._decorator import deprecated
# Keyword arguments shared by the @deprecated(...) decorators on the
# distribution-plotting functions in this module.
distribution_plot_deprecation_p = dict(
    as_of='0.4.0',
    until='0.4.1',
    reason=(
        "Plots that are not specific to bioinformatics should be generated "
        "with seaborn or another general-purpose plotting package."
    ),
)
@deprecated(**distribution_plot_deprecation_p)
def boxplots(distributions, x_values=None, x_tick_labels=None, title=None,
             x_label=None, y_label=None, x_tick_labels_orientation='vertical',
             y_min=None, y_max=None, whisker_length=1.5, box_width=0.5,
             box_colors=None, figure_width=None, figure_height=None,
             legend=None):
    """Generate a figure with a boxplot for each distribution.

    Parameters
    ----------
    distributions: 2-D array_like
        Distributions to plot. A boxplot will be created for each distribution.
    x_values : list of numbers, optional
        List indicating where each boxplot should be placed. Must be the same
        length as `distributions` if provided.
    x_tick_labels : list of str, optional
        List of x-axis tick labels.
    title : str, optional
        Title of the plot.
    x_label : str, optional
        x-axis label.
    y_label : str, optional
        y-axis label.
    x_tick_labels_orientation : {'vertical', 'horizontal'}
        Orientation of the x-axis labels.
    y_min : scalar, optional
        Minimum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    y_max : scalar, optional
        Maximum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    whisker_length : scalar, optional
        Length of the whiskers as a function of the IQR. For example, if 1.5,
        the whiskers extend to ``1.5 * IQR``. Anything outside of that range is
        treated as an outlier.
    box_width : scalar, optional
        Width of each box in plot units.
    box_colors : str, tuple, or list of colors, optional
        Either a matplotlib-compatible string or tuple that indicates the color
        to be used for every boxplot, or a list of colors to color each boxplot
        individually. If ``None``, boxes will be the same color as the plot
        background. If a list of colors is provided, a color must be provided
        for each boxplot. Can also supply ``None`` instead of a color, which
        will color the box the same color as the plot background.
    figure_width : scalar, optional
        Width of the plot figure in inches. If not provided, will default to
        matplotlib's default figure width.
    figure_height : scalar, optional
        Height of the plot figure in inches. If not provided, will default to
        matplotlib's default figure height.
    legend : tuple or list, optional
        Two-element tuple or list that contains a list of valid matplotlib
        colors as the first element and a list of labels (strings) as the
        second element. The lengths of the first and second elements must be
        the same. If ``None``, a legend will not be plotted.

    Returns
    -------
    matplotlib.figure.Figure
        Figure containing a boxplot for each distribution.

    See Also
    --------
    matplotlib.pyplot.boxplot
    scipy.stats.ttest_ind

    Notes
    -----
    This is a convenience wrapper around matplotlib's ``boxplot`` function that
    allows for coloring of boxplots and legend generation.

    Examples
    --------
    Create a plot with two boxplots:

    .. plot::

       >>> from skbio.draw import boxplots
       >>> fig = boxplots([[2, 2, 1, 3, 4, 4.2, 7], [0, -1, 4, 5, 6, 7]])

    Plot three distributions with custom colors and labels:

    .. plot::

       >>> from skbio.draw import boxplots
       >>> fig = boxplots(
       ...     [[2, 2, 1, 3], [0, -1, 0, 0.1, 0.3], [4, 5, 6, 3]],
       ...     x_tick_labels=('Control', 'Treatment 1', 'Treatment 2'),
       ...     box_colors=('green', 'blue', 'red'))

    """
    distributions = _validate_distributions(distributions)
    num_dists = len(distributions)
    _validate_x_values(x_values, x_tick_labels, num_dists)

    # Create a new figure to plot our data on, and then plot the distributions.
    fig, ax = plt.subplots()
    # Draw on `ax` explicitly instead of via plt.boxplot: the pyplot call
    # relied on matplotlib's implicit "current axes" global state, which is
    # fragile if other figures are manipulated concurrently.  Behavior here
    # is identical since plt.subplots() makes `ax` current.
    box_plot = ax.boxplot(distributions, positions=x_values,
                          whis=whisker_length, widths=box_width)

    if box_colors is not None:
        if _is_single_matplotlib_color(box_colors):
            # Broadcast a single color to every box.
            box_colors = [box_colors] * num_dists
        _color_box_plot(ax, box_plot, box_colors)

    # Set up the various plotting options, such as x- and y-axis labels, plot
    # title, and x-axis values if they have been supplied.
    _set_axes_options(ax, title, x_label, y_label,
                      x_tick_labels=x_tick_labels,
                      x_tick_labels_orientation=x_tick_labels_orientation,
                      y_min=y_min, y_max=y_max)

    if legend is not None:
        if len(legend) != 2:
            raise ValueError("Invalid legend was provided. The legend must be "
                             "a two-element tuple/list where the first "
                             "element is a list of colors and the second "
                             "element is a list of labels.")
        _create_legend(ax, legend[0], legend[1], 'colors')

    _set_figure_size(fig, figure_width, figure_height)
    return fig
@deprecated(**distribution_plot_deprecation_p)
def grouped_distributions(plot_type, data, x_values=None,
                          data_point_labels=None, distribution_labels=None,
                          distribution_markers=None, x_label=None,
                          y_label=None, title=None,
                          x_tick_labels_orientation='vertical', y_min=None,
                          y_max=None, whisker_length=1.5,
                          error_bar_type='stdv', distribution_width=None,
                          figure_width=None, figure_height=None):
    """Generate a figure with distributions grouped at points along the x-axis.

    Parameters
    ----------
    plot_type : {'bar', 'scatter', 'box'}
        Type of plot to visualize distributions with.
    data : list of lists of lists
        Each inner list represents a data point along the x-axis. Each data
        point contains lists of data for each distribution in the group at that
        point. This nesting allows for the grouping of distributions at each
        data point.
    x_values : list of scalars, optional
        Spacing of data points along the x-axis. Must be the same length as the
        number of data points and be in ascending sorted order. If not
        provided, plots will be spaced evenly.
    data_point_labels : list of str, optional
        Labels for data points.
    distribution_labels : list of str, optional
        Labels for each distribution in a data point grouping.
    distribution_markers : list of str or list of tuple, optional
        Matplotlib-compatible strings or tuples that indicate the color or
        symbol to be used to distinguish each distribution in a data point
        grouping. Colors will be used for bar charts or box plots, while
        symbols will be used for scatter plots.
    x_label : str, optional
        x-axis label.
    y_label : str, optional
        y-axis label.
    title : str, optional
        Plot title.
    x_tick_labels_orientation : {'vertical', 'horizontal'}
        Orientation of x-axis labels.
    y_min : scalar, optional
        Minimum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    y_max : scalar, optional
        Maximum value of the y-axis. If ``None``, uses matplotlib's autoscale.
    whisker_length : scalar, optional
        If `plot_type` is ``'box'``, determines the length of the whiskers as a
        function of the IQR. For example, if 1.5, the whiskers extend to
        ``1.5 * IQR``. Anything outside of that range is seen as an outlier.
        If `plot_type` is not ``'box'``, this parameter is ignored.
    error_bar_type : {'stdv', 'sem'}
        Type of error bars to use if `plot_type` is ``'bar'``. Can be either
        ``'stdv'`` (for standard deviation) or ``'sem'`` for the standard error
        of the mean. If `plot_type` is not ``'bar'``, this parameter is
        ignored.
    distribution_width : scalar, optional
        Width in plot units of each individual distribution (e.g. each bar if
        the plot type is a bar chart, or the width of each box if the plot type
        is a boxplot). If None, will be automatically determined.
    figure_width : scalar, optional
        Width of the plot figure in inches. If not provided, will default to
        matplotlib's default figure width.
    figure_height : scalar, optional
        Height of the plot figure in inches. If not provided, will default to
        matplotlib's default figure height.

    Returns
    -------
    matplotlib.figure.Figure
        Figure containing distributions grouped at points along the x-axis.

    Examples
    --------
    Create a plot with two distributions grouped at three points:

    .. plot::

       >>> from skbio.draw import grouped_distributions
       >>> fig = grouped_distributions('bar',
       ...                             [[[2, 2, 1,], [0, 1, 4]],
       ...                              [[1, 1, 1], [4, 4.5]],
       ...                              [[2.2, 2.4, 2.7, 1.0], [0, 0.2]]],
       ...                             distribution_labels=['Treatment 1',
       ...                                                  'Treatment 2'])

    """
    # Set up different behavior based on the plot type: which low-level
    # plotting helper to call, whether that helper centers its output on the
    # given x position, and whether markers are colors or symbols.
    if plot_type == 'bar':
        plotting_function = _plot_bar_data
        distribution_centered = False
        marker_type = 'colors'
    elif plot_type == 'scatter':
        plotting_function = _plot_scatter_data
        distribution_centered = True
        marker_type = 'symbols'
    elif plot_type == 'box':
        plotting_function = _plot_box_data
        distribution_centered = True
        marker_type = 'colors'
    else:
        raise ValueError("Invalid plot type '%s'. Supported plot types are "
                         "'bar', 'scatter', or 'box'." % plot_type)

    num_points, num_distributions = _validate_input(data, x_values,
                                                    data_point_labels,
                                                    distribution_labels)

    # Create a list of matplotlib markers (colors or symbols) that can be used
    # to distinguish each of the distributions. If the user provided a list of
    # markers, use it and loop around to the beginning if there aren't enough
    # markers. If they didn't provide a list, or it was empty, use our own
    # predefined list of markers (again, loop around to the beginning if we
    # need more markers).
    distribution_markers = _get_distribution_markers(marker_type,
                                                     distribution_markers,
                                                     num_distributions)

    # Now calculate where each of the data points will start on the x-axis.
    x_locations = _calc_data_point_locations(num_points, x_values)
    assert (len(x_locations) == num_points), "The number of x_locations " +\
        "does not match the number of data points."

    if distribution_width is None:
        # Find the smallest gap between consecutive data points and divide
        # this by the number of distributions + 1 for some extra spacing
        # between data points. min_gap starts at the largest location so the
        # single-point case still yields a sensible positive width.
        min_gap = max(x_locations)
        for i in range(len(x_locations) - 1):
            curr_gap = x_locations[i + 1] - x_locations[i]
            if curr_gap < min_gap:
                min_gap = curr_gap
        distribution_width = min_gap / float(num_distributions + 1)
    else:
        if distribution_width <= 0:
            raise ValueError("The width of a distribution cannot be less than "
                             "or equal to zero.")

    result, plot_axes = plt.subplots()

    # Iterate over each data point, and plot each of the distributions at that
    # data point. Increase the offset after each distribution is plotted,
    # so that the grouped distributions don't overlap.
    for point, x_pos in zip(data, x_locations):
        dist_offset = 0
        for dist_index, dist, dist_marker in zip(range(num_distributions),
                                                 point, distribution_markers):
            dist_location = x_pos + dist_offset
            plotting_function(plot_axes, dist, dist_marker, distribution_width,
                              dist_location, whisker_length, error_bar_type)
            dist_offset += distribution_width

    # Set up various plot options that are best set after the plotting is done.
    # The x-axis tick marks (one per data point) are centered on each group of
    # distributions.
    plot_axes.set_xticks(_calc_data_point_ticks(x_locations,
                                                num_distributions,
                                                distribution_width,
                                                distribution_centered))
    _set_axes_options(plot_axes, title, x_label, y_label, x_values,
                      data_point_labels, x_tick_labels_orientation, y_min,
                      y_max)
    if distribution_labels is not None:
        _create_legend(plot_axes, distribution_markers, distribution_labels,
                       marker_type)
    _set_figure_size(result, figure_width, figure_height)

    # matplotlib seems to sometimes plot points on the rightmost edge of the
    # plot without adding padding, so we need to add our own to both sides of
    # the plot. For some reason this has to go after the call to draw(),
    # otherwise matplotlib throws an exception saying it doesn't have a
    # renderer. Boxplots need extra padding on the left.
    # NOTE(review): no draw() call is visible in this function; the comment
    # above is presumably a leftover from an earlier revision — confirm.
    if plot_type == 'box':
        left_pad = 2 * distribution_width
    else:
        left_pad = distribution_width
    plot_axes.set_xlim(plot_axes.get_xlim()[0] - left_pad,
                       plot_axes.get_xlim()[1] + distribution_width)
    return result
def _validate_distributions(distributions):
dists = []
for distribution in distributions:
try:
distribution = np.asarray(distribution, dtype=float)
except ValueError:
raise ValueError("Each value in each distribution must be "
"convertible to a number.")
# Empty distributions are plottable in mpl < 1.4.0. In 1.4.0, a
# ValueError is raised. This has been fixed in mpl 1.4.0-dev (see
# https://github.com/matplotlib/matplotlib/pull/3571). In order to
# support empty distributions across mpl versions, we replace them with
# [np.nan]. See https://github.com/pydata/pandas/issues/8382,
# https://github.com/matplotlib/matplotlib/pull/3571, and
# https://github.com/pydata/pandas/pull/8240 for details.
# If we decide to only support mpl > 1.4.0 in the future, this code can
# likely be removed in favor of letting mpl handle empty distributions.
if distribution.size > 0:
dists.append(distribution)
else:
dists.append(np.array([np.nan]))
return dists
def _validate_input(data, x_values, data_point_labels, distribution_labels):
    """Return (num_points, num_distributions) found in `data`.

    Also validates the plotting options against the supplied data, raising
    ValueError on any inconsistency.
    """
    if data is None or not data or isinstance(data, six.string_types):
        raise ValueError("The data must be a list type, and it cannot be "
                         "None or empty.")

    num_points = len(data)
    num_distributions = len(data[0])

    empty_data_error_msg = ("The data must contain at least one data "
                            "point, and each data point must contain at "
                            "least one distribution to plot.")
    if num_points == 0 or num_distributions == 0:
        raise ValueError(empty_data_error_msg)

    # Every data point must hold the same (nonzero) number of distributions.
    for group in data:
        if len(group) == 0:
            raise ValueError(empty_data_error_msg)
        if len(group) != num_distributions:
            raise ValueError("The number of distributions in each data point "
                             "grouping must be the same for all data points.")

    # One x value per data point, and all of them numeric.
    _validate_x_values(x_values, data_point_labels, num_points)

    if (distribution_labels is not None and
            len(distribution_labels) != num_distributions):
        raise ValueError("The number of distribution labels must be equal "
                         "to the number of distributions.")

    return num_points, num_distributions
def _validate_x_values(x_values, x_tick_labels, num_expected_values):
"""Validates the x values provided by the user, making sure they are the
correct length and are all numbers.
Also validates the number of x-axis tick labels.
Raises a ValueError if these conditions are not met.
"""
if x_values is not None:
if len(x_values) != num_expected_values:
raise ValueError("The number of x values must match the number "
"of data points.")
try:
list(map(float, x_values))
except:
raise ValueError("Each x value must be a number.")
if x_tick_labels is not None:
if len(x_tick_labels) != num_expected_values:
raise ValueError("The number of x-axis tick labels must match the "
"number of data points.")
def _get_distribution_markers(marker_type, marker_choices, num_markers):
"""Returns a list of length num_markers of valid matplotlib colors or
symbols.
The markers will be comprised of those found in marker_choices (if not None
and not empty) or a list of predefined markers (determined by marker_type,
which can be either 'colors' or 'symbols'). If there are not enough
markers, the list of markers will be reused from the beginning again (as
many times as are necessary).
"""
if num_markers < 0:
raise ValueError("num_markers must be greater than or equal to zero.")
if marker_choices is None or len(marker_choices) == 0:
if marker_type == 'colors':
marker_choices = ['b', 'g', 'r', 'c', 'm', 'y', 'w']
elif marker_type == 'symbols':
marker_choices = \
['s', 'o', '^', '>', 'v', '<', 'd', 'p', 'h', '8', '+', 'x']
else:
raise ValueError("Invalid marker_type: '%s'. marker_type must be "
"either 'colors' or 'symbols'." % marker_type)
if len(marker_choices) < num_markers:
# We don't have enough markers to represent each distribution uniquely,
# so let the user know. We'll add as many markers (starting from the
# beginning of the list again) until we have enough, but the user
# should still know because they may want to provide a new list of
# markers.
warnings.warn(
"There are not enough markers to uniquely represent each "
"distribution in your dataset. You may want to provide a list "
"of markers that is at least as large as the number of "
"distributions in your dataset.",
RuntimeWarning)
marker_cycle = cycle(marker_choices[:])
while len(marker_choices) < num_markers:
marker_choices.append(next(marker_cycle))
return marker_choices[:num_markers]
def _calc_data_point_locations(num_points, x_values=None):
"""Returns the x-axis location for each of the data points to start at.
Note: A numpy array is returned so that the overloaded "+" operator can be
used on the array.
The x-axis locations are scaled by x_values if it is provided, or else the
x-axis locations are evenly spaced. In either case, the x-axis locations
will always be in the range [1, num_points].
"""
if x_values is None:
# Evenly space the x-axis locations.
x_locs = np.arange(1, num_points + 1)
else:
if len(x_values) != num_points:
raise ValueError("The number of x-axis values must match the "
"number of data points.")
# Scale to the range [1, num_points]. Taken from
# http://www.heatonresearch.com/wiki/Range_Normalization
x_min = min(x_values)
x_max = max(x_values)
x_range = x_max - x_min
n_range = num_points - 1
x_locs = np.array([(((x_val - x_min) * n_range) / float(x_range)) + 1
for x_val in x_values])
return x_locs
def _calc_data_point_ticks(x_locations, num_distributions, distribution_width,
distribution_centered):
"""Returns a 1D numpy array of x-axis tick positions.
These positions will be centered on each data point.
Set distribution_centered to True for scatter and box plots because their
plot types naturally center over a given horizontal position. Bar charts
should use distribution_centered = False because the leftmost edge of a bar
starts at a given horizontal position and extends to the right for the
width of the bar.
"""
dist_size = num_distributions - 1 if distribution_centered else\
num_distributions
return x_locations + ((dist_size * distribution_width) / 2)
def _plot_bar_data(plot_axes, distribution, distribution_color,
distribution_width, x_position, whisker_length,
error_bar_type):
"""Returns the result of plotting a single bar in matplotlib."""
result = None
# We do not want to plot empty distributions because matplotlib will not be
# able to render them as PDFs.
if len(distribution) > 0:
avg = np.mean(distribution)
if error_bar_type == 'stdv':
error_bar = np.std(distribution)
elif error_bar_type == 'sem':
error_bar = np.std(distribution) / np.sqrt(len(distribution))
else:
raise ValueError(
"Invalid error bar type '%s'. Supported error bar types are "
"'stdv' and 'sem'." % error_bar_type)
result = plot_axes.bar(x_position, avg, distribution_width,
yerr=error_bar, ecolor='black',
facecolor=distribution_color)
return result
def _plot_scatter_data(plot_axes, distribution, distribution_symbol,
distribution_width, x_position, whisker_length,
error_bar_type):
"""Returns the result of plotting a single scatterplot in matplotlib."""
result = None
x_vals = [x_position] * len(distribution)
# matplotlib's scatter function doesn't like plotting empty data.
if len(x_vals) > 0 and len(distribution) > 0:
result = plot_axes.scatter(x_vals, distribution,
marker=distribution_symbol, c='k')
return result
def _plot_box_data(plot_axes, distribution, distribution_color,
distribution_width, x_position, whisker_length,
error_bar_type):
"""Returns the result of plotting a single boxplot in matplotlib."""
result = None
if len(distribution) > 0:
result = plot_axes.boxplot([distribution], positions=[x_position],
widths=distribution_width,
whis=whisker_length)
_color_box_plot(plot_axes, result, [distribution_color])
return result
def _is_single_matplotlib_color(color):
    """Return True if `color` is a single (not a list of) mpl color(s)."""
    if isinstance(color, six.string_types):
        # A color name or abbreviation, e.g. 'w' or 'white'.
        return True
    if len(color) in (3, 4):
        # An RGB(A) sequence counts as a single color only when every
        # component is numeric; otherwise it is a list of colors.
        return all(isinstance(channel, (float, int)) for channel in color)
    return False
def _color_box_plot(plot_axes, box_plot, colors):
    """Color boxes in the box plot with the specified colors.

    If any of the colors are None, the box will not be colored.

    The box_plot argument must be the dictionary returned by the call to
    matplotlib's boxplot function, and the colors argument must consist of
    valid matplotlib colors.
    """
    # Note: the following code is largely taken from this matplotlib boxplot
    # example:
    # http://matplotlib.sourceforge.net/examples/pylab_examples/
    # boxplot_demo2.html
    num_colors = len(colors)
    num_box_plots = len(box_plot['boxes'])
    if num_colors != num_box_plots:
        raise ValueError("The number of colors (%d) does not match the number "
                         "of boxplots (%d)." % (num_colors, num_box_plots))
    for box, median, color in zip(box_plot['boxes'],
                                  box_plot['medians'],
                                  colors):
        if color is not None:
            box_x = []
            box_y = []

            # There are five points in the box. The first is the same as
            # the last.
            for i in range(5):
                box_x.append(box.get_xdata()[i])
                box_y.append(box.get_ydata()[i])

            # Fill the box outline with the requested facecolor.
            box_coords = list(zip(box_x, box_y))
            box_polygon = Polygon(box_coords, facecolor=color)
            plot_axes.add_patch(box_polygon)

            # Draw the median lines back over what we just filled in with
            # color.
            median_x = []
            median_y = []
            for i in range(2):
                median_x.append(median.get_xdata()[i])
                median_y.append(median.get_ydata()[i])
            plot_axes.plot(median_x, median_y, 'black')
def _set_axes_options(plot_axes, title=None, x_label=None, y_label=None,
x_values=None, x_tick_labels=None,
x_tick_labels_orientation='vertical', y_min=None,
y_max=None):
"""Applies various labelling options to the plot axes."""
if title is not None:
plot_axes.set_title(title)
if x_label is not None:
plot_axes.set_xlabel(x_label)
if y_label is not None:
plot_axes.set_ylabel(y_label)
if (x_tick_labels_orientation != 'vertical' and
x_tick_labels_orientation != 'horizontal'):
raise ValueError("Invalid orientation for x-axis tick labels: '%s'. "
"Valid orientations are 'vertical' or 'horizontal'."
% x_tick_labels_orientation)
# If labels are provided, always use them. If they aren't, use the x_values
# that denote the spacing between data points as labels. If that isn't
# available, simply label the data points in an incremental fashion,
# i.e. 1, 2, 3, ..., n, where n is the number of data points on the plot.
if x_tick_labels is not None:
plot_axes.set_xticklabels(x_tick_labels,
rotation=x_tick_labels_orientation)
elif x_tick_labels is None and x_values is not None:
plot_axes.set_xticklabels(x_values, rotation=x_tick_labels_orientation)
else:
plot_axes.set_xticklabels(
range(1, len(plot_axes.get_xticklabels()) + 1),
rotation=x_tick_labels_orientation)
# Set the y-axis range if specified.
if y_min is not None:
plot_axes.set_ylim(bottom=float(y_min))
if y_max is not None:
plot_axes.set_ylim(top=float(y_max))
def _create_legend(plot_axes, distribution_markers, distribution_labels,
marker_type):
"""Creates a legend on the supplied axes."""
# We have to use a proxy artist for the legend because box plots currently
# don't have a very useful legend in matplotlib, and using the default
# legend for bar/scatterplots chokes on empty/null distributions.
#
# Note: This code is based on the following examples:
# http://matplotlib.sourceforge.net/users/legend_guide.html
# http://stackoverflow.com/a/11423554
if len(distribution_markers) != len(distribution_labels):
raise ValueError("The number of distribution markers does not match "
"the number of distribution labels.")
if marker_type == 'colors':
legend_proxy = [Rectangle((0, 0), 1, 1, fc=marker)
for marker in distribution_markers]
plot_axes.legend(legend_proxy, distribution_labels, loc='best')
elif marker_type == 'symbols':
legend_proxy = [Line2D(range(1), range(1), color='white',
markerfacecolor='black', marker=marker)
for marker in distribution_markers]
plot_axes.legend(legend_proxy, distribution_labels, numpoints=3,
scatterpoints=3, loc='best')
else:
raise ValueError("Invalid marker_type: '%s'. marker_type must be "
"either 'colors' or 'symbols'." % marker_type)
def _set_figure_size(fig, width=None, height=None):
    """Set the figure size (inches) and make room for axis labels and titles.

    If both width and height are not provided (and positive), matplotlib's
    defaults are kept. Making room for labels will not always work; on
    failure the user is warned that their plot may have cut-off labels.
    """
    # Resize first, then make room for the labels — this order matters.
    resize = (width is not None and height is not None
              and width > 0 and height > 0)
    if resize:
        fig.set_size_inches(width, height)
    try:
        fig.tight_layout()
    except ValueError:
        warnings.warn(
            "Could not automatically resize plot to make room for "
            "axes labels and plot title. This can happen if the labels or "
            "title are extremely long and the plot size is too small. Your "
            "plot may have its labels and/or title cut-off. To fix this, "
            "try increasing the plot's size (in inches) and try again.",
            RuntimeWarning)
|
xguse/scikit-bio
|
skbio/draw/_distributions.py
|
Python
|
bsd-3-clause
| 30,987
|
[
"scikit-bio"
] |
acca8da01023d8a913ba5aa7fb836020b116adfab86c955a182dfbfc73b3cf3a
|
# Be friendly
import random
from espresso.main import robot
# Greeting choices the bot replies with.
HELLOS = [
    # BUG FIX: a missing comma after the first entry caused implicit string
    # concatenation ("Well hello there!Hello!"), silently dropping a greeting.
    "Well hello there!",
    "Hello!",
    "Hi there!",
    "Hallo!"
]


@robot.respond(r'(?i)(hi)|(hello)|(howdy)|(hallo)')
def hello(res):
    """Reply to a greeting with a randomly chosen greeting of our own."""
    res.reply(res.msg.user, random.choice(HELLOS))
|
ratchetrobotics/espresso
|
plugins/hello.py
|
Python
|
bsd-3-clause
| 264
|
[
"ESPResSo"
] |
309c5f1adea22853f49ebe86c24c3328a74272951955ce259c1f82568c085280
|
import logging
from kalliope.core import NeuronModule
from kalliope.core.NeuronModule import MissingParameterException
from kalliope.core.NotificationManager import NotificationManager
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Signals(NeuronModule):
    """Neuron that broadcasts a notification (with an optional payload) to
    all classes subscribed to the NotificationManager."""

    def __init__(self, **kwargs):
        super(Signals, self).__init__(**kwargs)

        # Get the parameters from the synapse configuration.
        self.notification = kwargs.get('notification', None)
        self.payload = kwargs.get('payload', None)

        if self._is_parameters_ok():
            logger.debug("[Signals] Send a notification to all subscribed classes, notification: '%s', payload: %s"
                         % (self.notification, self.payload))
            NotificationManager.send_notification(self.notification, self.payload)

    def _is_parameters_ok(self):
        """Check that the mandatory parameters were provided.

        Returns:
            True if the parameters are valid.

        Raises:
            MissingParameterException: if the 'notification' parameter is missing.
        """
        if self.notification is None:
            # BUG FIX: quote placement in the message — 'notification' is the
            # parameter's name; "parameter" is not part of it.
            raise MissingParameterException("[Signals] This neuron requires a 'notification' parameter")
        return True
|
kalliope-project/kalliope
|
kalliope/neurons/signals/signals.py
|
Python
|
gpl-3.0
| 999
|
[
"NEURON"
] |
b3e26e5476a462c99c0068076ebbe4a8f45d657dde08c7f7edaf4189faeaf0aa
|
'''
Sample script that visits a random profile from the quickmatch page,
and prints out most of the information in the profile.
'''
from pyokc import pyokc

# Log in and grab a random quickmatch profile, then register a visit to it.
u = pyokc.User()
p = u.quickmatch() # p is an instance of the Profile class
u.visit(p)
p.update_traits() # only necessary for if you want to fill in p.traits

# Basic profile attributes.
print('Profile of {0}'.format(p.name))
print('{0}: {1}'.format('Gender', p.gender))
print('{0}: {1}'.format('Age', p.age))
print('{0}: {1}'.format('Orientation', p.orientation))
print('{0}: {1}'.format('Location', p.location))
print('{0}: {1}%'.format('Match', p.match))
print('{0}: {1}%'.format('Enemy', p.enemy))
print('----------')
print('')
# Personality traits (filled in by update_traits() above).
print('Traits')
print('----------')
for trait in p.traits:
    print(trait)
print('')
# Free-form essay sections, keyed by section title.
print('Essays')
print('----------')
for title, essay in p.essays.items():
    print('{0}: {1}'.format(title, essay))
print('')
print('Looking For')
print('----------')
for category, response in p.looking_for.items():
    print('{0}: {1}'.format(category, response))
print('')
print('Details')
print('----------')
for category, detail in p.details.items():
    print('{0}: {1}'.format(category, detail))
|
tfmorris/pyokc
|
examples/profile.py
|
Python
|
mit
| 1,159
|
[
"VisIt"
] |
6e938ef55165989c26fae54928c57e85192e1e7edb23cfdd19f814cb3e4b244d
|
""" Top-level module to build or re-build the JSON files for
FRB host galaxies"""
from pkg_resources import resource_filename
import os
import sys
import warnings
from IPython import embed
import numpy as np
import requests
import pandas
from astropy.coordinates import SkyCoord
from astropy import units
from astropy.table import Table
from astropy.coordinates import match_coordinates_sky
from frb.frb import FRB
from frb.galaxies import frbgalaxy, defs, offsets
from frb.galaxies import photom as frbphotom
try:
from frb.galaxies import ppxf
except:
print('WARNING: ppxf not installed')
from frb.galaxies import nebular
from frb.galaxies import utils as galaxy_utils
from frb.galaxies import hosts
from frb.surveys import des
from frb.surveys import sdss
from frb.surveys import wise
from frb.surveys import panstarrs
from frb.surveys import catalog_utils
from frb.surveys import survey_utils
from frb import utils
import pandas
try:
import extinction
except ImportError:
print("extinction package not loaded. Extinction corrections will fail")
try:
from frb.galaxies import cigale
except ModuleNotFoundError:
warnings.warn("You haven't installed pcigale and won't be able to do that analysis")
try:
from frb.galaxies import eazy as frbeazy
except:
warnings.warn("NOT READY FOR EAZY!")
from linetools.spectra.xspectrum1d import XSpectrum1D
db_path = os.getenv('FRB_GDB')
if db_path is None:
print("Warning, you need to set $FRB_GDB to build hosts")
#embed(header='You need to set $FRB_GDB')
ebv_method = 'SandF'
fill_value = -999.
# New astrometry
mannings2021_astrom = pandas.read_csv(os.path.join(resource_filename('frb','data'),
'Galaxies','Additional','Mannings2021',
'astrometry_v2.csv'))
# Probably will rename this
mannings2021_astrom = mannings2021_astrom[
(mannings2021_astrom.Filter == 'F160W') | (
mannings2021_astrom.Filter == 'F110W')].copy()
def assign_z(ztbl_file:str, host:frbgalaxy.FRBHost):
    """Assign a redshift to a host using one of the Galaxy_DB tables.

    The host coordinate is matched on the sky against the table's JCOORD
    entries; the nearest entry must lie within 0.5 arcsec.

    Args:
        ztbl_file (str): Path to the redshift table
            (ascii.fixed_width format with JCOORD and ZEM columns).
        host (frbgalaxy.FRBHost): Host object; its redshift is set in place
            as a spectroscopic redshift.

    Raises:
        ValueError: If no galaxy in the table lies within 0.5 arcsec of the
            host coordinate.
    """
    # Load redshift table
    ztbl = Table.read(ztbl_file, format='ascii.fixed_width')
    z_coord = SkyCoord(ztbl['JCOORD'], unit=(units.hourangle, units.deg))
    idx, d2d, _ = match_coordinates_sky(host.coord, z_coord, nthneighbor=1)
    if np.min(d2d) > 0.5*units.arcsec:
        raise ValueError("No matching galaxy!")
    # Set redshift
    host.set_z(ztbl['ZEM'][idx], 'spec')
def search_for_file(projects, references, root:str,
                    prefix='ref', return_last_file=False):
    """Search the Galaxy_DB ($FRB_GDB) for a given data file.

    If multiple files are found, the *last* one is returned.

    Args:
        projects (list): Project folder names within $FRB_GDB.
        references (list): Reference sub-folder names, parallel to `projects`.
        root (str): Filename suffix to search for (e.g. '_CIGALE.fits').
        prefix (str, optional): Filename prefix; the special value 'ref'
            means "use the lower-cased reference name". Defaults to 'ref'.
        return_last_file (bool, optional): If True and no file was found,
            return the last candidate path checked instead of None.
            Defaults to False.

    Returns:
        tuple: bool, str or None [file was found?, name of file with path]
    """
    found_file = None
    found = False
    # BUG FIX: initialize so the `return_last_file` branch cannot hit an
    # unbound local when projects/references are empty.
    filename = None
    for project, ref in zip(projects, references):
        GDB_path = os.path.join(db_path, project, ref)
        if prefix == 'ref':
            filename = os.path.join(GDB_path, ref.lower()+root)
        else:
            filename = os.path.join(GDB_path, prefix+root)
        # Is it there?
        if os.path.isfile(filename):
            found_file = filename
            found = True
    if not found and return_last_file:
        found_file = filename
    #
    return found, found_file
def read_lit_table(lit_entry, coord=None):
    """Read a literature table.

    Args:
        lit_entry (pandas row): Row of the overview table; its `Table`
            attribute is the filename and `Format` the astropy/pandas format.
        coord (astropy.coordinates.SkyCoord, optional): Coordinate
            for the galaxy of interest. If given, only the single row within
            1 arcsec of this coordinate is returned. Defaults to None.

    Raises:
        ValueError: If more than one entry matches the coordinate.

    Returns:
        astropy.table.Table or None: table of literature data, or None when
            `coord` was given and nothing matched.
    """
    literature_path = os.path.join(resource_filename('frb', 'data'),
                                   'Galaxies', 'Literature')
    lit_file = os.path.join(literature_path, lit_entry.Table)
    if lit_entry.Format == 'csv':
        lit_tbl = Table.from_pandas(pandas.read_csv(lit_file))
    else:
        lit_tbl = Table.read(lit_file, format=lit_entry.Format)
    #
    if coord is not None:
        # On-sky match within 1 arcsec
        tbl_coord = SkyCoord(ra=lit_tbl['ra'], dec=lit_tbl['dec'], unit='deg')
        sep = coord.separation(tbl_coord)
        match = sep < 1*units.arcsec
        nmatch = np.sum(match)
        if nmatch == 0:
            return None
        elif nmatch == 1:
            idx = int(np.where(match)[0])
            return lit_tbl[idx:idx+1]
        else:
            raise ValueError("More than one match in the table!!!")
    else:
        # Return
        return lit_tbl
def run(host_input:pandas.core.series.Series,
        build_ppxf:bool=False,
        lit_refs:str=None,
        build_cigale:bool=False, is_host:bool=True,
        override:bool=False, out_path:str=None,
        outfile:str=None):
    """Main method for generating a Host JSON file

    Args:
        host_input (pandas.core.series.Series): Row of the CVS file
            providing the host inputs
        build_ppxf (bool, optional): Run pPXF?. Defaults to False.
        lit_refs (str, optional): File of literature references. Defaults to None.
        build_cigale (bool, optional): Run CIGALE?. Defaults to False.
            NOT IMPLEMENTED (yet, but maybe never)
        is_host (bool, optional): Object is a Host, as opposed
            to a neighboring/foreground galaxy. Defaults to True.
        override (bool, optional): Attempt to over-ride errors.
            Mainly for time-outs of public data. Defaults to False.
        outfile (str, optional): Over-ride default outfile [not recommended; mainly for testing]
        out_path (str, optional): Over-ride default outfile [not recommended; mainly for testing]

    Raises:
        requests.exceptions.ConnectionError: If a survey query times out
            and `override` is False.
        ValueError: If a survey query returns more than one galaxy.
    """
    frbname = utils.parse_frb_name(host_input.FRB)
    print("--------------------------------------")
    print(f"Building Host galaxy for {frbname}")
    gal_coord = SkyCoord(host_input.Coord, frame='icrs')

    # Instantiate
    Frb = FRB.by_name(frbname)
    Host = frbgalaxy.FRBHost(gal_coord.ra.value,
                             gal_coord.dec.value,
                             Frb)

    # File root
    file_root = Host.name if is_host else utils.name_from_coord(Host.coord)
    project_list = host_input.Projects.split(',') if isinstance(
        host_input.Projects,str) else []
    ref_list = host_input.References.split(',') if isinstance(
        host_input.References,str) else []
    assert len(project_list) == len(ref_list)

    # UPDATE RA, DEC, OFFSETS
    offsets.incorporate_hst(mannings2021_astrom, Host)

    '''
    # Load redshift table
    assign_z()
    '''

    # Redshift
    warnings.warn("We should be using the z Table..")
    Host.set_z(host_input.z, 'spec')

    # ####################
    # Photometry

    search_r = 1 * units.arcsec

    # Survey data
    try:
        inside = survey_utils.in_which_survey(Frb.coord)
    except (requests.exceptions.ConnectionError) as e:  # Catches time-out from survey issues
        if override:
            print("Survey timed out. You should re-run it sometime...")
            inside = {}
        else:
            raise e

    # Loop on em
    merge_tbl = None
    for key in inside.keys():
        # Skip?
        if key in ['NVSS', 'FIRST', 'WENSS'] or not inside[key]:
            continue
        # Slurp
        survey = survey_utils.load_survey_by_name(key,
                                                  gal_coord,
                                                  search_r)
        srvy_tbl = survey.get_catalog(print_query=True)
        if srvy_tbl is None or len(srvy_tbl) == 0:
            continue
        elif len(srvy_tbl) > 1:
            raise ValueError("You found more than 1 galaxy. Uh-oh!")
        warnings.warn("We need a way to reference the survey")
        # Merge
        if merge_tbl is None:
            merge_tbl = srvy_tbl
            merge_tbl['Name'] = file_root
        else:
            merge_tbl = frbphotom.merge_photom_tables(srvy_tbl, merge_tbl)

    # Literature time
    if lit_refs is None:
        lit_refs = os.path.join(resource_filename('frb', 'data'), 'Galaxies',
                                'Literature', 'all_refs.csv')
    lit_tbls = pandas.read_csv(lit_refs)

    for kk in range(len(lit_tbls)):
        lit_entry = lit_tbls.iloc[kk]
        if 'photom' not in lit_entry.Table:
            continue
        # Load table
        sub_tbl = read_lit_table(lit_entry, coord=Host.coord)
        if sub_tbl is not None:
            # Add Ref
            for key in sub_tbl.keys():
                if 'err' in key:
                    newkey = key.replace('err', 'ref')
                    sub_tbl[newkey] = lit_entry.Reference
            # Merge?
            if merge_tbl is not None:
                for key in sub_tbl.keys():
                    if key == 'Name':
                        continue
                    if key in merge_tbl.keys():
                        # Do not clobber an existing value with a fill value.
                        if sub_tbl[key] == fill_value:
                            continue
                        else:
                            merge_tbl[key] = sub_tbl[key]
                    else:
                        if sub_tbl[key] != fill_value:
                            merge_tbl[key] = sub_tbl[key]
                #merge_tbl = frbphotom.merge_photom_tables(sub_tbl, merge_tbl)
            else:
                merge_tbl = sub_tbl
                merge_tbl['Name'] = file_root

    # Finish
    if merge_tbl is not None:
        # Dust correct
        EBV = nebular.get_ebv(gal_coord)['meanValue']
        frbphotom.correct_photom_table(merge_tbl, EBV, Host.name)
        # Parse
        Host.parse_photom(merge_tbl, EBV=EBV)
    else:
        print(f"No photometry for {file_root}")

    # CIGALE
    found_cigale, cigale_file = search_for_file(
        project_list, ref_list, '_CIGALE.fits',
        prefix=file_root,
        return_last_file=build_cigale)

    if cigale_file is not None:
        sfh_file = cigale_file.replace('CIGALE', 'CIGALE_SFH')
        if build_cigale:
            embed(header='251 -- not ready for this!')
            # Prep
            cut_photom = Table()
            # Cut me!
            for key in Host.photom.keys():
                # BUG FIX: referenced the undefined name `host191001`, a
                # leftover from a host-specific script; Host is the object
                # being built here.
                cut_photom[key] = [Host.photom[key]]
            # Run
            cigale.host_run(Host, cut_photom=cut_photom, cigale_file=cigale_file)
            found_cigale = True

    # Parse
    if found_cigale:
        print(f"Slupring in CIGALE outputs from {cigale_file}")
        Host.parse_cigale(cigale_file, sfh_file=sfh_file)
    else:
        print(f"No CIGALE file to read for {file_root}")

    # PPXF
    found_ppxf, ppxf_results_file = search_for_file(
        project_list, ref_list, '_ppxf.ecsv',
        prefix=file_root+f'_{host_input.Spectrum}',
        return_last_file=build_ppxf)
    #ppxf_results_file = os.path.join(GDB_path,
    #                                 Host.name+f'_{host_input.Spectrum}_ppxf.ecsv')
    if ppxf_results_file is not None:
        spec_file = ppxf_results_file.replace('ecsv', 'fits')
        if build_ppxf:
            meta, spectrum = Host.get_metaspec(instr=host_input.Spectrum)
            R = meta['R']
            gaps_str = host_input.ppxf_cuts.split(';')
            gaps = []
            for gap in gaps_str:
                gaps.append([float(item) for item in gap.split(',')])
            # Correct for Galactic extinction
            ebv = float(nebular.get_ebv(Host.coord)['meanValue'])
            print(f'Correcting the spectrum for Galactic extinction with reddening E(B-V)={ebv}')
            AV = ebv * 3.1  # RV
            Al = extinction.ccm89(spectrum.wavelength.value, AV, 3.1)
            # New spec
            new_flux = spectrum.flux * 10**(Al/2.5)
            new_sig = spectrum.sig * 10**(Al/2.5)
            new_spec = XSpectrum1D.from_tuple((spectrum.wavelength, new_flux, new_sig))
            #
            ppxf.run(new_spec, R, host_input.z,
                     results_file=ppxf_results_file,
                     spec_fit=spec_file,
                     gaps=gaps, chk=True)
            found_ppxf = True
    # Load
    if found_ppxf:
        print(f"Slurping in pPXF outputs from {ppxf_results_file}")
        Host.parse_ppxf(ppxf_results_file)
    else:
        print(f"No pPXF file to read for {file_root}")

    # Slurp in literature Nebular
    for kk in range(len(lit_tbls)):
        lit_entry = lit_tbls.iloc[kk]
        if 'nebular' not in lit_entry.Table:
            continue
        # Load table
        lit_tbl = read_lit_table(lit_entry, coord=Host.coord)
        if lit_tbl is None:
            continue
        # Fill me in
        for key in lit_tbl.keys():
            if 'err' in key:
                Host.neb_lines[key] = float(lit_tbl[key].data[0])
                newkey = key.replace('err', 'ref')
                Host.neb_lines[newkey] = lit_entry.Reference
                # Value
                newkey = newkey.replace('_ref', '')
                Host.neb_lines[newkey] = float(lit_tbl[newkey].data[0])

    # Remove bad lines
    if isinstance(host_input.Bad_EM_lines, str):
        lines = host_input.Bad_EM_lines.split(',')
        for line in lines:
            Host.neb_lines.pop(line)
            Host.neb_lines.pop(line+'_err')

    # AV
    if 'Halpha' in Host.neb_lines.keys() and 'Hbeta' in Host.neb_lines.keys():
        Host.calc_nebular_AV('Ha/Hb')

    # SFR
    if 'Halpha' in Host.neb_lines.keys():
        Host.calc_nebular_SFR('Ha')
    elif 'Hbeta' in Host.neb_lines.keys():
        Host.calc_nebular_SFR('Hb')

    # Galfit
    if isinstance(host_input.Galfit_filter, str):
        found_galfit, galfit_file = search_for_file(
            project_list, ref_list, '_galfit.fits',
            prefix=file_root+'_'+host_input.Galfit_filter)
        if found_galfit:
            print(f"Galfit analysis slurped in via: {galfit_file}")
            Host.parse_galfit(galfit_file)
        else:
            print(f"Galfit file with filter {host_input.Galfit_filter} not found!")
    else:
        print("Galfit analysis not enabled")

    # Derived from literature
    for kk in range(len(lit_tbls)):
        lit_entry = lit_tbls.iloc[kk]
        if 'derived' not in lit_entry.Table:
            continue
        # Load table
        lit_tbl = read_lit_table(lit_entry, coord=Host.coord)
        if lit_tbl is None:
            continue
        # Fill me in
        for key in lit_tbl.keys():
            if '_err' in key:
                Host.derived[key] = float(lit_tbl[key].data[0])
                newkey = key.replace('err', 'ref')
                Host.derived[newkey] = lit_entry.Reference
                # Value
                newkey = newkey.replace('_ref', '')
                Host.derived[newkey] = float(lit_tbl[newkey].data[0])

    # Vet all
    assert Host.vet_all()

    # Write
    if out_path is None:
        out_path = os.path.join(resource_filename('frb', 'data'),
                                'Galaxies', f'{frbname[3:]}')
    if outfile is None:
        outfile = None if is_host else \
            utils.name_from_coord(Host.coord) + '.json'
        #utils.name_from_coord(Host.coord) + '_{}.json'.format(frbname)
    Host.write_to_json(path=out_path, outfile=outfile)
def main(frbs:list, options:str=None, hosts_file:str=None, lit_refs:str=None,
         override:bool=False, outfile:str=None, out_path:str=None):
    """ Driver of the analysis

    Args:
        frbs (list): FRB designations to process ('all' triggers the embed stub).
        options (str, optional): String of extra analyses to enable
            ('cigale' and/or 'ppxf'). Defaults to None.
        hosts_file (str, optional): Path to the public host table. Defaults to None.
        lit_refs (str, optional): Literature reference file. Defaults to None.
        override (bool, optional): Passed through to run(). Defaults to False.
        outfile (str, optional): Output filename. Defaults to None.
            Here for testing
        out_path (str, optional): Output directory. Defaults to None.
            Here for testing
    """
    # Which optional analyses were requested?
    build_cigale = options is not None and 'cigale' in options
    build_ppxf = options is not None and 'ppxf' in options

    # Public table of FRB hosts
    host_tbl = hosts.load_host_tbl(hosts_file=hosts_file)

    if frbs == 'all':
        embed(header='Generate code to (i) load up the FRB table; (ii) generate a list')
    elif isinstance(frbs, list):
        pass

    for frb in frbs:
        frb_name = utils.parse_frb_name(frb, prefix='')
        # Rows of the host table that match this FRB
        row_indices = np.where(host_tbl.FRB == frb_name)[0].tolist()
        # First matching row is treated as the host; any further rows as candidates
        treat_as_host = True
        for row in row_indices:
            run(host_tbl.iloc[row],
                build_cigale=build_cigale, build_ppxf=build_ppxf,
                is_host=treat_as_host, lit_refs=lit_refs, override=override,
                outfile=outfile, out_path=out_path)
            treat_as_host = False
    #
    print("All done!")
# Run em all
# frb_build Hosts --frb 20181112,20190711,20200906,20121102,20190102,20190714,20201124,20171020,20190523,20191001,20180301,20190608,20191228,20180916,20190611,20180924,20190614,20200430
|
FRBs/FRB
|
frb/builds/build_hosts.py
|
Python
|
bsd-3-clause
| 17,619
|
[
"Galaxy"
] |
d5bcb57b9bb348bd000ba920593f34b76519ff7df3671a249a36a4cc2ee84bca
|
'''
The settings for OSMC are handled by the OSMC Settings Addon (OSA).
In order to more easily accommodate future changes and enhancements, each OSMC settings bundle (module) is a separate addon.
The module can take the form of an xbmc service, an xbmc script, or an xbmc module, but it must be installed into the users'
/usr/share/kodi/addons folder.
The OSA collects the modules it can find, loads their icons, and launches them individually when the user clicks on an icon.
The modules can either have their own GUI, or they can leverage the settings interface provided by XBMC. If the OSG uses the XBMC
settings interface, then all of their settings must be stored in the addons settings.xml. This is true even if the source of record
is a separate config file.
An example of this type is the Pi settings module; the actual settings are read from the config.txt, then written to the
settings.xml for display in kodi, then finally all changes are written back to the config.txt. The Pi module detects user
changes to the settings by identifying the differences between a newly read settings.xml and the values from a previously
read settings.xml.
The values of the settings displayed by this module are only ever populated by the items in the settings.xml. [Note: meaning that
if the settings data is retrieved from a different source, it will need to be populated in the module before it is displayed
to the user.]
Each module must have in its folder, a sub-folder called 'resources/osmc'. Within that folder must reside this script (OSMCSetting.py),
and the icons to be used in the OSG to represent the module (FX_Icon.png and FO_Icon.png for unfocused and focused images
respectively).
When the OSA creates the OSMC Settings GUI (OSG), these modules are identified and the OSMCSetting.py script in each of them
is imported. This script provides the mechanism for the OSG to apply the changes required from a change in a setting.
The OSMCSetting.py file must have a class called OSMCSettingClass as shown below.
The key variables in this class are:
addonid : The id for the addon. This must be the id declared in the addons addon.xml.
description : The description for the module, shown in the OSA
reboot_required : A boolean to declare if the OS needs to be rebooted. If a change in a specific setting
requires an OS reboot to take effect, this is the flag that will let the OSG know.
setting_data_method : This dictionary contains:
- the name of all settings in the module
- the current value of those settings
- [optional] apply - a method to call for each setting when the value changes
- [optional] translate - a method to call to translate the data before adding it to the
setting_data_method dict. The translate method must have a 'reverse' argument which
when set to True, reverses the transformation.
The key methods of this class are:
open_settings_window : This is called by the OSG when the icon is clicked. This will open the settings window.
Usually this would be __addon__.OpenSettings(), but it could be any other script.
This allows the creation of action buttons in the GUI, as well as allowing developers
to script and skin their own user interfaces.
[optional] first_method : called before any individual settings changes are applied.
[optional] final_method : called after all the individual settings changes are done.
[optional] boot_method : called when the OSA is first started.
apply_settings : This is called by the OSG to apply the changes to any settings that have changed.
It calls the first setting method, if it exists.
Then it calls the method listed in setting_data_method for each setting. Then it
calls the final method, again, if it exists.
populate_setting_data_method : This method is used to populate the setting_data_method with the current settings data.
Usually this will be from the addons setting data stored in settings.xml and retrieved
using the settings_retriever_xml method.
Sometimes the user is able to edit external setting files (such as the Pi's config.txt).
If the developer wants to use this source in place of the data stored in the
settings.xml, then they should edit this method to include a mechanism to retrieve and
parse that external data. As the window shown in the OSG populates only with data from
the settings.xml, the developer should ensure that the external data is loaded into that
xml before the settings window is opened.
settings_retriever_xml : This method is used to retrieve all the data for the settings listed in the
setting_data_method from the addons settings.xml.
The developer is free to create any methods they see fit, but the ones listed above are specifically used by the OSA.
Specifically, the apply_settings method is called when the OSA closes.
Settings changes are applied when the OSG is called to close. But this behaviour can be changed to occur when the addon
settings window closes by editing the open_settings_window. The method apply_settings will still be called by OSA, so
keep that in mind.
'''
# XBMC Modules
import xbmc
import xbmcaddon
import xbmcgui
import subprocess
import sys
import os
import threading
# ID of this addon; must match the id declared in the addon's addon.xml
addonid = "script.module.osmcsetting.pi"
__addon__ = xbmcaddon.Addon(addonid)
DIALOG = xbmcgui.Dialog()

# Custom modules: make this addon's resources/lib folder importable
sys.path.append(xbmc.translatePath(os.path.join(xbmcaddon.Addon(addonid).getAddonInfo('path'), 'resources','lib')))

# OSMC SETTING Modules (shipped in resources/lib, made importable above)
import config_tools as ct
from CompLogger import comprehensive_logger as clog
def lang(id):
    # Look up the localized string for this id and hand it back as UTF-8 bytes,
    # silently dropping any characters that cannot be encoded.
    return __addon__.getLocalizedString(id).encode( 'utf-8', 'ignore' )
def log(message):
    # Prefix every entry so OSMC Pi messages are easy to grep in the Kodi log
    entry = 'OSMC PI ' + str(message)
    xbmc.log(entry, level=xbmc.LOGDEBUG)
class OSMCSettingClass(threading.Thread):

    '''
    An OSMCSettingClass is a way to instantiate the settings of an OSMC settings module, and make them available to the
    OSMC Settings Addon (OSA).
    '''
    def __init__(self):

        '''
        The pi_settings_dict contains all the settings in the settings group, as well as the methods to call when a
        setting_value has changed and the existing setting_value.
        '''

        super(OSMCSettingClass, self).__init__()

        self.addonid = addonid
        # handle on this addon, used for reading/writing values in settings.xml
        self.me = xbmcaddon.Addon(self.addonid)

        # this is what is displayed in the main settings gui
        self.shortname = 'Pi Config'

        # NOTE(review): this first description is template text and is immediately
        # overwritten by the assignment below, so it is never shown to the user.
        self.description = """This is the text that is shown on the OSG. [CR][CR]It should describe:[CR] - what the settings module is for,[CR] - the settings it controls,[CR] - and anything else you want, I suppose."""

        self.description = """The Raspberry Pi doesn't have a conventional BIOS. System configuration parameters are stored in a "config.txt" file. For more detail, visit http://elinux.org/RPiconfig[CR]
This settings module allows you to edit your config.txt from within OSMC using a graphical interface.
The module includes:
- display rotation
- hdmi_safe & hdmi_boost
- hdmi_group & hdmi_mode
- function to save edid to file
- sdtv_mode & sdtv_aspect
- GPU memory split
- MPG2 & WVC1 licences (including status)
- your Pi's serial number
Finally, there is a Config Editor that will allow you to quickly add, edit, or delete lines in your config.txt.
Overclock settings are set using the Pi Overclock module."""

        # settings that are handled entirely within Kodi and must never be written to the config.txt
        self.not_going_to_config = [ 'store_hdmi_to_file',
                                     'gpu_mem',
                                     ]

        # settings whose values are populated by another setting's translate method
        # (here: both are filled in by translate_store_hdmi)
        self.values_set_elsewhere = [ 'hdmi_edid_file',
                                      'hdmi_force_hotplug',
                                      ]

        # The setting_value in this dict is what is used in the settings.xml. They need to be translated from any external source,
        # like the config.txt, and then translated again for writing back.
        # I have added a translate method to translate the data received from an external source before adding it to the setting dict
        # I have also added a default setting here, because the settings stored in the settings.xml cannot be relied upon,
        # because if the user adds a setting, then deletes it offline, the settings.xml will add it back in when the addon exits.
        # A default value of configignore means that the setting should never be passed to the config parser.
        self.pi_settings_dict = {
                                 'hdmi_safe': {'setting_value' : '',
                                               'default': 'false',
                                               'translate': self.translate_bool
                                               },
                                 'hdmi_ignore_edid': {'setting_value' : '',
                                                      'default': 'false',
                                                      'translate': self.translate_bool
                                                      },
                                 'store_hdmi_to_file': {'setting_value' : '',
                                                        'default': 'false',
                                                        'translate': self.translate_store_hdmi,
                                                        },
                                 'hdmi_edid_file': {'setting_value' : '',
                                                    'default': 'false',
                                                    'translate': self.translate_bool
                                                    },
                                 'hdmi_force_hotplug': {'setting_value' : '',
                                                        'default': 'false',
                                                        'translate': self.translate_bool,
                                                        },
                                 'hdmi_ignore_cec': {'setting_value' : '',
                                                     'default': 'false',
                                                     'translate': self.translate_bool
                                                     },
                                 # NOTE(review): this key has a trailing space; it can never match a
                                 # real 'hdmi_ignore_cec_init' entry in config.txt — verify intent.
                                 'hdmi_ignore_cec_init ': {'setting_value' : '',
                                                           'default': 'true',
                                                           'translate': self.translate_bool
                                                           },
                                 'hdmi_boost': {'setting_value' : '',
                                                'default': '0',
                                                },
                                 'hdmi_group': {'setting_value' : '',
                                                'default': '0',
                                                },
                                 'hdmi_mode': {'setting_value' : '',
                                               'default': '0',
                                               },
                                 'hdmi_pixel_encoding': {'setting_value' : '',
                                                         'default': '0',
                                                         },
                                 'display_rotate': {'setting_value' : '',
                                                    'default': '0',
                                                    },
                                 'sdtv_mode': {'setting_value' : '',
                                               'default': '0',
                                               },
                                 'sdtv_aspect': {'setting_value' : '',
                                                 'default': '0',
                                                 'translate': self.translate_sdtv_aspect
                                                 },
                                 'gpu_mem': {'setting_value' : '',
                                             'default': 'false',
                                             'translate': self.translate_gpu_mem
                                             },
                                 'gpu_mem_256': {'setting_value' : '',
                                                 'default': '112',
                                                 },
                                 'gpu_mem_512': {'setting_value' : '',
                                                 'default': '144',
                                                 },
                                 'gpu_mem_1024': {'setting_value' : '',
                                                  'default': '256',
                                                  },
                                 'decode_MPG2': {'setting_value' : '',
                                                 'default': '',
                                                 },
                                 'decode_WVC1': {'setting_value' : '',
                                                 'default': '',
                                                 },
                                 'max_usb_current': {'setting_value' : '',
                                                     'default': 'false',
                                                     'translate': self.translate_bool,
                                                     },
                                 'dtoverlay': {'setting_value' : '',
                                               'default': '',
                                               'translate': self.translate_dtoverlay,
                                               },
                                 'device_tree': {'setting_value' : '',
                                                 'default': '',
                                                 'translate': self.translate_device_tree,
                                                 },
                                 # 'other_settings_string': {'setting_value' : '',
                                 #                           'default': '',
                                 #                           'translate': self.translate_other_string
                                 #                           },
                                 }

        # list to hold the keys for the other string settings
        self.unknown_setting_keys = []

        # list to hold the keys for the settings that need to be removed from the config.txt
        self.remove_list = []

        # the location of the config file FOR TESTING ONLY
        try:
            self.test_config = '/boot/config.txt'

            # populate the settings data in the pi_settings_dict
            # self.populate_pi_settings_dict()

            # a flag to determine whether a setting change requires a reboot to take effect
            self.reboot_required = False

            # grab the Pi serial number and check to see whether the codec licences are enabled
            mpg = subprocess.check_output(["/opt/vc/bin/vcgencmd", "codec_enabled", "MPG2"])
            wvc = subprocess.check_output(["/opt/vc/bin/vcgencmd", "codec_enabled", "WVC1"])
            serial_raw = subprocess.check_output(["cat", "/proc/cpuinfo"])

            # grab just the serial number (strip the 'Serial' label and all whitespace/colons)
            # NOTE: treats check_output's result as str — Python 2 only
            serial = serial_raw[serial_raw.index('Serial') + len('Serial'):].replace('\n','').replace(':','').replace(' ','').replace('\t','')

            # load the values into the settings gui
            __addon__.setSetting('codec_check', mpg.replace('\n','') + ', ' + wvc.replace('\n',''))
            __addon__.setSetting('serial', serial)
        except:
            # not running on a Pi (vcgencmd unavailable) — fall back to a dev-machine path.
            # NOTE(review): the bare except also hides any other failure above.
            self.test_config = '/home/kubkev/Documents/config.txt'

        # dump the (still empty) settings dict for debugging
        log('START')
        for x, k in self.pi_settings_dict.iteritems():
            log("%s = %s" % (x, k.get('setting_value','no setting value')))
    @clog(log)
    def populate_pi_settings_dict(self):

        '''
        Populates the setting_value in the pi_settings_dict.

        Values come from the config.txt where present, otherwise from the defaults in
        pi_settings_dict; each value is translated (if a translate method exists) and
        mirrored into the addon's settings.xml for display in the OSG.
        '''

        # # this is the method to use if you are populating the dict from the settings.xml
        # latest_settings = self.settings_retriever_xml()

        # but I am going to set up my own process in addition to the xml one, I will be reading some
        # settings from the config.txt, and getting the rest from the settings.xml
        self.config_settings = ct.read_config(self.test_config)
        log('Config settings received from the parser: %s' % self.config_settings)

        # cycle through the pi_settings_dict dict, and populate with the settings values
        for key in self.pi_settings_dict.keys():

            # if the value of the setting is to be assigned by another setting, then just ignore it here
            # note: this will mean that the other setting will have to populate both the settings_dict and the settings.xml
            if key in self.values_set_elsewhere:
                continue

            # grab the translate method (if there is one)
            translate_method = self.pi_settings_dict.get(key,{}).get('translate',{})

            # if the key is in the config.txt
            if key in self.config_settings:
                setting_value = self.config_settings[key]
            else:
                # if the key ISNT in the config.txt then set the value from the default stored in
                # the pi_settings_dict dict
                setting_value = self.pi_settings_dict[key].get('default','')

            # get the setting value, translate it if needed
            if translate_method:
                setting_value = translate_method(setting_value)

            # if default is setting_value, then the setting has been set in the translation so ignore it
            # NOTE(review): this compares the translated VALUE against a list of setting NAMES
            # ('store_hdmi_to_file', 'gpu_mem'), so it is effectively always true — confirm
            # whether `key not in self.not_going_to_config` was intended.
            if setting_value not in self.not_going_to_config:
                self.pi_settings_dict[key]['setting_value'] = setting_value

                # also set the value in the settings.xml
                self.me.setSetting(key, str(setting_value))
    @clog(log, nowait=True)
    def run(self):

        '''
        The method determines what happens when the item is clicked in the settings GUI.
        Usually this would be __addon__.OpenSettings(), but it could be any other script.
        This allows the creation of action buttons in the GUI, as well as allowing developers to script and skin their
        own user interfaces.
        '''

        # read the config.txt file everytime the settings are opened. This is unavoidable because it is possible for
        # the user to have made manual changes to the config.txt while OSG is active.
        self.populate_pi_settings_dict()

        # debug dump of the freshly populated settings (iteritems: Python 2 only)
        for x, k in self.pi_settings_dict.iteritems():
            log("%s = %s" % (x, k.get('setting_value','no setting value')))

        # blocks until the user closes the module's settings window
        self.me.openSettings()

        # code placed here will run when the modules settings window is closed
        self.apply_permitted = True

        self.apply_settings()

        self.apply_permitted = False
        # apply_permitted will prevent the apply function being called by anything other than this method.
        # This stops it from being called twice, once when the settings are closed and another when the OSG is closed

        ''' FOR TESTING ONLY '''
        log('END')
        for x, k in self.pi_settings_dict.iteritems():
            log("%s = %s" % (x, k.get('setting_value','no setting value')))
    @clog(log)
    def apply_settings(self):

        '''
        This method will apply all of the settings. It calls the first_method, if it exists.
        Then it calls the method listed in pi_settings_dict for each setting. Then it calls the
        final_method, again, if it exists.

        Returns the string 'apply not permitted' when invoked outside run().
        '''

        # this prevents the method running when called by the OSG. Rather, the method is only being run when the settings
        # window is closed.
        if not self.apply_permitted:
            return 'apply not permitted'

        # retrieve the current settings from the settings.xml (this is where the user has made changes)
        new_settings = self.settings_retriever_xml()

        # dict to hold the keys of the changed settings
        self.changed_settings = {}

        # call the first method, if there is one
        self.first_method()

        # apply the individual settings changes
        for k, v in self.pi_settings_dict.iteritems():

            # if the value of the setting is set elsewhere, then the adding of the settings to changed settings will also
            # have to be handled by the apply method of that other setting.
            if k in self.values_set_elsewhere:
                continue

            # get the application method and stored setting value from the dictionary
            method = v.get('apply', False)
            value = v.get('setting_value', '')

            # if the new setting is different to the stored setting then change the dict and run the 'apply' method
            if new_settings[k] != value:

                # change stored setting_value to the new value
                self.pi_settings_dict[k]['setting_value'] = new_settings[k]

                # add it to the changed settings dict
                self.changed_settings[k] = new_settings[k]

                # if a specific apply method exists for the setting, then call that
                # NOTE(review): `method` is False when no 'apply' entry exists, so the bare
                # except swallows the resulting TypeError — but it also hides real errors
                # raised by a genuine apply method.
                try:
                    method(new_settings[k])
                except:
                    pass

        # call the final method if there is one
        self.final_method()

        # notify the user that the settings have been applied
        ok = DIALOG.notification(lang(32095), lang(32096))
def settings_retriever_xml(self):
'''
Reads the stored settings (in settings.xml) and returns a dictionary with the setting_name: setting_value. This
method cannot be overwritten.
'''
latest_settings = {}
addon = xbmcaddon.Addon(self.addonid)
for key in self.pi_settings_dict.keys():
latest_settings[key] = addon.getSetting(key)
return latest_settings
##############################################################################################################################
# #
    def first_method(self):

        '''
        The method to call before all the other setting methods are called.

        For example, this could be a call to stop a service. The final method could then restart the service again.
        This can be used to apply the setting changes.
        '''

        # Intentionally a no-op for the Pi module: the docstring is the entire body.
    @clog(log)
    def final_method(self):

        '''
        The method to call after all the other setting methods have been called.

        For example, in the case of the Raspberry Pi's settings module, the final writing to the config.txt can be delayed
        until all the settings have been updated in the pi_settings_dict.
        '''

        ''' This method will write the changed settings to the config.txt file. '''

        # translate the changed settings into values that can be used in the config.txt
        self.translated_changed_settings = {}
        for k, v in self.changed_settings.iteritems():

            # translate the setting if needed
            # in some cases this translation can be used to set the values for other settings and have them added
            # to the translated_changed_settings dict
            translate_method = self.pi_settings_dict.get(k,{}).get('translate', False)

            if translate_method:
                # reverse=True: convert the Kodi-side value back to config.txt form
                value = translate_method(v, reverse=True)
            else:
                value = v #.get('setting_value','')

            # if the setting is not to be added to the config.txt, then dont add it to the self.translated_changed_settings dict
            if k in self.not_going_to_config:
                continue

            # # if this is the other_settings_string then break up into the individual settings
            # elif k == 'other_settings_string':
            #     for key, svalue in value.iteritems():
            #         self.translated_changed_settings[key] = svalue

            # add the setting to the translated settings dict, this is now ready to send to the config.txt writer
            else:
                self.translated_changed_settings[k] = value

        # transfer the remove list into the changes dict
        # this will make sure that existing settings in the config.txt that need to be removed, will be removed
        for remove_key in self.remove_list:
            self.translated_changed_settings[remove_key] = 'remove'

        # reset the remove list
        self.remove_list = []

        # start_x=1 added by default to every config.txt
        # popcornmix: I would remove start_file=start_x.elf and fixup_file=fixup_x.dat and use the shortcut start_x=1
        self.translated_changed_settings['start_x'] = 1

        # write the settings to the config.txt
        ct.write_config(self.test_config, self.translated_changed_settings)
def boot_method(self):
'''
The method to call when the OSA is first activated (on reboot)
'''
pass
# #
##############################################################################################################################
##############################################################################################################################
# #
'''
Methods beyond this point are for specific settings.
'''
def translate_sdtv_aspect(self, data, reverse=False):
''' Method to translate the sdtv_aspect from 0 based index to 1 based '''
if not reverse:
if data:
return int(data) - 1
else:
return 0
else:
return int(data) + 1
def translate_bool(self, data, reverse=False):
''' method to convert number or text into boolean '''
if not reverse:
if data in [1, '1']:
return 'true'
else:
return 'false'
else:
if data in [1, '1', 'true']:
return '1'
else:
return '0'
# def translate_other_string(self, data='', reverse=False):
# '''
# Method to collate all the unknown settings from the config.txt into a single string, delimited by |:-:|.
# The reverse function returns a dictionary with {setting_name: setting_value, ... }
# '''
# if not reverse:
# config_keys = set(self.config_settings.keys())
# xml_keys = set(self.pi_settings_dict.keys())
# self.unknown_setting_keys = list(config_keys.difference(xml_keys))
# unknown_settings = [str(x) + '=' + str(self.config_settings[x]) for x in self.unknown_setting_keys]
# return "|:-:|".join(unknown_settings)
# else:
# no_space_data = data.replace(" ",'')
# setting_pairs = no_space_data.split("|:-:|")
# other_settings = []
# for setting in setting_pairs:
# set_list = setting.split('=')
# if len(set_list) == 2:
# other_settings.append(tuple(set_list))
# new_unknown_settings = dict(other_settings)
# # construct a list of keys that are in self.unknown_setting_keys but not in new_unknown_settings_keys
# new_unknown_settings_keys = set(new_unknown_settings.keys())
# unknown_settings_keys = set(self.unknown_setting_keys)
# removals = list(unknown_settings_keys.difference(new_unknown_settings_keys))
# # setup the removed unknown settings to be removed from the config.txt
# for rem in removals:
# new_unknown_settings[rem] = 'remove'
# # change the self.unknown_setting_keys list to the current list of unknown keys
# self.unknown_setting_keys = list(new_unknown_settings_keys)
# return new_unknown_settings
    @clog(log)
    def translate_device_tree(self, data, reverse=False):

        '''
        Checks for the presence of an empty device_tree setting, which disables device tree overlays.

        Forward: inspects the parsed config.txt values and records the result in the
        'suppress_dtoverlay' Kodi setting. Reverse: emits the config.txt lines for
        device_tree ([''] keeps the empty suppressing entry, ['[remove]'] deletes it).
        '''

        datalist = data.split('\n')

        if not reverse:
            if 'device_tree' in self.config_settings and '' in datalist:
                self.me.setSetting('suppress_dtoverlay', 'true')
            else:
                self.me.setSetting('suppress_dtoverlay', 'false')
            # NOTE(review): no explicit return here, so the forward translation yields None
            # and that is what gets stored as the setting_value — confirm this is intended.
        else:
            if self.me.getSetting('suppress_dtoverlay') == 'true':
                # an empty device_tree entry suppresses all overlays
                return ['']
            else:
                return ['[remove]']
    @clog(log)
    def translate_dtoverlay(self, data, reverse=False):

        '''
        Parses the dtoverlay list. There can be multiple dtoverlays, so the config_tool puts them all into
        a single list.

        Forward: maps each active overlay onto the Kodi settings that display it.
        Reverse: rebuilds the list of dtoverlay lines from the Kodi settings; entries
        suffixed with '[remove]' tell the config writer to delete that overlay.
        '''

        # setting: the set of settings in the group
        # value: the value to assign to the kodi displayed settings if the overlay is active
        overlay_settings = {
                            'hifiberry-dac-overlay'     : {'setting': 'soundcard_dac', 'value': '1'},
                            'hifiberry-dacplus-overlay' : {'setting': 'soundcard_dac', 'value': '2'},
                            'hifiberry-digi-overlay'    : {'setting': 'soundcard_dac', 'value': '3'},
                            'iqaudio-dac-overlay'       : {'setting': 'soundcard_dac', 'value': '4'},
                            'iqaudio-dacplus-overlay'   : {'setting': 'soundcard_dac', 'value': '5'},
                            'w1-gpio-overlay'           : {'setting': 'w1gpio', 'value': '1'},
                            'w1-gpio-pullup-overlay'    : {'setting': 'w1gpio', 'value': '2'},
                            'lirc-rpi-overlay'          : {'setting': 'lirc-rpi-overlay', 'value': 'true'},
                            'spi-bcm2835-overlay'       : {'setting': 'spi-bcm2835-overlay', 'value': 'true'},
                            }

        # index order of these lists matches the 1-based values used above
        dac_all = ['hifiberry-dac-overlay', 'hifiberry-dacplus-overlay','hifiberry-digi-overlay', 'iqaudio-dac-overlay','iqaudio-dacplus-overlay']
        w1gpio = ['w1-gpio-overlay', 'w1-gpio-pullup-overlay']

        datalist = data.split('\n')
        log('datalist = %s' % datalist)

        if not reverse:
            # do this when reading the items into Kodi: reset to inactive defaults first
            self.me.setSetting('lirc-rpi-overlay', 'false')
            self.me.setSetting('spi-bcm2835-overlay', 'false')
            self.me.setSetting('soundcard_dac', '0')
            self.me.setSetting('w1gpio', '0')

            # dtoverlay=lirc-rpi:gpio_out_pin=19,gpio_in_pin=23,gpio_in_pull=down
            for overlay in datalist:
                log('individual overlay=%s' % overlay)

                # lirc has to be handled individually as it may include extra parameters
                if 'lirc-rpi' in overlay:
                    self.me.setSetting('lirc-rpi-overlay', 'true')
                    sub_params = ['gpio_out_pin', 'gpio_in_pin', 'gpio_in_pull']
                    if ':' in overlay:
                        # pull out the key=value pairs after the colon
                        params = [x.split('=') for x in overlay[overlay.index(':')+1:].split(',')]
                        log('lirc-rpi params=%s' % params)
                        for param in params:
                            for sub in sub_params:
                                if param[0] == sub:
                                    self.me.setSetting(sub, param[1].strip())
                    # lirc entries must not fall through to the generic handling below
                    continue

                if overlay not in overlay_settings:
                    log('%s not in overlay_settings' % overlay)
                    continue
                else:
                    ovl = overlay_settings[overlay]
                    self.me.setSetting(ovl['setting'], ovl['value'])
                    log('overlay: %s, setting: %s, value: %s' % (overlay, ovl['setting'], ovl['value']))
            # NOTE(review): the forward branch has no return, so the stored setting_value
            # for 'dtoverlay' ends up as None — confirm this is intended.
        else:
            # do this when writing the Kodi settings back to config.txt
            new_dtoverlay = []

            pos = self.me.getSetting('soundcard_dac')
            if pos == '0':
                # no soundcard selected: mark every DAC overlay for removal
                new_dtoverlay.extend([x + '[remove]' for x in dac_all])
            else:
                soundcard = dac_all[int(pos)-1]
                # add the soundcard overlay
                new_dtoverlay.append(soundcard)
                #remove the unneeded entries
                new_dtoverlay.extend([x + '[remove]' for x in dac_all if x != soundcard])

            wgp = self.me.getSetting('w1gpio')
            if wgp != '0':
                new_dtoverlay.append(w1gpio[int(wgp)-1])
            else:
                new_dtoverlay.extend([x + '[remove]' for x in w1gpio])

            rpi = self.me.getSetting('lirc-rpi-overlay')
            if rpi == 'true':
                # dtoverlay=lirc-rpi:gpio_out_pin=19,gpio_in_pin=23,gpio_in_pull=down
                out_pin = self.me.getSetting('gpio_out_pin')
                in_pin = self.me.getSetting('gpio_in_pin')
                pull_pin = self.me.getSetting('gpio_in_pull')
                lirc = 'lirc-rpi:' + 'gpio_out_pin=' + str(out_pin) + ',gpio_in_pin=' + str(in_pin)
                if pull_pin != 'off':
                    lirc = lirc + ',gpio_in_pull=' + pull_pin
                new_dtoverlay.append(lirc)
            else:
                new_dtoverlay.append('lirc-rpi-overlay' + '[remove]')

            spi = self.me.getSetting('spi-bcm2835-overlay')
            if spi == 'true':
                new_dtoverlay.append('spi-bcm2835-overlay')
            else:
                new_dtoverlay.append('spi-bcm2835-overlay' + '[remove]')

            log("NEW DT OVERLAY = %s" % new_dtoverlay)
            return new_dtoverlay
    @clog(log)
    def translate_store_hdmi(self, data, reverse=False):

        '''
        Sets the settings_dict and settings.xml values for hdmi_edid_file and hdmi_force_hotplug.

        This is the parent setting for the two children listed in values_set_elsewhere;
        it populates them on the forward pass and writes/removes them on the reverse pass.
        '''

        if not reverse:
            # set the pi_settings_dict and settings.xml values for these two settings
            hdmi_edid_file = self.translate_bool(self.config_settings.get('hdmi_edid_file', 0))
            hdmi_force_hotplug = self.translate_bool(self.config_settings.get('hdmi_force_hotplug', 0))

            # popcornmix says that if either of these settings are active, then both should be active
            # NOTE(review): the comment says "either", but all() requires BOTH to be 'true' —
            # confirm whether any() was intended.
            tethered_settings = all([hdmi_edid_file=='true' , hdmi_force_hotplug=='true'])

            self.pi_settings_dict['hdmi_edid_file']['setting_value'] = tethered_settings
            self.pi_settings_dict['hdmi_force_hotplug']['setting_value'] = tethered_settings

            # return the appropriate value for the parent setting
            if tethered_settings: return 'true'
            return 'false'

        else:
            # if the parent setting is true, then the child settings should be set to one
            # if it isnt true, then both settings should be removed from the config.txt
            if data == 'true':
                self.translated_changed_settings['hdmi_edid_file'] = '1'
                self.translated_changed_settings['hdmi_force_hotplug'] = '1'

                # run the sub_process : "tvservice -d /boot/edid.dat"
                subprocess.call(["sudo", "/opt/vc/bin/tvservice", "-d", "/boot/edid.dat"])
            else:
                # if the parent setting is false, then remove these two child settings from the config.xml
                self.remove_list.append('hdmi_edid_file')
                self.remove_list.append('hdmi_force_hotplug')

            # the parent setting itself is never written to config.txt
            return 'remove'
    @clog(log)
    def translate_gpu_mem(self, data, reverse=False):

        '''
        If gpu_mem is present in the config.txt, then apply it to both gpu_mem_256 and gpu_mem_512.
        Any new config.txt should be missing the gpu_mem setting.

        Always returns 'remove' so the bare gpu_mem entry is stripped from config.txt.
        '''

        if not reverse:
            memgpu = self.config_settings.get('gpu_mem', False)

            # if gpu_mem is not in the config.txt then just return
            if not memgpu:
                return 'remove'

            # set gpu_mem for removal from the config.txt
            self.remove_list.append('gpu_mem')

            # get the values for the other memory setting variants
            mem1024 = self.config_settings.get('gpu_mem_1024', False)
            mem512 = self.config_settings.get('gpu_mem_512', False)
            mem256 = self.config_settings.get('gpu_mem_256', False)

            # if gpu_mem_1024 is in the config, then use that, otherwise copy gpu_mem across
            # NOTE(review): returning here skips the gpu_mem_512/gpu_mem_256 handling
            # below entirely — confirm this early exit is intended.
            if mem1024:
                return 'remove'
            elif memgpu:
                # set the value in the pi_settings_dict and the settings.xml for display
                # (capped at the largest value valid for a 1024MB Pi)
                val1024 = min(768, int(memgpu))
                self.me.setSetting('gpu_mem_1024', str(val1024))
                self.pi_settings_dict['gpu_mem_1024']['setting_value'] = val1024

            # if gpu_mem_512 is in the config, then use that, otherwise use gpu_mem, otherwise use default
            if mem512:
                return 'remove'
            elif memgpu:
                # set the value in the pi_settings_dict and the settings.xml for display
                val512 = min(448, int(memgpu))
                self.me.setSetting('gpu_mem_512', str(val512))
                self.pi_settings_dict['gpu_mem_512']['setting_value'] = val512

            # if gpu_mem_256 is in the config, then use that, otherwise use gpu_mem, otherwise use default
            if mem256:
                return 'remove'
            elif memgpu:
                # set the value in the pi_settings_dict and the settings.xml for display
                val256 = min(192, int(memgpu))
                self.me.setSetting('gpu_mem_256', str(val256))
                self.pi_settings_dict['gpu_mem_256']['setting_value'] = val256

            return 'remove'
        else:
            return 'remove'
# #
##############################################################################################################################
if __name__ == "__main__":
    # This module is only ever imported by the OSMC Settings Addon; nothing to run standalone.
    pass
|
melvon22/osmc
|
package/mediacenter-addon-osmc/src/script.module.osmcsetting.pi/resources/osmc/OSMCSetting.py
|
Python
|
gpl-2.0
| 31,988
|
[
"VisIt"
] |
6942a23ed4d167ec180cac688d0eb94087d7cf140865322389b4786912ceaf0b
|
import time
import warnings
import numpy as np
import tensorflow as tf
from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter
from stable_baselines.common.vec_env import VecEnv
from stable_baselines.common.math_util import safe_mean, unscale_action, scale_action
from stable_baselines.common.schedules import get_schedule_fn
from stable_baselines.common.buffers import ReplayBuffer
from stable_baselines.sac.policies import SACPolicy
from stable_baselines import logger
class SAC(OffPolicyRLModel):
"""
Soft Actor-Critic (SAC)
Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor,
This implementation borrows code from original implementation (https://github.com/haarnoja/sac)
from OpenAI Spinning Up (https://github.com/openai/spinningup) and from the Softlearning repo
(https://github.com/rail-berkeley/softlearning/)
Paper: https://arxiv.org/abs/1801.01290
Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html
:param policy: (SACPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, LnMlpPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) the discount factor
:param learning_rate: (float or callable) learning rate for adam optimizer,
the same learning rate will be used for all networks (Q-Values, Actor and Value function)
it can be a function of the current progress (from 1 to 0)
:param buffer_size: (int) size of the replay buffer
:param batch_size: (int) Minibatch size for each gradient update
:param tau: (float) the soft update coefficient ("polyak update", between 0 and 1)
:param ent_coef: (str or float) Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off.
Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)
:param train_freq: (int) Update the model every `train_freq` steps.
:param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
:param target_update_interval: (int) update the target network every `target_network_update_freq` steps.
:param gradient_steps: (int) How many gradient update after each step
:param target_entropy: (str or float) target entropy when learning ent_coef (ent_coef = 'auto')
:param action_noise: (ActionNoise) the action noise type (None by default), this can help
for hard exploration problem. Cf DDPG for the different action noise type.
:param random_exploration: (float) Probability of taking a random action (as in an epsilon-greedy strategy)
This is not needed for SAC normally but can help exploring when using HER + SAC.
This hack was present in the original OpenAI Baselines repo (DDPG + HER)
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
Note: this has no effect on SAC logging for now
:param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
If None (default), use random seed. Note that if you want completely deterministic
results, you must set `n_cpu_tf_sess` to 1.
:param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
If None, the number of cpu of the current machine will be used.
"""
    def __init__(self, policy, env, gamma=0.99, learning_rate=3e-4, buffer_size=50000,
                 learning_starts=100, train_freq=1, batch_size=64,
                 tau=0.005, ent_coef='auto', target_update_interval=1,
                 gradient_steps=1, target_entropy='auto', action_noise=None,
                 random_exploration=0.0, verbose=0, tensorboard_log=None,
                 _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False,
                 seed=None, n_cpu_tf_sess=None):
        """Store hyperparameters and declare graph attributes.

        Parameter semantics are documented in the class docstring. The actual
        TF graph is only built by ``setup_model`` (called at the end unless
        ``_init_setup_model`` is False).
        """
        super(SAC, self).__init__(policy=policy, env=env, replay_buffer=None, verbose=verbose,
                                  policy_base=SACPolicy, requires_vec_env=False, policy_kwargs=policy_kwargs,
                                  seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
        self.buffer_size = buffer_size
        self.learning_rate = learning_rate
        self.learning_starts = learning_starts
        self.train_freq = train_freq
        self.batch_size = batch_size
        self.tau = tau
        # In the original paper, same learning rate is used for all networks
        # self.policy_lr = learning_rate
        # self.qf_lr = learning_rate
        # self.vf_lr = learning_rate
        # Entropy coefficient / Entropy temperature
        # Inverse of the reward scale
        self.ent_coef = ent_coef
        self.target_update_interval = target_update_interval
        self.gradient_steps = gradient_steps
        self.gamma = gamma
        self.action_noise = action_noise
        self.random_exploration = random_exploration
        # All attributes below are placeholders populated by setup_model();
        # keeping them declared here documents the full instance state.
        self.value_fn = None
        self.graph = None
        self.replay_buffer = None
        self.sess = None
        self.tensorboard_log = tensorboard_log
        self.verbose = verbose
        self.params = None
        self.summary = None
        self.policy_tf = None
        self.target_entropy = target_entropy
        self.full_tensorboard_log = full_tensorboard_log
        self.obs_target = None
        self.target_policy = None
        self.actions_ph = None
        self.rewards_ph = None
        self.terminals_ph = None
        self.observations_ph = None
        self.action_target = None
        self.next_observations_ph = None
        self.value_target = None
        self.step_ops = None
        self.target_update_op = None
        self.infos_names = None
        self.entropy = None
        self.target_params = None
        self.learning_rate_ph = None
        self.processed_obs_ph = None
        self.processed_next_obs_ph = None
        self.log_ent_coef = None
        if _init_setup_model:
            self.setup_model()
def _get_pretrain_placeholders(self):
policy = self.policy_tf
# Rescale
deterministic_action = unscale_action(self.action_space, self.deterministic_action)
return policy.obs_ph, self.actions_ph, deterministic_action
    def setup_model(self):
        """Build the TF graph: policy/target networks, losses, optimizers and update ops.

        Creates the session and replay buffer, then four variable scopes:
        "input" (placeholders), "model" (actor, twin critics, value fn and the
        optionally-learned entropy coefficient), "target" (target value
        network) and "loss" (losses, train ops and Polyak target updates).
        """
        with SetVerbosity(self.verbose):
            self.graph = tf.Graph()
            with self.graph.as_default():
                self.set_random_seed(self.seed)
                self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
                self.replay_buffer = ReplayBuffer(self.buffer_size)
                with tf.variable_scope("input", reuse=False):
                    # Create policy and target TF objects
                    self.policy_tf = self.policy(self.sess, self.observation_space, self.action_space,
                                                 **self.policy_kwargs)
                    self.target_policy = self.policy(self.sess, self.observation_space, self.action_space,
                                                     **self.policy_kwargs)
                    # Initialize Placeholders
                    self.observations_ph = self.policy_tf.obs_ph
                    # Normalized observation for pixels
                    self.processed_obs_ph = self.policy_tf.processed_obs
                    self.next_observations_ph = self.target_policy.obs_ph
                    self.processed_next_obs_ph = self.target_policy.processed_obs
                    self.action_target = self.target_policy.action_ph
                    self.terminals_ph = tf.placeholder(tf.float32, shape=(None, 1), name='terminals')
                    self.rewards_ph = tf.placeholder(tf.float32, shape=(None, 1), name='rewards')
                    self.actions_ph = tf.placeholder(tf.float32, shape=(None,) + self.action_space.shape,
                                                     name='actions')
                    self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")
                with tf.variable_scope("model", reuse=False):
                    # Create the policy
                    # first return value corresponds to deterministic actions
                    # policy_out corresponds to stochastic actions, used for training
                    # logp_pi is the log probability of actions taken by the policy
                    self.deterministic_action, policy_out, logp_pi = self.policy_tf.make_actor(self.processed_obs_ph)
                    # Monitor the entropy of the policy,
                    # this is not used for training
                    self.entropy = tf.reduce_mean(self.policy_tf.entropy)
                    # Use two Q-functions to improve performance by reducing overestimation bias.
                    qf1, qf2, value_fn = self.policy_tf.make_critics(self.processed_obs_ph, self.actions_ph,
                                                                     create_qf=True, create_vf=True)
                    qf1_pi, qf2_pi, _ = self.policy_tf.make_critics(self.processed_obs_ph,
                                                                    policy_out, create_qf=True, create_vf=False,
                                                                    reuse=True)
                    # Target entropy is used when learning the entropy coefficient
                    if self.target_entropy == 'auto':
                        # automatically set target entropy if needed
                        self.target_entropy = -np.prod(self.action_space.shape).astype(np.float32)
                    else:
                        # Force conversion
                        # this will also throw an error for unexpected string
                        self.target_entropy = float(self.target_entropy)
                    # The entropy coefficient or entropy can be learned automatically
                    # see Automating Entropy Adjustment for Maximum Entropy RL section
                    # of https://arxiv.org/abs/1812.05905
                    if isinstance(self.ent_coef, str) and self.ent_coef.startswith('auto'):
                        # Default initial value of ent_coef when learned
                        init_value = 1.0
                        if '_' in self.ent_coef:
                            init_value = float(self.ent_coef.split('_')[1])
                            assert init_value > 0., "The initial value of ent_coef must be greater than 0"
                        self.log_ent_coef = tf.get_variable('log_ent_coef', dtype=tf.float32,
                                                            initializer=np.log(init_value).astype(np.float32))
                        self.ent_coef = tf.exp(self.log_ent_coef)
                    else:
                        # Force conversion to float
                        # this will throw an error if a malformed string (different from 'auto')
                        # is passed
                        self.ent_coef = float(self.ent_coef)
                with tf.variable_scope("target", reuse=False):
                    # Create the value network
                    _, _, value_target = self.target_policy.make_critics(self.processed_next_obs_ph,
                                                                         create_qf=False, create_vf=True)
                    self.value_target = value_target
                with tf.variable_scope("loss", reuse=False):
                    # Take the min of the two Q-Values (Double-Q Learning)
                    min_qf_pi = tf.minimum(qf1_pi, qf2_pi)
                    # Target for Q value regression
                    q_backup = tf.stop_gradient(
                        self.rewards_ph +
                        (1 - self.terminals_ph) * self.gamma * self.value_target
                    )
                    # Compute Q-Function loss
                    # TODO: test with huber loss (it would avoid too high values)
                    qf1_loss = 0.5 * tf.reduce_mean((q_backup - qf1) ** 2)
                    qf2_loss = 0.5 * tf.reduce_mean((q_backup - qf2) ** 2)
                    # Compute the entropy temperature loss
                    # it is used when the entropy coefficient is learned
                    ent_coef_loss, entropy_optimizer = None, None
                    if not isinstance(self.ent_coef, float):
                        ent_coef_loss = -tf.reduce_mean(
                            self.log_ent_coef * tf.stop_gradient(logp_pi + self.target_entropy))
                        entropy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
                    # Compute the policy loss
                    # Alternative: policy_kl_loss = tf.reduce_mean(logp_pi - min_qf_pi)
                    policy_kl_loss = tf.reduce_mean(self.ent_coef * logp_pi - qf1_pi)
                    # NOTE: in the original implementation, they have an additional
                    # regularization loss for the Gaussian parameters
                    # this is not used for now
                    # policy_loss = (policy_kl_loss + policy_regularization_loss)
                    policy_loss = policy_kl_loss
                    # Target for value fn regression
                    # We update the vf towards the min of two Q-functions in order to
                    # reduce overestimation bias from function approximation error.
                    v_backup = tf.stop_gradient(min_qf_pi - self.ent_coef * logp_pi)
                    value_loss = 0.5 * tf.reduce_mean((value_fn - v_backup) ** 2)
                    values_losses = qf1_loss + qf2_loss + value_loss
                    # Policy train op
                    # (has to be separate from value train op, because min_qf_pi appears in policy_loss)
                    policy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
                    policy_train_op = policy_optimizer.minimize(policy_loss, var_list=tf_util.get_trainable_vars('model/pi'))
                    # Value train op
                    value_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
                    values_params = tf_util.get_trainable_vars('model/values_fn')
                    # NOTE(review): source_params is the same collection as values_params;
                    # values_params feeds the optimizer below while source_params is
                    # zipped against target_params for the Polyak / init assignments.
                    source_params = tf_util.get_trainable_vars("model/values_fn")
                    target_params = tf_util.get_trainable_vars("target/values_fn")
                    # Polyak averaging for target variables
                    self.target_update_op = [
                        tf.assign(target, (1 - self.tau) * target + self.tau * source)
                        for target, source in zip(target_params, source_params)
                    ]
                    # Initializing target to match source variables
                    target_init_op = [
                        tf.assign(target, source)
                        for target, source in zip(target_params, source_params)
                    ]
                    # Control flow is used because sess.run otherwise evaluates in nondeterministic order
                    # and we first need to compute the policy action before computing q values losses
                    with tf.control_dependencies([policy_train_op]):
                        train_values_op = value_optimizer.minimize(values_losses, var_list=values_params)
                        self.infos_names = ['policy_loss', 'qf1_loss', 'qf2_loss', 'value_loss', 'entropy']
                        # All ops to call during one training step
                        self.step_ops = [policy_loss, qf1_loss, qf2_loss,
                                         value_loss, qf1, qf2, value_fn, logp_pi,
                                         self.entropy, policy_train_op, train_values_op]
                        # Add entropy coefficient optimization operation if needed
                        if ent_coef_loss is not None:
                            with tf.control_dependencies([train_values_op]):
                                ent_coef_op = entropy_optimizer.minimize(ent_coef_loss, var_list=self.log_ent_coef)
                                self.infos_names += ['ent_coef_loss', 'ent_coef']
                                self.step_ops += [ent_coef_op, ent_coef_loss, self.ent_coef]
                    # Monitor losses and entropy in tensorboard
                    tf.summary.scalar('policy_loss', policy_loss)
                    tf.summary.scalar('qf1_loss', qf1_loss)
                    tf.summary.scalar('qf2_loss', qf2_loss)
                    tf.summary.scalar('value_loss', value_loss)
                    tf.summary.scalar('entropy', self.entropy)
                    if ent_coef_loss is not None:
                        tf.summary.scalar('ent_coef_loss', ent_coef_loss)
                        tf.summary.scalar('ent_coef', self.ent_coef)
                    tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph))
                # Retrieve parameters that must be saved
                self.params = tf_util.get_trainable_vars("model")
                self.target_params = tf_util.get_trainable_vars("target/values_fn")
                # Initialize Variables and target network
                with self.sess.as_default():
                    self.sess.run(tf.global_variables_initializer())
                    self.sess.run(target_init_op)
                self.summary = tf.summary.merge_all()
    def _train_step(self, step, writer, learning_rate):
        """Sample a replay batch and run one gradient step on all SAC networks.

        Returns the monitored scalars ``(policy_loss, qf1_loss, qf2_loss,
        value_loss, entropy)`` plus ``(ent_coef_loss, ent_coef)`` when the
        entropy coefficient is learned. The unpacking below is positionally
        tied to the ``self.step_ops`` list built in ``setup_model`` — do not
        reorder either side independently.
        """
        # Sample a batch from the replay buffer
        batch = self.replay_buffer.sample(self.batch_size, env=self._vec_normalize_env)
        batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones = batch
        feed_dict = {
            self.observations_ph: batch_obs,
            self.actions_ph: batch_actions,
            self.next_observations_ph: batch_next_obs,
            self.rewards_ph: batch_rewards.reshape(self.batch_size, -1),
            self.terminals_ph: batch_dones.reshape(self.batch_size, -1),
            self.learning_rate_ph: learning_rate
        }
        # out  = [policy_loss, qf1_loss, qf2_loss,
        #         value_loss, qf1, qf2, value_fn, logp_pi,
        #         self.entropy, policy_train_op, train_values_op]
        # Do one gradient step
        # and optionally compute log for tensorboard
        if writer is not None:
            out = self.sess.run([self.summary] + self.step_ops, feed_dict)
            summary = out.pop(0)
            writer.add_summary(summary, step)
        else:
            out = self.sess.run(self.step_ops, feed_dict)
        # Unpack to monitor losses and entropy
        policy_loss, qf1_loss, qf2_loss, value_loss, *values = out
        # qf1, qf2, value_fn, logp_pi, entropy, *_ = values
        entropy = values[4]
        if self.log_ent_coef is not None:
            ent_coef_loss, ent_coef = values[-2:]
            return policy_loss, qf1_loss, qf2_loss, value_loss, entropy, ent_coef_loss, ent_coef
        return policy_loss, qf1_loss, qf2_loss, value_loss, entropy
    def learn(self, total_timesteps, callback=None,
              log_interval=4, tb_log_name="SAC", reset_num_timesteps=True, replay_wrapper=None):
        """Run the SAC training loop for ``total_timesteps`` environment steps.

        Each iteration collects one transition (random actions before
        ``learning_starts``, policy actions afterwards), stores it in the
        replay buffer, and every ``train_freq`` steps performs
        ``gradient_steps`` updates via ``_train_step``, with Polyak target
        updates every ``target_update_interval`` gradient steps.

        :return: (SAC) self, for chaining.
        """
        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        callback = self._init_callback(callback)
        if replay_wrapper is not None:
            self.replay_buffer = replay_wrapper(self.replay_buffer)
        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()
            # Transform to callable if needed
            self.learning_rate = get_schedule_fn(self.learning_rate)
            # Initial learning rate
            current_lr = self.learning_rate(1)
            start_time = time.time()
            episode_rewards = [0.0]
            episode_successes = []
            if self.action_noise is not None:
                self.action_noise.reset()
            obs = self.env.reset()
            # Retrieve unnormalized observation for saving into the buffer
            if self._vec_normalize_env is not None:
                obs_ = self._vec_normalize_env.get_original_obs().squeeze()
            n_updates = 0
            infos_values = []
            callback.on_training_start(locals(), globals())
            callback.on_rollout_start()
            for step in range(total_timesteps):
                # Before training starts, randomly sample actions
                # from a uniform distribution for better exploration.
                # Afterwards, use the learned policy
                # if random_exploration is set to 0 (normal setting)
                if self.num_timesteps < self.learning_starts or np.random.rand() < self.random_exploration:
                    # actions sampled from action space are from range specific to the environment
                    # but algorithm operates on tanh-squashed actions therefore simple scaling is used
                    unscaled_action = self.env.action_space.sample()
                    action = scale_action(self.action_space, unscaled_action)
                else:
                    action = self.policy_tf.step(obs[None], deterministic=False).flatten()
                    # Add noise to the action (improve exploration,
                    # not needed in general)
                    if self.action_noise is not None:
                        action = np.clip(action + self.action_noise(), -1, 1)
                    # inferred actions need to be transformed to environment action_space before stepping
                    unscaled_action = unscale_action(self.action_space, action)
                assert action.shape == self.env.action_space.shape
                new_obs, reward, done, info = self.env.step(unscaled_action)
                self.num_timesteps += 1
                # Only stop training if return value is False, not when it is None. This is for backwards
                # compatibility with callbacks that have no return statement.
                callback.update_locals(locals())
                if callback.on_step() is False:
                    break
                # Store only the unnormalized version
                if self._vec_normalize_env is not None:
                    new_obs_ = self._vec_normalize_env.get_original_obs().squeeze()
                    reward_ = self._vec_normalize_env.get_original_reward().squeeze()
                else:
                    # Avoid changing the original ones
                    obs_, new_obs_, reward_ = obs, new_obs, reward
                # Store transition in the replay buffer.
                self.replay_buffer_add(obs_, action, reward_, new_obs_, done, info)
                obs = new_obs
                # Save the unnormalized observation
                if self._vec_normalize_env is not None:
                    obs_ = new_obs_
                # Retrieve reward and episode length if using Monitor wrapper
                maybe_ep_info = info.get('episode')
                if maybe_ep_info is not None:
                    self.ep_info_buf.extend([maybe_ep_info])
                if writer is not None:
                    # Write reward per episode to tensorboard
                    ep_reward = np.array([reward_]).reshape((1, -1))
                    ep_done = np.array([done]).reshape((1, -1))
                    tf_util.total_episode_reward_logger(self.episode_reward, ep_reward,
                                                        ep_done, writer, self.num_timesteps)
                if self.num_timesteps % self.train_freq == 0:
                    callback.on_rollout_end()
                    mb_infos_vals = []
                    # Update policy, critics and target networks
                    for grad_step in range(self.gradient_steps):
                        # Break if the warmup phase is not over
                        # or if there are not enough samples in the replay buffer
                        if not self.replay_buffer.can_sample(self.batch_size) \
                                or self.num_timesteps < self.learning_starts:
                            break
                        n_updates += 1
                        # Compute current learning_rate
                        frac = 1.0 - step / total_timesteps
                        current_lr = self.learning_rate(frac)
                        # Update policy and critics (q functions)
                        mb_infos_vals.append(self._train_step(step, writer, current_lr))
                        # Update target network
                        if (step + grad_step) % self.target_update_interval == 0:
                            # Update target network
                            self.sess.run(self.target_update_op)
                    # Log losses and entropy, useful for monitor training
                    if len(mb_infos_vals) > 0:
                        infos_values = np.mean(mb_infos_vals, axis=0)
                    callback.on_rollout_start()
                episode_rewards[-1] += reward_
                if done:
                    if self.action_noise is not None:
                        self.action_noise.reset()
                    if not isinstance(self.env, VecEnv):
                        obs = self.env.reset()
                    episode_rewards.append(0.0)
                    maybe_is_success = info.get('is_success')
                    if maybe_is_success is not None:
                        episode_successes.append(float(maybe_is_success))
                if len(episode_rewards[-101:-1]) == 0:
                    mean_reward = -np.inf
                else:
                    mean_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)
                # substract 1 as we appended a new term just now
                num_episodes = len(episode_rewards) - 1
                # Display training infos
                if self.verbose >= 1 and done and log_interval is not None and num_episodes % log_interval == 0:
                    fps = int(step / (time.time() - start_time))
                    logger.logkv("episodes", num_episodes)
                    logger.logkv("mean 100 episode reward", mean_reward)
                    if len(self.ep_info_buf) > 0 and len(self.ep_info_buf[0]) > 0:
                        logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buf]))
                        logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buf]))
                    logger.logkv("n_updates", n_updates)
                    logger.logkv("current_lr", current_lr)
                    logger.logkv("fps", fps)
                    logger.logkv('time_elapsed', int(time.time() - start_time))
                    if len(episode_successes) > 0:
                        logger.logkv("success rate", np.mean(episode_successes[-100:]))
                    if len(infos_values) > 0:
                        for (name, val) in zip(self.infos_names, infos_values):
                            logger.logkv(name, val)
                    logger.logkv("total timesteps", self.num_timesteps)
                    logger.dumpkvs()
                    # Reset infos:
                    infos_values = []
            callback.on_training_end()
            return self
def action_probability(self, observation, state=None, mask=None, actions=None, logp=False):
if actions is not None:
raise ValueError("Error: SAC does not have action probabilities.")
warnings.warn("Even though SAC has a Gaussian policy, it cannot return a distribution as it "
"is squashed by a tanh before being scaled and outputed.")
return None
def predict(self, observation, state=None, mask=None, deterministic=True):
observation = np.array(observation)
vectorized_env = self._is_vectorized_observation(observation, self.observation_space)
observation = observation.reshape((-1,) + self.observation_space.shape)
actions = self.policy_tf.step(observation, deterministic=deterministic)
actions = actions.reshape((-1,) + self.action_space.shape) # reshape to the correct action shape
actions = unscale_action(self.action_space, actions) # scale the output for the prediction
if not vectorized_env:
actions = actions[0]
return actions, None
def get_parameter_list(self):
return (self.params +
self.target_params)
def save(self, save_path, cloudpickle=False):
data = {
"learning_rate": self.learning_rate,
"buffer_size": self.buffer_size,
"learning_starts": self.learning_starts,
"train_freq": self.train_freq,
"batch_size": self.batch_size,
"tau": self.tau,
"ent_coef": self.ent_coef if isinstance(self.ent_coef, float) else 'auto',
"target_entropy": self.target_entropy,
# Should we also store the replay buffer?
# this may lead to high memory usage
# with all transition inside
# "replay_buffer": self.replay_buffer
"gamma": self.gamma,
"verbose": self.verbose,
"observation_space": self.observation_space,
"action_space": self.action_space,
"policy": self.policy,
"n_envs": self.n_envs,
"n_cpu_tf_sess": self.n_cpu_tf_sess,
"seed": self.seed,
"action_noise": self.action_noise,
"random_exploration": self.random_exploration,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params_to_save = self.get_parameters()
self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
|
hill-a/stable-baselines
|
stable_baselines/sac/sac.py
|
Python
|
mit
| 30,431
|
[
"Gaussian"
] |
74f210fe1ee424be07c8d5fb09f1be454ce25563fd8b9832a1125e8442411228
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wide ResNet 28-10 with SNGP on CIFAR-10.
Spectral-normalized neural GP (SNGP) [1] is a simple method to improve
a deterministic neural network's uncertainty by applying spectral
normalization to the hidden layers, and then replace the dense output layer
with a Gaussian process layer.
## Reproducibility Instruction for CIFAR-100:
When running this script on CIFAR-100, set base_learning_rate=0.04 and
gp_mean_field_factor=7.5 to reproduce the benchmark result.
## Combining with MC Dropout:
As a single-model method, SNGP can be combined with other classic
uncertainty techniques (e.g., Monte Carlo dropout, deep ensemble) to further
improve performance.
This script supports adding Monte Carlo dropout to
SNGP by setting `use_mc_dropout=True`, setting `num_dropout_samples=10`
(or any integer larger than 1). Additionally we recommend adjusting
`gp_mean_field_factor` slightly, since averaging already calibrated
individual models (in this case single SNGPs) can sometimes lead to
under-confidence [3].
## Note:
Different from the paper, this implementation computes the posterior using the
Laplace approximation based on the Gaussian likelihood (i.e., squared loss)
rather than that based on cross-entropy loss. As a result, the logits for all
classes share the same covariance. In the experiments, this approach is shown to
perform better and computationally more scalable when the number of output
classes are large.
## References:
[1]: Jeremiah Liu et al. Simple and Principled Uncertainty Estimation with
Deterministic Deep Learning via Distance Awareness.
_arXiv preprint arXiv:2006.10108_, 2020.
https://arxiv.org/abs/2006.10108
[2]: Zhiyun Lu, Eugene Ie, Fei Sha. Uncertainty Estimation with Infinitesimal
Jackknife. _arXiv preprint arXiv:2006.07584_, 2020.
https://arxiv.org/abs/2006.07584
[3]: Rahul Rahaman, Alexandre H. Thiery. Uncertainty Quantification and Deep
Ensembles. _arXiv preprint arXiv:2007.08792_, 2020.
https://arxiv.org/abs/2007.08792
[4]: Hendrycks, Dan et al. AugMix: A Simple Data Processing Method to Improve
Robustness and Uncertainty. In _International Conference on Learning
Representations_, 2020.
https://arxiv.org/abs/1912.02781
[5]: Zhang, Hongyi et al. mixup: Beyond Empirical Risk Minimization. In
_International Conference on Learning Representations_, 2018.
https://arxiv.org/abs/1710.09412
"""
import os
import time
from absl import app
from absl import flags
from absl import logging
import edward2 as ed
import robustness_metrics as rm
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
import ood_utils # local file import from baselines.cifar
import utils # local file import from baselines.cifar
from tensorboard.plugins.hparams import api as hp
# Command-line flags for the SNGP CIFAR experiment. Fixes in help strings:
# misspellings ("simgle", "diable", "Wheter") and missing spaces where
# adjacent string literals were concatenated into fused words.
flags.DEFINE_integer(
    'total_batch_size',
    256,
    'The total train (and test) batch size, split across all devices.')
# Data Augmentation flags.
flags.DEFINE_bool('augmix', False,
                  'Whether to perform AugMix [4] on the input data.')
flags.DEFINE_integer('aug_count', 1,
                     'Number of augmentation operations in AugMix to perform '
                     'on the input image. In the single model context, it '
                     'should be 1. In the ensembles context, it should be '
                     'ensemble_size if we perform random_augment only; It '
                     'should be (ensemble_size - 1) if we perform augmix.')
flags.DEFINE_float('augmix_prob_coeff', 0.5, 'Augmix probability coefficient.')
flags.DEFINE_integer('augmix_depth', -1,
                     'Augmix depth, -1 meaning sampled depth. This corresponds '
                     'to line 7 in the Algorithm box in [4].')
flags.DEFINE_integer('augmix_width', 3,
                     'Augmix width. This corresponds to the k in line 5 in the '
                     'Algorithm box in [4].')
flags.DEFINE_float('mixup_alpha', 0., 'Mixup hyperparameter, 0. to disable.')
# Dropout flags
flags.DEFINE_bool('use_mc_dropout', False,
                  'Whether to use Monte Carlo dropout for the hidden layers.')
flags.DEFINE_bool('use_filterwise_dropout', True,
                  'Whether to use filterwise dropout for the hidden layers.')
flags.DEFINE_float('dropout_rate', 0.1, 'Dropout rate.')
flags.DEFINE_integer('num_dropout_samples', 1,
                     'Number of dropout samples to use for prediction.')
flags.DEFINE_integer('num_dropout_samples_training', 1,
                     'Number of dropout samples for training.')
# SNGP flags.
flags.DEFINE_bool('use_spec_norm', True,
                  'Whether to apply spectral normalization.')
flags.DEFINE_integer(
    'spec_norm_iteration', 1,
    'Number of power iterations to perform for estimating '
    'the spectral norm of weight matrices.')
flags.DEFINE_float('spec_norm_bound', 6.,
                   'Upper bound to spectral norm of weight matrices.')
# Gaussian process flags.
flags.DEFINE_bool('use_gp_layer', True,
                  'Whether to use Gaussian process as the output layer.')
flags.DEFINE_float('gp_bias', 0., 'The bias term for GP layer.')
flags.DEFINE_float(
    'gp_scale', 1.,
    'The length-scale parameter for the RBF kernel of the GP layer.')
flags.DEFINE_integer(
    'gp_input_dim', -1,
    'The dimension to reduce the neural network input for the GP layer '
    '(via random Gaussian projection which preserves distance by the '
    ' Johnson-Lindenstrauss lemma). If -1, no dimension reduction.')
flags.DEFINE_integer(
    'gp_hidden_dim', 1024,
    'The hidden dimension of the GP layer, which corresponds to the number of '
    'random features used for the approximation.')
flags.DEFINE_bool(
    'gp_input_normalization', False,
    'Whether to normalize the input using LayerNorm for GP layer. '
    'This is similar to automatic relevance determination (ARD) in the classic '
    'GP learning.')
flags.DEFINE_string(
    'gp_random_feature_type', 'orf',
    'The type of random feature to use. One of "rff" (random fourier feature), '
    '"orf" (orthogonal random feature).')
flags.DEFINE_float('gp_cov_ridge_penalty', 1.,
                   'Ridge penalty parameter for GP posterior covariance.')
flags.DEFINE_float(
    'gp_cov_discount_factor', -1.,
    'The discount factor to compute the moving average of precision matrix '
    'across epochs. If -1 then compute the exact precision matrix within the '
    'latest epoch.')
flags.DEFINE_float(
    'gp_mean_field_factor', 20.,
    'The tunable multiplicative factor used in the mean-field approximation '
    'for the posterior mean of softmax Gaussian process. If -1 then use '
    'posterior mode instead of posterior mean. See [2] for detail.')
# OOD flags.
flags.DEFINE_bool(
    'eval_only', False,
    'Whether to run only eval and (maybe) OOD steps.')
flags.DEFINE_bool('eval_on_ood', True,
                  'Whether to run OOD evaluation on specified OOD datasets.')
flags.DEFINE_list('ood_dataset', 'cifar10,svhn_cropped',
                  'list of OOD datasets to evaluate on.')
flags.DEFINE_integer(
    'ood_interval', 25, 'Number of epochs between evaluating on OOD metrics.'
    ' Use -1 to never evaluate.')
flags.DEFINE_string('saved_model_dir', None,
                    'Directory containing the saved model checkpoints.')
flags.DEFINE_bool('dempster_shafer_ood', True,
                  'Whether to use DempsterShafer Uncertainty score.')
flags.DEFINE_list(
    'ood_tpr_threshold', ['0.8', '0.95'],
    'Threshold for true positive rate at which the false positive rate is '
    'evaluated.')
# Redefining default values
flags.FLAGS.set_default('base_learning_rate', 0.08)
flags.FLAGS.set_default('l2', 3e-4)
flags.FLAGS.set_default('train_epochs', 250)
FLAGS = flags.FLAGS
def main(argv):
  """Trains and evaluates a Wide ResNet SNGP model on CIFAR-10/100.

  Builds the (optionally spectral-normalized, GP-output) Wide ResNet 28-10,
  trains it under a tf.distribute strategy, and periodically evaluates on the
  clean test set, an optional validation split, corrupted test sets, and OOD
  datasets. Metrics are written as TF summaries and checkpoints are saved to
  FLAGS.output_dir.

  Args:
    argv: unused command-line arguments supplied by absl.app.
  """
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  # Split the seed into a 2-tuple, for passing into dataset builder.
  dataset_seed = (FLAGS.seed, FLAGS.seed + 1)

  data_dir = FLAGS.data_dir
  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  # Each training example is tiled num_dropout_samples_training times inside
  # train_step, so shrink the per-step batch to keep the effective size fixed.
  batch_size = FLAGS.total_batch_size // FLAGS.num_dropout_samples_training
  test_batch_size = FLAGS.total_batch_size
  num_classes = 10 if FLAGS.dataset == 'cifar10' else 100

  aug_params = {
      'augmix': FLAGS.augmix,
      'aug_count': FLAGS.aug_count,
      'augmix_depth': FLAGS.augmix_depth,
      'augmix_prob_coeff': FLAGS.augmix_prob_coeff,
      'augmix_width': FLAGS.augmix_width,
      'ensemble_size': 1,
      'mixup_alpha': FLAGS.mixup_alpha,
  }
  validation_proportion = 1. - FLAGS.train_proportion
  use_validation_set = validation_proportion > 0.
  if FLAGS.dataset == 'cifar10':
    dataset_builder_class = ub.datasets.Cifar10Dataset
  else:
    dataset_builder_class = ub.datasets.Cifar100Dataset
  train_dataset_builder = dataset_builder_class(
      data_dir=data_dir,
      download_data=FLAGS.download_data,
      split=tfds.Split.TRAIN,
      use_bfloat16=FLAGS.use_bfloat16,
      aug_params=aug_params,
      validation_percent=validation_proportion,
      shuffle_buffer_size=FLAGS.shuffle_buffer_size,
      seed=dataset_seed)
  train_dataset = train_dataset_builder.load(batch_size=batch_size)
  if validation_proportion > 0.:
    validation_dataset_builder = dataset_builder_class(
        data_dir=data_dir,
        download_data=FLAGS.download_data,
        split=tfds.Split.VALIDATION,
        use_bfloat16=FLAGS.use_bfloat16,
        validation_percent=validation_proportion,
        drop_remainder=FLAGS.drop_remainder_for_eval)
    validation_dataset = validation_dataset_builder.load(
        batch_size=test_batch_size)
    validation_dataset = strategy.experimental_distribute_dataset(
        validation_dataset)
    val_sample_size = validation_dataset_builder.num_examples
    # Bug fix: the original chained assignment also clobbered
    # `steps_per_epoch` here; only the validation step count is needed.
    steps_per_val = int(val_sample_size / test_batch_size)
  clean_test_dataset_builder = dataset_builder_class(
      data_dir=data_dir,
      download_data=FLAGS.download_data,
      split=tfds.Split.TEST,
      use_bfloat16=FLAGS.use_bfloat16,
      drop_remainder=FLAGS.drop_remainder_for_eval)
  clean_test_dataset = clean_test_dataset_builder.load(
      batch_size=test_batch_size)

  steps_per_epoch = train_dataset_builder.num_examples // batch_size
  steps_per_eval = clean_test_dataset_builder.num_examples // test_batch_size
  train_dataset = strategy.experimental_distribute_dataset(train_dataset)
  test_datasets = {
      'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
  }
  if FLAGS.eval_on_ood:
    ood_dataset_names = FLAGS.ood_dataset
    ood_ds, steps_per_ood = ood_utils.load_ood_datasets(
        ood_dataset_names,
        clean_test_dataset_builder,
        validation_proportion,
        test_batch_size,
        drop_remainder=FLAGS.drop_remainder_for_eval)
    ood_datasets = {
        name: strategy.experimental_distribute_dataset(ds)
        for name, ds in ood_ds.items()
    }
  if FLAGS.corruptions_interval > 0:
    if FLAGS.dataset == 'cifar100':
      data_dir = FLAGS.cifar100_c_path
    corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
    for corruption_type in corruption_types:
      for severity in range(1, 6):
        # NOTE(review): corrupted test sets are loaded with the (smaller)
        # training `batch_size` while `steps_per_eval` is derived from
        # `test_batch_size` — confirm this is intentional.
        dataset = ub.datasets.get(
            f'{FLAGS.dataset}_corrupted',
            corruption_type=corruption_type,
            severity=severity,
            split=tfds.Split.TEST,
            data_dir=data_dir,
            drop_remainder=FLAGS.drop_remainder_for_eval).load(
                batch_size=batch_size)
        test_datasets[f'{corruption_type}_{severity}'] = (
            strategy.experimental_distribute_dataset(dataset))

  if FLAGS.use_bfloat16:
    tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  with strategy.scope():
    logging.info('Building ResNet model')
    if FLAGS.use_spec_norm:
      logging.info('Use Spectral Normalization with norm bound %.2f',
                   FLAGS.spec_norm_bound)
    if FLAGS.use_gp_layer:
      logging.info('Use GP layer with hidden units %d', FLAGS.gp_hidden_dim)
    model = ub.models.wide_resnet_sngp(
        input_shape=(32, 32, 3),
        batch_size=batch_size,
        depth=28,
        width_multiplier=10,
        num_classes=num_classes,
        l2=FLAGS.l2,
        use_mc_dropout=FLAGS.use_mc_dropout,
        use_filterwise_dropout=FLAGS.use_filterwise_dropout,
        dropout_rate=FLAGS.dropout_rate,
        use_gp_layer=FLAGS.use_gp_layer,
        gp_input_dim=FLAGS.gp_input_dim,
        gp_hidden_dim=FLAGS.gp_hidden_dim,
        gp_scale=FLAGS.gp_scale,
        gp_bias=FLAGS.gp_bias,
        gp_input_normalization=FLAGS.gp_input_normalization,
        gp_random_feature_type=FLAGS.gp_random_feature_type,
        gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
        gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
        use_spec_norm=FLAGS.use_spec_norm,
        spec_norm_iteration=FLAGS.spec_norm_iteration,
        spec_norm_bound=FLAGS.spec_norm_bound)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())

    # Linearly scale learning rate and the decay epochs by vanilla settings.
    base_lr = FLAGS.base_learning_rate * batch_size / 128
    lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                       for start_epoch_str in FLAGS.lr_decay_epochs]
    lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
        steps_per_epoch,
        base_lr,
        decay_ratio=FLAGS.lr_decay_ratio,
        decay_epochs=lr_decay_epochs,
        warmup_epochs=FLAGS.lr_warmup_epochs)
    optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                        momentum=1.0 - FLAGS.one_minus_momentum,
                                        nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': rm.metrics.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': rm.metrics.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins),
        'test/stddev': tf.keras.metrics.Mean(),
    }
    if use_validation_set:
      metrics.update({
          'val/negative_log_likelihood': tf.keras.metrics.Mean(),
          'val/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
          'val/ece': rm.metrics.ExpectedCalibrationError(
              num_bins=FLAGS.num_bins),
          'val/stddev': tf.keras.metrics.Mean(),
      })
    if FLAGS.eval_on_ood:
      ood_metrics = ood_utils.create_ood_metrics(
          ood_dataset_names, tpr_list=FLAGS.ood_tpr_threshold)
      metrics.update(ood_metrics)
    if FLAGS.corruptions_interval > 0:
      corrupt_metrics = {}
      for intensity in range(1, 6):
        for corruption in corruption_types:
          dataset_name = '{0}_{1}'.format(corruption, intensity)
          corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
              rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
          corrupt_metrics['test/stddev_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    logging.info('Output dir : %s', FLAGS.output_dir)
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    if FLAGS.saved_model_dir:
      logging.info('Saved model dir : %s', FLAGS.saved_model_dir)
      latest_checkpoint = tf.train.latest_checkpoint(FLAGS.saved_model_dir)
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
    if FLAGS.eval_only:
      initial_epoch = FLAGS.train_epochs - 1  # Run just one epoch of eval

  @tf.function
  def train_step(iterator, step):
    """Training StepFn."""

    def step_fn(inputs, step):
      """Per-Replica StepFn."""
      images = inputs['features']
      labels = inputs['labels']

      if tf.equal(step, 0) and FLAGS.gp_cov_discount_factor < 0:
        # Resetting covariance estimator at the beginning of a new epoch.
        if FLAGS.use_gp_layer:
          model.layers[-1].reset_covariance_matrix()

      if FLAGS.augmix and FLAGS.aug_count >= 1:
        # Index 0 at augmix preprocessing is the unperturbed image.
        images = images[:, 1, ...]
        # This is for the case of combining AugMix and Mixup.
        if FLAGS.mixup_alpha > 0:
          labels = tf.split(labels, FLAGS.aug_count + 1, axis=0)[1]
      images = tf.tile(images, [FLAGS.num_dropout_samples_training, 1, 1, 1])
      if FLAGS.mixup_alpha > 0:
        labels = tf.tile(labels, [FLAGS.num_dropout_samples_training, 1])
      else:
        labels = tf.tile(labels, [FLAGS.num_dropout_samples_training])

      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        if isinstance(logits, (list, tuple)):
          # If model returns a tuple of (logits, covmat), extract logits
          logits, _ = logits
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)

        if FLAGS.mixup_alpha > 0:
          negative_log_likelihood = tf.reduce_mean(
              tf.keras.losses.categorical_crossentropy(labels,
                                                       logits,
                                                       from_logits=True))
        else:
          negative_log_likelihood = tf.reduce_mean(
              tf.keras.losses.sparse_categorical_crossentropy(labels,
                                                              logits,
                                                              from_logits=True))

        l2_loss = sum(model.losses)
        loss = negative_log_likelihood + l2_loss
        # Scale the loss given the TPUStrategy will reduce sum all gradients.
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

      probs = tf.nn.softmax(logits)
      if FLAGS.mixup_alpha > 0:
        labels = tf.argmax(labels, axis=-1)

      metrics['train/ece'].add_batch(probs, label=labels)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, logits)

    strategy.run(step_fn, args=(next(iterator), step))

  @tf.function
  def test_step(iterator, dataset_name, num_steps):
    """Evaluation StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images = inputs['features']
      labels = inputs['labels']

      logits_list = []
      stddev_list = []
      for _ in range(FLAGS.num_dropout_samples):
        logits = model(images, training=False)

        if isinstance(logits, (list, tuple)):
          # If model returns a tuple of (logits, covmat), extract both
          logits, covmat = logits
          if FLAGS.use_bfloat16:
            logits = tf.cast(logits, tf.float32)
          logits = ed.layers.utils.mean_field_logits(
              logits, covmat, mean_field_factor=FLAGS.gp_mean_field_factor)
        else:
          covmat = tf.eye(logits.shape[0])
          if FLAGS.use_bfloat16:
            logits = tf.cast(logits, tf.float32)

        stddev = tf.sqrt(tf.linalg.diag_part(covmat))
        stddev_list.append(stddev)
        logits_list.append(logits)

      # Logits dimension is (num_samples, batch_size, num_classes).
      logits_list = tf.stack(logits_list, axis=0)
      stddev_list = tf.stack(stddev_list, axis=0)

      stddev = tf.reduce_mean(stddev_list, axis=0)
      probs_list = tf.nn.softmax(logits_list)
      probs = tf.reduce_mean(probs_list, axis=0)
      logits = tf.reduce_mean(logits_list, axis=0)

      labels_broadcasted = tf.broadcast_to(
          labels, [FLAGS.num_dropout_samples,
                   tf.shape(labels)[0]])
      log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
          labels_broadcasted, logits_list, from_logits=True)
      negative_log_likelihood = tf.reduce_mean(
          -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
          tf.math.log(float(FLAGS.num_dropout_samples)))

      logging.info('Dataset name : %s', dataset_name)
      if dataset_name == 'clean':
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].add_batch(probs, label=labels)
        metrics['test/stddev'].update_state(stddev)
      elif dataset_name == 'val':
        metrics['val/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['val/accuracy'].update_state(labels, probs)
        metrics['val/ece'].add_batch(probs, label=labels)
        metrics['val/stddev'].update_state(stddev)
      elif dataset_name.startswith('ood/'):
        ood_labels = 1 - inputs['is_in_distribution']
        if FLAGS.dempster_shafer_ood:
          ood_scores = ood_utils.DempsterShaferUncertainty(logits)
        else:
          ood_scores = 1 - tf.reduce_max(probs, axis=-1)

        # Edgecase for if dataset_name contains underscores
        for name, metric in metrics.items():
          if dataset_name in name:
            metric.update_state(ood_labels, ood_scores)
      elif FLAGS.corruptions_interval > 0:
        corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
            probs, label=labels)
        corrupt_metrics['test/stddev_{}'.format(dataset_name)].update_state(
            stddev)

    for _ in tf.range(tf.cast(num_steps, tf.int32)):
      strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  step_variable = tf.Variable(0, dtype=tf.int32)
  train_iterator = iter(train_dataset)
  start_time = time.time()

  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    if not FLAGS.eval_only:
      for step in range(steps_per_epoch):
        step_variable.assign(step)
        # Pass `step` as a tf.Variable to train_step to prevent the tf.function
        # train_step() re-compiling itself at each function call.
        train_step(train_iterator, step_variable)

        current_step = epoch * steps_per_epoch + (step + 1)
        max_steps = steps_per_epoch * FLAGS.train_epochs
        time_elapsed = time.time() - start_time
        steps_per_sec = float(current_step) / time_elapsed
        eta_seconds = (max_steps - current_step) / steps_per_sec
        message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                   'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                       current_step / max_steps,
                       epoch + 1,
                       FLAGS.train_epochs,
                       steps_per_sec,
                       eta_seconds / 60,
                       time_elapsed / 60))
        if step % 20 == 0:
          logging.info(message)

    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if use_validation_set:
      datasets_to_evaluate['val'] = validation_dataset
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      # Bug fix: use a separate local for the per-dataset step count. The
      # original reassigned `steps_per_eval` itself, so once the 'val' split
      # was evaluated, every later dataset (and every subsequent epoch) ran
      # with the validation step count instead of the test step count.
      num_eval_steps = (
          steps_per_val if dataset_name == 'val' else steps_per_eval)
      logging.info('Starting to run eval at epoch: %s', epoch)
      test_start_time = time.time()
      test_step(test_iterator, dataset_name, num_eval_steps)
      # NOTE(review): this timing divides by the training `batch_size`, not
      # `test_batch_size` — confirm which is intended.
      ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
      metrics['test/ms_per_example'].update_state(ms_per_example)

      logging.info('Done with testing on %s', dataset_name)

    if FLAGS.eval_on_ood:
      for ood_dataset_name, ood_dataset in ood_datasets.items():
        ood_iterator = iter(ood_dataset)
        logging.info('Calculating OOD on dataset %s', ood_dataset_name)
        logging.info('Running OOD eval at epoch: %s', epoch)
        test_step(ood_iterator, ood_dataset_name,
                  steps_per_ood[ood_dataset_name])
        # Bug fix: log the OOD dataset actually evaluated, not the leftover
        # `dataset_name` from the previous loop.
        logging.info('Done with OOD eval on %s', ood_dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                        corruption_types)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    if use_validation_set:
      logging.info('Val NLL: %.4f, Accuracy: %.2f%%',
                   metrics['val/negative_log_likelihood'].result(),
                   metrics['val/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)

    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    # Metrics from Robustness Metrics (like ECE) will return a dict with a
    # single key/value, instead of a scalar.
    total_results = {
        k: (list(v.values())[0] if isinstance(v, dict) else v)
        for k, v in total_results.items()
    }
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if FLAGS.corruptions_interval > 0:
      for metric in corrupt_metrics.values():
        metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_checkpoint_name = checkpoint.save(
      os.path.join(FLAGS.output_dir, 'checkpoint'))
  logging.info('Saved last checkpoint to %s', final_checkpoint_name)

  final_save_name = os.path.join(FLAGS.output_dir, 'model')
  model.save(final_save_name)
  logging.info('Saved model to %s', final_save_name)

  with summary_writer.as_default():
    hp.hparams({
        'base_learning_rate': FLAGS.base_learning_rate,
        'one_minus_momentum': FLAGS.one_minus_momentum,
        'l2': FLAGS.l2,
        'gp_mean_field_factor': FLAGS.gp_mean_field_factor,
    })
# Script entry point: absl parses the flags defined above and invokes main().
if __name__ == '__main__':
  app.run(main)
|
google/uncertainty-baselines
|
baselines/cifar/sngp.py
|
Python
|
apache-2.0
| 29,003
|
[
"Gaussian"
] |
856aaf257e097015fba12ac265292e155a5be9a2edd73e1a21ce174269d4d1dc
|
# coding: utf-8
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
import base64
import os
from textwrap import dedent
from pypln.backend.workers import Extractor
from utils import TaskTest
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
class TestExtractorWorker(TaskTest):
    """Integration tests for the text/metadata extraction worker.

    Each test stores a base64-encoded document in the test MongoDB
    collection, runs the Extractor task on it, and checks the extracted
    text, detected mimetype, detected language and file metadata.  Fixture
    files are read through a context manager (the original tests leaked
    file handles via bare ``open(...).read()`` calls).
    """

    def _read_fixture(self, filename):
        """Return the base64-encoded contents of *filename*, closing the file."""
        with open(filename) as fp:
            return base64.b64encode(fp.read())

    def test_extraction_from_text_file(self):
        """Plain-text content is extracted verbatim with empty file metadata."""
        expected = "This is a test file.\nI'm testing PyPLN extractor worker!"
        filename = os.path.join(DATA_DIR, 'test.txt')
        doc_id = self.collection.insert(
            {'filename': filename,
             'contents': self._read_fixture(filename)}, w=1)
        Extractor().delay(doc_id)
        refreshed_document = self.collection.find_one({'_id': doc_id})
        self.assertEqual(refreshed_document['text'], expected)
        self.assertEqual(refreshed_document['file_metadata'], {})
        self.assertEqual(refreshed_document['mimetype'], 'text/plain')

    def test_extraction_from_html_file(self):
        """HTML markup is stripped and the text content returned."""
        expected = "This is a test file. I'm testing PyPLN extractor worker!"
        filename = os.path.join(DATA_DIR, 'test.html')
        # When saving directly to mongodb we always get everything back from
        # the database as unicode. Because of that, the extractor is having
        # problems when there is a non-ascii character in the content. This
        # wasn't a problem before because with mongodict we used to keep a
        # pickled representation of the data.
        data = {'filename': filename,
                'contents': self._read_fixture(filename)}
        doc_id = self.collection.insert(data, w=1)
        Extractor().delay(doc_id)
        refreshed_document = self.collection.find_one({'_id': doc_id})
        self.assertEqual(refreshed_document['text'], expected)
        self.assertEqual(refreshed_document['file_metadata'], {})
        self.assertEqual(refreshed_document['mimetype'], 'text/html')

    def test_extraction_from_pdf_file(self):
        """PDF text and (a superset of) the expected metadata are extracted."""
        expected = "This is a test file.\nI'm testing PyPLN extractor worker!"
        filename = os.path.join(DATA_DIR, 'test.pdf')
        data = {'filename': filename,
                'contents': self._read_fixture(filename)}
        doc_id = self.collection.insert(data, w=1)
        Extractor().delay(doc_id)
        refreshed_document = self.collection.find_one({'_id': doc_id})
        self.assertEqual(refreshed_document['text'], expected)
        # Check that the expected metadata is a subset of what
        # our Extractor found (it may have found more details
        # depending on the toolset used to extract metadata)
        metadata_expected = {
            u'Author': u'Álvaro Justen',
            u'Creator': u'Writer',
            u'Producer': u'LibreOffice 3.5',
            u'CreationDate': u'Fri Jun 1 17:07:57 2012',
            u'Tagged': u'no',
            u'Pages': u'1',
            u'Encrypted': u'no',
            u'Page size': u'612 x 792 pts (letter)',
            u'Optimized': u'no',
            u'PDF version': u'1.4',
        }
        metadata_expected_set = set(metadata_expected.iteritems())
        metadata = refreshed_document['file_metadata']
        metadata_set = set(metadata.iteritems())
        diff_set = metadata_expected_set - metadata_set
        self.assertTrue(metadata_expected_set.issubset(metadata_set),
                ("Extracted metadata is not a subset of the expected metadata. "
                 "Items missing or with different values: {}").format(
                     u", ".join(unicode(item) for item in diff_set)))
        self.assertEqual(refreshed_document['mimetype'], 'application/pdf')

    def test_extraction_from_html(self):
        """Scripts/styles are dropped and block elements become line breaks."""
        contents = dedent('''
        <html>
        <head>
        <title>Testing</title>
        <script type="text/javascript">this must not appear</script>
        <style type="text/css">this must not appear [2]</style>
        </head>
        <body>
        python test1
        <br>
        test2
        <table>
        <tr><td>spam</td></tr>
        <tr><td>eggs</td></tr>
        <tr><td>ham</td></tr>
        </table>
        test3
        <div>test4</div>test5
        <span>test6</span>test7
        <h1>bla1</h1> bla2
        </body>
        </html>
        ''')
        data = {'filename': 'test.html',
                'contents': base64.b64encode(contents)}
        doc_id = self.collection.insert(data, w=1)
        Extractor().delay(doc_id)
        expected = dedent('''
        Testing
        python test1
        test2
        spam
        eggs
        ham
        test3
        test4
        test5 test6 test7
        bla1
        bla2''').strip()
        refreshed_document = self.collection.find_one({'_id': doc_id})
        self.assertEqual(refreshed_document['text'], expected)
        self.assertEqual(refreshed_document['mimetype'], 'text/html')

    def test_language_detection_pt(self):
        """Portuguese text is tagged with language code 'pt'."""
        text_pt = 'Esse texto foi escrito por Álvaro em Português.'
        data_pt = {'filename': 'text-pt.txt',
                   'contents': base64.b64encode(text_pt)}
        doc_id = self.collection.insert(data_pt, w=1)
        Extractor().delay(doc_id)
        refreshed_document = self.collection.find_one({'_id': doc_id})
        self.assertEqual(refreshed_document['language'], 'pt')

    def test_language_detection_es(self):
        """Spanish text is tagged with language code 'es'."""
        text_es = 'Este texto ha sido escrito en Español por Álvaro.'
        data_es = {'filename': 'text-es.txt',
                   'contents': base64.b64encode(text_es)}
        doc_id = self.collection.insert(data_es, w=1)
        Extractor().delay(doc_id)
        refreshed_document = self.collection.find_one({'_id': doc_id})
        self.assertEqual(refreshed_document['language'], 'es')

    def test_language_detection_en(self):
        """English text is tagged with language code 'en'."""
        text_en = 'This text was written by Álvaro in English.'
        data_en = {'filename': 'text-en.txt',
                   'contents': base64.b64encode(text_en)}
        doc_id = self.collection.insert(data_en, w=1)
        Extractor().delay(doc_id)
        refreshed_document = self.collection.find_one({'_id': doc_id})
        self.assertEqual(refreshed_document['language'], 'en')

    def test_unescape_html_entities(self):
        """HTML entities inside plain-text input are unescaped."""
        expected = (u"This text has html <entities>. Álvaro asked me to make"
                    " sure it also has non ascii chars.")
        filename = os.path.join(DATA_DIR, 'test_html_entities.txt')
        data = {'filename': filename,
                'contents': self._read_fixture(filename)}
        doc_id = self.collection.insert(data, w=1)
        Extractor().delay(doc_id)
        refreshed_document = self.collection.find_one({'_id': doc_id})
        self.assertEqual(refreshed_document['text'], expected)

    def test_should_detect_encoding_and_return_a_unicode_object(self):
        """ISO-8859-1 input is decoded and returned as a unicode object."""
        expected = u"Flávio"
        filename = os.path.join(DATA_DIR, 'test_iso-8859-1.txt')
        data = {'filename': filename,
                'contents': self._read_fixture(filename)}
        doc_id = self.collection.insert(data, w=1)
        Extractor().delay(doc_id)
        refreshed_document = self.collection.find_one({'_id': doc_id})
        self.assertEqual(refreshed_document['text'], expected)
        self.assertEqual(type(refreshed_document['text']), unicode)

    def test_should_guess_mimetype_for_file_without_extension(self):
        """Mimetype is sniffed from content when the filename lacks an extension."""
        contents = "This is a test file. I'm testing PyPLN extractor worker!"
        filename = os.path.join(DATA_DIR, 'text_file')
        data = {'filename': filename,
                'contents': base64.b64encode(contents)}
        doc_id = self.collection.insert(data, w=1)
        Extractor().delay(doc_id)
        refreshed_document = self.collection.find_one({'_id': doc_id})
        self.assertEqual(refreshed_document['mimetype'], 'text/plain')

    def test_unknown_mimetype_should_be_flagged(self):
        """Unrecognizable content is flagged 'unknown' and left unprocessed."""
        filename = os.path.join(DATA_DIR, 'random_file')
        # The fixture is random bytes, so there is no expected text; the
        # extractor should flag the mimetype and leave text/language/metadata
        # empty.
        data = {'filename': filename,
                'contents': self._read_fixture(filename)}
        doc_id = self.collection.insert(data, w=1)
        Extractor().delay(doc_id)
        refreshed_document = self.collection.find_one({'_id': doc_id})
        self.assertEqual(refreshed_document['mimetype'], 'unknown')
        self.assertEqual(refreshed_document['text'], "")
        self.assertEqual(refreshed_document['language'], "")
        self.assertEqual(refreshed_document['file_metadata'], {})

    def test_unknown_encoding_should_be_ignored(self):
        """Bytes libmagic cannot identify still yield extracted text."""
        filename = os.path.join(DATA_DIR, 'encoding_unknown_to_libmagic.txt')
        expected = u"This file has a weird byte (\x96) that makes it impossible for libmagic to recognize it's encoding."
        data = {'filename': filename,
                'contents': self._read_fixture(filename)}
        doc_id = self.collection.insert(data, w=1)
        Extractor().delay(doc_id)
        refreshed_document = self.collection.find_one({'_id': doc_id})
        self.assertEqual(refreshed_document['text'], expected)
        self.assertEqual(refreshed_document['file_metadata'], {})
        self.assertEqual(refreshed_document['language'], 'en')
|
NAMD/pypln.backend
|
tests/test_worker_extractor.py
|
Python
|
gpl-3.0
| 10,233
|
[
"NAMD"
] |
d384ce9acb971a7d786bddad3a945deceb4bc0fdee86a519112433dcf49e2d53
|
#!/usr/bin/env python
# This file is part of PRAW.
#
# PRAW is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PRAW is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# PRAW. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable-msg=C0103, C0302, R0903, R0904, W0201
"""Tests. Split into classes according to what they test."""
from __future__ import print_function, unicode_literals
import mock
import os
import random
import sys
import time
import unittest
import uuid
import warnings
from functools import wraps
from requests.compat import urljoin
from requests.exceptions import HTTPError
from six import text_type
from praw import Reddit, decorators, errors, helpers, internal
from praw.objects import (Comment, LoggedInRedditor, Message, MoreComments,
Submission)
USER_AGENT = 'PRAW_test_suite'
def _normalized_flair_items(flairlist):
    """Normalize a flairlist to a set of lowercase (user, text, css) tuples.

    Missing or falsy values are treated as the empty string so that optional
    arguments compare equal regardless of how they were omitted.
    """
    keys = ['user', 'flair_text', 'flair_css_class']
    return set(tuple(item[key].lower() if key in item and item[key] else ''
                     for key in keys) for item in flairlist)


def flair_diff(root, other):
    """Return flair entries present in *root* but missing from *other*.

    Comparison is case-insensitive and tolerant of omitted optional fields.
    (The normalization logic was previously duplicated inline for both
    arguments; it now lives in ``_normalized_flair_items``.)
    """
    return list(_normalized_flair_items(root) - _normalized_flair_items(other))
def interactive_only(function):
    """Decorator: run *function* only when the INTERACTIVE env var is set.

    When INTERACTIVE is unset the wrapped test is skipped with a message and
    returns None.
    """
    @wraps(function)
    def wrapped(obj):
        if not os.getenv('INTERACTIVE'):
            print('Passing interactive only test: {0}.{1}'
                  .format(obj.__class__.__name__, function.__name__))
            return None
        return function(obj)
    return wrapped
def local_only(function):
    """Decorator: run *function* only against a local (non-reddit.com) site.

    When the configured site is reddit.com the wrapped test is skipped with a
    message and returns None.
    """
    @wraps(function)
    def wrapped(obj):
        if obj.r.config.is_reddit:
            print('Passing local only test: {0}.{1}'
                  .format(obj.__class__.__name__, function.__name__))
            return None
        return function(obj)
    return wrapped
def reddit_only(function):
    """Decorator: run *function* only against the real reddit.com site.

    When the configured site is not reddit.com the wrapped test is skipped
    with a message and returns None.
    """
    @wraps(function)
    def wrapped(obj):
        if not obj.r.config.is_reddit:
            print('Passing reddit only test: {0}.{1}'
                  .format(obj.__class__.__name__, function.__name__))
            return None
        return function(obj)
    return wrapped
def prompt(msg):
    """Write *msg* to stdout and return one stripped line read from stdin.

    Reads one character at a time so it works with both terminals and piped
    input.  Stops at a newline *or* EOF — the original looped forever on EOF
    because ``sys.stdin.read(1)`` returns ``''`` (which never equals
    ``'\\n'``) once the stream is exhausted.
    """
    sys.stdout.write(msg)
    sys.stdout.flush()
    chars = []
    while True:
        cur = sys.stdin.read(1)
        if cur == '' or cur == '\n':  # EOF or end of line
            break
        chars.append(cur)
    return ''.join(chars).strip()
class BasicHelper(object):
    """Mixin providing shared configuration and utilities for the test cases.

    Not a TestCase itself; classes mix it into unittest.TestCase subclasses
    and call configure() from setUp().
    """

    def configure(self):
        """Create the Reddit session and the fixture names/URLs the tests use.

        Fixture values differ depending on whether the configured site is the
        real reddit.com or a local test instance.
        """
        self.r = Reddit(USER_AGENT, disable_update_check=True)
        self.sr = 'reddit_api_test'
        self.priv_sr = 'reddit_api_test_priv'
        self.un = 'PyAPITestUser2'
        self.other_user_name = 'PyAPITestUser3'
        self.other_non_mod_name = 'PyAPITestUser4'
        self.invalid_user_name = 'PyAPITestInvalid'
        self.un_pswd = '1111'
        self.other_user_pswd = '1111'
        self.other_non_mod_pswd = '1111'

        if self.r.config.is_reddit:
            # Fixtures that live on the real reddit.com site.
            self.comment_url = self.url('/r/redditdev/comments/dtg4j/')
            self.link_url = self.url('/r/UCSantaBarbara/comments/m77nc/')
            self.link_url_link = 'http://imgur.com/Vr8ZZ'
            self.more_comments_url = self.url('/r/redditdev/comments/dqkfz/')
            self.other_user_id = '6c1xj'
            self.priv_submission_id = '16kbb7'
            # Pre-authorized OAuth refresh tokens, keyed by scope.
            self.refresh_token = {
                'edit': 'FFx_0G7Zumyh4AWzIo39bG9KdIM',
                'history': 'j_RKymm8srC3j6cxysYFQZbB4vc',
                'identity': 'E4BgmO7iho0KOB1XlT8WEtyySf8',
                'modconfig': 'bBGRgMY9Ai9_SZLZsaFvS647Mgk',
                'modflair': 'UrMbtk4bOa040XAVz0uQn2gTE3s',
                'modlog': 'ADW_EDS9-bh7Zicc7ARx7w8ZLMA',
                'modposts': 'Ffnae7s4K-uXYZB5ZaYJgh0d8DI',
                'mysubreddits': 'O7tfWhqem6fQZqxhoTiLca1s7VA',
                'privatemessages': 'kr_pHPO3sqTn_m5f_FX9TW4joEU',
                'read': '_mmtb8YjDym0eC26G-rTxXUMea0',
                'submit': 'k69WTwa2bEQOQY9t61nItd4twhw',
                'subscribe': 'LlqwOLjyu_l6GMZIBqhcLWB0hAE',
                'vote': '5RPnDwg56vAbf7F9yO81cXZAPSQ'}
            self.submission_edit_id = '16i92b'
        else:
            # Fixtures for a local test instance of reddit.
            self.comment_url = self.url(
                '/r/reddit_api_test/comments/iq/_/3a7/')
            self.link_url = self.url('/r/reddit_test6/comments/y/')
            self.link_url_link = 'http://google.com/?q=29.9093488449'
            self.more_comments_url = self.url('/r/reddit_test6/comments/y/')
            self.other_user_id = 'pk'

    def delay(self, amount=None):
        """Sleep to avoid hammering the API when no request delay is set."""
        if amount:
            time.sleep(amount)
        elif self.r.config.api_request_delay == 0:
            time.sleep(0.1)

    def disable_cache(self):
        """Turn off the response cache so subsequent requests hit the API."""
        self.r.config.cache_timeout = 0

    def first(self, seq, predicate):
        """Return the first item of *seq* matching *predicate*; fail if none."""
        first_hit = next((x for x in seq if predicate(x)), None)
        # Usage of self.assertTrue assumes all inheritance of this Class also
        # inherits from unittest.Testcase
        # pylint: disable-msg=E1101
        self.assertTrue(first_hit is not None)
        return first_hit

    def url(self, path):
        """Return *path* joined onto the configured site's base URL."""
        # pylint: disable-msg=W0212
        return urljoin(self.r.config._site_url, path)
class AuthenticatedHelper(BasicHelper):
    """BasicHelper that additionally logs in as the primary test user."""

    def configure(self):
        """Configure the base helper, then authenticate as ``self.un``."""
        super(AuthenticatedHelper, self).configure()
        self.r.login(self.un, self.un_pswd)
class AccessControlTests(unittest.TestCase, BasicHelper):
    """Verify that privileged API calls raise the proper access-control errors."""

    def setUp(self):
        self.configure()

    def test_exception_get_flair_list_unauthenticated(self):
        """get_flair_list requires a login or an OAuth scope."""
        self.assertRaises(errors.LoginOrScopeRequired, self.r.get_flair_list,
                          self.sr)

    def test_login_or_oauth_required_not_logged_in(self):
        """add_flair_template requires a login or an OAuth scope."""
        self.assertRaises(errors.LoginOrScopeRequired,
                          self.r.add_flair_template, self.sr, 'foo')

    def test_login_or_oauth_required_not_logged_in_mod_func(self):
        """get_settings (a mod function) requires a login or an OAuth scope."""
        self.assertRaises(errors.LoginOrScopeRequired,
                          self.r.get_settings, self.sr)

    def test_login_required_not_logged_in(self):
        """accept_moderator_invite strictly requires a login."""
        self.assertRaises(errors.LoginRequired, self.r.accept_moderator_invite,
                          self.sr)

    def test_login_required_not_logged_in_mod_func(self):
        """get_banned (a mod function) strictly requires a login."""
        self.assertRaises(errors.LoginRequired, self.r.get_banned, self.sr)

    def test_oauth_scope_required(self):
        """A call needing the 'identity' scope fails with an unrelated scope."""
        self.r.set_oauth_app_info('dummy_client', 'dummy_secret', 'dummy_url')
        # Bug fix: the original used ``set('dummy_scope',)`` which builds a
        # set of the individual *characters* (the trailing comma inside a
        # call does not create a tuple).  Wrap the scope in a list instead.
        self.r.set_access_credentials(set(['dummy_scope']), 'dummy_token')
        self.assertRaises(errors.OAuthScopeRequired, self.r.get_me)

    def test_moderator_or_oauth_required_logged_in_from_reddit_obj(self):
        """A non-moderator login cannot fetch subreddit settings (Reddit obj)."""
        oth = Reddit(USER_AGENT, disable_update_check=True)
        oth.login(self.other_non_mod_name, self.other_non_mod_pswd)
        self.assertRaises(errors.ModeratorOrScopeRequired,
                          oth.get_settings, self.sr)

    def test_moderator_or_oauth_required_logged_in_from_submission_obj(self):
        """A non-moderator login cannot remove a submission."""
        oth = Reddit(USER_AGENT, disable_update_check=True)
        oth.login(self.other_non_mod_name, self.other_non_mod_pswd)
        submission = oth.get_submission(url=self.comment_url)
        self.assertRaises(errors.ModeratorOrScopeRequired, submission.remove)

    def test_moderator_or_oauth_required_logged_in_from_subreddit_obj(self):
        """A non-moderator login cannot fetch settings via a Subreddit object."""
        oth = Reddit(USER_AGENT, disable_update_check=True)
        oth.login(self.other_non_mod_name, self.other_non_mod_pswd)
        subreddit = oth.get_subreddit(self.sr)
        self.assertRaises(errors.ModeratorOrScopeRequired,
                          subreddit.get_settings)

    def test_moderator_required_multi(self):
        """Mod-queue access on a multireddit requires moderating all of it."""
        self.r.login(self.un, self.un_pswd)
        sub = self.r.get_subreddit('{0}+{1}'.format(self.sr, 'test'))
        self.assertRaises(errors.ModeratorRequired, sub.get_mod_queue)

    def test_require_access_failure(self):
        """restrict_access rejects oauth_only without a scope."""
        self.assertRaises(TypeError, decorators.restrict_access, scope=None,
                          oauth_only=True)
class BasicTest(unittest.TestCase, BasicHelper):
    """Unauthenticated smoke tests for core session and listing behavior."""

    def setUp(self):
        self.configure()

    def test_base_36_conversion(self):
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(helpers.convert_numeric_id_to_id36(295), '87')
        self.assertEqual(helpers.convert_id36_to_numeric_id('87'), 295)
        self.assertEqual(helpers.convert_numeric_id_to_id36(275492), '5wkk')
        self.assertEqual(helpers.convert_id36_to_numeric_id('5wkk'), 275492)
        self.assertRaises(TypeError, helpers.convert_numeric_id_to_id36)
        self.assertRaises(ValueError, helpers.convert_numeric_id_to_id36, '1')
        self.assertRaises(ValueError, helpers.convert_numeric_id_to_id36, -1)
        self.assertRaises(TypeError, helpers.convert_id36_to_numeric_id)
        self.assertRaises(ValueError, helpers.convert_id36_to_numeric_id,
                          't3_87')
        self.assertRaises(ValueError, helpers.convert_id36_to_numeric_id, 87)

    def test_comments_contains_no_noncomment_objects(self):
        comments = self.r.get_submission(url=self.comment_url).comments
        self.assertFalse([item for item in comments if not
                          (isinstance(item, Comment) or
                           isinstance(item, MoreComments))])

    def test_decode_entities(self):
        # BUG FIX: the pre-decode assertions previously checked the same
        # '<' / '>' as the post-decode ones, making the toggle untestable;
        # raw selftext_html is entity-escaped.
        text = self.r.get_submission(url=self.comment_url).selftext_html
        self.assertTrue(text.startswith('&lt;'))
        self.assertTrue(text.endswith('&gt;'))
        self.r.config.decode_html_entities = True
        text = self.r.get_submission(url=self.comment_url).selftext_html
        self.assertTrue(text.startswith('<'))
        self.assertTrue(text.endswith('>'))

    def test_deprecation(self):
        # get_all_comments is deprecated; swallow the warning it raises.
        with warnings.catch_warnings():
            self.r.get_all_comments()

    def test_equality(self):
        subreddit = self.r.get_subreddit(self.sr)
        same_subreddit = self.r.get_subreddit(self.sr)
        submission = next(subreddit.get_hot())
        self.assertTrue(subreddit == same_subreddit)
        self.assertFalse(subreddit != same_subreddit)
        self.assertFalse(subreddit == submission)

    def test_get_comments(self):
        num = 50
        result = self.r.get_comments(self.sr, limit=num)
        self.assertEqual(num, len(list(result)))

    def test_get_comments_gilded(self):
        gilded_comments = self.r.get_comments('all', gilded_only=True)
        self.assertTrue(all(comment.gilded > 0 for comment in
                            gilded_comments))

    @reddit_only
    def test_get_controversial(self):
        num = 50
        result = self.r.get_controversial(limit=num, params={'t': 'all'})
        self.assertEqual(num, len(list(result)))

    def test_get_flair_list(self):
        self.r.login(self.un, self.un_pswd)
        sub = self.r.get_subreddit(self.sr)
        self.assertTrue(next(sub.get_flair_list()))

    def test_get_front_page(self):
        num = 50
        self.assertEqual(num, len(list(self.r.get_front_page(limit=num))))

    def test_get_new(self):
        num = 50
        result = self.r.get_new(limit=num)
        self.assertEqual(num, len(list(result)))

    @reddit_only
    def test_get_new_subreddits(self):
        num = 50
        self.assertEqual(num,
                         len(list(self.r.get_new_subreddits(limit=num))))

    @reddit_only
    def test_get_popular_subreddits(self):
        num = 50
        self.assertEqual(num,
                         len(list(self.r.get_popular_subreddits(limit=num))))

    def test_get_random_subreddit(self):
        subs = set()
        for _ in range(3):
            subs.add(self.r.get_subreddit('RANDOM').display_name)
        self.assertTrue(len(subs) > 1)

    def test_get_rising(self):
        # Use low limit as rising listing has few elements. Keeping the limit
        # small prevents this test from becoming flaky.
        num = 5
        result = self.r.get_rising(limit=num)
        self.assertEqual(num, len(list(result)))

    def test_get_submissions(self):
        def fullname(url):
            return self.r.get_submission(url).fullname
        fullnames = [fullname(self.comment_url), fullname(self.link_url)] * 100
        # Typo fix: retreived -> retrieved.
        retrieved = [x.fullname for x in self.r.get_submissions(fullnames)]
        self.assertEqual(fullnames, retrieved)

    @mock.patch.object(Reddit, 'request_json')
    def test_get_submissions_with_params(self, mock_my_method):
        fake_submission = Submission(self.r, {'permalink': 'meh', 'score': 2,
                                              'title': 'test'})
        mock_resp = ({'data': {'children': [fake_submission]}},
                     {'data': {'children': []}})
        mock_my_method.return_value = mock_resp
        url = ("http://www.reddit.com/r/redditgetsdrawn/comments/"
               "1ts9hi/surprise_me_thanks_in_advance/cec0897?context=3")
        self.assertTrue(self.r.get_submission(url).score == 2)
        self.assertTrue(self.r.get_submission(url).title == 'test')
        # Query-string parameters must be split off the URL and passed
        # through as request params.
        base_url = ("http://www.reddit.com/r/redditgetsdrawn/comments/"
                    "1ts9hi/surprise_me_thanks_in_advance/cec0897")
        params = {"context": "3"}
        mock_my_method.assert_called_with(base_url, params=params)

    @reddit_only
    def test_get_top(self):
        num = 50
        result = self.r.get_top(limit=num, params={'t': 'all'})
        self.assertEqual(num, len(list(result)))

    def test_info_by_invalid_id(self):
        self.assertEqual(None, self.r.get_info(thing_id='INVALID'))

    def test_info_by_known_url_returns_known_id_link_post(self):
        found_links = self.r.get_info(self.link_url_link)
        tmp = self.r.get_submission(url=self.link_url)
        self.assertTrue(tmp in found_links)

    def test_info_by_url_also_found_by_id(self):
        found_by_url = self.r.get_info(self.link_url_link)[0]
        found_by_id = self.r.get_info(thing_id=found_by_url.fullname)
        self.assertEqual(found_by_id, found_by_url)

    @reddit_only
    def test_info_by_url_maximum_listing(self):
        self.assertEqual(100, len(self.r.get_info('http://www.reddit.com',
                                                  limit=101)))

    def test_is_username_available(self):
        self.assertFalse(self.r.is_username_available(self.un))
        self.assertTrue(self.r.is_username_available(self.invalid_user_name))
        self.assertFalse(self.r.is_username_available(''))

    def test_not_logged_in_when_initialized(self):
        self.assertEqual(self.r.user, None)

    def test_require_user_agent(self):
        self.assertRaises(TypeError, Reddit, user_agent=None)
        self.assertRaises(TypeError, Reddit, user_agent='')
        self.assertRaises(TypeError, Reddit, user_agent=1)

    @reddit_only
    def test_search(self):
        self.assertTrue(list(self.r.search('test')))

    @reddit_only
    def test_search_with_syntax(self):
        # Searching with timestamps only possible with cloudsearch
        no_syntax = self.r.search("timestamp:1354348800..1354671600",
                                  subreddit=self.sr)
        self.assertFalse(list(no_syntax))
        with_syntax = self.r.search("timestamp:1354348800..1354671600",
                                    subreddit=self.sr, syntax='cloudsearch')
        self.assertTrue(list(with_syntax))

    @reddit_only
    def test_search_with_time_window(self):
        num = 50
        submissions = len(list(self.r.search('test', subreddit=self.sr,
                                             period='all', limit=num)))
        self.assertTrue(submissions == num)

    def test_search_reddit_names(self):
        self.assertTrue(self.r.search_reddit_names('reddit'))

    def test_store_json_result(self):
        self.r.config.store_json_result = True
        sub_url = ('http://www.reddit.com/r/reddit_api_test/comments/'
                   '1f7ojw/oauth_submit/')
        sub = self.r.get_submission(url=sub_url)
        self.assertEqual(sub.json_dict['url'], sub_url)

    def test_store_lazy_json_result(self):
        self.r.config.store_json_result = True
        subreddit = self.r.get_subreddit(self.sr)
        # Force object to load
        subreddit.title
        self.assertEqual(subreddit.json_dict['display_name'], self.sr)
class CacheTest(unittest.TestCase, AuthenticatedHelper):
    """Verify PRAW's response cache: identical requests within the cache
    timeout are served from cache until caching is disabled or the
    object is explicitly refreshed."""

    def setUp(self):
        self.configure()

    def test_cache(self):
        subreddit = self.r.get_subreddit(self.sr)
        title = 'Test Cache: %s' % uuid.uuid4()
        body = "BODY"
        original_listing = list(subreddit.get_new(limit=5))
        subreddit.submit(title, body)
        # The new submission is not visible yet: this second get_new call
        # is answered from the cache.
        new_listing = list(subreddit.get_new(limit=5))
        self.assertEqual(original_listing, new_listing)
        self.disable_cache()
        no_cache_listing = list(subreddit.get_new(limit=5))
        self.assertNotEqual(original_listing, no_cache_listing)

    def test_refresh_subreddit(self):
        self.disable_cache()
        subreddit = self.r.get_subreddit(self.sr)
        new_description = 'Description %s' % uuid.uuid4()
        subreddit.update_settings(public_description=new_description)
        # The lazy subreddit object still holds the stale attribute value
        # until refresh() re-fetches it.
        self.assertNotEqual(new_description, subreddit.public_description)
        subreddit.refresh()
        self.assertEqual(new_description, subreddit.public_description)

    def test_refresh_submission(self):
        self.disable_cache()
        subreddit = self.r.get_subreddit(self.sr)
        submission = next(subreddit.get_top())
        same_submission = self.r.get_submission(submission_id=submission.id)
        # Toggle the vote so server-side state diverges from the stale
        # copy held in same_submission.
        if submission.likes:
            submission.downvote()
        else:
            submission.upvote()
        self.assertEqual(submission.likes, same_submission.likes)
        submission.refresh()
        self.assertNotEqual(submission.likes, same_submission.likes)
class EmbedTextTest(unittest.TestCase):
    """Unit tests for decorators._embed_text, which inserts boilerplate
    text into a function docstring at the appropriate position."""

    # Text that _embed_text is expected to weave into each docstring.
    embed_text = "Hello"

    def test_no_docstring(self):
        # With no original docstring the embed text becomes the docstring.
        # pylint: disable-msg=W0212
        new_doc = decorators._embed_text(None, self.embed_text)
        self.assertEqual(new_doc, self.embed_text)

    def test_one_liner(self):
        # A one-line docstring gets the embed text appended after a blank
        # line.
        # pylint: disable-msg=W0212
        new_doc = decorators._embed_text("Returns something cool",
                                         self.embed_text)
        self.assertEqual(new_doc,
                         "Returns something cool\n\n" + self.embed_text)

    def test_multi_liner(self):
        # Multi-line docstring without params: embed text goes at the end.
        doc = """Jiggers the bar
Only run if foo is instantiated.
"""
        # pylint: disable-msg=W0212
        new_doc = decorators._embed_text(doc, self.embed_text)
        self.assertEqual(new_doc, doc + self.embed_text + "\n\n")

    def test_single_plus_params(self):
        # With a :params: section the embed text is inserted before it.
        doc = """Jiggers the bar
:params foo: Self explanatory.
"""
        expected_doc = """Jiggers the bar
{}
:params foo: Self explanatory.
""".format(self.embed_text)
        # pylint: disable-msg=W0212
        new_doc = decorators._embed_text(doc, self.embed_text)
        self.assertEqual(new_doc, expected_doc)

    def test_multi_plus_params(self):
        doc = """Jiggers the bar
Jolly importment.
:params foo: Self explanatory.
:returns: The jiggered bar.
"""
        expected_doc = """Jiggers the bar
Jolly importment.
{}
:params foo: Self explanatory.
:returns: The jiggered bar.
""".format(self.embed_text)
        # pylint: disable-msg=W0212
        new_doc = decorators._embed_text(doc, self.embed_text)
        self.assertEqual(new_doc, expected_doc)

    def test_additional_params(self):
        # Trailing free-form text after the field list must stay in place.
        doc = """Jiggers the bar
Jolly important.
:params foo: Self explanatory.
:returns: The jiggered bar.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be
altered.
"""
        expected_doc = """Jiggers the bar
Jolly important.
{}
:params foo: Self explanatory.
:returns: The jiggered bar.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be
altered.
""".format(self.embed_text)
        # pylint: disable-msg=W0212
        new_doc = decorators._embed_text(doc, self.embed_text)
        self.assertEqual(new_doc, expected_doc)
class EncodingTest(unittest.TestCase, AuthenticatedHelper):
    """Exercise unicode handling for authors, comments and submissions."""

    def setUp(self):
        self.configure()

    def test_author_encoding(self):
        # pylint: disable-msg=E1101
        author = next(self.r.get_new()).author
        same_author = self.r.get_redditor(text_type(author))
        self.assertEqual(author, same_author)
        submission_a = next(author.get_submitted())
        submission_b = next(same_author.get_submitted())
        self.assertEqual(submission_a, submission_b)

    def test_unicode_comment(self):
        submission = next(self.r.get_subreddit(self.sr).get_new())
        text = 'Have some unicode: (\xd0, \xdd)'
        comment = submission.add_comment(text)
        self.assertEqual(text, comment.body)

    def test_unicode_submission(self):
        unique = uuid.uuid4()
        title = 'Wiki Entry on \xC3\x9C'
        url = 'http://en.wikipedia.org/wiki/\xC3\x9C?id=%s' % unique
        submission = self.r.submit(self.sr, title, url=url)
        # str() must not blow up on the non-ascii title.
        str(submission)
        self.assertEqual(title, submission.title)
        self.assertEqual(url, submission.url)
class MoreCommentsTest(unittest.TestCase, AuthenticatedHelper):
    """Cover expansion of MoreComments placeholders on a submission."""

    def setUp(self):
        self.configure()
        self.submission = self.r.get_submission(url=self.more_comments_url,
                                                comment_limit=10)

    def test_all_comments(self):
        before = len(self.submission.comments)
        before_flat = len(helpers.flatten_tree(self.submission.comments))
        saved = self.submission.replace_more_comments(threshold=2)
        after = len(self.submission.comments)
        after_flat = len(helpers.flatten_tree(self.submission.comments))
        # Expansion must grow both the top-level and flattened counts.
        # pylint: disable-msg=W0212
        self.assertEqual(len(self.submission._comments_by_id), after_flat)
        self.assertTrue(before < after)
        self.assertTrue(before < before_flat)
        self.assertTrue(after < after_flat)
        self.assertTrue(before_flat < after_flat)
        self.assertTrue(saved)

    def test_comments_method(self):
        def predicate(item):
            return isinstance(item, MoreComments)
        placeholder = self.first(self.submission.comments, predicate)
        self.assertTrue(placeholder.comments())
class CommentEditTest(unittest.TestCase, AuthenticatedHelper):
    """Verify editing one's own comment."""

    def setUp(self):
        self.configure()

    def test_reply(self):
        original = next(self.r.user.get_comments())
        new_body = '%s\n\n+Edit Text' % original.body
        edited = original.edit(new_body)
        self.assertEqual(edited.body, new_body)
class CommentPermalinkTest(unittest.TestCase, AuthenticatedHelper):
    """A comment's id must always appear inside its permalink."""

    def setUp(self):
        self.configure()

    def test_inbox_permalink(self):
        def predicate(item):
            return isinstance(item, Comment)
        comment = self.first(self.r.get_inbox(), predicate)
        self.assertTrue(comment.id in comment.permalink)

    def test_user_comments_permalink(self):
        comment = next(self.r.user.get_comments())
        self.assertTrue(comment.id in comment.permalink)

    def test_get_comments_permalink(self):
        subreddit = self.r.get_subreddit(self.sr)
        comment = next(subreddit.get_comments())
        self.assertTrue(comment.id in comment.permalink)
class CommentReplyTest(unittest.TestCase, AuthenticatedHelper):
    """Verify adding comments and replies and their parent linkage."""

    def setUp(self):
        self.configure()
        self.subreddit = self.r.get_subreddit(self.sr)

    def test_add_comment_and_verify(self):
        text = 'Unique comment: %s' % uuid.uuid4()
        # pylint: disable-msg=E1101
        submission = next(self.subreddit.get_new())
        # pylint: enable-msg=E1101
        comment = submission.add_comment(text)
        self.assertEqual(comment.submission, submission)
        self.assertEqual(comment.body, text)

    def test_add_reply_and_verify(self):
        text = 'Unique reply: %s' % uuid.uuid4()

        def predicate(submission):
            return submission.num_comments > 0
        submission = self.first(self.subreddit.get_new(), predicate)
        target = submission.comments[0]
        reply = target.reply(text)
        self.assertEqual(reply.parent_id, target.fullname)
        self.assertEqual(reply.body, text)
class CommentReplyNoneTest(unittest.TestCase, AuthenticatedHelper):
    """Comments from flat listings must not have replies loaded (None)."""

    def setUp(self):
        self.configure()

    @staticmethod
    def _is_comment(item):
        # Shared predicate for picking Comment objects out of listings.
        return isinstance(item, Comment)

    def test_front_page_comment_replies_are_none(self):
        # pylint: disable-msg=E1101,W0212
        item = next(self.r.get_comments('all'))
        self.assertEqual(item._replies, None)

    def test_inbox_comment_replies_are_none(self):
        comment = self.first(self.r.get_inbox(), self._is_comment)
        # pylint: disable-msg=W0212
        self.assertEqual(comment._replies, None)

    def test_spambox_comments_replies_are_none(self):
        sequence = self.r.get_subreddit(self.sr).get_spam()
        comment = self.first(sequence, self._is_comment)
        # pylint: disable-msg=W0212
        self.assertEqual(comment._replies, None)

    def test_user_comment_replies_are_none(self):
        comment = self.first(self.r.user.get_comments(), self._is_comment)
        # pylint: disable-msg=W0212
        self.assertEqual(comment._replies, None)
class FlairTest(unittest.TestCase, AuthenticatedHelper):
    """Exercise user and link flair assignment, retrieval, deletion and
    bulk (csv) updates on the test subreddit."""

    def setUp(self):
        self.configure()
        self.subreddit = self.r.get_subreddit(self.sr)

    def test_add_link_flair(self):
        flair_text = 'Flair: %s' % uuid.uuid4()
        sub = next(self.subreddit.get_new())
        self.subreddit.set_flair(sub, flair_text)
        # Re-fetch to confirm the flair persisted server-side.
        sub = self.r.get_submission(sub.permalink)
        self.assertEqual(sub.link_flair_text, flair_text)

    def test_add_link_flair_through_submission(self):
        flair_text = 'Flair: %s' % uuid.uuid4()
        sub = next(self.subreddit.get_new())
        sub.set_flair(flair_text)
        sub = self.r.get_submission(sub.permalink)
        self.assertEqual(sub.link_flair_text, flair_text)

    def test_add_link_flair_to_invalid_subreddit(self):
        # A submission from an unrelated subreddit cannot be flaired here.
        sub = next(self.r.get_subreddit('python').get_new())
        self.assertRaises(HTTPError, self.subreddit.set_flair, sub, 'text')

    def test_add_user_flair_by_subreddit_name(self):
        flair_text = 'Flair: %s' % uuid.uuid4()
        self.r.set_flair(self.sr, self.r.user, flair_text)
        flair = self.r.get_flair(self.sr, self.r.user)
        self.assertEqual(flair['flair_text'], flair_text)
        self.assertEqual(flair['flair_css_class'], None)

    def test_add_user_flair_to_invalid_user(self):
        self.assertRaises(errors.InvalidFlairTarget, self.subreddit.set_flair,
                          self.invalid_user_name)

    def test_add_user_flair_by_name(self):
        flair_text = 'Flair: %s' % uuid.uuid4()
        flair_css = 'a%d' % random.randint(0, 1024)
        self.subreddit.set_flair(text_type(self.r.user), flair_text, flair_css)
        flair = self.subreddit.get_flair(self.r.user)
        self.assertEqual(flair['flair_text'], flair_text)
        self.assertEqual(flair['flair_css_class'], flair_css)

    def test_clear_user_flair(self):
        # Setting flair with neither text nor css clears it.
        self.subreddit.set_flair(self.r.user)
        flair = self.subreddit.get_flair(self.r.user)
        self.assertEqual(flair['flair_text'], None)
        self.assertEqual(flair['flair_css_class'], None)

    def test_delete_flair(self):
        flair = list(self.subreddit.get_flair_list(limit=1))[0]
        self.subreddit.delete_flair(flair['user'])
        self.assertTrue(flair not in self.subreddit.get_flair_list())

    def test_flair_csv_and_flair_list(self):
        # Clear all flair
        self.subreddit.clear_all_flair()
        self.delay(5)  # Wait for flair to clear
        self.assertEqual([], list(self.subreddit.get_flair_list()))
        # Set flair
        flair_mapping = [{'user': 'reddit', 'flair_text': 'dev'},
                         {'user': self.un, 'flair_css_class': 'xx'},
                         {'user': self.other_user_name,
                          'flair_text': 'AWESOME',
                          'flair_css_class': 'css'}]
        self.subreddit.set_flair_csv(flair_mapping)
        self.assertEqual([], flair_diff(flair_mapping,
                                        list(self.subreddit.get_flair_list())))

    def test_flair_csv_many(self):
        users = ('reddit', self.un, self.other_user_name)
        flair_text_a = 'Flair: %s' % uuid.uuid4()
        flair_text_b = 'Flair: %s' % uuid.uuid4()
        # 99 filler rows plus one row per user — presumably sized to push
        # the upload past a single csv batch (TODO confirm batch size);
        # the later value for each user must win.
        flair_mapping = [{'user': 'reddit', 'flair_text': flair_text_a}] * 99
        for user in users:
            flair_mapping.append({'user': user, 'flair_text': flair_text_b})
        self.subreddit.set_flair_csv(flair_mapping)
        for user in users:
            flair = self.subreddit.get_flair(user)
            self.assertEqual(flair['flair_text'], flair_text_b)

    def test_flair_csv_optional_args(self):
        # Rows may omit either flair_text or flair_css_class.
        flair_mapping = [{'user': 'reddit', 'flair_text': 'reddit'},
                         {'user': self.other_user_name, 'flair_css_class':
                          'blah'}]
        self.subreddit.set_flair_csv(flair_mapping)

    def test_flair_csv_empty(self):
        self.assertRaises(errors.ClientException,
                          self.subreddit.set_flair_csv, [])

    def test_flair_csv_requires_user(self):
        flair_mapping = [{'flair_text': 'hsdf'}]
        self.assertRaises(errors.ClientException,
                          self.subreddit.set_flair_csv, flair_mapping)
class FlairSelectTest(unittest.TestCase, AuthenticatedHelper):
    """Exercise the flair-select endpoints against flair templates
    pre-configured on the private test subreddit."""

    def setUp(self):
        self.configure()
        self.subreddit = self.r.get_subreddit(self.priv_sr)
        # css class -> (template id, default flair text).
        self.user_flair_templates = {
            'UserCssClassOne': ('21e00aae-09cf-11e3-a4f1-12313d281541',
                                'default_user_flair_text_one'),
            'UserCssClassTwo': ('2f6504c2-09cf-11e3-9d8d-12313d281541',
                                'default_user_flair_text_two')
        }
        self.link_flair_templates = {
            'LinkCssClassOne': ('36a573c0-09cf-11e3-b5f7-12313d096169',
                                'default_link_flair_text_one'),
            'LinkCssClassTwo': ('3b73f516-09cf-11e3-9a71-12313d281541',
                                'default_link_flair_text_two')
        }

    def get_different_user_flair_class(self):
        """Return a user flair class other than the user's current one."""
        flair = self.r.get_flair(self.subreddit, self.r.user)
        # BUG FIX: dict views are not indexable on Python 3; materialize
        # the keys before subscripting.
        classes = list(self.user_flair_templates)
        if flair == classes[0]:
            return classes[1]
        return classes[0]

    def get_different_link_flair_class(self, submission):
        """Return a link flair class other than the submission's."""
        flair = submission.link_flair_css_class
        # Same Python 3 fix as above.
        classes = list(self.link_flair_templates)
        if flair == classes[0]:
            return classes[1]
        return classes[0]

    def test_select_user_flair(self):
        flair_class = self.get_different_user_flair_class()
        flair_id = self.user_flair_templates[flair_class][0]
        flair_default_text = self.user_flair_templates[flair_class][1]
        self.r.select_flair(item=self.subreddit,
                            flair_template_id=flair_id)
        flair = self.r.get_flair(self.subreddit, self.r.user)
        self.assertEqual(flair['flair_text'], flair_default_text)
        self.assertEqual(flair['flair_css_class'], flair_class)

    def test_select_link_flair(self):
        sub = next(self.subreddit.get_new())
        flair_class = self.get_different_link_flair_class(sub)
        flair_id = self.link_flair_templates[flair_class][0]
        flair_default_text = self.link_flair_templates[flair_class][1]
        self.r.select_flair(item=sub,
                            flair_template_id=flair_id)
        sub = self.r.get_submission(sub.permalink)
        self.assertEqual(sub.link_flair_text, flair_default_text)
        self.assertEqual(sub.link_flair_css_class, flair_class)

    def test_select_user_flair_custom_text(self):
        flair_class = self.get_different_user_flair_class()
        flair_id = self.user_flair_templates[flair_class][0]
        flair_text = 'Flair: %s' % uuid.uuid4()
        self.r.select_flair(item=self.subreddit,
                            flair_template_id=flair_id,
                            flair_text=flair_text)
        flair = self.r.get_flair(self.subreddit, self.r.user)
        self.assertEqual(flair['flair_text'], flair_text)
        self.assertEqual(flair['flair_css_class'], flair_class)

    def test_select_link_flair_custom_text(self):
        sub = next(self.subreddit.get_new())
        flair_class = self.get_different_link_flair_class(sub)
        flair_id = self.link_flair_templates[flair_class][0]
        flair_text = 'Flair: %s' % uuid.uuid4()
        self.r.select_flair(item=sub,
                            flair_template_id=flair_id,
                            flair_text=flair_text)
        sub = self.r.get_submission(sub.permalink)
        self.assertEqual(sub.link_flair_text, flair_text)
        self.assertEqual(sub.link_flair_css_class, flair_class)

    def test_select_user_flair_remove(self):
        flair = self.r.get_flair(self.subreddit, self.r.user)
        if flair['flair_css_class'] is None:
            # Ensure some flair is set before testing removal.
            flair_class = self.get_different_user_flair_class()
            flair_id = self.user_flair_templates[flair_class][0]
            self.r.select_flair(item=self.subreddit,
                                flair_template_id=flair_id)
        self.r.select_flair(item=self.subreddit)
        flair = self.r.get_flair(self.subreddit, self.r.user)
        self.assertEqual(flair['flair_text'], None)
        self.assertEqual(flair['flair_css_class'], None)

    def test_select_link_flair_remove(self):
        sub = next(self.subreddit.get_new())
        if sub.link_flair_css_class is None:
            # Ensure some flair is set before testing removal.
            flair_class = self.get_different_link_flair_class(sub)
            flair_id = self.link_flair_templates[flair_class][0]
            self.r.select_flair(item=sub,
                                flair_template_id=flair_id)
        self.r.select_flair(item=sub)
        sub = self.r.get_submission(sub.permalink)
        self.assertEqual(sub.link_flair_text, None)
        self.assertEqual(sub.link_flair_css_class, None)
class FlairTemplateTest(unittest.TestCase, AuthenticatedHelper):
    """Smoke-test flair template creation and clearing endpoints; these
    only verify the calls succeed, not the resulting state."""

    def setUp(self):
        self.configure()
        self.subreddit = self.r.get_subreddit(self.sr)

    def test_add_user_template(self):
        # Positional args: text, css_class, text_editable.
        self.subreddit.add_flair_template('text', 'css', True)

    def test_add_link_template(self):
        # Each argument combination must be accepted for link templates.
        self.subreddit.add_flair_template('text', 'css', True, True)
        self.subreddit.add_flair_template(text='text', is_link=True)
        self.subreddit.add_flair_template(css_class='blah', is_link=True)
        self.subreddit.add_flair_template(is_link=True)

    def test_clear_user_templates(self):
        self.subreddit.clear_flair_templates()

    def test_clear_link_templates(self):
        # True selects the link (as opposed to user) template set.
        self.subreddit.clear_flair_templates(True)
class ImageTests(unittest.TestCase, AuthenticatedHelper):
    """Upload and delete subreddit stylesheet/header images, including
    rejection of bad files and conflicting parameters."""

    def setUp(self):
        self.configure()
        self.subreddit = self.r.get_subreddit(self.sr)
        test_dir = os.path.dirname(sys.modules[__name__].__file__)
        # Template resolving to fixture files shipped next to this module.
        self.image_path = os.path.join(test_dir, 'files', '{0}')

    def test_delete_header(self):
        self.subreddit.delete_image(header=True)

    def test_delete_image(self):
        images = self.subreddit.get_stylesheet()['images']
        for img_data in images[:5]:
            self.subreddit.delete_image(name=img_data['name'])
        updated_images = self.subreddit.get_stylesheet()['images']
        self.assertNotEqual(images, updated_images)

    def test_delete_invalid_image(self):
        self.assertRaises(errors.BadCSSName,
                          self.subreddit.delete_image, 'invalid_image_name')

    def test_delete_invalid_params(self):
        # name and header are mutually exclusive.
        self.assertRaises(TypeError, self.subreddit.delete_image, name='Foo',
                          header=True)

    def test_upload_invalid_file_path(self):
        self.assertRaises(IOError, self.subreddit.upload_image, 'nonexistent')

    def test_upload_invalid_image(self):
        # Renamed from ``test_upload_uerinvalid_image`` (typo fix).
        image = self.image_path.format('white-square.tiff')
        self.assertRaises(errors.ClientException, self.subreddit.upload_image,
                          image)

    def test_upload_invalid_image_too_small(self):
        image = self.image_path.format('invalid.jpg')
        self.assertRaises(errors.ClientException, self.subreddit.upload_image,
                          image)

    def test_upload_invalid_image_too_large(self):
        image = self.image_path.format('big')
        self.assertRaises(errors.ClientException, self.subreddit.upload_image,
                          image)

    def test_upload_invalid_params(self):
        # name and header are mutually exclusive here as well.
        image = self.image_path.format('white-square.jpg')
        self.assertRaises(TypeError, self.subreddit.upload_image, image,
                          name='Foo', header=True)

    def test_upload_invalid_image_path(self):
        self.assertRaises(IOError, self.subreddit.upload_image, 'bar.png')

    @reddit_only
    def test_upload_jpg_header(self):
        image = self.image_path.format('white-square.jpg')
        self.assertTrue(self.subreddit.upload_image(image, header=True))

    @reddit_only
    def test_upload_jpg_image(self):
        image = self.image_path.format('white-square.jpg')
        self.assertTrue(self.subreddit.upload_image(image))

    @reddit_only
    def test_upload_jpg_image_named(self):
        image = self.image_path.format('white-square.jpg')
        name = text_type(uuid.uuid4())
        self.assertTrue(self.subreddit.upload_image(image, name))
        images_json = self.subreddit.get_stylesheet()['images']
        self.assertTrue(any(name in text_type(x['name']) for x in images_json))

    @reddit_only
    def test_upload_jpg_image_no_extension(self):
        image = self.image_path.format('white-square')
        self.assertTrue(self.subreddit.upload_image(image))

    @reddit_only
    def test_upload_png_header(self):
        image = self.image_path.format('white-square.png')
        self.assertTrue(self.subreddit.upload_image(image, header=True))

    @reddit_only
    def test_upload_png_image(self):
        image = self.image_path.format('white-square.png')
        self.assertTrue(self.subreddit.upload_image(image))

    @reddit_only
    def test_upload_png_image_named(self):
        image = self.image_path.format('white-square.png')
        name = text_type(uuid.uuid4())
        self.assertTrue(self.subreddit.upload_image(image, name))
        images_json = self.subreddit.get_stylesheet()['images']
        self.assertTrue(any(name in text_type(x['name']) for x in images_json))
class LocalOnlyTest(unittest.TestCase, AuthenticatedHelper):
    """Tests that mutate global site state (account/subreddit creation,
    account deletion, feedback); guarded by @local_only so they run only
    against a local reddit instance."""

    def setUp(self):
        self.configure()

    @local_only
    def test_create_existing_redditor(self):
        self.r.login(self.un, self.un_pswd)
        self.assertRaises(errors.UsernameExists, self.r.create_redditor,
                          self.other_user_name, self.other_user_pswd)

    @local_only
    def test_create_existing_subreddit(self):
        self.r.login(self.un, self.un_pswd)
        self.assertRaises(errors.SubredditExists, self.r.create_subreddit,
                          self.sr, 'foo')

    @local_only
    def test_create_redditor(self):
        # Random suffix avoids collisions across repeated runs.
        unique_name = 'PyAPITestUser%d' % random.randint(3, 10240)
        self.r.create_redditor(unique_name, '1111')

    @local_only
    def test_create_subreddit(self):
        unique_name = 'test%d' % random.randint(3, 10240)
        description = '#Welcome to %s\n\n0 item 1\n0 item 2\n' % unique_name
        self.r.login(self.un, self.un_pswd)
        self.r.create_subreddit(unique_name, 'The %s' % unique_name,
                                description)

    @local_only
    def test_delete_redditor(self):
        random_u = 'PyAPITestUser%d' % random.randint(3, 10240)
        random_p = 'pass%d' % random.randint(3, 10240)
        # Create a throwaway account, delete it, then confirm the login
        # no longer works.
        self.r.create_redditor(random_u, random_p)
        self.r.login(random_u, random_p)
        self.assertTrue(self.r.is_logged_in())
        self.r.delete(random_p)
        self.r.clear_authentication()
        self.assertRaises(errors.InvalidUserPass, self.r.login, random_u,
                          random_p)

    @local_only
    def test_delete_redditor_wrong_password(self):
        random_u = 'PyAPITestUser%d' % random.randint(3, 10240)
        random_p = 'pass%d' % random.randint(3, 10240)
        self.r.create_redditor(random_u, random_p)
        self.r.login(random_u, random_p)
        self.assertTrue(self.r.is_logged_in())
        # Deletion requires the correct current password.
        self.assertRaises(errors.InvalidUserPass, self.r.delete, 'wxyz')

    @local_only
    def test_failed_feedback(self):
        # 'b' is not a valid email address.
        self.assertRaises(errors.InvalidEmails, self.r.send_feedback,
                          'a', 'b', 'c')

    @local_only
    def test_send_feedback(self):
        msg = 'You guys are awesome. (Sent from the PRAW python module).'
        self.r.send_feedback('Bryce Boe', 'foo@foo.com', msg)
class MessageTest(unittest.TestCase, AuthenticatedHelper):
    """Private-message sending, replying and read/unread state handling."""

    def setUp(self):
        self.configure()

    def test_get_unread_update_has_mail(self):
        self.r.send_message(self.other_user_name, 'Update has mail', 'body')
        self.r.login(self.other_user_name, self.other_user_pswd)
        self.assertTrue(self.r.user.has_mail)
        # Fetching unread with unset_has_mail/update_user clears the flag.
        self.r.get_unread(limit=1, unset_has_mail=True, update_user=True)
        self.assertFalse(self.r.user.has_mail)

    def test_mark_as_read(self):
        oth = Reddit(USER_AGENT, disable_update_check=True)
        oth.login(self.other_user_name, self.other_user_pswd)
        # pylint: disable-msg=E1101
        msg = next(oth.get_unread(limit=1))
        msg.mark_as_read()
        self.assertTrue(msg not in oth.get_unread(limit=5))

    def test_mark_as_unread(self):
        oth = Reddit(USER_AGENT, disable_update_check=True)
        oth.login(self.other_user_name, self.other_user_pswd)
        # Pick an already-read message to flip back to unread.
        msg = self.first(oth.get_inbox(), lambda msg: not msg.new)
        msg.mark_as_unread()
        self.assertTrue(msg in oth.get_unread())

    def test_mark_multiple_as_read(self):
        oth = Reddit(USER_AGENT, disable_update_check=True)
        oth.login(self.other_user_name, self.other_user_pswd)
        messages = []
        # Collect two unread messages authored by someone else.
        for msg in oth.get_unread(limit=None):
            if msg.author != oth.user.name:
                messages.append(msg)
                if len(messages) >= 2:
                    break
        self.assertEqual(2, len(messages))
        oth.user.mark_as_read(messages)
        unread = list(oth.get_unread(limit=5))
        self.assertTrue(all(msg not in unread for msg in messages))

    def test_reply_to_message_and_verify(self):
        text = 'Unique message reply: %s' % uuid.uuid4()
        predicate = lambda msg: (isinstance(msg, Message)
                                 and msg.author == self.r.user)
        msg = self.first(self.r.get_inbox(), predicate)
        reply = msg.reply(text)
        self.assertEqual(reply.parent_id, msg.fullname)

    def test_send(self):
        subject = 'Unique message: %s' % uuid.uuid4()
        # Sending to oneself: the message must appear in one's own inbox.
        self.r.user.send_message(subject, 'Message content')
        self.first(self.r.get_inbox(), lambda msg: msg.subject == subject)

    def test_send_invalid(self):
        subject = 'Unique message: %s' % uuid.uuid4()
        self.assertRaises(errors.InvalidUser, self.r.send_message,
                          self.invalid_user_name, subject, 'Message content')
class ModeratorSubmissionTest(unittest.TestCase, AuthenticatedHelper):
    """Moderator actions on submissions: approve, remove, ignore reports."""

    def setUp(self):
        self.configure()
        self.subreddit = self.r.get_subreddit(self.sr)

    def test_approve(self):
        submission = next(self.subreddit.get_spam())
        if not submission:
            self.fail('Could not find a submission to approve.')
        submission.approve()

        def predicate(approved):
            return approved.id == submission.id
        self.first(self.subreddit.get_new(), predicate)

    def test_ignore_reports(self):
        submission = next(self.subreddit.get_new())
        self.assertFalse(submission in self.subreddit.get_mod_log())
        submission.ignore_reports()
        submission.report()
        self.disable_cache()
        submission.refresh()
        # Ignored reports bump num_reports without creating a mod-log entry.
        self.assertFalse(submission in self.subreddit.get_mod_log())
        self.assertTrue(submission.num_reports > 0)

    def test_remove(self):
        submission = next(self.subreddit.get_new())
        if not submission:
            self.fail('Could not find a submission to remove.')
        submission.remove()

        def predicate(removed):
            return removed.id == submission.id
        self.first(self.subreddit.get_spam(), predicate)
class ModeratorSubredditTest(unittest.TestCase, AuthenticatedHelper):
    """Moderator-level subreddit listings: mod log, mod queue, mod mail
    and unmoderated items."""

    def setUp(self):
        self.configure()
        self.subreddit = self.r.get_subreddit(self.sr)

    def test_get_mod_log(self):
        self.assertTrue(list(self.subreddit.get_mod_log()))

    def test_get_mod_log_with_mod_by_name(self):
        other = self.r.get_redditor(self.other_user_name)
        actions = list(self.subreddit.get_mod_log(mod=other.name))
        self.assertTrue(actions)
        # Every returned action must belong to the requested moderator.
        self.assertTrue(all(x.mod.lower() == other.name.lower()
                            for x in actions))

    def test_get_mod_log_with_mod_by_redditor_object(self):
        other = self.r.get_redditor(self.other_user_name)
        actions = list(self.subreddit.get_mod_log(mod=other))
        self.assertTrue(actions)
        self.assertTrue(all(x.mod.lower() == other.name.lower()
                            for x in actions))

    def test_get_mod_log_with_action_filter(self):
        actions = list(self.subreddit.get_mod_log(action='removelink'))
        self.assertTrue(actions)
        self.assertTrue(all(x.action == 'removelink' for x in actions))

    def test_mod_mail_send(self):
        subject = 'Unique message: %s' % uuid.uuid4()
        self.r.get_subreddit(self.sr).send_message(subject, 'Content')
        self.first(self.r.get_mod_mail(), lambda msg: msg.subject == subject)

    def test_get_mod_queue(self):
        # 'mod' is presumably reddit's aggregate of moderated subreddits
        # (TODO confirm special-subreddit semantics).
        self.assertTrue(list(self.r.get_subreddit('mod').get_mod_queue()))

    def test_get_mod_queue_with_default_subreddit(self):
        self.assertTrue(list(self.r.get_mod_queue()))

    def test_get_mod_queue_multi(self):
        multi = '{0}+{1}'.format(self.sr, self.priv_sr)
        self.assertTrue(list(self.r.get_subreddit(multi).get_mod_queue()))

    def test_get_unmoderated(self):
        self.assertTrue(list(self.subreddit.get_unmoderated()))
class ModeratorUserTest(unittest.TestCase, AuthenticatedHelper):
    """Tests adding and removing users from subreddit access lists
    (bans, contributors, moderators, wiki bans, wiki contributors).
    """
    def setUp(self):
        self.configure()
        self.subreddit = self.r.get_subreddit(self.sr)
        # fetch=True so attributes such as `name` are populated up front.
        self.other = self.r.get_redditor(self.other_user_name, fetch=True)
    def add_remove(self, add, remove, listing, add_callback=None):
        """Verify that `add`/`remove` toggle membership in `listing`.
        The remote starting state is unknown, so the two sub-checks run
        in whichever order matches the user's current membership.
        `add_callback`, when given, runs right after `add` (e.g. to
        accept a moderator invite before checking the listing).
        """
        def test_add():
            add(self.other)
            if add_callback:
                add_callback()
            self.assertTrue(self.other in listing())
        def test_remove():
            remove(self.other)
            self.assertTrue(self.other not in listing())
        self.disable_cache()
        if self.other in listing():
            test_remove()
            test_add()
        else:
            test_add()
            test_remove()
    def test_accept_moderator_invite_fail(self):
        # Accepting an invite that was never sent must raise.
        self.r.login(self.other_user_name, self.other_user_pswd)
        self.assertRaises(errors.InvalidInvite,
                          self.subreddit.accept_moderator_invite)
    def test_ban(self):
        self.add_remove(self.subreddit.add_ban, self.subreddit.remove_ban,
                        self.subreddit.get_banned)
    def test_get_banned_note(self):
        # TODO: Update this test to add/update the ban note when ban note
        # adding is supported.
        params = {'user': self.other_non_mod_name}
        data = next(self.subreddit.get_banned(user_only=False, params=params))
        self.assertEqual(data['note'], 'no reason in particular 2')
    def test_contributors(self):
        self.add_remove(self.subreddit.add_contributor,
                        self.subreddit.remove_contributor,
                        self.subreddit.get_contributors)
    def test_moderator(self):
        def add_callback():
            # The invite must be accepted by the invitee, so log in as
            # the other user through a separate session.
            tmp = Reddit(USER_AGENT, disable_update_check=True)
            tmp.login(self.other_user_name, self.other_user_pswd)
            tmp.get_subreddit(self.sr).accept_moderator_invite()
        self.add_remove(self.subreddit.add_moderator,
                        self.subreddit.remove_moderator,
                        self.subreddit.get_moderators,
                        add_callback)
    def test_make_moderator_by_name_failure(self):
        self.assertTrue(self.r.user in self.subreddit.get_moderators())
        self.assertRaises(errors.AlreadyModerator,
                          self.subreddit.add_moderator, text_type(self.r.user))
    def test_wiki_ban(self):
        self.add_remove(self.subreddit.add_wiki_ban,
                        self.subreddit.remove_wiki_ban,
                        self.subreddit.get_wiki_banned)
    def test_wiki_contributors(self):
        self.add_remove(self.subreddit.add_wiki_contributor,
                        self.subreddit.remove_wiki_contributor,
                        self.subreddit.get_wiki_contributors)
class OAuth2Test(unittest.TestCase, BasicHelper):
    """Tests for the OAuth2 authorization flow and scoped API access.
    `self.r` is configured from the '<site>_oauth_test' config section
    (with app credentials); `self.invalid` deliberately has no OAuth app
    info, to exercise OAuthAppRequired errors.
    """
    def setUp(self):
        self.configure()
        site_name = (os.getenv('REDDIT_SITE') or 'reddit') + '_oauth_test'
        self.r = Reddit(USER_AGENT, site_name=site_name,
                        disable_update_check=True)
        self.invalid = Reddit(USER_AGENT, disable_update_check=True)
    def test_authorize_url(self):
        # The query string must carry exactly the expected OAuth params.
        url, params = self.r.get_authorize_url('...').split('?', 1)
        self.assertTrue('api/v1/authorize/' in url)
        params = dict(x.split('=', 1) for x in params.split('&'))
        expected = {'client_id': self.r.config.client_id,
                    'duration': 'temporary',
                    'redirect_uri': ('http%3A%2F%2F127.0.0.1%3A65010%2F'
                                     'authorize_callback'),
                    'response_type': 'code', 'scope': 'identity',
                    'state': '...'}
        self.assertEqual(expected, params)
    @interactive_only
    def test_get_access_information(self):
        # Requires a human to complete the browser authorization step.
        print('Visit this URL: {0}'.format(self.r.get_authorize_url('...')))
        code = prompt('Code from redir URL: ')
        token = self.r.get_access_information(code)
        expected = {'access_token': self.r.access_token,
                    'refresh_token': None,
                    'scope': set(('identity',))}
        self.assertEqual(expected, token)
        self.assertNotEqual(None, self.r.user)
    def test_get_access_information_with_invalid_code(self):
        self.assertRaises(errors.OAuthInvalidGrant,
                          self.r.get_access_information, 'invalid_code')
    def test_invalid_app_access_token(self):
        self.assertRaises(errors.OAuthAppRequired,
                          self.invalid.get_access_information, 'dummy_code')
    def test_invalid_app_authorize_url(self):
        self.assertRaises(errors.OAuthAppRequired,
                          self.invalid.get_authorize_url, 'dummy_state')
    def test_invalid_set_access_credentials(self):
        self.assertRaises(errors.OAuthInvalidToken,
                          self.r.set_access_credentials,
                          set(('identity',)), 'dummy_access_token')
    # Each test_scope_* case refreshes with a token limited to a single
    # OAuth scope and exercises an endpoint requiring that scope.
    @reddit_only
    def test_scope_edit(self):
        self.r.refresh_access_information(self.refresh_token['edit'])
        submission = Submission.from_id(self.r, self.submission_edit_id)
        self.assertEqual(submission, submission.edit('Edited text'))
    @reddit_only
    def test_scope_history(self):
        self.r.refresh_access_information(self.refresh_token['history'])
        self.assertTrue(list(self.r.get_redditor(self.un).get_liked()))
    @reddit_only
    def test_scope_identity(self):
        self.r.refresh_access_information(self.refresh_token['identity'])
        self.assertEqual(self.un, self.r.get_me().name)
    @reddit_only
    def test_scope_modconfig(self):
        self.r.refresh_access_information(self.refresh_token['modconfig'])
        self.r.get_subreddit(self.sr).set_settings('foobar')
    @reddit_only
    def test_scope_modflair(self):
        self.r.refresh_access_information(self.refresh_token['modflair'])
        self.r.get_subreddit(self.sr).set_flair(self.un, 'foobar')
    @reddit_only
    def test_scope_modlog(self):
        num = 50
        self.r.refresh_access_information(self.refresh_token['modlog'])
        result = self.r.get_subreddit(self.sr).get_mod_log(limit=num)
        self.assertEqual(num, len(list(result)))
    @reddit_only
    def test_scope_modposts(self):
        self.r.refresh_access_information(self.refresh_token['modposts'])
        Submission.from_id(self.r, self.submission_edit_id).remove()
    @reddit_only
    def test_scope_mysubreddits(self):
        self.r.refresh_access_information(self.refresh_token['mysubreddits'])
        self.assertTrue(list(self.r.get_my_moderation()))
    @reddit_only
    def test_scope_privatemessages(self):
        self.r.refresh_access_information(
            self.refresh_token['privatemessages'])
        self.assertTrue(list(self.r.get_inbox()))
    @reddit_only
    def test_scope_read(self):
        self.r.refresh_access_information(self.refresh_token['read'])
        self.assertTrue(self.r.get_subreddit(self.priv_sr).subscribers > 0)
        fullname = '{0}_{1}'.format(self.r.config.by_object[Submission],
                                    self.priv_submission_id)
        # Both lookup styles must resolve to the same submission.
        method1 = self.r.get_info(thing_id=fullname)
        method2 = self.r.get_submission(submission_id=self.priv_submission_id)
        self.assertEqual(method1, method2)
    @reddit_only
    def test_scope_read_get_front_page(self):
        self.r.refresh_access_information(self.refresh_token['mysubreddits'])
        subscribed = list(self.r.get_my_subreddits(limit=None))
        self.r.refresh_access_information(self.refresh_token['read'])
        for post in self.r.get_front_page():
            self.assertTrue(post.subreddit in subscribed)
    @reddit_only
    def test_scope_read_get_sub_listingr(self):
        self.r.refresh_access_information(self.refresh_token['read'])
        subreddit = self.r.get_subreddit(self.priv_sr)
        self.assertTrue(list(subreddit.get_top()))
    @reddit_only
    def test_scope_read_get_submission_by_url(self):
        url = ("http://www.reddit.com/r/reddit_api_test_priv/comments/16kbb7/"
               "google/")
        self.r.refresh_access_information(self.refresh_token['read'])
        submission = Submission.from_url(self.r, url)
        self.assertTrue(submission.num_comments != 0)
    @reddit_only
    def test_scope_read_priv_sr_comments(self):
        self.r.refresh_access_information(self.refresh_token['read'])
        self.assertTrue(list(self.r.get_comments(self.priv_sr)))
    @reddit_only
    def test_scope_read_priv_sub_comments(self):
        self.r.refresh_access_information(self.refresh_token['read'])
        submission = Submission.from_id(self.r, self.priv_submission_id)
        self.assertTrue(submission.comments)
    @reddit_only
    def test_scope_submit(self):
        self.r.refresh_access_information(self.refresh_token['submit'])
        result = self.r.submit(self.sr, 'OAuth Submit', text='Foo')
        self.assertTrue(isinstance(result, Submission))
    @reddit_only
    def test_scope_subscribe(self):
        self.r.refresh_access_information(self.refresh_token['subscribe'])
        self.r.get_subreddit(self.sr).subscribe()
    @reddit_only
    def test_scope_vote(self):
        self.r.refresh_access_information(self.refresh_token['vote'])
        submission = Submission.from_id(self.r, self.submission_edit_id)
        submission.clear_vote()
    @reddit_only
    def test_set_access_credentials(self):
        # update_session=False must leave self.r.user untouched until the
        # credentials are explicitly applied.
        self.assertTrue(self.r.user is None)
        result = self.r.refresh_access_information(
            self.refresh_token['identity'], update_session=False)
        self.assertTrue(self.r.user is None)
        self.r.set_access_credentials(**result)
        self.assertFalse(self.r.user is None)
    @reddit_only
    def test_oauth_without_identy_doesnt_set_user(self):
        # NOTE(review): 'identy' looks like a typo for 'identity' in the
        # test name; renaming would change which test runs, so left as-is.
        self.assertTrue(self.r.user is None)
        self.r.refresh_access_information(self.refresh_token['edit'])
        self.assertTrue(self.r.user is None)
    def test_set_oauth_info(self):
        self.assertRaises(errors.OAuthAppRequired,
                          self.invalid.get_authorize_url, 'dummy_state')
        self.invalid.set_oauth_app_info(self.r.client_id, self.r.client_secret,
                                        self.r.redirect_uri)
        self.invalid.get_authorize_url('dummy_state')
class RedditorTest(unittest.TestCase, AuthenticatedHelper):
    """Tests for redditor-level operations: votes, hides, friends."""

    def setUp(self):
        self.configure()
        self.other_user = self.r.get_redditor(self.other_user_name)

    def test_duplicate_login(self):
        # Logging in while already logged in must simply succeed.
        self.r.login(self.other_user_name, self.other_user_pswd)

    def test_get_disliked(self):
        # Pulls from get_liked. Problem here may come from get_liked
        entry = next(self.r.user.get_liked())
        entry.downvote()
        self.delay()  # The queue needs to be processed
        self.assertFalse(entry in self.r.user.get_liked())

    def test_get_friends(self):
        # See issue 175.
        # If this test fails and doesn't raise an exception, but smoothly calls
        # self.r.user.get_friends, then issue 175 has been resolved.
        self.assertRaises(errors.RedirectException, self.r.user.get_friends)

    def test_get_hidden(self):
        post = next(self.r.user.get_submitted())
        post.hide()
        self.delay()  # The queue needs to be processed
        hidden = next(self.r.user.get_hidden())
        hidden.unhide()
        self.delay()
        self.assertFalse(hidden in self.r.user.get_hidden())

    def test_get_liked(self):
        # Pulls from get_disliked. Problem here may come from get_disliked
        entry = next(self.r.user.get_disliked())
        entry.upvote()
        self.delay()  # The queue needs to be processed
        self.assertFalse(entry in self.r.user.get_disliked())

    def test_get_redditor(self):
        self.assertEqual(self.other_user_id, self.other_user.id)

    def test_get_submitted(self):
        author = self.r.get_redditor(self.other_non_mod_name)
        self.assertTrue(list(author.get_submitted()))

    def test_user_set_on_login(self):
        self.assertTrue(isinstance(self.r.user, LoggedInRedditor))
class SettingsTest(unittest.TestCase, AuthenticatedHelper):
    """Tests for reading and writing subreddit settings/stylesheets."""

    def setUp(self):
        self.configure()
        self.subreddit = self.r.get_subreddit(self.sr)

    def test_set_settings(self):
        title = 'Reddit API Test %s' % uuid.uuid4()
        self.subreddit.set_settings(title, wikimode='anyone')
        self.assertEqual(self.subreddit.get_settings()['title'], title)

    def test_set_stylesheet(self):
        stylesheet = ('div.titlebox span.number:after {\ncontent: " %s"\n' %
                      uuid.uuid4())
        self.subreddit.set_stylesheet(stylesheet)
        fetched = self.subreddit.get_stylesheet()['stylesheet']
        self.assertEqual(stylesheet, fetched)

    def test_set_stylesheet_invalid_css(self):
        self.assertRaises(errors.BadCSS, self.subreddit.set_stylesheet,
                          'INVALID CSS')

    def test_update_settings_description(self):
        self.maxDiff = None
        before = self.subreddit.get_settings()
        before['description'] = 'Description %s' % uuid.uuid4()
        self.subreddit.update_settings(description=before['description'])
        after = self.subreddit.get_settings()
        # Only the previous-revision id should differ.
        key = 'prev_description_id'
        self.assertNotEqual(before[key], after[key])
        del before[key]
        del after[key]
        self.assertEqual(before, after)

    def test_update_settings_public_description(self):
        self.maxDiff = None
        before = self.subreddit.get_settings()
        before['public_description'] = 'Description %s' % uuid.uuid4()
        self.subreddit.update_settings(
            public_description=before['public_description'])
        after = self.subreddit.get_settings()
        # Only the previous-revision id should differ.
        key = 'prev_public_description_id'
        self.assertNotEqual(before[key], after[key])
        del before[key]
        del after[key]
        self.assertEqual(before, after)
class SubmissionCreateTest(unittest.TestCase, AuthenticatedHelper):
    """Tests for creating link and self submissions."""

    def setUp(self):
        self.configure()

    def test_create_duplicate_failure(self):
        def is_link(item):
            return not item.is_self
        existing = self.first(self.r.user.get_submitted(), is_link)
        self.assertRaises(errors.AlreadySubmitted, self.r.submit, self.sr,
                          existing.title, url=existing.url)

    def test_create_duplicate_success(self):
        def is_link(item):
            return not item.is_self
        existing = self.first(self.r.user.get_submitted(), is_link)
        # resubmit=True allows posting the same URL a second time.
        duplicate = self.r.submit(self.sr, existing.title, url=existing.url,
                                  resubmit=True)
        self.assertEqual(duplicate.title, existing.title)
        self.assertEqual(duplicate.url, existing.url)

    def test_create_link_through_subreddit(self):
        unique = uuid.uuid4()
        title = 'Test Link: %s' % unique
        url = 'http://bryceboe.com/?bleh=%s' % unique
        subreddit = self.r.get_subreddit(self.sr)
        created = subreddit.submit(title, url=url)
        self.assertEqual(created.title, title)
        self.assertEqual(created.url, url)

    def test_create_self_and_verify(self):
        title = 'Test Self: %s' % uuid.uuid4()
        content = 'BODY'
        created = self.r.submit(self.sr, title, text=content)
        self.assertEqual(created.title, title)
        self.assertEqual(created.selftext, content)
class SubmissionEditTest(unittest.TestCase, AuthenticatedHelper):
    """Tests for editing, distinguishing and NSFW-marking submissions."""

    def setUp(self):
        self.configure()
        self.subreddit = self.r.get_subreddit(self.sr)

    def test_distinguish_and_undistinguish(self):
        def verify_distinguish(post):
            post.distinguish()
            post.refresh()
            self.assertTrue(post.distinguished)

        def verify_undistinguish(post):
            post.undistinguish()
            post.refresh()
            self.assertFalse(post.distinguished)

        self.disable_cache()
        post = self.r.get_submission(submission_id=self.submission_edit_id)
        # Run both directions, starting from whichever state is current.
        if post.distinguished:
            verify_undistinguish(post)
            verify_distinguish(post)
        else:
            verify_distinguish(post)
            verify_undistinguish(post)

    def test_edit_link(self):
        def is_link(item):
            return not item.is_self
        post = self.first(self.r.user.get_submitted(), is_link)
        # Link submissions have no body to edit.
        self.assertRaises(HTTPError, post.edit, 'text')

    def test_edit_self(self):
        def is_self_post(item):
            return item.is_self
        post = self.first(self.r.user.get_submitted(), is_self_post)
        new_body = '%s\n\n+Edit Text' % post.selftext
        post = post.edit(new_body)
        self.assertEqual(post.selftext, new_body)

    def test_mark_as_nsfw_as_author(self):
        self.disable_cache()
        self.r.login(self.other_non_mod_name, self.other_non_mod_pswd)
        post = self.r.get_submission(submission_id="1nt8co")
        self.assertEqual(post.author, self.r.user)
        originally_nsfw = post.over_18
        # Toggle from whichever state the submission is currently in.
        if originally_nsfw:
            post.unmark_as_nsfw()
        else:
            post.mark_as_nsfw()
        post.refresh()
        self.assertNotEqual(originally_nsfw, post.over_18)

    def test_mark_as_nsfw_as_mod(self):
        self.disable_cache()
        post = self.r.get_submission(submission_id="1nt8co")
        post.mark_as_nsfw()
        post.refresh()
        self.assertTrue(post.over_18)

    def test_mark_as_nsfw_exception(self):
        self.disable_cache()
        self.r.login(self.other_non_mod_name, self.other_non_mod_pswd)

        def by_someone_else(item):
            return item.author != self.r.user
        post = self.first(self.subreddit.get_top(), by_someone_else)
        self.assertRaises(errors.ModeratorOrScopeRequired, post.mark_as_nsfw)

    def test_unmark_as_nsfw(self):
        self.disable_cache()
        post = self.r.get_submission(submission_id="1nt8co")
        post.unmark_as_nsfw()
        post.refresh()
        self.assertFalse(post.over_18)
class SubmissionTest(unittest.TestCase, AuthenticatedHelper):
    """Tests voting, saving, hiding, reporting and stickying."""

    def setUp(self):
        self.configure()

    def test_clear_vote(self):
        def is_downvoted(post):
            return post.likes is False
        post = self.first(self.r.user.get_submitted(), is_downvoted)
        post.clear_vote()
        # reload the submission to observe the change
        post = self.r.get_submission(submission_id=post.id)
        self.assertEqual(post.likes, None)

    def test_delete(self):
        post = list(self.r.user.get_submitted())[-1]
        post.delete()
        # reload the submission; a deleted post loses its author
        post = self.r.get_submission(submission_id=post.id)
        self.assertEqual(None, post.author)

    def test_downvote(self):
        def is_upvoted(post):
            return post.likes is True
        post = self.first(self.r.user.get_submitted(), is_upvoted)
        post.downvote()
        # reload the submission
        post = self.r.get_submission(submission_id=post.id)
        self.assertEqual(post.likes, False)

    def test_hide(self):
        self.disable_cache()

        def not_hidden(item):
            return not item.hidden
        post = self.first(self.r.user.get_submitted(), not_hidden)
        post.hide()
        post.refresh()
        self.assertTrue(post.hidden)

    def test_report(self):
        self.disable_cache()
        # login as new user to report submission
        oth = Reddit(USER_AGENT, disable_update_check=True)
        oth.login(self.other_user_name, self.other_user_pswd)
        subreddit = oth.get_subreddit(self.sr)

        def not_hidden(post):
            return not post.hidden
        post = self.first(subreddit.get_new(), not_hidden)
        post.report()

        # check if submission was reported
        def was_reported(report):
            return report.id == post.id
        self.first(self.r.get_subreddit(self.sr).get_reports(), was_reported)

    def test_save(self):
        def not_saved(post):
            return not post.saved
        post = self.first(self.r.user.get_submitted(), not_saved)
        post.save()
        # reload the submission
        post = self.r.get_submission(submission_id=post.id)
        self.assertTrue(post.saved)
        # verify in saved_links
        self.first(self.r.user.get_saved(), lambda item: item == post)

    def test_short_link(self):
        post = next(self.r.get_new())
        if self.r.config.is_reddit:
            self.assertTrue(post.id in post.short_link)
        else:
            # short links only exist on reddit.com proper
            self.assertRaises(errors.ClientException, getattr, post,
                              'short_link')

    def test_sticky_unsticky(self):
        def verify_sticky():
            post.sticky()
            post.refresh()
            self.assertTrue(post.stickied)

        def verify_unsticky():
            post.unsticky()
            post.refresh()
            self.assertFalse(post.stickied)

        self.disable_cache()
        post = self.r.get_submission(submission_id=self.submission_edit_id)
        # Run both directions, starting from whichever state is current.
        if post.stickied:
            verify_unsticky()
            verify_sticky()
        else:
            verify_sticky()
            verify_unsticky()

    def test_unhide(self):
        self.disable_cache()

        def is_hidden(post):
            return post.hidden
        post = self.first(self.r.user.get_submitted(), is_hidden)
        post.unhide()
        post.refresh()
        self.assertFalse(post.hidden)

    def test_unsave(self):
        def is_saved(post):
            return post.saved
        post = self.first(self.r.user.get_submitted(), is_saved)
        post.unsave()
        # reload the submission
        post = self.r.get_submission(submission_id=post.id)
        self.assertFalse(post.saved)

    def test_upvote(self):
        def not_voted(post):
            return post.likes is None
        post = self.first(self.r.user.get_submitted(), not_voted)
        post.upvote()
        # reload the submission
        post = self.r.get_submission(submission_id=post.id)
        self.assertEqual(post.likes, True)
class SubredditTest(unittest.TestCase, AuthenticatedHelper):
    """Tests for subreddit listings, search and (un)subscription."""

    def setUp(self):
        self.configure()
        self.subreddit = self.r.get_subreddit(self.sr)

    def test_attribute_error(self):
        self.assertRaises(AttributeError, getattr, self.subreddit, 'foo')

    def test_get_contributors_private(self):
        self.r.login(self.other_non_mod_name, self.other_non_mod_pswd)
        private_sub = self.r.get_subreddit(self.priv_sr)
        self.assertTrue(list(private_sub.get_contributors()))

    def test_get_contributors_public(self):
        self.assertTrue(list(self.subreddit.get_contributors()))

    def test_get_contributors_public_exception(self):
        self.r.login(self.other_non_mod_name, self.other_non_mod_pswd)
        self.assertRaises(errors.ModeratorRequired,
                          self.subreddit.get_contributors)

    def test_get_my_contributions(self):
        def is_target(subreddit):
            return text_type(subreddit) == self.sr
        self.first(self.r.get_my_contributions(), is_target)

    def test_get_my_moderation(self):
        def is_target(subreddit):
            return text_type(subreddit) == self.sr
        self.first(self.r.get_my_moderation(), is_target)

    def test_get_my_subreddits(self):
        for subreddit in self.r.get_my_subreddits():
            # pylint: disable-msg=W0212
            self.assertTrue(text_type(subreddit) in subreddit._info_url)

    @reddit_only
    def test_search(self):
        self.assertTrue(list(self.subreddit.search('test')))

    def test_get_subreddit_recommendations(self):
        self.assertTrue(self.r.get_subreddit_recommendations('python'))

    def test_subscribe_and_verify(self):
        self.subreddit.subscribe()

        def is_target(subreddit):
            return text_type(subreddit) == self.sr
        self.first(self.r.get_my_subreddits(), is_target)

    def test_subscribe_by_name_and_verify(self):
        self.r.subscribe(self.sr)

        def is_target(subreddit):
            return text_type(subreddit) == self.sr
        self.first(self.r.get_my_subreddits(), is_target)

    def test_unsubscribe_and_verify(self):
        self.subreddit.unsubscribe()

        def not_target(subreddit):
            return text_type(subreddit) != self.sr
        self.assertTrue(all(not_target(sub)
                            for sub in self.r.get_my_subreddits()))

    def test_unsubscribe_by_name_and_verify(self):
        self.r.unsubscribe(self.sr)

        def not_target(subreddit):
            return text_type(subreddit) != self.sr
        self.assertTrue(all(not_target(sub)
                            for sub in self.r.get_my_subreddits()))
class ToRedditListTest(unittest.TestCase, BasicHelper):
    """Tests for the internal._to_reddit_list serialization helper."""
    def setUp(self):
        self.configure()
    def test__to_reddit_list(self):
        # pylint: disable-msg=W0212
        output = internal._to_reddit_list('hello')
        # assertEqual, not the deprecated assertEquals alias (removed in
        # Python 3.12); also matches every other assertion in this class.
        self.assertEqual('hello', output)
    def test__to_reddit_list_with_list(self):
        # pylint: disable-msg=W0212
        output = internal._to_reddit_list(['hello'])
        self.assertEqual('hello', output)
    def test__to_reddit_list_with_empty_list(self):
        # pylint: disable-msg=W0212
        output = internal._to_reddit_list([])
        self.assertEqual('', output)
    def test__to_reddit_list_with_big_list(self):
        # pylint: disable-msg=W0212
        output = internal._to_reddit_list(['hello', 'world'])
        self.assertEqual('hello,world', output)
    def test__to_reddit_list_with_object(self):
        obj = self.r.get_subreddit(self.sr)
        # pylint: disable-msg=W0212
        output = internal._to_reddit_list(obj)
        self.assertEqual(self.sr, output)
    def test__to_reddit_list_with_object_in_list(self):
        obj = self.r.get_subreddit(self.sr)
        # pylint: disable-msg=W0212
        output = internal._to_reddit_list([obj])
        self.assertEqual(self.sr, output)
    def test__to_reddit_list_with_mix(self):
        obj = self.r.get_subreddit(self.sr)
        # pylint: disable-msg=W0212
        output = internal._to_reddit_list([obj, 'hello'])
        self.assertEqual("{0},{1}".format(self.sr, 'hello'), output)
class WikiTests(unittest.TestCase, BasicHelper):
    """Tests for wiki page retrieval, listing and editing."""

    def setUp(self):
        self.configure()
        self.subreddit = self.r.get_subreddit(self.sr)

    def test_edit_wiki_page(self):
        self.r.login(self.un, self.un_pswd)
        wiki_page = self.subreddit.get_wiki_page('index')
        new_body = 'Body: {0}'.format(uuid.uuid4())
        wiki_page.edit(new_body)
        self.disable_cache()
        wiki_page = self.subreddit.get_wiki_page('index')
        self.assertEqual(new_body, wiki_page.content_md)

    def test_get_wiki_page(self):
        fetched = self.r.get_wiki_page(self.sr, 'index')
        self.assertEqual('{0}:index'.format(self.sr), text_type(fetched))

    def test_get_wiki_pages(self):
        pages = self.subreddit.get_wiki_pages()
        self.assertTrue(pages)
        refetched = self.subreddit.get_wiki_page(pages[0].page).content_md
        self.assertEqual(pages[0].content_md, refetched)

    def test_revision_by(self):
        self.assertTrue(any(page.revision_by for page in
                            self.subreddit.get_wiki_pages()))
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
|
x89/praw
|
praw/tests/__init__.py
|
Python
|
gpl-3.0
| 75,843
|
[
"VisIt"
] |
7ca1cb2297ace6702339125e21167ca8158a3563d768993b6ea7f52231c1a1df
|
""" Test functions for stats module
"""
from __future__ import division, print_function, absolute_import
import warnings
import re
import sys
import pickle
from numpy.testing import (TestCase, run_module_suite, assert_equal,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_raises, assert_warns, dec)
from nose import SkipTest
import numpy
import numpy as np
from numpy import typecodes, array
from scipy import special
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy
from test_continuous_basic import distcont
# python -OO strips docstrings
# True when running under -OO, so docstring-dependent tests can skip.
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
# Generate test cases to test cdf and distribution consistency.
# Note that this list does not include all distributions.
# Each name below is consumed by test_all_distributions.
dists = ['uniform', 'norm', 'lognorm', 'expon', 'beta',
         'powerlaw', 'bradford', 'burr', 'fisk', 'cauchy', 'halfcauchy',
         'foldcauchy', 'gamma', 'gengamma', 'loggamma',
         'alpha', 'anglit', 'arcsine', 'betaprime', 'dgamma',
         'exponnorm', 'exponweib', 'exponpow', 'frechet_l', 'frechet_r',
         'gilbrat', 'f', 'ncf', 'chi2', 'chi', 'nakagami', 'genpareto',
         'genextreme', 'genhalflogistic', 'pareto', 'lomax', 'halfnorm',
         'halflogistic', 'fatiguelife', 'foldnorm', 'ncx2', 't', 'nct',
         'weibull_min', 'weibull_max', 'dweibull', 'maxwell', 'rayleigh',
         'genlogistic', 'logistic', 'gumbel_l', 'gumbel_r', 'gompertz',
         'hypsecant', 'laplace', 'reciprocal', 'trapz', 'triang', 'tukeylambda',
         'vonmises', 'vonmises_line', 'pearson3', 'gennorm', 'halfgennorm',
         'rice', 'kappa4', 'kappa3', 'truncnorm']
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = '%s does not have attribute %s' % (a, b)
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
    """f_gen must remain importable; regression for scipy/scipy#3802."""
    # https://github.com/scipy/scipy/issues/3802
    module = scipy.stats.distributions
    _assert_hasattr(module, 'f_gen')
# check function for test generator
def check_distribution(dist, args, alpha):
D, pval = stats.kstest(dist, '', args=args, N=1000)
if (pval < alpha):
D, pval = stats.kstest(dist, '', args=args, N=1000)
assert_(pval > alpha,
msg="D = {}; pval = {}; alpha = {}; args = {}".format(
D, pval, alpha, args))
# nose test generator
def test_all_distributions():
    """Yield a KS consistency check for every distribution in `dists`."""
    for dist in dists:
        dist_gen = getattr(stats, dist)
        num_args = dist_gen.numargs
        alpha = 0.01
        if dist == 'fatiguelife':
            # fatiguelife is noisier; loosen the significance level.
            alpha = 0.001
        if dist == 'trapz':
            shape_args = tuple(np.sort(np.random.random(num_args)))
        elif dist == 'triang':
            shape_args = tuple(np.random.random(num_args))
        elif dist == 'reciprocal' or dist == 'truncnorm':
            # These require the second shape parameter above the first.
            vals = np.random.random(num_args)
            vals[1] = vals[0] + 1.0
            shape_args = tuple(vals)
        elif dist == 'vonmises':
            yield check_distribution, dist, (10,), alpha
            yield check_distribution, dist, (101,), alpha
            shape_args = tuple(1.0 + np.random.random(num_args))
        else:
            shape_args = tuple(1.0 + np.random.random(num_args))
        yield check_distribution, dist, shape_args, alpha
def check_vonmises_pdf_periodic(k, l, s, x):
    """The von Mises pdf must be periodic with period 2*pi*scale."""
    frozen = stats.vonmises(k, loc=l, scale=s)
    period = 2 * numpy.pi * s
    assert_almost_equal(frozen.pdf(x), frozen.pdf(x % period))
def check_vonmises_cdf_periodic(k, l, s, x):
    """The von Mises cdf must be periodic (mod 1) with period 2*pi*scale."""
    frozen = stats.vonmises(k, loc=l, scale=s)
    period = 2 * numpy.pi * s
    assert_almost_equal(frozen.cdf(x) % 1, frozen.cdf(x % period) % 1)
def test_vonmises_pdf_periodic():
    """Yield pdf/cdf periodicity checks over kappa/loc/scale combos."""
    for kappa in [0.1, 1, 101]:
        for point in [0, 1, numpy.pi, 10, 100]:
            yield check_vonmises_pdf_periodic, kappa, 0, 1, point
            yield check_vonmises_pdf_periodic, kappa, 1, 1, point
            yield check_vonmises_pdf_periodic, kappa, 0, 10, point
            yield check_vonmises_cdf_periodic, kappa, 0, 1, point
            yield check_vonmises_cdf_periodic, kappa, 1, 1, point
            yield check_vonmises_cdf_periodic, kappa, 0, 10, point
def test_vonmises_line_support():
    """vonmises_line is supported on [-pi, pi]."""
    endpoints = (stats.vonmises_line.a, stats.vonmises_line.b)
    assert_equal(endpoints, (-np.pi, np.pi))
def test_vonmises_numerical():
    """The cdf at the center stays 0.5 even for very large kappa."""
    frozen = stats.vonmises(800)
    assert_almost_equal(frozen.cdf(0), 0.5)
def test_support():
    """gh-6235: pdf is 0 and logpdf is -inf at the support endpoints."""
    def check_open_support(rvs, args):
        dist = getattr(stats, rvs)
        assert_almost_equal(dist.pdf(dist.a, *args), 0)
        assert_equal(dist.logpdf(dist.a, *args), -np.inf)
        assert_almost_equal(dist.pdf(dist.b, *args), 0)
        assert_equal(dist.logpdf(dist.b, *args), -np.inf)
    open_support_dists = ['alpha', 'arcsine', 'betaprime', 'burr', 'burr12',
                          'fatiguelife', 'invgamma', 'invgauss', 'invweibull',
                          'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gilbrat',
                          'powerlognorm', 'rayleigh', 'wald']
    shape_params = dict(distcont)
    for dist in open_support_dists:
        yield check_open_support, dist, shape_params[dist]
class TestRandInt(TestCase):
    """Sanity checks for the randint discrete distribution."""
    def test_rvs(self):
        samples = stats.randint.rvs(5, 30, size=100)
        assert_(numpy.all(samples < 30) & numpy.all(samples >= 5))
        assert_(len(samples) == 100)
        samples = stats.randint.rvs(5, 30, size=(2, 50))
        assert_(numpy.shape(samples) == (2, 50))
        assert_(samples.dtype.char in typecodes['AllInteger'])
        single = stats.randint.rvs(15, 46)
        assert_((single >= 15) & (single < 46))
        assert_(isinstance(single, numpy.ScalarType), msg=repr(type(single)))
        frozen_samples = stats.randint(15, 46).rvs(3)
        assert_(frozen_samples.dtype.char in typecodes['AllInteger'])
    def test_pdf(self):
        support = numpy.r_[0:36]
        expected = numpy.where((support >= 5) & (support < 30),
                               1.0/(30-5), 0)
        computed = stats.randint.pmf(support, 5, 30)
        assert_array_almost_equal(computed, expected)
    def test_cdf(self):
        x = numpy.r_[0:36:100j]
        k = numpy.floor(x)
        expected = numpy.select([k >= 30, k >= 5],
                                [1.0, (k-5.0+1)/(30-5.0)], 0)
        computed = stats.randint.cdf(x, 5, 30)
        assert_array_almost_equal(computed, expected, decimal=12)
class TestBinom(TestCase):
    """Sanity checks for the binomial distribution."""
    def test_rvs(self):
        samples = stats.binom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(samples >= 0) & numpy.all(samples <= 10))
        assert_(numpy.shape(samples) == (2, 50))
        assert_(samples.dtype.char in typecodes['AllInteger'])
        single = stats.binom.rvs(10, 0.75)
        assert_(isinstance(single, int))
        frozen_samples = stats.binom(10, 0.75).rvs(3)
        assert_(isinstance(frozen_samples, numpy.ndarray))
        assert_(frozen_samples.dtype.char in typecodes['AllInteger'])
    def test_pmf(self):
        # regression test for Ticket #1842
        prob_all = stats.binom.pmf(100, 100, 1)
        prob_none = stats.binom.pmf(0, 100, 0)
        assert_allclose(prob_all, 1.0, rtol=1e-15, atol=0)
        assert_allclose(prob_none, 1.0, rtol=1e-15, atol=0)
    def test_entropy(self):
        # Compare against the entropy computed from the exact pmf.
        frozen = stats.binom(2, 0.5)
        expected_p = np.array([0.25, 0.5, 0.25])
        expected_h = -sum(xlogy(expected_p, expected_p))
        assert_allclose(frozen.entropy(), expected_h)
        # Degenerate distributions carry zero entropy.
        assert_equal(stats.binom(2, 0.0).entropy(), 0.0)
        assert_equal(stats.binom(2, 1.0).entropy(), 0.0)
    def test_warns_p0(self):
        # no spurious warnings are generated for p=0; gh-3817
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            assert_equal(stats.binom(n=2, p=0).mean(), 0)
            assert_equal(stats.binom(n=2, p=0).std(), 0)
class TestBernoulli(TestCase):
    """Sanity checks for the Bernoulli distribution."""
    def test_rvs(self):
        samples = stats.bernoulli.rvs(0.75, size=(2, 50))
        assert_(numpy.all(samples >= 0) & numpy.all(samples <= 1))
        assert_(numpy.shape(samples) == (2, 50))
        assert_(samples.dtype.char in typecodes['AllInteger'])
        single = stats.bernoulli.rvs(0.75)
        assert_(isinstance(single, int))
        frozen_samples = stats.bernoulli(0.75).rvs(3)
        assert_(isinstance(frozen_samples, numpy.ndarray))
        assert_(frozen_samples.dtype.char in typecodes['AllInteger'])
    def test_entropy(self):
        # Compare against the closed-form Bernoulli entropy.
        frozen = stats.bernoulli(0.25)
        expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)
        assert_allclose(frozen.entropy(), expected_h)
        # Degenerate distributions carry zero entropy.
        assert_equal(stats.bernoulli(0.0).entropy(), 0.0)
        assert_equal(stats.bernoulli(1.0).entropy(), 0.0)
class TestBradford(TestCase):
    # gh-6216
    def test_cdf_ppf(self):
        """ppf must invert cdf even for tiny arguments."""
        shape = 0.1
        points = np.logspace(-20, -4)
        quantiles = stats.bradford.cdf(points, shape)
        roundtrip = stats.bradford.ppf(quantiles, shape)
        assert_allclose(points, roundtrip)
class TestNBinom(TestCase):
    """Sanity checks for the negative binomial distribution."""
    def test_rvs(self):
        samples = stats.nbinom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(samples >= 0))
        assert_(numpy.shape(samples) == (2, 50))
        assert_(samples.dtype.char in typecodes['AllInteger'])
        single = stats.nbinom.rvs(10, 0.75)
        assert_(isinstance(single, int))
        frozen_samples = stats.nbinom(10, 0.75).rvs(3)
        assert_(isinstance(frozen_samples, numpy.ndarray))
        assert_(frozen_samples.dtype.char in typecodes['AllInteger'])
    def test_pmf(self):
        # regression test for ticket 1779
        assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
                        stats.nbinom.pmf(700, 721, 0.52))
        # logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
        log_prob = scipy.stats.nbinom.logpmf(0, 1, 1)
        assert_equal(log_prob, 0)
class TestGeom(TestCase):
    """Tests for the geometric distribution."""

    def test_rvs(self):
        # Bulk samples are integers of the requested shape.
        sample = stats.geom.rvs(0.75, size=(2, 50))
        assert_(numpy.all(sample >= 0))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        # A single draw is a plain Python int.
        single = stats.geom.rvs(0.75)
        assert_(isinstance(single, int))
        # A frozen distribution returns an integer ndarray.
        frozen_sample = stats.geom(0.75).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])

    def test_pmf(self):
        # pmf(k) = p * (1-p)**(k-1); with p = 0.5 the values halve.
        assert_array_almost_equal(stats.geom.pmf([1, 2, 3], 0.5),
                                  [0.5, 0.25, 0.125])

    def test_logpmf(self):
        # regression test for ticket 1793: logpmf agrees with log(pmf)
        log_of_pmf = np.log(stats.geom.pmf([1, 2, 3], 0.5))
        logpmf = stats.geom.logpmf([1, 2, 3], 0.5)
        assert_allclose(log_of_pmf, logpmf, rtol=1e-15, atol=0)
        # regression test for gh-4028: p=1 must give logpmf(1) == 0
        assert_equal(stats.geom.logpmf(1, 1), 0.0)

    def test_cdf_sf(self):
        # cdf and sf are exact complements on the support.
        probs = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(stats.geom.cdf([1, 2, 3], 0.5), probs)
        assert_array_almost_equal(stats.geom.sf([1, 2, 3], 0.5), 1 - probs)

    def test_logcdf_logsf(self):
        # Log variants match log(cdf) and log1p(-cdf).
        probs = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(stats.geom.logcdf([1, 2, 3], 0.5),
                                  np.log(probs))
        assert_array_almost_equal(stats.geom.logsf([1, 2, 3], 0.5),
                                  np.log1p(-probs))

    def test_ppf(self):
        # ppf inverts the cdf back onto the integer support.
        quantiles = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
        assert_array_almost_equal(quantiles, array([1.0, 2.0, 3.0]))
class TestGennorm(TestCase):
    """gennorm reduces to known distributions at special beta values."""

    def test_laplace(self):
        # test against Laplace (special case for beta=1)
        pts = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(pts, 1),
                            stats.laplace.pdf(pts))

    def test_norm(self):
        # test against normal (special case for beta=2)
        pts = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(pts, 2),
                            stats.norm.pdf(pts, scale=2**-.5))
class TestHalfgennorm(TestCase):
    """halfgennorm reduces to known distributions at special beta values."""

    def test_expon(self):
        # test against exponential (special case for beta=1)
        pts = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(pts, 1),
                            stats.expon.pdf(pts))

    def test_halfnorm(self):
        # test against half normal (special case for beta=2)
        pts = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(pts, 2),
                            stats.halfnorm.pdf(pts, scale=2**-.5))

    def test_gennorm(self):
        # halfgennorm is twice the (symmetric) gennorm on the half line.
        pts = [1, 2, 3]
        beta = .497324
        assert_almost_equal(stats.halfgennorm.pdf(pts, beta),
                            2 * stats.gennorm.pdf(pts, beta))
class TestTruncnorm(TestCase):
def test_ppf_ticket1131(self):
vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
assert_array_almost_equal(vals, expected)
def test_isf_ticket1131(self):
vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
assert_array_almost_equal(vals, expected)
def test_gh_2477_small_values(self):
# Check a case that worked in the original issue.
low, high = -11, -10
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
# Check a case that failed in the original issue.
low, high = 10, 11
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_2477_large_values(self):
# Check a case that fails because of extreme tailness.
raise SkipTest('truncnorm rvs is know to fail at extreme tails')
low, high = 100, 101
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_1489_trac_962_rvs(self):
# Check the original example.
low, high = 10, 15
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
class TestHypergeom(TestCase):
    """Tests for the hypergeometric distribution."""

    def test_rvs(self):
        # Bulk samples are bounded integers of the requested shape.
        vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
        assert_(numpy.all(vals >= 0) &
                numpy.all(vals <= 3))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        # A single draw is a plain Python int.
        val = stats.hypergeom.rvs(20, 3, 10)
        assert_(isinstance(val, int))
        # A frozen distribution returns an integer ndarray.
        val = stats.hypergeom(20, 3, 10).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])

    def test_precision(self):
        # comparison number from mpmath
        M = 2500
        n = 50
        N = 500
        tot = M
        good = n
        hgpmf = stats.hypergeom.pmf(2, tot, good, N)
        assert_almost_equal(hgpmf, 0.0010114963068932233, 11)

    def test_args(self):
        # test correct output for corner cases of arguments
        # see gh-2325
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)

    def test_cdf_above_one(self):
        # for some values of parameters, hypergeom cdf was >1, see gh-2238
        assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)

    def test_precision2(self):
        # Test hypergeom precision for large numbers. See #1218.
        # Results compared with those from R.
        oranges = 9.9e4
        pears = 1.1e5
        fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
        quantile = 2e4
        res = []
        for eaten in fruits_eaten:
            res.append(stats.hypergeom.sf(quantile, oranges + pears, oranges,
                                          eaten))
        expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
                             8.265601e-11, 0.1237904, 1])
        assert_allclose(res, expected, atol=0, rtol=5e-7)
        # Test with array_like first argument
        quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
        res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
        expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
        assert_allclose(res2, expected2, atol=0, rtol=5e-7)

    def test_entropy(self):
        # Simple tests of entropy.
        hg = stats.hypergeom(4, 1, 1)
        h = hg.entropy()
        expected_p = np.array([0.75, 0.25])
        expected_h = -np.sum(xlogy(expected_p, expected_p))
        assert_allclose(h, expected_h)
        # A deterministic draw (M=n=N=1) has zero entropy.
        hg = stats.hypergeom(1, 1, 1)
        h = hg.entropy()
        assert_equal(h, 0.0)

    def test_logsf(self):
        # Test logsf for very large numbers. See issue #4982
        # Results compare with those from R (v3.2.0):
        # phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
        # -2239.771
        k = 1e4
        M = 1e7
        n = 1e6
        N = 5e4
        result = stats.hypergeom.logsf(k, M, n, N)
        # Renamed from the original misspelling "exspected".
        expected = -2239.771  # From R
        assert_almost_equal(result, expected, decimal=3)
class TestLoggamma(TestCase):
    """Tests for the log-gamma distribution."""

    def test_stats(self):
        # The following precomputed values are from the table in section 2.2
        # of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
        # Chan (thesis, McMaster University, 1993).
        table = np.array([
            # c, mean, var, skew, exc. kurt.
            0.5, -1.9635, 4.9348, -1.5351, 4.0000,
            1.0, -0.5772, 1.6449, -1.1395, 2.4000,
            12.0, 2.4427, 0.0869, -0.2946, 0.1735,
            ]).reshape(-1, 5)
        for c, mean, var, skew, kurt in table:
            # Request moments as 'mvsk' to match the comparison order below.
            # The original passed 'msvk'; scipy returns the moments in fixed
            # m, v, s, k order regardless of the string order, so that
            # happened to work, but it read like a transposition bug.
            computed = stats.loggamma.stats(c, moments='mvsk')
            assert_array_almost_equal(computed, [mean, var, skew, kurt],
                                      decimal=4)
class TestLogistic(TestCase):
    # gh-6226
    def test_cdf_ppf(self):
        # ppf must invert cdf over a wide range.
        pts = np.linspace(-20, 20)
        roundtrip = stats.logistic.ppf(stats.logistic.cdf(pts))
        assert_allclose(pts, roundtrip)

    def test_sf_isf(self):
        # isf must invert sf over a wide range.
        pts = np.linspace(-20, 20)
        roundtrip = stats.logistic.isf(stats.logistic.sf(pts))
        assert_allclose(pts, roundtrip)

    def test_extreme_values(self):
        # p is chosen so that 1 - (1 - p) == p in double precision
        p = 9.992007221626409e-16
        desired = 34.53957599234088
        assert_allclose(stats.logistic.ppf(1 - p), desired)
        assert_allclose(stats.logistic.isf(p), desired)
class TestLogser(TestCase):
    """Tests for the logarithmic (log-series) distribution."""
    def test_rvs(self):
        # Bulk samples: support starts at 1, dtype is integral.
        vals = stats.logser.rvs(0.75, size=(2, 50))
        assert_(numpy.all(vals >= 1))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        # A single draw is a plain Python int.
        val = stats.logser.rvs(0.75)
        assert_(isinstance(val, int))
        # A frozen distribution returns an integer ndarray.
        val = stats.logser(0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_pmf_small_p(self):
        # pmf must not lose precision for tiny p (log1p territory).
        m = stats.logser.pmf(4, 1e-20)
        # The expected value was computed using mpmath:
        #   >>> import mpmath
        #   >>> mpmath.mp.dps = 64
        #   >>> k = 4
        #   >>> p = mpmath.mpf('1e-20')
        #   >>> float(-(p**k)/k/mpmath.log(1-p))
        #   2.5e-61
        # It is also clear from noticing that for very small p,
        # log(1-p) is approximately -p, and the formula becomes
        #    p**(k-1) / k
        assert_allclose(m, 2.5e-61)
    def test_mean_small_p(self):
        # Mean must stay accurate (close to 1) as p -> 0.
        m = stats.logser.mean(1e-8)
        # The expected mean was computed using mpmath:
        #   >>> import mpmath
        #   >>> mpmath.dps = 60
        #   >>> p = mpmath.mpf('1e-8')
        #   >>> float(-p / ((1 - p)*mpmath.log(1 - p)))
        #   1.000000005
        assert_allclose(m, 1.000000005)
class TestPareto(TestCase):
    """Check pareto.stats() against the closed-form Pareto moments.

    For shape b: the n-th moment exists only when b > n, so mean is inf
    for b <= 1, variance is inf for b <= 2, skew is nan for b <= 3 and
    kurtosis is nan for b <= 4.
    """
    def test_stats(self):
        # Check the stats() method with some simple values. Also check
        # that the calculations do not trigger RuntimeWarnings.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            # b = 0.5: no finite moments at all.
            m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            # b = 1.0: boundary case for the mean.
            m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            # 1 < b <= 2: finite mean b/(b-1), infinite variance.
            m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
            assert_equal(m, 3.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
            assert_equal(m, 2.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            # 2 < b <= 3: finite variance b/((b-1)^2 (b-2)), no skew.
            m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
            assert_allclose(m, 2.5 / 1.5)
            assert_allclose(v, 2.5 / (1.5*1.5*0.5))
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
            assert_allclose(m, 1.5)
            assert_allclose(v, 0.75)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            # 3 < b <= 4: finite skew 2(1+b)/(b-3) * sqrt((b-2)/b).
            m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
            assert_allclose(m, 3.5 / 2.5)
            assert_allclose(v, 3.5 / (2.5*2.5*1.5))
            assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
            assert_allclose(m, 4.0 / 3.0)
            assert_allclose(v, 4.0 / 18.0)
            assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
            assert_equal(k, np.nan)
            # b > 4: all four moments finite.
            m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
            assert_allclose(m, 4.5 / 3.5)
            assert_allclose(v, 4.5 / (3.5*3.5*2.5))
            assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
            assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
class TestGenpareto(TestCase):
def test_ab(self):
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
stats.genpareto._argcheck(c) # ugh
assert_equal(stats.genpareto.a, 0.)
assert_(np.isposinf(stats.genpareto.b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.)
stats.genpareto._argcheck(c)
assert_allclose([stats.genpareto.a, stats.genpareto.b], [0., 0.5])
def test_c0(self):
# with c=0, genpareto reduces to the exponential distribution
rv = stats.genpareto(c=0.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.expon.pdf(x))
assert_allclose(rv.cdf(x), stats.expon.cdf(x))
assert_allclose(rv.sf(x), stats.expon.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.expon.ppf(q))
def test_cm1(self):
# with c=-1, genpareto reduces to the uniform distr on [0, 1]
rv = stats.genpareto(c=-1.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
assert_allclose(rv.sf(x), stats.uniform.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.uniform.ppf(q))
# logpdf(1., c=-1) should be zero
assert_allclose(rv.logpdf(1), 0)
def test_x_inf(self):
# make sure x=inf is handled gracefully
rv = stats.genpareto(c=0.1)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=0.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=-1.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
def test_c_continuity(self):
# pdf is continuous at c=0, -1
x = np.linspace(0, 10, 30)
for c in [0, -1]:
pdf0 = stats.genpareto.pdf(x, c)
for dc in [1e-14, -1e-14]:
pdfc = stats.genpareto.pdf(x, c + dc)
assert_allclose(pdf0, pdfc, atol=1e-12)
cdf0 = stats.genpareto.cdf(x, c)
for dc in [1e-14, 1e-14]:
cdfc = stats.genpareto.cdf(x, c + dc)
assert_allclose(cdf0, cdfc, atol=1e-12)
def test_c_continuity_ppf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
ppf0 = stats.genpareto.ppf(q, c)
for dc in [1e-14, -1e-14]:
ppfc = stats.genpareto.ppf(q, c + dc)
assert_allclose(ppf0, ppfc, atol=1e-12)
def test_c_continuity_isf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
isf0 = stats.genpareto.isf(q, c)
for dc in [1e-14, -1e-14]:
isfc = stats.genpareto.isf(q, c + dc)
assert_allclose(isf0, isfc, atol=1e-12)
def test_cdf_ppf_roundtrip(self):
# this should pass with machine precision. hat tip @pbrod
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [1e-8, -1e-18, 1e-15, -1e-15]:
assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
q, atol=1e-15)
def test_logsf(self):
logp = stats.genpareto.logsf(1e10, .01, 0, 1)
assert_allclose(logp, -1842.0680753952365)
class TestPearson3(TestCase):
    """Tests for the Pearson type III distribution."""
    def test_rvs(self):
        # Bulk samples are floats of the requested shape.
        vals = stats.pearson3.rvs(0.1, size=(2, 50))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllFloat'])
        # A single draw is a plain Python float.
        val = stats.pearson3.rvs(0.5)
        assert_(isinstance(val, float))
        # A frozen distribution returns a float ndarray.
        val = stats.pearson3(0.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllFloat'])
        assert_(len(val) == 3)
    def test_pdf(self):
        # Reference values: precomputed; skew=0 is the normal special case.
        vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
        assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
                        atol=1e-6)
        vals = stats.pearson3.pdf(-3, 0.1)
        assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
        vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)
        assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
                                        0.39885918, 0.23413173]), atol=1e-6)
    def test_cdf(self):
        # Reference values: precomputed, same parameter combinations as pdf.
        vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
        assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
                        atol=1e-6)
        vals = stats.pearson3.cdf(-3, 0.1)
        assert_allclose(vals, [0.00082256], atol=1e-6)
        vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)
        assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
                               5.06649130e-01, 8.41442111e-01], atol=1e-6)
class TestKappa4(TestCase):
    """kappa4 collapses to several known distributions at special (h, k)."""

    def test_cdf_genpareto(self):
        # h = 1 and k != 0 is generalized Pareto
        pts = [0.0, 0.1, 0.2, 0.5]
        h = 1.0
        for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0, 1.9]:
            computed = stats.kappa4.cdf(pts, h, k)
            # shape parameter is opposite what is expected
            reference = stats.genpareto.cdf(pts, -k)
            assert_allclose(computed, reference)

    def test_cdf_genextreme(self):
        # h = 0 and k != 0 is generalized extreme value
        pts = np.linspace(-5, 5, 10)
        shapes = np.linspace(-3, 3, 10)
        assert_allclose(stats.kappa4.cdf(pts, 0.0, shapes),
                        stats.genextreme.cdf(pts, shapes))

    def test_cdf_expon(self):
        # h = 1 and k = 0 is exponential
        pts = np.linspace(0, 10, 10)
        assert_allclose(stats.kappa4.cdf(pts, 1.0, 0.0),
                        stats.expon.cdf(pts))

    def test_cdf_gumbel_r(self):
        # h = 0 and k = 0 is gumbel_r
        pts = np.linspace(-5, 5, 10)
        assert_allclose(stats.kappa4.cdf(pts, 0.0, 0.0),
                        stats.gumbel_r.cdf(pts))

    def test_cdf_logistic(self):
        # h = -1 and k = 0 is logistic
        pts = np.linspace(-5, 5, 10)
        assert_allclose(stats.kappa4.cdf(pts, -1.0, 0.0),
                        stats.logistic.cdf(pts))

    def test_cdf_uniform(self):
        # h = 1 and k = 1 is uniform
        pts = np.linspace(-5, 5, 10)
        assert_allclose(stats.kappa4.cdf(pts, 1.0, 1.0),
                        stats.uniform.cdf(pts))
class TestPoisson(TestCase):
    """Tests for the Poisson distribution."""

    def test_pmf_basic(self):
        # With mu = log(2): pmf(k) = exp(-mu) * mu**k / k! = ln2**k / (2*k!)
        ln2 = np.log(2)
        assert_allclose(stats.poisson.pmf([0, 1, 2], ln2),
                        [0.5, ln2/2, ln2**2/4])

    def test_mu0(self):
        # Edge case: mu=0 concentrates all mass at k=0.
        assert_array_equal(stats.poisson.pmf([0, 1, 2], 0), [1, 0, 0])
        assert_equal(stats.poisson.interval(0.95, 0), (0, 0))

    def test_rvs(self):
        # Bulk samples are non-negative integers of the requested shape.
        sample = stats.poisson.rvs(0.5, size=(2, 50))
        assert_(numpy.all(sample >= 0))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        # A single draw is a plain Python int.
        single = stats.poisson.rvs(0.5)
        assert_(isinstance(single, int))
        # A frozen distribution returns an integer ndarray.
        frozen_sample = stats.poisson(0.5).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])

    def test_stats(self):
        # Scalar shape: mean = var = mu, skew = mu**-0.5, kurt = 1/mu.
        mu = 16.0
        assert_allclose(stats.poisson.stats(mu, moments='mvsk'),
                        [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
        # Array shape, including the degenerate mu=0 entry.
        mu = np.array([0.0, 1.0, 2.0])
        result = stats.poisson.stats(mu, moments='mvsk')
        expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])
        assert_allclose(result, expected)
class TestZipf(TestCase):
    """Tests for the Zipf distribution."""

    def test_rvs(self):
        # Bulk samples: support starts at 1, dtype is integral.
        sample = stats.zipf.rvs(1.5, size=(2, 50))
        assert_(numpy.all(sample >= 1))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        # A single draw is a plain Python int.
        single = stats.zipf.rvs(1.5)
        assert_(isinstance(single, int))
        # A frozen distribution returns an integer ndarray.
        frozen_sample = stats.zipf(1.5).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])

    def test_moments(self):
        # n-th moment is finite iff a > n + 1
        mean, var = stats.zipf.stats(a=2.8)
        assert_(np.isfinite(mean))
        assert_equal(var, np.inf)
        skew, kurt = stats.zipf.stats(a=4.8, moments='sk')
        assert_(not np.isfinite([skew, kurt]).all())
class TestDLaplace(TestCase):
    """Tests for the discrete Laplace distribution."""
    def test_rvs(self):
        # Bulk samples are integers of the requested shape.
        vals = stats.dlaplace.rvs(1.5, size=(2, 50))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        # A single draw is a plain Python int.
        val = stats.dlaplace.rvs(1.5)
        assert_(isinstance(val, int))
        # A frozen distribution returns an integer ndarray.
        val = stats.dlaplace(1.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
        assert_(stats.dlaplace.rvs(0.8) is not None)
    def test_stats(self):
        # compare the explicit formulas w/ direct summation using pmf
        a = 1.
        dl = stats.dlaplace(a)
        m, v, s, k = dl.stats('mvsk')
        # Truncate the infinite support at +/-N; tails beyond are negligible.
        N = 37
        xx = np.arange(-N, N+1)
        pp = dl.pmf(xx)
        m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)
        # Symmetry about 0 forces mean and skew to vanish exactly.
        assert_equal((m, s), (0, 0))
        assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)
    def test_stats2(self):
        # a = log(2) gives simple closed-form variance and kurtosis.
        a = np.log(2.)
        dl = stats.dlaplace(a)
        m, v, s, k = dl.stats('mvsk')
        assert_equal((m, s), (0., 0.))
        assert_allclose((v, k), (4., 3.25))
class TestInvGamma(TestCase):
    """Tests for the inverse gamma distribution."""
    def test_invgamma_inf_gh_1866(self):
        # invgamma's moments are only finite for a>n
        # specific numbers checked w/ boost 1.54
        with warnings.catch_warnings():
            # No RuntimeWarnings should be emitted for infinite moments.
            warnings.simplefilter('error', RuntimeWarning)
            mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
            expected = [0.05461496450, 0.0001723162534, 1.020362676,
                        2.055616582]
            assert_allclose(mvsk, expected)
            # Array shapes: moments turn inf/nan as a crosses each n.
            a = [1.1, 3.1, 5.6]
            mvsk = stats.invgamma.stats(a=a, moments='mvsk')
            expected = ([10., 0.476190476, 0.2173913043],  # mmm
                        [np.inf, 0.2061430632, 0.01312749422],  # vvv
                        [np.nan, 41.95235392, 2.919025532],  # sss
                        [np.nan, np.nan, 24.51923076])  # kkk
            for x, y in zip(mvsk, expected):
                assert_almost_equal(x, y)
    def test_cdf_ppf(self):
        # gh-6245
        x = np.logspace(-2.6, 0)
        y = stats.invgamma.cdf(x, 1)
        xx = stats.invgamma.ppf(y, 1)
        assert_allclose(x, xx)
    def test_sf_isf(self):
        # gh-6245
        if sys.maxsize > 2**32:
            x = np.logspace(2, 100)
        else:
            # invgamma roundtrip on 32-bit systems has relative accuracy
            # ~1e-15 until x=1e+15, and becomes inf above x=1e+18
            x = np.logspace(2, 18)
        y = stats.invgamma.sf(x, 1)
        xx = stats.invgamma.isf(y, 1)
        assert_allclose(x, xx, rtol=1.0)
class TestF(TestCase):
    """Tests for the F distribution."""
    def test_f_moments(self):
        # n-th moment of F distributions is only finite for n < dfd / 2
        m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
        assert_(np.isfinite(m))
        assert_(np.isfinite(v))
        assert_(np.isfinite(s))
        assert_(not np.isfinite(k))
    def test_moments_warnings(self):
        # no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')
    @dec.knownfailureif(True, 'f stats does not properly broadcast')
    def test_stats_broadcast(self):
        # stats do not fully broadcast just yet
        # Result intentionally unused: the call itself is the check.
        mv = stats.f.stats(dfn=11, dfd=[11, 12])
def test_rvgeneric_std():
    # Regression test for #1191: std() must broadcast over array shapes.
    expected = [1.29099445, 1.22474487]
    assert_array_almost_equal(stats.t.std([5, 6]), expected)
class TestRvDiscrete(TestCase):
    """Tests for rv_discrete distributions defined via (xk, pk) values."""
    def test_rvs(self):
        # Empirical frequencies of sampled states should approximate pk.
        states = [-1, 0, 1, 2, 3, 4]
        probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
        samples = 1000
        r = stats.rv_discrete(name='sample', values=(states, probability))
        x = r.rvs(size=samples)
        assert_(isinstance(x, numpy.ndarray))
        for s, p in zip(states, probability):
            assert_(abs(sum(x == s)/float(samples) - p) < 0.05)
        # A single draw is a plain Python int.
        x = r.rvs()
        assert_(isinstance(x, int))
    def test_entropy(self):
        # Basic tests of entropy.
        pvals = np.array([0.25, 0.45, 0.3])
        p = stats.rv_discrete(values=([0, 1, 2], pvals))
        expected_h = -sum(xlogy(pvals, pvals))
        h = p.entropy()
        assert_allclose(h, expected_h)
        # A one-point (deterministic) distribution has zero entropy.
        p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
        h = p.entropy()
        assert_equal(h, 0.0)
    def test_pmf(self):
        # pmf must vectorize and return 0 off the support.
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))
        x = [[1., 4.],
             [3., 2]]
        assert_allclose(rv.pmf(x),
                        [[0.5, 0.2],
                         [0., 0.3]], atol=1e-14)
    def test_cdf(self):
        # cdf is a right-continuous step function between the xk.
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))
        x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5]
        expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1]
        assert_allclose(rv.cdf(x_values), expected, atol=1e-14)
        # also check scalar arguments
        assert_allclose([rv.cdf(xx) for xx in x_values],
                        expected, atol=1e-14)
    def test_ppf(self):
        # ppf maps each quantile back onto a support point.
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))
        q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.]
        expected = [1, 1, 2, 2, 4, 4]
        assert_allclose(rv.ppf(q_values), expected, atol=1e-14)
        # also check scalar arguments
        assert_allclose([rv.ppf(q) for q in q_values],
                        expected, atol=1e-14)
    def test_cdf_ppf_next(self):
        # copied and special cased from test_discrete_basic
        vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1])
        rv = stats.rv_discrete(values=vals)
        assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8),
                           rv.xk[1:])
    def test_expect(self):
        # expect() with no function argument defaults to the mean.
        xk = [1, 2, 4, 6, 7, 11]
        pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]
        rv = stats.rv_discrete(values=(xk, pk))
        assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
    def test_bad_input(self):
        # Mismatched lengths, or pk not summing to 1, must raise ValueError.
        xk = [1, 2, 3]
        pk = [0.5, 0.5]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
        pk = [1, 2, 3]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
class TestSkewNorm(TestCase):
    """Tests for the skew-normal distribution."""
    def test_normal(self):
        # When the skewness is 0 the distribution is normal
        x = np.linspace(-5, 5, 100)
        assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),
                                  stats.norm.pdf(x))
    def test_rvs(self):
        # Sampling must honor the requested shape for both signs of a.
        shape = (3, 4, 5)
        x = stats.skewnorm.rvs(a=0.75, size=shape)
        assert_equal(shape, x.shape)
        x = stats.skewnorm.rvs(a=-3, size=shape)
        assert_equal(shape, x.shape)
    def test_moments(self):
        # Statistical check: sample moments of 1e6 draws should match the
        # analytic mvsk to ~2 decimals, for positive and negative skew.
        X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2)
        assert_array_almost_equal([np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)],
                                  stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk'),
                                  decimal=2)
        X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2)
        assert_array_almost_equal([np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)],
                                  stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk'),
                                  decimal=2)
class TestExpon(TestCase):
    """Tests for the exponential distribution."""

    def test_zero(self):
        # The standard exponential pdf at the left endpoint is exactly 1.
        assert_equal(stats.expon.pdf(0), 1)

    def test_tail(self):
        # Regression test for ticket 807.
        # cdf must stay accurate (not underflow) for tiny arguments...
        assert_equal(stats.expon.cdf(1e-18), 1e-18)
        # ...and sf/isf must round-trip exactly far into the tail.
        roundtrip = stats.expon.isf(stats.expon.sf(40))
        assert_equal(roundtrip, 40)
class TestExponNorm(TestCase):
    """Tests for the exponentially modified normal distribution."""
    def test_moments(self):
        # Some moment test cases based on non-loc/scaled formula
        def get_moms(lam, sig, mu):
            # See wikipedia for these formulae
            #  where it is listed as an exponentially modified gaussian
            # Returns [mean, variance, skew, excess kurtosis].
            opK2 = 1.0 + 1 / (lam*sig)**2
            exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
            exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
            return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]
        # scipy's shape parameter is K = 1/(lam*sig); check several
        # (mu, sig, lam) combinations against the closed forms.
        mu, sig, lam = 0, 1, 1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = -3, 2, 0.1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = 0, 3, 1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = -5, 11, 3.5
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
    def test_extremes_x(self):
        # Test for extreme values against overflows
        assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
        assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
class TestGenExpon(TestCase):
    """Tests for the generalized exponential distribution."""
    def test_pdf_unity_area(self):
        from scipy.integrate import simps
        # PDF should integrate to one
        p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
        assert_almost_equal(simps(p, dx=0.01), 1, 1)
    def test_cdf_bounds(self):
        # CDF should always be positive
        cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
        assert_(numpy.all((0 <= cdf) & (cdf <= 1)))
class TestExponpow(TestCase):
    """Tests for the exponential power distribution."""

    def test_tail(self):
        # Left-tail cdf accuracy: cdf(1e-10, b=2) ~ x**b = 1e-20.
        assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
        # sf/isf must round trip accurately.
        roundtrip = stats.exponpow.isf(stats.exponpow.sf(5, .8), .8)
        assert_almost_equal(roundtrip, 5)
class TestSkellam(TestCase):
    """Tests for the Skellam distribution (difference of two Poissons)."""
    def test_pmf(self):
        # comparison to R
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skpmfR = numpy.array(
                   [4.2254582961926893e-005, 1.1404838449648488e-004,
                    2.8979625801752660e-004, 6.9177078182101231e-004,
                    1.5480716105844708e-003, 3.2412274963433889e-003,
                    6.3373707175123292e-003, 1.1552351566696643e-002,
                    1.9606152375042644e-002, 3.0947164083410337e-002,
                    4.5401737566767360e-002, 6.1894328166820688e-002,
                    7.8424609500170578e-002, 9.2418812533573133e-002,
                    1.0139793148019728e-001, 1.0371927988298846e-001,
                    9.9076583077406091e-002, 8.8546660073089561e-002,
                    7.4187842052486810e-002, 5.8392772862200251e-002,
                    4.3268692953013159e-002, 3.0248159818374226e-002,
                    1.9991434305603021e-002, 1.2516877303301180e-002,
                    7.4389876226229707e-003])
        assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)
    def test_cdf(self):
        # comparison to R, only 5 decimals
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skcdfR = numpy.array(
                   [6.4061475386192104e-005, 1.7810985988267694e-004,
                    4.6790611790020336e-004, 1.1596768997212152e-003,
                    2.7077485103056847e-003, 5.9489760066490718e-003,
                    1.2286346724161398e-002, 2.3838698290858034e-002,
                    4.3444850665900668e-002, 7.4392014749310995e-002,
                    1.1979375231607835e-001, 1.8168808048289900e-001,
                    2.6011268998306952e-001, 3.5253150251664261e-001,
                    4.5392943399683988e-001, 5.5764871387982828e-001,
                    6.5672529695723436e-001, 7.4527195703032389e-001,
                    8.1945979908281064e-001, 8.7785257194501087e-001,
                    9.2112126489802404e-001, 9.5136942471639818e-001,
                    9.7136085902200120e-001, 9.8387773632530240e-001,
                    9.9131672394792536e-001])
        assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
class TestLognorm(TestCase):
    """Tests for the log-normal distribution."""

    def test_pdf(self):
        # Regression test for Ticket #1471: avoid nan with 0/0 situation
        # Also make sure there are no warnings at x=0, cf gh-5202
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            values = stats.lognorm.pdf([0, 0.5, 1], 1)
        assert_array_almost_equal(values, [0.0, 0.62749608, 0.39894228])

    def test_logcdf(self):
        # Regression test for gh-5940: sf et al would underflow too early
        x2, mu, sigma = 201.68, 195, 0.149
        shifted = x2 - mu
        assert_allclose(stats.lognorm.sf(shifted, s=sigma),
                        stats.norm.sf(np.log(shifted)/sigma))
        assert_allclose(stats.lognorm.logsf(shifted, s=sigma),
                        stats.norm.logsf(np.log(shifted)/sigma))
class TestBeta(TestCase):
    """Tests for the beta distribution."""

    def test_logpdf(self):
        # Regression test for Ticket #1326: avoid nan with 0*log(0)
        # situation at the x=0 boundary.
        assert_almost_equal(stats.beta.logpdf(0, 1, 0.5), -0.69314718056)
        assert_almost_equal(stats.beta.logpdf(0, 0.5, 1), np.inf)

    def test_logpdf_ticket_1866(self):
        # Large shape parameters: logpdf must stay finite and consistent
        # with pdf.
        alpha, beta = 267, 1472
        frozen = stats.beta(alpha, beta)
        pts = np.array([0.2, 0.5, 0.6])
        assert_allclose(frozen.logpdf(pts).sum(), -1201.699061824062)
        assert_allclose(frozen.pdf(pts), np.exp(frozen.logpdf(pts)))
class TestBetaPrime(TestCase):
    """Tests for the beta prime distribution."""
    def test_logpdf(self):
        # Large shape parameters: logpdf must stay finite and consistent
        # with pdf.
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        b = stats.betaprime(alpha, beta)
        assert_(np.isfinite(b.logpdf(x)).all())
        assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
    def test_cdf(self):
        # regression test for gh-4030: Implementation of
        # scipy.stats.betaprime.cdf()
        x = stats.betaprime.cdf(0, 0.2, 0.3)
        assert_equal(x, 0.0)
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        cdfs = stats.betaprime.cdf(x, alpha, beta)
        assert_(np.isfinite(cdfs).all())
        # check the new cdf implementation vs generic one:
        # NOTE: uses the private rv_continuous._cdf_single to force the
        # generic numeric-integration path for comparison.
        gen_cdf = stats.rv_continuous._cdf_single
        cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
        assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)
class TestGamma(TestCase):
    """Tests for the gamma distribution."""

    def test_pdf(self):
        # a few test cases to compare with R
        assert_almost_equal(stats.gamma.pdf(90, 394, scale=1./5), 0.002312341)
        assert_almost_equal(stats.gamma.pdf(3, 10, scale=1./5), 0.1620358)

    def test_logpdf(self):
        # Regression test for Ticket #1326: the 0*log(0) corner at x=0
        # with unit shape must give 0, not nan.
        assert_almost_equal(stats.gamma.logpdf(0, 1), 0)
class TestChi2(TestCase):
    # regression tests after precision improvements, ticket:1041, not verified
    def test_precision(self):
        for args, desired in [((1000, 1000), 8.919133934753128e-003),
                              ((100, 100), 0.028162503162596778)]:
            assert_almost_equal(stats.chi2.pdf(*args), desired, decimal=14)
class TestGumbelL(TestCase):
    # gh-6228
    def test_cdf_ppf(self):
        # ppf must invert cdf deep into the left tail.
        pts = np.linspace(-100, -4)
        roundtrip = stats.gumbel_l.ppf(stats.gumbel_l.cdf(pts))
        assert_allclose(pts, roundtrip)

    def test_logcdf_logsf(self):
        # exp(logcdf) and -expm1(logsf) are two routes to the same cdf.
        pts = np.linspace(-100, -4)
        via_logcdf = np.exp(stats.gumbel_l.logcdf(pts))
        via_logsf = -special.expm1(stats.gumbel_l.logsf(pts))
        assert_allclose(via_logcdf, via_logsf)

    def test_sf_isf(self):
        # isf must invert sf.
        pts = np.linspace(-20, 5)
        roundtrip = stats.gumbel_l.isf(stats.gumbel_l.sf(pts))
        assert_allclose(pts, roundtrip)
class TestArrayArgument(TestCase):  # test for ticket:992
    def test_noexception(self):
        # Array-valued loc/scale must broadcast against a size tuple
        # without raising.
        sample = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5),
                                size=(10, 5))
        assert_equal(sample.shape, (10, 5))
class TestDocstring(TestCase):
    """Sanity checks on generated distribution docstrings."""
    def test_docstrings(self):
        # See ticket #761
        # Docstrings may be stripped (python -OO), hence the None guards.
        if stats.rayleigh.__doc__ is not None:
            self.assertTrue("rayleigh" in stats.rayleigh.__doc__.lower())
        if stats.bernoulli.__doc__ is not None:
            self.assertTrue("bernoulli" in stats.bernoulli.__doc__.lower())
    def test_no_name_arg(self):
        # If name is not given, construction shouldn't fail. See #1508.
        stats.rv_continuous()
        stats.rv_discrete()
class TestEntropy(TestCase):
    """Tests for stats.entropy (Shannon entropy / KL divergence)."""
    def test_entropy_positive(self):
        # See ticket #497
        # KL divergence of a distribution with itself is 0; otherwise >= 0.
        pk = [0.5, 0.2, 0.3]
        qk = [0.1, 0.25, 0.65]
        eself = stats.entropy(pk, pk)
        edouble = stats.entropy(pk, qk)
        assert_(0.0 == eself)
        assert_(edouble >= 0.0)
    def test_entropy_base(self):
        # Uniform over 16 states has 4 bits of entropy in base 2.
        pk = np.ones(16, float)
        S = stats.entropy(pk, base=2.)
        assert_(abs(S - 4.) < 1.e-5)
        # Changing the base rescales the result by log(2).
        qk = np.ones(16, float)
        qk[:8] = 2.
        S = stats.entropy(pk, qk)
        S2 = stats.entropy(pk, qk, base=2.)
        assert_(abs(S/S2 - np.log(2.)) < 1.e-5)
    def test_entropy_zero(self):
        # Test for PR-479
        # A zero probability contributes 0 (not nan) to the entropy.
        assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,
                            decimal=12)
    def test_entropy_2d(self):
        # 2-D input: entropy is computed column-wise.
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [0.1933259, 0.18609809])
    def test_entropy_2d_zero(self):
        # qk=0 where pk>0 gives infinite KL divergence; pk=0 there heals it.
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [np.inf, 0.18609809])
        pk[0][0] = 0.0
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [0.17403988, 0.18609809])
def TestArgsreduce():
    # Exercise scipy.stats' internal `argsreduce` helper, which filters
    # the given arguments down to the positions where the condition holds,
    # broadcasting scalar arguments as needed.
    a = array([1, 3, 2, 1, 2, 3, 3])
    # Boolean-array condition: keep entries of `a` greater than 1; the
    # scalar 2 is broadcast to the filtered length.
    b, c = argsreduce(a > 1, a, 2)
    assert_array_equal(b, [3, 2, 2, 3, 3])
    assert_array_equal(c, [2, 2, 2, 2, 2])
    # Scalar True condition.
    # NOTE(review): the expected value `a[0]` relies on how argsreduce
    # handles scalar conditions -- verify against the installed scipy.
    b, c = argsreduce(2 > 1, a, 2)
    assert_array_equal(b, a[0])
    assert_array_equal(c, [2])
    # All-True array condition: everything is kept.
    b, c = argsreduce(a > 0, a, 2)
    assert_array_equal(b, a)
    assert_array_equal(c, [2] * numpy.size(a))
class TestFitMethod(object):
    """Tests for the generic ``fit`` machinery of scipy.stats distributions.

    The first two tests are nose-style generator tests that iterate over
    every distribution returned by ``test_all_distributions`` and check
    that ``fit`` returns the expected number of parameters, both with and
    without fixed (``f0``/``floc``/``fscale``) arguments.  The remaining
    tests pin down specific fixed-parameter regressions.
    """

    # Distributions whose fit is known to fail; skipped in the loops below.
    skip = ['ncf']

    @dec.slow
    def test_fit(self):
        def check(func, dist, args, alpha):
            if dist in self.skip:
                raise SkipTest("%s fit known to fail" % dist)
            distfunc = getattr(stats, dist)
            with np.errstate(all='ignore'):
                res = distfunc.rvs(*args, **{'size': 200})
                vals = distfunc.fit(res)
                vals2 = distfunc.fit(res, optimizer='powell')
            # Only check the length of the return
            # FIXME: should check the actual results to see if we are 'close'
            #   to what was created --- but what is 'close' enough
            if dist == 'frechet':
                # frechet's len(args) differs from numargs + loc/scale
                assert_(len(vals) == len(args))
                assert_(len(vals2) == len(args))
            else:
                assert_(len(vals) == 2+len(args))
                assert_(len(vals2) == 2+len(args))

        for func, dist, args, alpha in test_all_distributions():
            yield check, func, dist, args, alpha

    @dec.slow
    def test_fix_fit(self):
        def check(func, dist, args, alpha):
            # Not sure why 'ncf', and 'beta' are failing
            # frechet has different len(args) than distfunc.numargs
            if dist in self.skip + ['frechet']:
                raise SkipTest("%s fit known to fail" % dist)
            distfunc = getattr(stats, dist)
            with np.errstate(all='ignore'):
                res = distfunc.rvs(*args, **{'size': 200})
                # A fixed loc (resp. scale) must be honored in the result.
                vals = distfunc.fit(res, floc=0)
                vals2 = distfunc.fit(res, fscale=1)
                assert_(len(vals) == 2+len(args))
                assert_(vals[-2] == 0)
                assert_(vals2[-1] == 1)
                assert_(len(vals2) == 2+len(args))
                # Fixed shape parameters must come back unchanged.
                if len(args) > 0:
                    vals3 = distfunc.fit(res, f0=args[0])
                    assert_(len(vals3) == 2+len(args))
                    assert_(vals3[0] == args[0])
                if len(args) > 1:
                    vals4 = distfunc.fit(res, f1=args[1])
                    assert_(len(vals4) == 2+len(args))
                    assert_(vals4[1] == args[1])
                if len(args) > 2:
                    vals5 = distfunc.fit(res, f2=args[2])
                    assert_(len(vals5) == 2+len(args))
                    assert_(vals5[2] == args[2])

        for func, dist, args, alpha in test_all_distributions():
            yield check, func, dist, args, alpha

    def test_fix_fit_2args_lognorm(self):
        # Regression test for #1551.
        np.random.seed(12345)
        with np.errstate(all='ignore'):
            x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
            assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
                            [0.25888672, 0, 20], atol=1e-5)

    def test_fix_fit_norm(self):
        # norm.fit has an analytic solution; check free and fixed variants.
        x = np.arange(1, 6)

        loc, scale = stats.norm.fit(x)
        assert_almost_equal(loc, 3)
        assert_almost_equal(scale, np.sqrt(2))

        loc, scale = stats.norm.fit(x, floc=2)
        assert_equal(loc, 2)
        assert_equal(scale, np.sqrt(3))

        loc, scale = stats.norm.fit(x, fscale=2)
        assert_almost_equal(loc, 3)
        assert_equal(scale, 2)

    def test_fix_fit_gamma(self):
        x = np.arange(1, 6)
        meanlog = np.log(x).mean()

        # A basic test of gamma.fit with floc=0.
        floc = 0
        a, loc, scale = stats.gamma.fit(x, floc=floc)
        # The ML estimate of `a` satisfies log(a) - digamma(a) = s.
        s = np.log(x.mean()) - meanlog
        assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        # Regression tests for gh-2514.
        # The problem was that if `floc=0` was given, any other fixed
        # parameters were ignored.
        f0 = 1
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        f0 = 2
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        # loc and scale fixed.
        floc = 0
        fscale = 2
        a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
        assert_equal(loc, floc)
        assert_equal(scale, fscale)
        # With loc and scale fixed, the ML estimate of `a` satisfies
        # digamma(a) = meanlog - log(fscale).
        c = meanlog - np.log(fscale)
        assert_almost_equal(special.digamma(a), c)

    def test_fix_fit_beta(self):
        # Test beta.fit when both floc and fscale are given.

        def mlefunc(a, b, x):
            # Zeros of this function are critical points of
            # the maximum likelihood function.
            n = len(x)
            s1 = np.log(x).sum()
            s2 = np.log(1-x).sum()
            psiab = special.psi(a + b)
            func = [s1 - n * (-psiab + special.psi(a)),
                    s2 - n * (-psiab + special.psi(b))]
            return func

        # Basic test with floc and fscale given.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)

        # Basic test with f0, floc and fscale given.
        # This is also a regression test for gh-2514.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
        assert_equal(a, 2)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        da, db = mlefunc(a, b, x)
        assert_allclose(db, 0, atol=1e-5)

        # Same floc and fscale values as above, but reverse the data
        # and fix b (f1).
        x2 = 1 - x
        a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
        assert_equal(b2, 2)
        assert_equal(loc2, 0)
        assert_equal(scale2, 1)
        da, db = mlefunc(a2, b2, x2)
        assert_allclose(da, 0, atol=1e-5)
        # a2 of this test should equal b from above.
        assert_almost_equal(a2, b)

        # Check for detection of data out of bounds when floc and fscale
        # are given.
        assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
        y = np.array([0, .5, 1])
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)

        # Check that attempting to fix all the parameters raises a ValueError.
        assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
                      floc=2, fscale=3)

    def test_fshapes(self):
        # take a beta distribution, with shapes='a, b', and make sure that
        # fa is equivalent to f0, and fb is equivalent to f1
        a, b = 3., 4.
        x = stats.beta.rvs(a, b, size=100, random_state=1234)
        res_1 = stats.beta.fit(x, f0=3.)
        res_2 = stats.beta.fit(x, fa=3.)
        assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)

        res_2 = stats.beta.fit(x, fix_a=3.)
        assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)

        res_3 = stats.beta.fit(x, f1=4.)
        res_4 = stats.beta.fit(x, fb=4.)
        assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)

        res_4 = stats.beta.fit(x, fix_b=4.)
        assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)

        # cannot specify both positional and named args at the same time
        assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2)

        # check that attempting to fix all parameters raises a ValueError
        assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
                      floc=2, fscale=3)

        # check that specifying floc, fscale and fshapes works for
        # beta and gamma which override the generic fit method
        res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1)
        aa, bb, ll, ss = res_5
        assert_equal([aa, ll, ss], [3., 0, 1])

        # gamma distribution
        a = 3.
        data = stats.gamma.rvs(a, size=100)
        aa, ll, ss = stats.gamma.fit(data, fa=a)
        assert_equal(aa, a)

    def test_extra_params(self):
        # unknown parameters should raise rather than be silently ignored
        dist = stats.exponnorm
        data = dist.rvs(K=2, size=100)
        dct = dict(enikibeniki=-101)
        assert_raises(TypeError, dist.fit, data, **dct)
class TestFrozen(TestCase):
    # Test that a frozen distribution gives the same results as the original
    # object.
    #
    # Only tested for the normal distribution (with loc and scale specified)
    # and for the gamma distribution (with a shape parameter specified).
    def test_norm(self):
        # Every frozen method must match the unfrozen call with explicit
        # loc/scale arguments.
        dist = stats.norm
        frozen = stats.norm(loc=10.0, scale=3.0)

        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.median()
        result = dist.median(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.mean()
        result = dist.mean(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.var()
        result = dist.var(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.std()
        result = dist.std(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.entropy()
        result = dist.entropy(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.moment(2)
        result = dist.moment(2, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        # Support endpoints are carried over onto the frozen object.
        assert_equal(frozen.a, dist.a)
        assert_equal(frozen.b, dist.b)

    def test_gamma(self):
        # Same comparison as test_norm, but freezing a shape parameter.
        a = 2.0
        dist = stats.gamma
        frozen = stats.gamma(a)

        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, a)
        assert_equal(result_f, result)

        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, a)
        assert_equal(result_f, result)

        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, a)
        assert_equal(result_f, result)

        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, a)
        assert_equal(result_f, result)

        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, a)
        assert_equal(result_f, result)

        result_f = frozen.median()
        result = dist.median(a)
        assert_equal(result_f, result)

        result_f = frozen.mean()
        result = dist.mean(a)
        assert_equal(result_f, result)

        result_f = frozen.var()
        result = dist.var(a)
        assert_equal(result_f, result)

        result_f = frozen.std()
        result = dist.std(a)
        assert_equal(result_f, result)

        result_f = frozen.entropy()
        result = dist.entropy(a)
        assert_equal(result_f, result)

        result_f = frozen.moment(2)
        result = dist.moment(2, a)
        assert_equal(result_f, result)

        assert_equal(frozen.a, frozen.dist.a)
        assert_equal(frozen.b, frozen.dist.b)

    def test_regression_ticket_1293(self):
        # Create a frozen distribution.
        frozen = stats.lognorm(1)
        # Call one of its methods that does not take any keyword arguments.
        m1 = frozen.moment(2)
        # Now call a method that takes a keyword argument.
        frozen.stats(moments='mvsk')
        # Call moment(2) again.
        # After calling stats(), the following was raising an exception.
        # So this test passes if the following does not raise an exception.
        m2 = frozen.moment(2)
        # The following should also be true, of course. But it is not
        # the focus of this test.
        assert_equal(m1, m2)

    def test_ab(self):
        # test that the support of a frozen distribution
        # (i) remains frozen even if it changes for the original one
        # (ii) is actually correct if the shape parameters are such that
        #      the values of [a, b] are not the default [0, inf]
        # take a genpareto as an example where the support
        # depends on the value of the shape parameter:
        # for c > 0: a, b = 0, inf
        # for c < 0: a, b = 0, -1/c
        rv = stats.genpareto(c=-0.1)
        a, b = rv.dist.a, rv.dist.b
        assert_equal([a, b], [0., 10.])
        assert_equal([rv.a, rv.b], [0., 10.])

        stats.genpareto.pdf(0, c=0.1)  # this changes genpareto.b
        assert_equal([rv.dist.a, rv.dist.b], [a, b])
        assert_equal([rv.a, rv.b], [a, b])

        rv1 = stats.genpareto(c=0.1)
        assert_(rv1.dist is not rv.dist)

    def test_rv_frozen_in_namespace(self):
        # Regression test for gh-3522
        assert_(hasattr(stats.distributions, 'rv_frozen'))

    def test_random_state(self):
        # only check that the random_state attribute exists,
        frozen = stats.norm()
        assert_(hasattr(frozen, 'random_state'))

        # ... that it can be set,
        frozen.random_state = 42
        assert_equal(frozen.random_state.get_state(),
                     np.random.RandomState(42).get_state())

        # ... and that .rvs method accepts it as an argument
        rndm = np.random.RandomState(1234)
        frozen.rvs(size=8, random_state=rndm)

    def test_pickling(self):
        # test that a frozen instance pickles and unpickles
        # (this method is a clone of common_tests.check_pickling)
        beta = stats.beta(2.3098496451481823, 0.62687954300963677)
        poiss = stats.poisson(3.)
        sample = stats.rv_discrete(values=([0, 1, 2, 3],
                                           [0.1, 0.2, 0.3, 0.4]))

        for distfn in [beta, poiss, sample]:
            distfn.random_state = 1234
            distfn.rvs(size=8)
            s = pickle.dumps(distfn)
            # Draws after pickling must match draws from the unpickled copy,
            # which shares the same random_state.
            r0 = distfn.rvs(size=8)

            unpickled = pickle.loads(s)
            r1 = unpickled.rvs(size=8)
            assert_equal(r0, r1)

            # also smoke test some methods
            medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]
            assert_equal(medians[0], medians[1])
            assert_equal(distfn.cdf(medians[0]),
                         unpickled.cdf(medians[1]))

    def test_expect(self):
        # smoke test the expect method of the frozen distribution
        # only take a gamma w/loc and scale and poisson with loc specified
        def func(x):
            return x

        gm = stats.gamma(a=2, loc=3, scale=4)
        gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
        gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
                                       lb=1, ub=2, conditional=True)
        assert_allclose(gm_val, gamma_val)

        p = stats.poisson(3, loc=4)
        p_val = p.expect(func)
        poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
        assert_allclose(p_val, poisson_val)
class TestExpect(TestCase):
    # Test for expect method.
    #
    # Uses normal distribution and beta distribution for finite bounds, and
    # hypergeom for discrete distribution with finite support
    def test_norm(self):
        # Second central moment about loc=5 equals scale**2 = 4.
        v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
        assert_almost_equal(v, 4, decimal=14)

        m = stats.norm.expect(lambda x: (x), loc=5, scale=2)
        assert_almost_equal(m, 5, decimal=14)

        lb = stats.norm.ppf(0.05, loc=5, scale=2)
        ub = stats.norm.ppf(0.95, loc=5, scale=2)
        # Integrating the constant 1 over the central 90% gives 0.9 ...
        prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
        assert_almost_equal(prob90, 0.9, decimal=14)
        # ... and exactly 1 when conditioning on being inside the bounds.
        prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
                                    conditional=True)
        assert_almost_equal(prob90c, 1., decimal=14)

    def test_beta(self):
        # case with finite support interval
        v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5),
                              loc=5, scale=2)
        assert_almost_equal(v, 1./18., decimal=13)

        m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)
        assert_almost_equal(m, 19/3., decimal=13)

        ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
        lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
        prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,
                                   scale=2., lb=lb, ub=ub, conditional=False)
        assert_almost_equal(prob90, 0.9, decimal=13)

        prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,
                                    scale=2, lb=lb, ub=ub, conditional=True)
        assert_almost_equal(prob90c, 1., decimal=13)

    def test_hypergeom(self):
        # test case with finite bounds

        # without specifying bounds
        m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
        m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
        assert_almost_equal(m, m_true, decimal=13)

        v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
                                   loc=5.)
        assert_almost_equal(v, v_true, decimal=14)

        # with bounds, bounds equal to shifted support
        v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,
                                          args=(20, 10, 8),
                                          loc=5., lb=5, ub=13)
        assert_almost_equal(v_bounds, v_true, decimal=14)

        # drop boundary points
        prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
        prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
                                             loc=5., lb=6, ub=12)
        assert_almost_equal(prob_bounds, prob_true, decimal=13)

        # conditional
        prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
                                         lb=6, ub=12, conditional=True)
        assert_almost_equal(prob_bc, 1, decimal=14)

        # check simple integral
        prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
                                        lb=0, ub=8)
        assert_almost_equal(prob_b, 1, decimal=13)

    def test_poisson(self):
        # poisson, use lower bound only
        prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
                                           conditional=False)
        prob_b_true = 1-stats.poisson.cdf(2, 2)
        assert_almost_equal(prob_bounds, prob_b_true, decimal=14)

        prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
                                       conditional=True)
        assert_almost_equal(prob_lb, 1, decimal=14)

    def test_genhalflogistic(self):
        # genhalflogistic, changes upper bound of support in _argcheck
        # regression test for gh-2622
        halflog = stats.genhalflogistic
        # check consistency when calling expect twice with the same input
        res1 = halflog.expect(args=(1.5,))
        halflog.expect(args=(0.5,))
        res2 = halflog.expect(args=(1.5,))
        assert_almost_equal(res1, res2, decimal=14)

    def test_rice_overflow(self):
        # rice.pdf(999, 0.74) was inf since special.i0 silentyly overflows
        # check that using i0e fixes it
        assert_(np.isfinite(stats.rice.pdf(999, 0.74)))

        assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
        assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
        assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))

    def test_logser(self):
        # test a discrete distribution with infinite support and loc
        p, loc = 0.3, 3
        res_0 = stats.logser.expect(lambda k: k, args=(p,))
        # check against the correct answer (sum of a geom series)
        assert_allclose(res_0,
                        p / (p - 1.) / np.log(1. - p), atol=1e-15)

        # now check it with `loc`
        res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)
        assert_allclose(res_l, res_0 + loc, atol=1e-15)

    def test_skellam(self):
        # Use a discrete distribution w/ bi-infinite support. Compute two first
        # moments and compare to known values (cf skellam.stats)
        p1, p2 = 18, 22
        m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))
        m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))
        assert_allclose(m1, p1 - p2, atol=1e-12)
        assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)

    def test_randint(self):
        # Use a discrete distribution w/ parameter-dependent support, which
        # is larger than the default chunksize
        lo, hi = 0, 113
        res = stats.randint.expect(lambda x: x, (lo, hi))
        assert_allclose(res,
                        sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)

    def test_zipf(self):
        # Test that there is no infinite loop even if the sum diverges
        assert_warns(RuntimeWarning, stats.zipf.expect,
                     lambda x: x**2, (2,))

    def test_discrete_kwds(self):
        # check that discrete expect accepts keywords to control the summation
        n0 = stats.poisson.expect(lambda x: 1, args=(2,))
        n1 = stats.poisson.expect(lambda x: 1, args=(2,),
                                  maxcount=1001, chunksize=32, tolerance=1e-8)
        assert_almost_equal(n0, n1, decimal=14)

    def test_moment(self):
        # test the .moment() method: compute a higher moment and compare to
        # a known value
        def poiss_moment5(mu):
            return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu

        for mu in [5, 7]:
            m5 = stats.poisson.moment(5, mu)
            assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)
class TestNct(TestCase):
def test_nc_parameter(self):
# Parameter values c<=0 were not enabled (gh-2402).
# For negative values c and for c=0 results of rv.cdf(0) below were nan
rv = stats.nct(5, 0)
assert_equal(rv.cdf(0), 0.5)
rv = stats.nct(5, -1)
assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
def test_broadcasting(self):
res = stats.nct.pdf(5, np.arange(4, 7)[:, None],
np.linspace(0.1, 1, 4))
expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
[0.00217142, 0.00395366, 0.00683888, 0.01126276],
[0.00153078, 0.00291093, 0.00525206, 0.00900815]])
assert_allclose(res, expected, rtol=1e-5)
def test_variance_gh_issue_2401(self):
# Computation of the variance of a non-central t-distribution resulted
# in a TypeError: ufunc 'isinf' not supported for the input types,
# and the inputs could not be safely coerced to any supported types
# according to the casting rule 'safe'
rv = stats.nct(4, 0)
assert_equal(rv.var(), 2.0)
def test_nct_inf_moments(self):
# n-th moment of nct only exists for df > n
m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
assert_(np.isfinite(m))
assert_equal([v, s, k], [np.inf, np.nan, np.nan])
m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
assert_(np.isfinite([m, v, s]).all())
assert_equal(k, np.nan)
class TestRice(TestCase):
    def test_rice_zero_b(self):
        # rice distribution should work with b=0, cf gh-2164
        pts = [0.2, 1., 5.]
        for method in (stats.rice.pdf, stats.rice.logpdf,
                       stats.rice.cdf, stats.rice.logcdf):
            assert_(np.isfinite(method(pts, b=0.)).all())

        quantiles = [0.1, 0.1, 0.5, 0.9]
        assert_(np.isfinite(stats.rice.ppf(quantiles, b=0.)).all())

        mvsk = stats.rice.stats(0, moments='mvsk')
        assert_(np.isfinite(mvsk).all())

        # furthermore, pdf is continuous as b\to 0
        # rice.pdf(x, b\to 0) = x exp(-x^2/2) + O(b^2)
        # see e.g. Abramovich & Stegun 9.6.7 & 9.6.10
        tiny = 1e-8
        assert_allclose(stats.rice.pdf(pts, 0), stats.rice.pdf(pts, tiny),
                        atol=tiny, rtol=0)

    def test_rice_rvs(self):
        # Scalar draws are size 1; tuple size gives that shape.
        sampler = stats.rice.rvs
        assert_equal(sampler(b=3.).size, 1)
        assert_equal(sampler(b=3., size=(3, 5)).shape, (3, 5))
class TestErlang(TestCase):
    def test_erlang_runtimewarning(self):
        # erlang should generate a RuntimeWarning if a non-integer
        # shape parameter is used.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)

            # The non-integer shape parameter 1.3 should trigger a
            # RuntimeWarning
            assert_raises(RuntimeWarning,
                          stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)

            # Calling the fit method with `f0` set to an integer should
            # *not* trigger a RuntimeWarning, and should agree with
            # gamma.fit(...).
            samples = [0.5, 1.0, 2.0, 4.0]
            fit_erlang = stats.erlang.fit(samples, f0=1)
            fit_gamma = stats.gamma.fit(samples, f0=1)
            assert_allclose(fit_erlang, fit_gamma, rtol=1e-3)
class TestRayleigh(TestCase):
    # Far-tail log densities must not underflow to -inf/nan (gh-6227).
    def test_logpdf(self):
        assert_allclose(stats.rayleigh.logpdf(50), -1246.0879769945718)

    def test_logsf(self):
        assert_allclose(stats.rayleigh.logsf(50), -1250)
class TestExponWeib(TestCase):
    def test_pdf_logpdf(self):
        # Regression test for gh-3508.
        # Expected values were computed with mpmath.
        p = stats.exponweib.pdf(0.1, 1.0, 100.0)
        logp = stats.exponweib.logpdf(0.1, 1.0, 100.0)
        assert_allclose([p, logp],
                        [1.0000000000000054e-97, -223.35075402042244])

    def test_a_is_1(self):
        # For issue gh-3508: with a=1, exponweib's pdf and logpdf must
        # coincide with weibull_min's.
        grid = np.logspace(-4, -1, 4)
        shape = 100
        assert_allclose(stats.exponweib.pdf(grid, 1, shape),
                        stats.weibull_min.pdf(grid, shape))
        assert_allclose(stats.exponweib.logpdf(grid, 1, shape),
                        stats.weibull_min.logpdf(grid, shape))

    def test_a_is_1_c_is_1(self):
        # When a = 1 and c = 1, the distribution is exponential.
        grid = np.logspace(-8, 1, 10)
        assert_allclose(stats.exponweib.pdf(grid, 1, 1),
                        stats.expon.pdf(grid))
        assert_allclose(stats.exponweib.logpdf(grid, 1, 1),
                        stats.expon.logpdf(grid))
class TestWeibull(TestCase):
    def test_logpdf(self):
        # gh-6217: logpdf at the support edge must be finite.
        assert_equal(stats.weibull_min.logpdf(0, 1), 0)

    def test_with_maxima_distrib(self):
        # Tests for weibull_min and weibull_max.
        # The expected values were computed using the symbolic algebra
        # program 'maxima' with the package 'distrib' (pdf_weibull and
        # cdf_weibull).  weibull_min.pdf(x, a, scale=b) corresponds to
        # pdf_weibull(x, a, b); weibull_max mirrors weibull_min at -x,
        # which also swaps the roles of cdf and sf.
        x, a, b = 1.5, 2.0, 3.0

        # weibull_min: the exponent is (x/b)**a = 0.25.
        assert_allclose(stats.weibull_min.pdf(x, a, scale=b),
                        np.exp(-0.25)/3)
        assert_allclose(stats.weibull_min.logpdf(x, a, scale=b),
                        -0.25 - np.log(3))
        assert_allclose(stats.weibull_min.cdf(x, a, scale=b),
                        -special.expm1(-0.25))
        assert_allclose(stats.weibull_min.logcdf(x, a, scale=b),
                        np.log(-special.expm1(-0.25)))
        assert_allclose(stats.weibull_min.sf(x, a, scale=b), np.exp(-0.25))
        assert_allclose(stats.weibull_min.logsf(x, a, scale=b), -0.25)

        # Also test using a large value x, for which computing the survival
        # function using the CDF would result in 0.
        assert_allclose(stats.weibull_min.sf(30, 2, scale=3), np.exp(-100))
        assert_allclose(stats.weibull_min.logsf(30, 2, scale=3), -100)

        # weibull_max, evaluated at the mirrored point.
        x = -1.5
        assert_allclose(stats.weibull_max.pdf(x, a, scale=b),
                        np.exp(-0.25)/3)
        assert_allclose(stats.weibull_max.logpdf(x, a, scale=b),
                        -0.25 - np.log(3))
        assert_allclose(stats.weibull_max.cdf(x, a, scale=b), np.exp(-0.25))
        assert_allclose(stats.weibull_max.logcdf(x, a, scale=b), -0.25)
        assert_allclose(stats.weibull_max.sf(x, a, scale=b),
                        -special.expm1(-0.25))
        assert_allclose(stats.weibull_max.logsf(x, a, scale=b),
                        np.log(-special.expm1(-0.25)))

        # Also test using a value of x close to 0, for which computing the
        # survival function using the CDF would result in 0.
        assert_allclose(stats.weibull_max.sf(-1e-9, 2, scale=3),
                        -special.expm1(-1/9000000000000000000))
        assert_allclose(stats.weibull_max.logsf(-1e-9, 2, scale=3),
                        np.log(-special.expm1(-1/9000000000000000000)))
class TestRdist(TestCase):
    @dec.slow
    def test_rdist_cdf_gh1285(self):
        # check workaround in rdist._cdf for issue gh-1285.
        # ppf followed by cdf should round-trip even for a very large
        # shape parameter (541), which used to lose precision.
        distfn = stats.rdist
        values = [0.001, 0.5, 0.999]
        assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0),
                            values, decimal=5)
class TestTrapz(TestCase):
    def test_reduces_to_triang(self):
        # When both shape parameters coincide the trapezoid degenerates
        # to a triangle.
        for mode in [0.3, 0.5]:
            pts = [0, mode, 1]
            assert_almost_equal(stats.trapz.pdf(pts, mode, mode),
                                stats.triang.pdf(pts, mode))
            assert_almost_equal(stats.trapz.cdf(pts, mode, mode),
                                stats.triang.cdf(pts, mode))

    def test_reduces_to_uniform(self):
        # c=0, d=1 gives the uniform distribution.
        pts = np.linspace(0, 1, 10)
        old_err = np.seterr(divide='ignore')
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)
            assert_almost_equal(stats.trapz.pdf(pts, 0, 1),
                                stats.uniform.pdf(pts))
            assert_almost_equal(stats.trapz.cdf(pts, 0, 1),
                                stats.uniform.cdf(pts))
        np.seterr(**old_err)

    def test_cases(self):
        old_err = np.seterr(divide='ignore')
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)
            # edge cases
            for args, expected in [((0, 0, 0), 2),
                                   ((1, 1, 1), 2),
                                   ((0.5, 0, 0.8), 1.11111111111111111),
                                   ((0.5, 0.2, 1.0), 1.11111111111111111)]:
                assert_almost_equal(stats.trapz.pdf(*args), expected)

            # straightforward case
            for args, expected in [((0.1, 0.2, 0.8), 0.625),
                                   ((0.5, 0.2, 0.8), 1.25),
                                   ((0.9, 0.2, 0.8), 0.625)]:
                assert_almost_equal(stats.trapz.pdf(*args), expected)

            for args, expected in [((0.1, 0.2, 0.8), 0.03125),
                                   ((0.2, 0.2, 0.8), 0.125),
                                   ((0.5, 0.2, 0.8), 0.5),
                                   ((0.9, 0.2, 0.8), 0.96875),
                                   ((1.0, 0.2, 0.8), 1.0)]:
                assert_almost_equal(stats.trapz.cdf(*args), expected)
        np.seterr(**old_err)

    def test_trapz_vect(self):
        # test that array-valued shapes and arguments are handled
        c = np.array([0.1, 0.2, 0.3])
        d = np.array([0.5, 0.6])[:, None]
        x = np.array([0.15, 0.25, 0.9])
        vectorized = stats.trapz.pdf(x, c, d)

        cc, dd, xx = np.broadcast_arrays(c, d, x)
        elementwise = np.empty(xx.size, dtype=xx.dtype)
        for i, (x1, c1, d1) in enumerate(zip(xx.ravel(), cc.ravel(),
                                             dd.ravel())):
            elementwise[i] = stats.trapz.pdf(x1, c1, d1)

        assert_allclose(vectorized, elementwise.reshape(vectorized.shape),
                        atol=1e-15)
def test_540_567():
    # test for nan returned in tickets 540, 567
    cases = [
        (-1.7624320982, {}, 0.03899815971089126),
        (-1.7624320983, {}, 0.038998159702449846),
        (1.38629436112, dict(loc=0.950273420309, scale=0.204423758009),
         0.98353464004309321),
    ]
    for arg, kwds, expected in cases:
        assert_almost_equal(stats.norm.cdf(arg, **kwds), expected,
                            decimal=10, err_msg='test_540_567')
def test_regression_ticket_1316():
    # The following was raising an exception, because _construct_default_doc()
    # did not handle the default keyword extradoc=None. See ticket #1316.
    stats._continuous_distns.gamma_gen(name='gamma')
def test_regression_ticket_1326():
    # chi2.pdf(0, 2) used to produce nan from a 0*log(0) term.
    assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, decimal=14)
def test_regression_tukey_lambda():
    # Make sure that Tukey-Lambda distribution correctly handles
    # non-positive lambdas.
    x = np.linspace(-5.0, 5.0, 101)

    with np.errstate(divide='ignore'):
        for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
            p = stats.tukeylambda.pdf(x, lam)
            assert_((p != 0.0).all())
            assert_(~np.isnan(p).all())

        # A positive lambda row has finite support, so some zeros appear.
        lam = np.array([[-1.0], [0.0], [2.0]])
        p = stats.tukeylambda.pdf(x, lam)

    assert_(~np.isnan(p).all())
    assert_((p[0] != 0.0).all())
    assert_((p[1] != 0.0).all())
    assert_((p[2] != 0.0).any())
    assert_((p[2] == 0.0).any())
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_regression_ticket_1421():
    # poisson is discrete, so its docstring must advertise pmf, not pdf.
    assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
    assert_('pmf(x,' in stats.poisson.__doc__)
def test_nan_arguments_gh_issue_1362():
    # nan shape or argument values must propagate to nan outputs for both
    # a continuous (t) and a discrete (bernoulli) distribution.
    with np.errstate(invalid='ignore'):
        for method in ('logcdf', 'cdf', 'logsf', 'sf', 'pdf', 'logpdf',
                       'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.t, method)(1, np.nan)))

        for method in ('logcdf', 'cdf', 'logsf', 'sf', 'pmf', 'logpmf',
                       'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.bernoulli, method)(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
    # lognorm.fit with various fixed parameters must recover the
    # generating parameters.
    np.random.seed(5678)
    true = np.array([0.25, 0., 0.5])
    x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)

    with np.errstate(divide='ignore'):
        params = np.array(stats.lognorm.fit(x, floc=0.))
    assert_almost_equal(params, true, decimal=2)

    for kwds in (dict(fscale=0.5, loc=0),
                 dict(f0=0.25, loc=0),
                 dict(f0=0.25, floc=0)):
        params = np.array(stats.lognorm.fit(x, **kwds))
        assert_almost_equal(params, true, decimal=2)

    # norm.fit with fixed loc: the scale estimate is the RMS deviation
    # about the fixed location.
    np.random.seed(5678)
    loc = 1
    floc = 0.9
    x = stats.norm.rvs(loc, 2., size=100)
    params = np.array(stats.norm.fit(x, floc=floc))
    expected = np.array([floc, np.sqrt(((x - floc)**2).mean())])
    assert_almost_equal(params, expected, decimal=4)
def test_regression_ticket_1530():
    # Check the starting value works for Cauchy distribution fit.
    np.random.seed(654321)
    sample = stats.cauchy.rvs(size=100)
    assert_almost_equal(stats.cauchy.fit(sample), (0.045, 1.142), decimal=1)
def test_gh_pr_4806():
    # Check starting values for Cauchy distribution fit: a large shift of
    # the data must be tracked by the location estimate.
    np.random.seed(1234)
    base = np.random.randn(42)
    for offset in (10000.0, 1222333444.0):
        loc, scale = stats.cauchy.fit(base + offset)
        assert_allclose(loc, offset, atol=1.0)
        assert_allclose(scale, 0.6, atol=1.0)
def test_tukeylambda_stats_ticket_1545():
    # Some test for the variance and kurtosis of the Tukey Lambda distr.
    # See test_tukeylamdba_stats.py for more tests.
    # Expected values: exact for lam=0, computed with mpmath otherwise.
    cases = [
        (0, [0, np.pi**2/3, 0, 1.2]),
        (3.13, [0, 0.0269220858861465102, 0, -0.898062386219224104]),
        (0.14, [0, 2.11029702221450250, 0, -0.02708377353223019456]),
    ]
    for lam, expected in cases:
        mv = stats.tukeylambda.stats(lam, moments='mvsk')
        assert_almost_equal(mv, expected, decimal=10)
def test_poisson_logpmf_ticket_1436():
    """logpmf far in the tail must be finite, not -inf/nan (ticket 1436)."""
    tail_value = stats.poisson.logpmf(1500, 200)
    assert_(np.isfinite(tail_value))
def test_powerlaw_stats():
    """Check powerlaw.stats against closed forms (regression, ticket 1548).

    For pdf a*x**(a-1) on [0, 1], E[X**k] = a/(a+k), giving:

        mean            mu       = a/(a+1)
        variance        sigma**2 = a/((a+2)*(a+1)**2)
        skewness        gamma_1  = -2*((a-1)/(a+3))*sqrt((a+2)/a)
        excess kurtosis gamma_2  = 6*(a**3 - a**2 - 6*a + 2)/(a*(a+3)*(a+4))

    (gamma_1 follows from (E[X**3] - 3*mu*E[X**2] + 2*mu**3)/sigma**3 after
    simplification; gamma_2 from mu_4/sigma**4 - 3 with
    mu_4 = 3*a*(3*a**2 - a + 2)/((a+1)**4*(a+2)*(a+3)*(a+4)).)
    """
    exact = {
        1.0: (0.5, 1. / 12, 0.0, -1.2),
        2.0: (2. / 3, 2. / 36, -0.56568542494924734, -0.6),
    }
    for a, expected_mvsk in exact.items():
        computed = stats.powerlaw.stats(a, moments="mvsk")
        assert_array_almost_equal(computed, expected_mvsk)
def test_powerlaw_edge():
    """logpdf at the x=0 boundary with a=1 is exactly 0 (gh-3986)."""
    boundary = stats.powerlaw.logpdf(0, 1)
    assert_equal(boundary, 0.0)
def test_exponpow_edge():
    """pdf/logpdf values at x=0 for several shape parameters (gh-3982)."""
    # b = 1 is the well-defined reference case: logpdf(0) == 0.
    assert_equal(stats.exponpow.logpdf(0, 1), 0.0)
    # At x = 0 the density diverges for b < 1 and vanishes for b > 1.
    bvals = [0.25, 1.0, 1.5]
    assert_equal(stats.exponpow.pdf(0, bvals), [np.inf, 1.0, 0.0])
    assert_equal(stats.exponpow.logpdf(0, bvals), [np.inf, 0.0, -np.inf])
def test_gengamma_edge():
    """gengamma pdf at x=0 and negative moments (gh-3985, gh-4724)."""
    assert_equal(stats.gengamma.pdf(0, 1, 1), 1.0)
    # E[X**-2] for gengamma(a, 1) is 1/((a-1)*(a-2)); exercise _munp
    # directly as the original regression test did.
    for a in (200, 10):
        moment = stats.gengamma._munp(-2, a, 1.)
        assert_almost_equal(moment, 1. / (a - 1) / (a - 2))
def test_ksone_fit_freeze():
    """Regression test for ticket #1638: ksone.fit used to hang on this data."""
    d = np.array(
        [-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
         -0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
         0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
         0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
         0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
         0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
         -0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
         -0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
         -0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
         -0.06037974, 0.37670779, -0.21684405])
    # Capture the old error state *before* entering the try block: if the
    # assignment lived inside the try and raised, the finally clause would
    # hit a NameError on `olderr`. This also matches the seterr/try/finally
    # pattern used elsewhere in this file.
    olderr = np.seterr(invalid='ignore')
    try:
        with warnings.catch_warnings():
            # The fit emits benign warnings on this data; silence them so
            # the test only fails on a genuine hang or error.
            warnings.simplefilter('ignore', UserWarning)
            warnings.simplefilter('ignore', RuntimeWarning)
            stats.ksone.fit(d)
    finally:
        np.seterr(**olderr)
def test_norm_logcdf():
    """Precision of norm.logcdf deep in the lower tail (ticket 1614)."""
    x = -np.asarray(list(range(0, 120, 4)))
    # Reference values computed with R.
    expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
                -131.69539607, -203.91715537, -292.09872100, -396.25241451,
                -516.38564863, -652.50322759, -804.60844201, -972.70364403,
                -1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
                -2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
                -3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
                -4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
                -6277.63751711, -6733.67260303]
    assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)
    # also test the complex-valued code path
    assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8)
    # Complex-step differentiation: d(logcdf)/dx = pdf/cdf = exp(logpdf -
    # logcdf), and the imaginary part of logcdf(x + ih)/h approximates that
    # derivative without subtractive cancellation.
    deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag
    deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x))
    assert_allclose(deriv, deriv_expected, atol=1e-10)
def test_levy_cdf_ppf():
    """levy.cdf for small arguments, and ppf as its inverse."""
    x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])
    # Reference values computed separately with mpmath at 100 digits via
    #     cdf(x) = erfc(sqrt(1/(2*x)))
    expected = np.array([0.9747728793699604,
                         0.3173105078629141,
                         0.1572992070502851,
                         0.0015654022580025495,
                         1.523970604832105e-23,
                         1.795832784800726e-219])
    assert_allclose(stats.levy.cdf(x), expected, rtol=1e-10)
    # Round-tripping through the quantile function recovers x.
    assert_allclose(stats.levy.ppf(expected), x, rtol=1e-13)
def test_hypergeom_interval_1802():
    """hypergeom.interval terminates and is correct (ticket 1802)."""
    # The .95 and .945 levels used to spin in an endless loop; .94 already
    # worked before the fix.
    for level, expected in [(.95, (152.0, 197.0)),
                            (.945, (152.0, 197.0)),
                            (.94, (153.0, 196.0))]:
        assert_equal(stats.hypergeom.interval(level, 187601, 43192, 757),
                     expected)
    # Degenerate support (a == b): the ppf is pinned to the single point.
    assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
    assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
    """A TypeError must be raised when too many args are given to a method.

    Regression test for ticket 1815. gamma has one shape parameter, so a
    second positional argument is consumed as loc and a third as scale;
    anything beyond that (or a duplicate keyword) is an error.
    """
    x = np.linspace(0.1, 0.7, num=5)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)
    # These should not give errors
    stats.gamma.pdf(x, 2, 3)  # loc=3
    stats.gamma.pdf(x, 2, 3, 4)  # loc=3, scale=4
    stats.gamma.stats(2., 3)
    stats.gamma.stats(2., 3, 4)
    stats.gamma.stats(2., 3, 4, 'mv')
    stats.gamma.rvs(2., 3, 4, 5)
    stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)
    # Also for a discrete distribution
    stats.geom.pmf(x, 2, loc=3)  # no error, loc=3
    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)
    # And for distributions with 0, 2 and 3 args respectively
    assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
    stats.ncf.pdf(x, 3, 4, 5, 6, 1.0)  # 3 args, plus loc/scale
def test_ncx2_tails_ticket_955():
    """Trac #955: the cdf computed via special functions must match the
    generic integrated-pdf fallback (the private vectorized ``_cdfvec``)."""
    a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
    b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
    # Loose rtol: the two code paths only need to agree approximately.
    assert_allclose(a, b, rtol=1e-3, atol=0)
def test_ncx2_tails_pdf():
    """ncx2.pdf must not return nans in extreme tails (gh-1577).

    NB: this checks that nan_to_num is not needed in ncx2.pdf.
    """
    with warnings.catch_warnings():
        # Underflow in the far tail raises RuntimeWarnings; they are expected.
        warnings.simplefilter("ignore", RuntimeWarning)
        assert_equal(stats.ncx2.pdf(1, np.arange(340, 350), 2), 0)
        # The log of an underflowed-to-zero density must be -inf, never nan.
        logval = stats.ncx2.logpdf(1, np.arange(340, 350), 2)
        assert_(np.isneginf(logval).all())
def test_foldnorm_zero():
    """foldnorm must accept shape c=0; cdf(0) used to be nan (gh-2399)."""
    frozen = stats.foldnorm(0, scale=1)
    assert_equal(frozen.cdf(0), 0)
def test_stats_shapes_argcheck():
    """stats() must handle vector shape arguments containing invalid values.

    The method used to fail outright when some entries of a vector shape
    were outside the allowed range; see gh-2678. Invalid entries should
    instead surface as nan in each returned moment.
    """
    mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5)  # 0 is not a legal `a`
    mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
    # Prepend nan for the invalid leading entry and compare elementwise.
    mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)
    assert_equal(mv2_augmented, mv3)
    # -1 is not a legal shape parameter
    mv3 = stats.lognorm.stats([2, 2.4, -1])
    mv2 = stats.lognorm.stats([2, 2.4])
    # Here the invalid entry is last, so nan is appended instead.
    mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)
    assert_equal(mv2_augmented, mv3)
    # FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
    # stats method with multiple shape parameters is not properly vectorized
    # anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes


class _distr_gen(stats.rv_continuous):
    # Minimal fixture with one shape parameter; _pdf's *signature* is what
    # the subclassing tests inspect, so the constant return value is fine.
    def _pdf(self, x, a):
        return 42
class _distr2_gen(stats.rv_continuous):
    # Fixture defined through _cdf only (one shape parameter); the generic
    # machinery must derive _pdf from it numerically.
    def _cdf(self, x, a):
        return 42 * a + x
class _distr3_gen(stats.rv_continuous):
    # Fixture whose _pdf and _cdf deliberately take a different number of
    # shape parameters.
    def _pdf(self, x, a, b):
        return a + b

    def _cdf(self, x, a):
        # Different # of shape params from _pdf, to be able to check that
        # signature inspection catches the inconsistency.
        return 42 * a + x
class _distr6_gen(stats.rv_continuous):
    # Two shape parameters (both _pdf and _cdf defined, consistent shapes.)
    # Only the signatures matter for the tests; the bodies are not a real
    # distribution.
    def _pdf(self, x, a, b):
        return a*x + b

    def _cdf(self, x, a, b):
        return 42 * a + x
class TestSubclassingExplicitShapes(TestCase):
    """Construct distributions with an explicit ``shapes`` parameter and
    check that shape handling and validation behave as documented."""

    def test_correct_shapes(self):
        dummy_distr = _distr_gen(name='dummy', shapes='a')
        assert_equal(dummy_distr.pdf(1, a=1), 42)

    def test_wrong_shapes_1(self):
        dummy_distr = _distr_gen(name='dummy', shapes='A')
        assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))

    def test_wrong_shapes_2(self):
        dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
        dct = dict(a=1, b=2, c=3)
        assert_raises(TypeError, dummy_distr.pdf, 1, **dct)

    def test_shapes_string(self):
        # shapes must be a string
        dct = dict(name='dummy', shapes=42)
        assert_raises(TypeError, _distr_gen, **dct)

    def test_shapes_identifiers_1(self):
        # shapes must be a comma-separated list of valid python identifiers
        dct = dict(name='dummy', shapes='(!)')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_identifiers_2(self):
        dct = dict(name='dummy', shapes='4chan')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_identifiers_3(self):
        dct = dict(name='dummy', shapes='m(fti)')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_identifiers_nodefaults(self):
        dct = dict(name='dummy', shapes='a=2')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_args(self):
        dct = dict(name='dummy', shapes='*args')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_kwargs(self):
        dct = dict(name='dummy', shapes='**kwargs')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_keywords(self):
        # python keywords cannot be used for shape parameters
        dct = dict(name='dummy', shapes='a, b, c, lambda')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_signature(self):
        # test explicit shapes which agree w/ the signature of _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a):
                return stats.norm._pdf(x) * a

        dist = _dist_gen(shapes='a')
        assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)

    def test_shapes_signature_inconsistent(self):
        # test explicit shapes which do not agree w/ the signature of _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a):
                return stats.norm._pdf(x) * a

        dist = _dist_gen(shapes='a, b')
        assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))

    def test_star_args(self):
        # test _pdf with only starargs
        # NB: **kwargs of pdf will never reach _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, *args):
                extra_kwarg = args[0]
                return stats.norm._pdf(x) * extra_kwarg

        dist = _dist_gen(shapes='extra_kwarg')
        assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
        assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
        assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))

    def test_star_args_2(self):
        # test _pdf with named & starargs
        # NB: **kwargs of pdf will never reach _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, offset, *args):
                extra_kwarg = args[0]
                return stats.norm._pdf(x) * extra_kwarg + offset

        dist = _dist_gen(shapes='offset, extra_kwarg')
        assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
                     stats.norm.pdf(0.5)*33 + 111)
        assert_equal(dist.pdf(0.5, 111, 33),
                     stats.norm.pdf(0.5)*33 + 111)

    def test_extra_kwarg(self):
        # **kwargs to _pdf are ignored.
        # this is a limitation of the framework (_pdf(x, *goodargs))
        class _distr_gen(stats.rv_continuous):
            def _pdf(self, x, *args, **kwargs):
                # _pdf should handle *args, **kwargs itself. Here "handling"
                # is ignoring *args and looking for ``extra_kwarg`` and using
                # that.
                extra_kwarg = kwargs.pop('extra_kwarg', 1)
                return stats.norm._pdf(x) * extra_kwarg

        dist = _distr_gen(shapes='extra_kwarg')
        assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))

    def test_shapes_empty_string(self):
        # shapes='' is equivalent to shapes=None
        # NOTE: renamed from ``shapes_empty_string`` — without the ``test_``
        # prefix the test runner never discovered or executed this test.
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x):
                return stats.norm.pdf(x)

        dist = _dist_gen(shapes='')
        assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
class TestSubclassingNoShapes(TestCase):
    """Construct distributions w/o an explicit shapes parameter and check
    that the shape information is inferred by inspecting _pdf/_cdf."""

    def test_only__pdf(self):
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.pdf(1, a=1), 42)

    def test_only__cdf(self):
        # _pdf is determined from _cdf by taking numerical derivative
        dummy_distr = _distr2_gen(name='dummy')
        assert_almost_equal(dummy_distr.pdf(1, a=1), 1)

    @dec.skipif(DOCSTRINGS_STRIPPED)
    def test_signature_inspection(self):
        # check that _pdf signature inspection works correctly, and is used in
        # the class docstring
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 1)
        assert_equal(dummy_distr.shapes, 'a')
        # the inferred shape parameter must show up in the generated docs
        res = re.findall('logpdf\(x, a, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)

    @dec.skipif(DOCSTRINGS_STRIPPED)
    def test_signature_inspection_2args(self):
        # same for 2 shape params and both _pdf and _cdf defined
        dummy_distr = _distr6_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 2)
        assert_equal(dummy_distr.shapes, 'a, b')
        res = re.findall('logpdf\(x, a, b, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)

    def test_signature_inspection_2args_incorrect_shapes(self):
        # both _pdf and _cdf defined, but shapes are inconsistent: raises
        try:
            _distr3_gen(name='dummy')
        except TypeError:
            pass
        else:
            raise AssertionError('TypeError not raised.')

    def test_defaults_raise(self):
        # default arguments should raise
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a=42):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))

    def test_starargs_raise(self):
        # without explicit shapes, *args are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, *args):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))

    def test_kwargs_raise(self):
        # without explicit shapes, **kwargs are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, **kwargs):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_docstrings():
    """Generated distribution docstrings must contain no malformed fragments."""
    # Regexes matching tell-tale template breakage: empty parameter slots
    # (",  ,", "( ,") or a line starting with a bare reST colon.
    badones = [',\s*,', '\(\s*,', '^\s*:']
    for distname in stats.__all__:
        dist = getattr(stats, distname)
        if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
            for regex in badones:
                assert_(re.search(regex, dist.__doc__) is None)
def test_infinite_input():
    """Distribution functions evaluated at x=inf return their limit values."""
    tail = stats.skellam.sf(np.inf, 10, 11)
    assert_almost_equal(tail, 0)
    # Exercise the private _cdf directly, as the original regression did.
    cdf_at_inf = stats.ncx2._cdf(np.inf, 8, 0.1)
    assert_almost_equal(cdf_at_inf, 1)
def test_lomax_accuracy():
    """ppf(cdf(x)) must round-trip a tiny x without precision loss (gh-4033)."""
    tiny = 1e-100
    roundtrip = stats.lomax.ppf(stats.lomax.cdf(tiny, 1), 1)
    assert_allclose(roundtrip, tiny)
def test_gompertz_accuracy():
    """ppf(cdf(x)) must round-trip a tiny x without precision loss (gh-4031)."""
    tiny = 1e-100
    roundtrip = stats.gompertz.ppf(stats.gompertz.cdf(tiny, 1), 1)
    assert_allclose(roundtrip, tiny)
def test_truncexpon_accuracy():
    """ppf(cdf(x)) must round-trip a tiny x without precision loss (gh-4035)."""
    tiny = 1e-100
    roundtrip = stats.truncexpon.ppf(stats.truncexpon.cdf(tiny, 1), 1)
    assert_allclose(roundtrip, tiny)
def test_rayleigh_accuracy():
    """isf(sf(x)) must round-trip far in the tail (gh-4034)."""
    recovered = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)
    assert_almost_equal(recovered, 9.0, decimal=15)
def test_genextreme_give_no_warnings():
    """genextreme at shape c=0 must emit no warnings (regression, gh-6219)."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # Exercise the c=0 (Gumbel limit) code paths of each method.
        stats.genextreme.cdf(.5, 0)
        stats.genextreme.pdf(.5, 0)
        stats.genextreme.ppf(.5, 0)
        stats.genextreme.logpdf(-np.inf, 0.0)
        assert_equal(len(caught), 0)
def test_genextreme_entropy():
    """genextreme entropy follows euler_gamma*(1 - c) + log(scale) + 1
    (regression test for gh-5181)."""
    euler_gamma = 0.5772156649015329
    # (shape c, scale, expected entropy)
    cases = [(-1.0, 1, 2*euler_gamma + 1),
             (0, 1, euler_gamma + 1),
             (-2.0, 10, euler_gamma*3 + np.log(10) + 1),
             (10, 1, -9*euler_gamma + 1),
             (-10, 1, 11*euler_gamma + 1)]
    for c, scale, expected in cases:
        h = stats.genextreme.entropy(c, scale=scale)
        assert_allclose(h, expected, rtol=1e-14)
    # c = 1 gives exactly 1.
    assert_equal(stats.genextreme.entropy(1.0), 1)
def test_genextreme_sf_isf():
    """genextreme.sf in the far tail, with isf as its inverse.

    Reference values were computed with mpmath at 1000 digits using the
    Wikipedia parameterization (whose shape sign is opposite to scipy's):

        t  = (1 + xi*(x - mu)/sigma)**(-1/xi)   for xi != 0
        t  = exp(-(x - mu)/sigma)               for xi == 0
        sf = 1 - exp(-t)
    """
    # (x, scipy shape c, expected sf)
    cases = [(1e8, -0.125, 1.6777205262585625e-57),
             (7.98, 0.125, 1.52587890625e-21),
             (7.98, 0, 0.00034218086528426593)]
    for x, c, expected_sf in cases:
        sf = stats.genextreme.sf(x, c)
        assert_allclose(sf, expected_sf)
        # Round-tripping through isf recovers x.
        assert_allclose(stats.genextreme.isf(sf, c), x)
def test_burr12_ppf_small_arg():
    """burr12.ppf keeps full precision for very small probabilities."""
    prob = 1e-16
    # Reference quantile computed with mpmath as
    #     ((1 - prob)**(-1/d) - 1)**(1/c)   with c=2, d=3.
    quantile = stats.burr12.ppf(prob, 2, 3)
    assert_allclose(quantile, 5.7735026918962575e-09)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    run_module_suite()
|
bkendzior/scipy
|
scipy/stats/tests/test_distributions.py
|
Python
|
bsd-3-clause
| 111,178
|
[
"Gaussian"
] |
cb32e7532a79fdd5d6aa93e54c2f7990df048df98c124012d4695d6a8114ec9e
|
# (c) 2014, Brian Coca, Josh Drake, et al
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
cache: jsonfile
short_description: JSON formatted files.
description:
- This cache uses JSON formatted, per host, files saved to the filesystem.
version_added: "1.9"
author: Ansible Core (@ansible-core)
options:
_uri:
required: True
description:
- Path in which the cache plugin will save the JSON files
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
_prefix:
description: User defined prefix to use when creating the JSON files
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
section: defaults
_timeout:
default: 86400
description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
type: integer
'''
import codecs
import json
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
    """
    A caching module backed by json files.
    """

    def _load(self, filepath):
        # Valid JSON is always UTF-8 encoded.
        with codecs.open(filepath, 'r', encoding='utf-8') as handle:
            return json.load(handle, cls=AnsibleJSONDecoder)

    def _dump(self, value, filepath):
        # Serialize first, then write in one call.
        serialized = json.dumps(value, cls=AnsibleJSONEncoder,
                                sort_keys=True, indent=4)
        with codecs.open(filepath, 'w', encoding='utf-8') as handle:
            handle.write(serialized)
|
azaghal/ansible
|
test/support/integration/plugins/cache/jsonfile.py
|
Python
|
gpl-3.0
| 2,004
|
[
"Brian"
] |
f13768cdc3c54f61e7d77ec3c9ffd45cda2415b891ad4be7ad7383133d94df6a
|
"""Bayesian Gaussian Mixture Model."""
# Author: Wei Xue <xuewei4d@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import math
import numpy as np
from scipy.special import betaln, digamma, gammaln
from .base import BaseMixture, _check_shape
from .gaussian_mixture import _check_precision_matrix
from .gaussian_mixture import _check_precision_positivity
from .gaussian_mixture import _compute_log_det_cholesky
from .gaussian_mixture import _compute_precision_cholesky
from .gaussian_mixture import _estimate_gaussian_parameters
from .gaussian_mixture import _estimate_log_gaussian_prob
from ..utils import check_array
from ..utils.validation import check_is_fitted
def _log_dirichlet_norm(dirichlet_concentration):
"""Compute the log of the Dirichlet distribution normalization term.
Parameters
----------
dirichlet_concentration : array-like, shape (n_samples,)
The parameters values of the Dirichlet distribution.
Returns
-------
log_dirichlet_norm : float
The log normalization of the Dirichlet distribution.
"""
return (gammaln(np.sum(dirichlet_concentration)) -
np.sum(gammaln(dirichlet_concentration)))
def _log_wishart_norm(degrees_of_freedom, log_det_precisions_chol, n_features):
"""Compute the log of the Wishart distribution normalization term.
Parameters
----------
degrees_of_freedom : array-like, shape (n_components,)
The number of degrees of freedom on the covariance Wishart
distributions.
log_det_precision_chol : array-like, shape (n_components,)
The determinant of the precision matrix for each component.
n_features : int
The number of features.
Return
------
log_wishart_norm : array-like, shape (n_components,)
The log normalization of the Wishart distribution.
"""
# To simplify the computation we have removed the np.log(np.pi) term
return -(degrees_of_freedom * log_det_precisions_chol +
degrees_of_freedom * n_features * .5 * math.log(2.) +
np.sum(gammaln(.5 * (degrees_of_freedom -
np.arange(n_features)[:, np.newaxis])), 0))
class BayesianGaussianMixture(BaseMixture):
"""Variational Bayesian estimation of a Gaussian mixture.
This class allows to infer an approximate posterior distribution over the
parameters of a Gaussian mixture distribution. The effective number of
components can be inferred from the data.
This class implements two types of prior for the weights distribution: a
finite mixture model with Dirichlet distribution and an infinite mixture
model with the Dirichlet Process. In practice Dirichlet Process inference
algorithm is approximated and uses a truncated distribution with a fixed
maximum number of components (called the Stick-breaking representation).
The number of components actually used almost always depends on the data.
.. versionadded:: 0.18
Read more in the :ref:`User Guide <bgmm>`.
Parameters
----------
n_components : int, defaults to 1.
The number of mixture components. Depending on the data and the value
of the `weight_concentration_prior` the model can decide to not use
all the components by setting some component `weights_` to values very
close to zero. The number of effective components is therefore smaller
than n_components.
covariance_type : {'full', 'tied', 'diag', 'spherical'}, defaults to 'full'
String describing the type of covariance parameters to use.
Must be one of::
'full' (each component has its own general covariance matrix),
'tied' (all components share the same general covariance matrix),
'diag' (each component has its own diagonal covariance matrix),
'spherical' (each component has its own single variance).
tol : float, defaults to 1e-3.
The convergence threshold. EM iterations will stop when the
lower bound average gain on the likelihood (of the training data with
respect to the model) is below this threshold.
reg_covar : float, defaults to 1e-6.
Non-negative regularization added to the diagonal of covariance.
Allows to assure that the covariance matrices are all positive.
max_iter : int, defaults to 100.
The number of EM iterations to perform.
n_init : int, defaults to 1.
The number of initializations to perform. The result with the highest
lower bound value on the likelihood is kept.
init_params : {'kmeans', 'random'}, defaults to 'kmeans'.
The method used to initialize the weights, the means and the
covariances.
Must be one of::
'kmeans' : responsibilities are initialized using kmeans.
'random' : responsibilities are initialized randomly.
weight_concentration_prior_type : str, defaults to 'dirichlet_process'.
String describing the type of the weight concentration prior.
Must be one of::
'dirichlet_process' (using the Stick-breaking representation),
'dirichlet_distribution' (can favor more uniform weights).
weight_concentration_prior : float | None, optional.
The dirichlet concentration of each component on the weight
distribution (Dirichlet). This is commonly called gamma in the
literature. The higher concentration puts more mass in
the center and will lead to more components being active, while a lower
concentration parameter will lead to more mass at the edge of the
mixture weights simplex. The value of the parameter must be greater
than 0. If it is None, it's set to ``1. / n_components``.
mean_precision_prior : float | None, optional.
The precision prior on the mean distribution (Gaussian).
Controls the extend to where means can be placed. Larger
values concentrate the means of each clusters around `mean_prior`.
The value of the parameter must be greater than 0.
If it is None, it's set to 1.
mean_prior : array-like, shape (n_features,), optional
The prior on the mean distribution (Gaussian).
If it is None, it's set to the mean of X.
degrees_of_freedom_prior : float | None, optional.
The prior of the number of degrees of freedom on the covariance
distributions (Wishart). If it is None, it's set to `n_features`.
covariance_prior : float or array-like, optional
The prior on the covariance distribution (Wishart).
If it is None, the emiprical covariance prior is initialized using the
covariance of X. The shape depends on `covariance_type`::
(n_features, n_features) if 'full',
(n_features, n_features) if 'tied',
(n_features) if 'diag',
float if 'spherical'
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
warm_start : bool, default to False.
If 'warm_start' is True, the solution of the last fitting is used as
initialization for the next call of fit(). This can speed up
convergence when fit is called several times on similar problems.
See :term:`the Glossary <warm_start>`.
verbose : int, default to 0.
Enable verbose output. If 1 then it prints the current
initialization and each iteration step. If greater than 1 then
it prints also the log probability and the time needed
for each step.
verbose_interval : int, default to 10.
Number of iteration done before the next print.
Attributes
----------
weights_ : array-like, shape (n_components,)
The weights of each mixture components.
means_ : array-like, shape (n_components, n_features)
The mean of each mixture component.
covariances_ : array-like
The covariance of each mixture component.
The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_ : array-like
The precision matrices for each component in the mixture. A precision
matrix is the inverse of a covariance matrix. A covariance matrix is
symmetric positive definite so the mixture of Gaussian can be
equivalently parameterized by the precision matrices. Storing the
precision matrices instead of the covariance matrices makes it more
efficient to compute the log-likelihood of new samples at test time.
The shape depends on ``covariance_type``::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_cholesky_ : array-like
The cholesky decomposition of the precision matrices of each mixture
component. A precision matrix is the inverse of a covariance matrix.
A covariance matrix is symmetric positive definite so the mixture of
Gaussian can be equivalently parameterized by the precision matrices.
Storing the precision matrices instead of the covariance matrices makes
it more efficient to compute the log-likelihood of new samples at test
time. The shape depends on ``covariance_type``::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
n_iter_ : int
Number of step used by the best fit of inference to reach the
convergence.
lower_bound_ : float
Lower bound value on the likelihood (of the training data with
respect to the model) of the best fit of inference.
weight_concentration_prior_ : tuple or float
The dirichlet concentration of each component on the weight
distribution (Dirichlet). The type depends on
``weight_concentration_prior_type``::
(float, float) if 'dirichlet_process' (Beta parameters),
float if 'dirichlet_distribution' (Dirichlet parameters).
The higher concentration puts more mass in
the center and will lead to more components being active, while a lower
concentration parameter will lead to more mass at the edge of the
simplex.
weight_concentration_ : array-like, shape (n_components,)
The dirichlet concentration of each component on the weight
distribution (Dirichlet).
mean_precision_prior : float
The precision prior on the mean distribution (Gaussian).
Controls the extend to where means can be placed.
Larger values concentrate the means of each clusters around
`mean_prior`.
mean_precision_ : array-like, shape (n_components,)
The precision of each components on the mean distribution (Gaussian).
mean_prior_ : array-like, shape (n_features,)
The prior on the mean distribution (Gaussian).
degrees_of_freedom_prior_ : float
The prior of the number of degrees of freedom on the covariance
distributions (Wishart).
degrees_of_freedom_ : array-like, shape (n_components,)
The number of degrees of freedom of each components in the model.
covariance_prior_ : float or array-like
The prior on the covariance distribution (Wishart).
The shape depends on `covariance_type`::
(n_features, n_features) if 'full',
(n_features, n_features) if 'tied',
(n_features) if 'diag',
float if 'spherical'
See Also
--------
GaussianMixture : Finite Gaussian mixture fit with EM.
References
----------
.. [1] `Bishop, Christopher M. (2006). "Pattern recognition and machine
learning". Vol. 4 No. 4. New York: Springer.
<https://www.springer.com/kr/book/9780387310732>`_
.. [2] `Hagai Attias. (2000). "A Variational Bayesian Framework for
Graphical Models". In Advances in Neural Information Processing
Systems 12.
<http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.36.2841&rep=rep1&type=pdf>`_
.. [3] `Blei, David M. and Michael I. Jordan. (2006). "Variational
inference for Dirichlet process mixtures". Bayesian analysis 1.1
<https://www.cs.princeton.edu/courses/archive/fall11/cos597C/reading/BleiJordan2005.pdf>`_
"""
    def __init__(self, n_components=1, covariance_type='full', tol=1e-3,
                 reg_covar=1e-6, max_iter=100, n_init=1, init_params='kmeans',
                 weight_concentration_prior_type='dirichlet_process',
                 weight_concentration_prior=None,
                 mean_precision_prior=None, mean_prior=None,
                 degrees_of_freedom_prior=None, covariance_prior=None,
                 random_state=None, warm_start=False, verbose=0,
                 verbose_interval=10):
        # Generic mixture/EM options are handled by the shared base-class
        # initializer; only the variational-Bayes specific settings are
        # stored here.  Priors left as None are resolved to data-driven
        # defaults in the _check_* helpers.
        super().__init__(
            n_components=n_components, tol=tol, reg_covar=reg_covar,
            max_iter=max_iter, n_init=n_init, init_params=init_params,
            random_state=random_state, warm_start=warm_start,
            verbose=verbose, verbose_interval=verbose_interval)
        self.covariance_type = covariance_type
        self.weight_concentration_prior_type = weight_concentration_prior_type
        self.weight_concentration_prior = weight_concentration_prior
        self.mean_precision_prior = mean_precision_prior
        self.mean_prior = mean_prior
        self.degrees_of_freedom_prior = degrees_of_freedom_prior
        self.covariance_prior = covariance_prior
    def _check_parameters(self, X):
        """Check that the parameters are well defined.

        Validates the two categorical options, then delegates to the
        dedicated helpers that validate (and default) each group of prior
        parameters.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        """
        if self.covariance_type not in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError("Invalid value for 'covariance_type': %s "
                             "'covariance_type' should be in "
                             "['spherical', 'tied', 'diag', 'full']"
                             % self.covariance_type)
        if (self.weight_concentration_prior_type not in
                ['dirichlet_process', 'dirichlet_distribution']):
            raise ValueError(
                "Invalid value for 'weight_concentration_prior_type': %s "
                "'weight_concentration_prior_type' should be in "
                "['dirichlet_process', 'dirichlet_distribution']"
                % self.weight_concentration_prior_type)
        # Each helper sets the corresponding trailing-underscore attribute.
        self._check_weights_parameters()
        self._check_means_parameters(X)
        self._check_precision_parameters(X)
        self._checkcovariance_prior_parameter(X)
def _check_weights_parameters(self):
"""Check the parameter of the Dirichlet distribution."""
if self.weight_concentration_prior is None:
self.weight_concentration_prior_ = 1. / self.n_components
elif self.weight_concentration_prior > 0.:
self.weight_concentration_prior_ = (
self.weight_concentration_prior)
else:
raise ValueError("The parameter 'weight_concentration_prior' "
"should be greater than 0., but got %.3f."
% self.weight_concentration_prior)
def _check_means_parameters(self, X):
"""Check the parameters of the Gaussian distribution.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
_, n_features = X.shape
if self.mean_precision_prior is None:
self.mean_precision_prior_ = 1.
elif self.mean_precision_prior > 0.:
self.mean_precision_prior_ = self.mean_precision_prior
else:
raise ValueError("The parameter 'mean_precision_prior' should be "
"greater than 0., but got %.3f."
% self.mean_precision_prior)
if self.mean_prior is None:
self.mean_prior_ = X.mean(axis=0)
else:
self.mean_prior_ = check_array(self.mean_prior,
dtype=[np.float64, np.float32],
ensure_2d=False)
_check_shape(self.mean_prior_, (n_features, ), 'means')
def _check_precision_parameters(self, X):
"""Check the prior parameters of the precision distribution.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
_, n_features = X.shape
if self.degrees_of_freedom_prior is None:
self.degrees_of_freedom_prior_ = n_features
elif self.degrees_of_freedom_prior > n_features - 1.:
self.degrees_of_freedom_prior_ = self.degrees_of_freedom_prior
else:
raise ValueError("The parameter 'degrees_of_freedom_prior' "
"should be greater than %d, but got %.3f."
% (n_features - 1, self.degrees_of_freedom_prior))
    def _checkcovariance_prior_parameter(self, X):
        """Check the `covariance_prior_`.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        """
        _, n_features = X.shape
        if self.covariance_prior is None:
            # Default: the empirical covariance (or variance) of the data,
            # with the shape dictated by `covariance_type`.
            self.covariance_prior_ = {
                'full': np.atleast_2d(np.cov(X.T)),
                'tied': np.atleast_2d(np.cov(X.T)),
                'diag': np.var(X, axis=0, ddof=1),
                'spherical': np.var(X, axis=0, ddof=1).mean()
            }[self.covariance_type]
        elif self.covariance_type in ['full', 'tied']:
            # User-provided matrix prior: must be square and a valid
            # precision (symmetric, positive-definite).
            self.covariance_prior_ = check_array(
                self.covariance_prior, dtype=[np.float64, np.float32],
                ensure_2d=False)
            _check_shape(self.covariance_prior_, (n_features, n_features),
                         '%s covariance_prior' % self.covariance_type)
            _check_precision_matrix(self.covariance_prior_,
                                    self.covariance_type)
        elif self.covariance_type == 'diag':
            # User-provided per-feature variances: must be positive.
            self.covariance_prior_ = check_array(
                self.covariance_prior, dtype=[np.float64, np.float32],
                ensure_2d=False)
            _check_shape(self.covariance_prior_, (n_features,),
                         '%s covariance_prior' % self.covariance_type)
            _check_precision_positivity(self.covariance_prior_,
                                        self.covariance_type)
        # spherical case
        elif self.covariance_prior > 0.:
            self.covariance_prior_ = self.covariance_prior
        else:
            raise ValueError("The parameter 'spherical covariance_prior' "
                             "should be greater than 0., but got %.3f."
                             % self.covariance_prior)
    def _initialize(self, X, resp):
        """Initialization of the mixture parameters.

        Computes the responsibility-weighted sufficient statistics and runs
        one round of the variational updates to set the posterior
        parameters.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        resp : array-like, shape (n_samples, n_components)
        """
        nk, xk, sk = _estimate_gaussian_parameters(X, resp, self.reg_covar,
                                                   self.covariance_type)
        self._estimate_weights(nk)
        self._estimate_means(nk, xk)
        self._estimate_precisions(nk, xk, sk)
def _estimate_weights(self, nk):
"""Estimate the parameters of the Dirichlet distribution.
Parameters
----------
nk : array-like, shape (n_components,)
"""
if self.weight_concentration_prior_type == 'dirichlet_process':
# For dirichlet process weight_concentration will be a tuple
# containing the two parameters of the beta distribution
self.weight_concentration_ = (
1. + nk,
(self.weight_concentration_prior_ +
np.hstack((np.cumsum(nk[::-1])[-2::-1], 0))))
else:
# case Variationnal Gaussian mixture with dirichlet distribution
self.weight_concentration_ = self.weight_concentration_prior_ + nk
def _estimate_means(self, nk, xk):
"""Estimate the parameters of the Gaussian distribution.
Parameters
----------
nk : array-like, shape (n_components,)
xk : array-like, shape (n_components, n_features)
"""
self.mean_precision_ = self.mean_precision_prior_ + nk
self.means_ = ((self.mean_precision_prior_ * self.mean_prior_ +
nk[:, np.newaxis] * xk) /
self.mean_precision_[:, np.newaxis])
    def _estimate_precisions(self, nk, xk, sk):
        """Estimate the precisions parameters of the precision distribution.

        Dispatches to the Wishart update matching the covariance structure;
        each branch updates `degrees_of_freedom_` and `covariances_`, after
        which the Cholesky factors of the precisions are recomputed.

        Parameters
        ----------
        nk : array-like, shape (n_components,)
        xk : array-like, shape (n_components, n_features)
        sk : array-like
            The shape depends of `covariance_type`:
            'full' : (n_components, n_features, n_features)
            'tied' : (n_features, n_features)
            'diag' : (n_components, n_features)
            'spherical' : (n_components,)
        """
        {"full": self._estimate_wishart_full,
         "tied": self._estimate_wishart_tied,
         "diag": self._estimate_wishart_diag,
         "spherical": self._estimate_wishart_spherical
         }[self.covariance_type](nk, xk, sk)
        self.precisions_cholesky_ = _compute_precision_cholesky(
            self.covariances_, self.covariance_type)
def _estimate_wishart_full(self, nk, xk, sk):
"""Estimate the full Wishart distribution parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
xk : array-like, shape (n_components, n_features)
sk : array-like, shape (n_components, n_features, n_features)
"""
_, n_features = xk.shape
# Warning : in some Bishop book, there is a typo on the formula 10.63
# `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` is
# the correct formula
self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk
self.covariances_ = np.empty((self.n_components, n_features,
n_features))
for k in range(self.n_components):
diff = xk[k] - self.mean_prior_
self.covariances_[k] = (self.covariance_prior_ + nk[k] * sk[k] +
nk[k] * self.mean_precision_prior_ /
self.mean_precision_[k] * np.outer(diff,
diff))
# Contrary to the original bishop book, we normalize the covariances
self.covariances_ /= (
self.degrees_of_freedom_[:, np.newaxis, np.newaxis])
    def _estimate_wishart_tied(self, nk, xk, sk):
        """Estimate the tied Wishart distribution parameters.

        A single covariance matrix is shared by all components, so the
        per-component statistics are averaged over `n_components`.

        Parameters
        ----------
        nk : array-like, shape (n_components,)
        xk : array-like, shape (n_components, n_features)
        sk : array-like, shape (n_features, n_features)
        """
        _, n_features = xk.shape
        # Warning : in some Bishop book, there is a typo on the formula 10.63
        # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
        # is the correct formula
        self.degrees_of_freedom_ = (
            self.degrees_of_freedom_prior_ + nk.sum() / self.n_components)
        diff = xk - self.mean_prior_
        self.covariances_ = (
            self.covariance_prior_ + sk * nk.sum() / self.n_components +
            self.mean_precision_prior_ / self.n_components * np.dot(
                (nk / self.mean_precision_) * diff.T, diff))
        # Contrary to the original bishop book, we normalize the covariances
        self.covariances_ /= self.degrees_of_freedom_
def _estimate_wishart_diag(self, nk, xk, sk):
"""Estimate the diag Wishart distribution parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
xk : array-like, shape (n_components, n_features)
sk : array-like, shape (n_components, n_features)
"""
_, n_features = xk.shape
# Warning : in some Bishop book, there is a typo on the formula 10.63
# `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
# is the correct formula
self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk
diff = xk - self.mean_prior_
self.covariances_ = (
self.covariance_prior_ + nk[:, np.newaxis] * (
sk + (self.mean_precision_prior_ /
self.mean_precision_)[:, np.newaxis] * np.square(diff)))
# Contrary to the original bishop book, we normalize the covariances
self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis]
    def _estimate_wishart_spherical(self, nk, xk, sk):
        """Estimate the spherical Wishart distribution parameters.

        The per-feature contributions are averaged into a single scalar
        variance per component.

        Parameters
        ----------
        nk : array-like, shape (n_components,)
        xk : array-like, shape (n_components, n_features)
        sk : array-like, shape (n_components,)
        """
        _, n_features = xk.shape
        # Warning : in some Bishop book, there is a typo on the formula 10.63
        # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
        # is the correct formula
        self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk
        diff = xk - self.mean_prior_
        self.covariances_ = (
            self.covariance_prior_ + nk * (
                sk + self.mean_precision_prior_ / self.mean_precision_ *
                np.mean(np.square(diff), 1)))
        # Contrary to the original bishop book, we normalize the covariances
        self.covariances_ /= self.degrees_of_freedom_
    def _check_is_fitted(self):
        # Raise unless all the posterior attributes produced during fitting
        # are present on the estimator.
        check_is_fitted(self, ['weight_concentration_', 'mean_precision_',
                               'means_', 'degrees_of_freedom_',
                               'covariances_', 'precisions_',
                               'precisions_cholesky_'])
    def _m_step(self, X, log_resp):
        """M step.

        Recomputes the responsibility-weighted sufficient statistics and
        applies the variational posterior updates for the weights, means
        and precisions.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        log_resp : array-like, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in X.
        """
        n_samples, _ = X.shape
        nk, xk, sk = _estimate_gaussian_parameters(
            X, np.exp(log_resp), self.reg_covar, self.covariance_type)
        self._estimate_weights(nk)
        self._estimate_means(nk, xk)
        self._estimate_precisions(nk, xk, sk)
def _estimate_log_weights(self):
if self.weight_concentration_prior_type == 'dirichlet_process':
digamma_sum = digamma(self.weight_concentration_[0] +
self.weight_concentration_[1])
digamma_a = digamma(self.weight_concentration_[0])
digamma_b = digamma(self.weight_concentration_[1])
return (digamma_a - digamma_sum +
np.hstack((0, np.cumsum(digamma_b - digamma_sum)[:-1])))
else:
# case Variationnal Gaussian mixture with dirichlet distribution
return (digamma(self.weight_concentration_) -
digamma(np.sum(self.weight_concentration_)))
    def _estimate_log_prob(self, X):
        # Expected log-density of each sample under each component's
        # posterior, including the Wishart and mean-precision corrections.
        _, n_features = X.shape
        # We remove `n_features * np.log(self.degrees_of_freedom_)` because
        # the precision matrix is normalized
        log_gauss = (_estimate_log_gaussian_prob(
            X, self.means_, self.precisions_cholesky_, self.covariance_type) -
            .5 * n_features * np.log(self.degrees_of_freedom_))
        # Expected log-determinant of the precision under the Wishart
        # posterior (sum of digammas plus the log-2 term).
        log_lambda = n_features * np.log(2.) + np.sum(digamma(
            .5 * (self.degrees_of_freedom_ -
                  np.arange(0, n_features)[:, np.newaxis])), 0)
        return log_gauss + .5 * (log_lambda -
                                 n_features / self.mean_precision_)
    def _compute_lower_bound(self, log_resp, log_prob_norm):
        """Estimate the lower bound of the model.

        The lower bound on the likelihood (of the training data with respect
        to the model) is used to detect the convergence and is
        non-decreasing across iterations of the variational updates.

        Parameters
        ----------
        log_resp : array, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in X.
        log_prob_norm : float
            Logarithm of the probability of each sample in X.

        Returns
        -------
        lower_bound : float
        """
        # Contrary to the original formula, we have done some simplification
        # and removed all the constant terms.
        n_features, = self.mean_prior_.shape
        # We removed `.5 * n_features * np.log(self.degrees_of_freedom_)`
        # because the precision matrix is normalized.
        log_det_precisions_chol = (_compute_log_det_cholesky(
            self.precisions_cholesky_, self.covariance_type, n_features) -
            .5 * n_features * np.log(self.degrees_of_freedom_))
        if self.covariance_type == 'tied':
            # One shared Wishart factor, counted once per component.
            log_wishart = self.n_components * np.float64(_log_wishart_norm(
                self.degrees_of_freedom_, log_det_precisions_chol, n_features))
        else:
            log_wishart = np.sum(_log_wishart_norm(
                self.degrees_of_freedom_, log_det_precisions_chol, n_features))
        if self.weight_concentration_prior_type == 'dirichlet_process':
            # Normalization of the per-stick beta distributions.
            log_norm_weight = -np.sum(betaln(self.weight_concentration_[0],
                                             self.weight_concentration_[1]))
        else:
            log_norm_weight = _log_dirichlet_norm(self.weight_concentration_)
        return (-np.sum(np.exp(log_resp) * log_resp) -
                log_wishart - log_norm_weight -
                0.5 * n_features * np.sum(np.log(self.mean_precision_)))
def _get_parameters(self):
return (self.weight_concentration_,
self.mean_precision_, self.means_,
self.degrees_of_freedom_, self.covariances_,
self.precisions_cholesky_)
def _set_parameters(self, params):
(self.weight_concentration_, self.mean_precision_, self.means_,
self.degrees_of_freedom_, self.covariances_,
self.precisions_cholesky_) = params
# Weights computation
if self.weight_concentration_prior_type == "dirichlet_process":
weight_dirichlet_sum = (self.weight_concentration_[0] +
self.weight_concentration_[1])
tmp = self.weight_concentration_[1] / weight_dirichlet_sum
self.weights_ = (
self.weight_concentration_[0] / weight_dirichlet_sum *
np.hstack((1, np.cumprod(tmp[:-1]))))
self.weights_ /= np.sum(self.weights_)
else:
self. weights_ = (self.weight_concentration_ /
np.sum(self.weight_concentration_))
# Precisions matrices computation
if self.covariance_type == 'full':
self.precisions_ = np.array([
np.dot(prec_chol, prec_chol.T)
for prec_chol in self.precisions_cholesky_])
elif self.covariance_type == 'tied':
self.precisions_ = np.dot(self.precisions_cholesky_,
self.precisions_cholesky_.T)
else:
self.precisions_ = self.precisions_cholesky_ ** 2
|
chrsrds/scikit-learn
|
sklearn/mixture/bayesian_mixture.py
|
Python
|
bsd-3-clause
| 33,009
|
[
"Gaussian"
] |
e54297397d5427973da431c8f27eedc3f53b16ba6e924de70c1c2ae9d64d733c
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Yang Gao <younggao1994@gmail.com>
#
'''
Analytical electron-phonon matrix for restricted kohm sham
'''
import numpy as np
from pyscf import lib
from pyscf.hessian import rks as rks_hess
from pyscf.hessian import rhf as rhf_hess
from pyscf.grad import rks as rks_grad
from pyscf.dft import numint
from pyscf.eph import rhf as rhf_eph
from pyscf.data.nist import MP_ME
CUTOFF_FREQUENCY = rhf_eph.CUTOFF_FREQUENCY
KEEP_IMAG_FREQUENCY = rhf_eph.KEEP_IMAG_FREQUENCY
def _get_vxc_deriv1(hessobj, mo_coeff, mo_occ, max_memory):
    r"""First-order XC potential derivative w.r.t. nuclear coordinates.

    Slightly different from hessian.rks._get_vxc_deriv1 in that the
    :math:`\langle \nabla u | V_{xc} | v \rangle` term is removed.

    Returns an array of shape (natm, 3, nao, nao).
    """
    mol = hessobj.mol
    mf = hessobj.base
    # Prefer grids attached to the hessian object; fall back to the SCF's.
    if hessobj.grids is not None:
        grids = hessobj.grids
    else:
        grids = mf.grids
    if grids.coords is None:
        grids.build(with_non0tab=True)
    nao, nmo = mo_coeff.shape
    ni = mf._numint
    xctype = ni._xc_type(mf.xc)
    aoslices = mol.aoslice_by_atom()
    shls_slice = (0, mol.nbas)
    ao_loc = mol.ao_loc_nr()
    dm0 = mf.make_rdm1(mo_coeff, mo_occ)
    vmat = np.zeros((mol.natm,3,nao,nao))
    # Leave room for vmat itself when sizing the grid batches.
    max_memory = max(2000, max_memory-vmat.size*8/1e6)
    if xctype == 'LDA':
        ao_deriv = 1
        for ao, mask, weight, coords \
                in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
            rho = ni.eval_rho2(mol, ao[0], mo_coeff, mo_occ, mask, 'LDA')
            vxc, fxc = ni.eval_xc(mf.xc, rho, 0, deriv=2)[1:3]
            frr = fxc[0]
            ao_dm0 = numint._dot_ao_dm(mol, ao[0], dm0, mask, shls_slice, ao_loc)
            for ia in range(mol.natm):
                p0, p1 = aoslices[ia][2:]
                # First-order density response from moving atom ia.
                rho1 = np.einsum('xpi,pi->xp', ao[1:,:,p0:p1], ao_dm0[:,p0:p1])
                aow = np.einsum('pi,xp->xpi', ao[0], weight*frr*rho1)
                rks_grad._d1_dot_(vmat[ia], mol, aow, ao[0], mask, ao_loc, True)
            ao_dm0 = aow = None
        for ia in range(mol.natm):
            # Symmetrize (and fix the overall sign) of the AO-basis matrix.
            vmat[ia] = -vmat[ia] - vmat[ia].transpose(0,2,1)
    elif xctype == 'GGA':
        ao_deriv = 2
        # v_ip = np.zeros((3,nao,nao))
        for ao, mask, weight, coords \
                in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
            rho = ni.eval_rho2(mol, ao[:4], mo_coeff, mo_occ, mask, 'GGA')
            vxc, fxc = ni.eval_xc(mf.xc, rho, 0, deriv=2)[1:3]
            wv = numint._rks_gga_wv0(rho, vxc, weight)
            # rks_grad._gga_grad_sum_(v_ip, mol, ao, wv, mask, ao_loc)
            ao_dm0 = [numint._dot_ao_dm(mol, ao[i], dm0, mask, shls_slice, ao_loc)
                      for i in range(4)]
            for ia in range(mol.natm):
                # Density and gradient response for each Cartesian direction.
                wv = dR_rho1 = rks_hess._make_dR_rho1(ao, ao_dm0, ia, aoslices)
                wv[0] = numint._rks_gga_wv1(rho, dR_rho1[0], vxc, fxc, weight)
                wv[1] = numint._rks_gga_wv1(rho, dR_rho1[1], vxc, fxc, weight)
                wv[2] = numint._rks_gga_wv1(rho, dR_rho1[2], vxc, fxc, weight)
                aow = np.einsum('npi,Xnp->Xpi', ao[:4], wv)
                rks_grad._d1_dot_(vmat[ia], mol, aow, ao[0], mask, ao_loc, True)
            ao_dm0 = aow = None
        for ia in range(mol.natm):
            vmat[ia] = -vmat[ia] - vmat[ia].transpose(0,2,1)
    elif xctype == 'MGGA':
        raise NotImplementedError('meta-GGA')
    return vmat
def get_eph(ephobj, mo1, omega, vec, mo_rep):
    """Assemble the analytical electron-phonon coupling matrix for RKS.

    Parameters:
        ephobj : EPH object (see class below)
        mo1 : first-order MO response, or a chkfile path holding 'scf_mo1'
        omega : vibrational frequencies
        vec : polarization vectors of the vibration modes
        mo_rep : if True, transform the result to the MO representation

    Returns:
        mat : e-ph matrix, mat[J, a, b] with J over modes.
    """
    if isinstance(mo1, str):
        # mo1 given as a chkfile path; keys are stored as strings.
        mo1 = lib.chkfile.load(mo1, 'scf_mo1')
        mo1 = dict([(int(k), mo1[k]) for k in mo1])
    mol = ephobj.mol
    mf = ephobj.base
    ni = mf._numint
    ni.libxc.test_deriv_order(mf.xc, 2, raise_error=True)
    # Range-separation parameter and hybrid mixing of the functional.
    omg, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, spin=mol.spin)
    vnuc_deriv = ephobj.vnuc_generator(mol)
    aoslices = mol.aoslice_by_atom()
    vind = rhf_eph.rhf_deriv_generator(mf, mf.mo_coeff, mf.mo_occ)
    mocc = mf.mo_coeff[:,mf.mo_occ>0]
    dm0 = np.dot(mocc, mocc.T) * 2
    natoms = mol.natm
    nao = mol.nao_nr()
    mem_now = lib.current_memory()[0]
    max_memory = max(2000, mf.max_memory*.9-mem_now)
    vxc1ao = _get_vxc_deriv1(ephobj, mf.mo_coeff, mf.mo_occ, max_memory)
    vcore = []
    for ia in range(natoms):
        h1 = vnuc_deriv(ia)
        v1 = vind(mo1[ia])
        shl0, shl1, p0, p1 = aoslices[ia]
        shls_slice = (shl0, shl1) + (0, mol.nbas)*3
        if abs(hyb)>1e-10:
            # Hybrid functional: both Coulomb and exact-exchange responses.
            vj1, vk1 = \
                rhf_hess._get_jk(mol, 'int2e_ip1', 3, 's2kl',
                                 ['ji->s2kl', -dm0[:,p0:p1], #vj1
                                  'li->s1kj', -dm0[:,p0:p1]], #vk1
                                 shls_slice=shls_slice)
            veff = vj1 - hyb * .5 * vk1
            if abs(omg) > 1e-10:
                # Long-range exchange correction for range-separated hybrids.
                # NOTE(review): _get_jk with a single script returns a list
                # (cf. the pure-DFT branch below indexing vj1[0]); vk1 here
                # may need an explicit [0] -- confirm against pyscf master.
                with mol.with_range_coulomb(omg):
                    vk1 = \
                        rhf_hess._get_jk(mol, 'int2e_ip1', 3, 's2kl',
                                         ['li->s1kj', -dm0[:,p0:p1]], # vk1
                                         shls_slice=shls_slice)
                veff -= (alpha-hyb) * .5 * vk1
        else:
            # Pure functional: Coulomb response only.
            vj1 = rhf_hess._get_jk(mol, 'int2e_ip1', 3, 's2kl',
                                   ['ji->s2kl', -dm0[:,p0:p1]], # vj1
                                   shls_slice=shls_slice)
            veff = vj1[0]
        vtot = h1 + v1 + veff + vxc1ao[ia] + veff.transpose(0,2,1)
        vcore.append(vtot)
    vcore = np.asarray(vcore).reshape(-1,nao,nao)
    # Mass-weight the polarization vectors before contracting over nuclear
    # displacements.
    mass = mol.atom_mass_list() * MP_ME
    vec = rhf_eph._freq_mass_weighted_vec(vec, omega, mass)
    mat = np.einsum('xJ,xuv->Juv', vec, vcore, optimize=True)
    if mo_rep:
        mat = np.einsum('Juv,up,vq->Jpq', mat, mf.mo_coeff.conj(), mf.mo_coeff, optimize=True)
    return mat
class EPH(rks_hess.Hessian):
    '''EPH for restricted DFT

    Attributes:
        cutoff_frequency : float or int
            cutoff frequency in cm-1. Default is 80
        keep_imag_frequency : bool
            Whether to keep imaginary frequencies in the output. Default is False

    Saved results:
        omega : numpy.ndarray
            Vibrational frequencies in au.
        vec : numpy.ndarray
            Polarization vectors of the vibration modes
        eph : numpy.ndarray
            Electron phonon matrix eph[j,a,b] (j in nmodes, a,b in norbs)
    '''
    def __init__(self, scf_method, cutoff_frequency=CUTOFF_FREQUENCY,
                 keep_imag_frequency=KEEP_IMAG_FREQUENCY):
        rks_hess.Hessian.__init__(self, scf_method)
        self.cutoff_frequency = cutoff_frequency
        self.keep_imag_frequency = keep_imag_frequency
    # Reuse the restricted-HF EPH machinery; only the effective-potential
    # derivative (get_eph above, which adds the XC contribution) is RKS
    # specific.
    get_mode = rhf_eph.get_mode
    get_eph = get_eph
    vnuc_generator = rhf_eph.vnuc_generator
    kernel = rhf_eph.kernel
if __name__ == '__main__':
    # Smoke test: water molecule at a pre-relaxed geometry (Bohr units),
    # B3LYP/STO-3G, tight convergence so the phonon analysis is meaningful.
    from pyscf import gto, dft
    mol = gto.M()
    mol.atom = [['O', [0.000000000000, 0.000000002577,0.868557119905]],
                ['H', [0.000000000000,-1.456050381698,2.152719488376]],
                ['H', [0.000000000000, 1.456050379121,2.152719486067]]]
    mol.unit = 'Bohr'
    mol.basis = 'sto3g'
    mol.verbose=4
    mol.build() # this is a pre-computed relaxed geometry
    mf = dft.RKS(mol)
    mf.grids.level=6
    mf.xc = 'b3lyp'
    mf.conv_tol = 1e-16
    mf.conv_tol_grad = 1e-10
    mf.kernel()
    # Residual forces should be ~0 at a relaxed geometry.
    grad = mf.nuc_grad_method().kernel()
    print("Force on the atoms/au:")
    print(grad)
    myeph = EPH(mf)
    eph, omega = myeph.kernel(mo_rep=True)
    print(np.amax(eph))
|
sunqm/pyscf
|
pyscf/eph/rks.py
|
Python
|
apache-2.0
| 8,073
|
[
"PySCF"
] |
7c602086c9720407cc94bc860689c6bdbd70f0b5e87e9f2a1a3b300294fb7972
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
# Timothy Berkelbach <tim.berkelbach@gmail.com>
#
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc.ccsd_t_slow import r3
'''
QCISD(T)
'''
# t3 as ijkabc
# JCP 94, 442 (1991); DOI:10.1063/1.460359. Error in Eq (1), should be [ia] >= [jb] >= [kc]
def kernel(mycc, eris, t1=None, t2=None, verbose=logger.NOTE):
    """Compute the QCISD(T) perturbative-triples energy correction.

    Reference (slow) implementation: loops over unique virtual triples
    (a >= b >= c), building the connected (W) and disconnected (V)
    intermediates for all six index permutations.

    Parameters:
        mycc : converged coupled-cluster-like object providing t1/t2
        eris : integral container with fock, ovvv, ovoo and ovov blocks
        t1, t2 : amplitudes; default to mycc.t1 / mycc.t2
        verbose : verbosity level or a logger.Logger instance

    Returns:
        et : the triples energy correction (scalar).
    """
    if isinstance(verbose, logger.Logger):
        log = verbose
    else:
        log = logger.Logger(mycc.stdout, verbose)
    if t1 is None: t1 = mycc.t1
    if t2 is None: t2 = mycc.t2
    # Transposed amplitudes: t1T[a,i], t2T[a,b,i,j].
    t1T = t1.T
    t2T = t2.transpose(2,3,0,1)
    nocc, nvir = t1.shape
    mo_e = eris.fock.diagonal().real
    e_occ, e_vir = mo_e[:nocc], mo_e[nocc:]
    # Occupied-orbital energy denominators e_i + e_j + e_k.
    eijk = lib.direct_sum('i,j,k->ijk', e_occ, e_occ, e_occ)
    eris_vvov = eris.get_ovvv().conj().transpose(1,3,0,2)
    eris_vooo = numpy.asarray(eris.ovoo).conj().transpose(1,0,3,2)
    eris_vvoo = numpy.asarray(eris.ovov).conj().transpose(1,3,0,2)
    fvo = eris.fock[nocc:,:nocc]
    def get_w(a, b, c):
        # Connected W intermediate for the virtual triple (a, b, c).
        w = numpy.einsum('if,fkj->ijk', eris_vvov[a,b], t2T[c,:])
        w-= numpy.einsum('ijm,mk->ijk', eris_vooo[a,:], t2T[b,c])
        return w
    def get_v(a, b, c):
        # Disconnected V intermediate for the virtual triple (a, b, c).
        v = numpy.einsum('ij,k->ijk', eris_vvoo[a,b], t1T[c])
        v+= numpy.einsum('ij,k->ijk', t2T[a,b], fvo[c])
        return v
    et = 0
    for a in range(nvir):
        for b in range(a+1):
            for c in range(b+1):
                d3 = eijk - e_vir[a] - e_vir[b] - e_vir[c]
                # Degeneracy factors compensate for restricting the loop to
                # a >= b >= c.
                if a == c: # a == b == c
                    d3 *= 6
                elif a == b or b == c:
                    d3 *= 2
                wabc = get_w(a, b, c)
                wacb = get_w(a, c, b)
                wbac = get_w(b, a, c)
                wbca = get_w(b, c, a)
                wcab = get_w(c, a, b)
                wcba = get_w(c, b, a)
                vabc = get_v(a, b, c)
                vacb = get_v(a, c, b)
                vbac = get_v(b, a, c)
                vbca = get_v(b, c, a)
                vcab = get_v(c, a, b)
                vcba = get_v(c, b, a)
                # Symmetrized (via r3) and denominator-weighted Z arrays.
                zabc = r3(wabc + vabc) / d3
                zacb = r3(wacb + vacb) / d3
                zbac = r3(wbac + vbac) / d3
                zbca = r3(wbca + vbca) / d3
                zcab = r3(wcab + vcab) / d3
                zcba = r3(wcba + vcba) / d3
                # Contract each Z with the six occupied-index permutations
                # of the matching W intermediates.
                et+= numpy.einsum('ijk,ijk', wabc, zabc.conj())
                et+= numpy.einsum('ikj,ijk', wacb, zabc.conj())
                et+= numpy.einsum('jik,ijk', wbac, zabc.conj())
                et+= numpy.einsum('jki,ijk', wbca, zabc.conj())
                et+= numpy.einsum('kij,ijk', wcab, zabc.conj())
                et+= numpy.einsum('kji,ijk', wcba, zabc.conj())
                et+= numpy.einsum('ijk,ijk', wacb, zacb.conj())
                et+= numpy.einsum('ikj,ijk', wabc, zacb.conj())
                et+= numpy.einsum('jik,ijk', wcab, zacb.conj())
                et+= numpy.einsum('jki,ijk', wcba, zacb.conj())
                et+= numpy.einsum('kij,ijk', wbac, zacb.conj())
                et+= numpy.einsum('kji,ijk', wbca, zacb.conj())
                et+= numpy.einsum('ijk,ijk', wbac, zbac.conj())
                et+= numpy.einsum('ikj,ijk', wbca, zbac.conj())
                et+= numpy.einsum('jik,ijk', wabc, zbac.conj())
                et+= numpy.einsum('jki,ijk', wacb, zbac.conj())
                et+= numpy.einsum('kij,ijk', wcba, zbac.conj())
                et+= numpy.einsum('kji,ijk', wcab, zbac.conj())
                et+= numpy.einsum('ijk,ijk', wbca, zbca.conj())
                et+= numpy.einsum('ikj,ijk', wbac, zbca.conj())
                et+= numpy.einsum('jik,ijk', wcba, zbca.conj())
                et+= numpy.einsum('jki,ijk', wcab, zbca.conj())
                et+= numpy.einsum('kij,ijk', wabc, zbca.conj())
                et+= numpy.einsum('kji,ijk', wacb, zbca.conj())
                et+= numpy.einsum('ijk,ijk', wcab, zcab.conj())
                et+= numpy.einsum('ikj,ijk', wcba, zcab.conj())
                et+= numpy.einsum('jik,ijk', wacb, zcab.conj())
                et+= numpy.einsum('jki,ijk', wabc, zcab.conj())
                et+= numpy.einsum('kij,ijk', wbca, zcab.conj())
                et+= numpy.einsum('kji,ijk', wbac, zcab.conj())
                et+= numpy.einsum('ijk,ijk', wcba, zcba.conj())
                et+= numpy.einsum('ikj,ijk', wcab, zcba.conj())
                et+= numpy.einsum('jik,ijk', wbca, zcba.conj())
                et+= numpy.einsum('jki,ijk', wbac, zcba.conj())
                et+= numpy.einsum('kij,ijk', wacb, zcba.conj())
                et+= numpy.einsum('kji,ijk', wabc, zcba.conj())
    et *= 2
    log.info('QCISD(T) correction = %.15g', et)
    return et
|
sunqm/pyscf
|
pyscf/cc/qcisd_t_slow.py
|
Python
|
apache-2.0
| 5,395
|
[
"PySCF"
] |
4c1e71f8e07d4f5b691266a7e092a6f2b2ffb110de67bd74b827e13246ccf0c9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from textwrap import dedent
from uuid import uuid4 as uuid
from collections import deque
from ruffus import *
import subprocess
from threading import Thread
from optparse import OptionParser
import multiprocessing
from math import frexp
import os, sys
exe_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
# Make the package two levels up importable when run as a script.
sys.path.insert(0,os.path.abspath(os.path.join(exe_path,"..", "..")))
import logging
logger = logging.getLogger("run_parallel_blast")
import time
test_time = time.time()
# Minimum percent-match threshold (see printPreviousSubject).
MATCH_PART = 50
# Counters for the categories of disagreement between our results and the
# reference results.
WRONG_MAP = {
"not_exist_such_taxon_in_my_res":0,"wrong_match_soft":0,"wrong_match":0,"wrong_pident":0,"wrong_group":0, \
"is_shorter":0,"number_wrong_taxon":0, "no_taxon_in_right_res":0, "empty": 0}
# Histogram of observed "wrong distance" values.
dist_map = {a: 0 for a in range(1000)}
# One report file per disagreement category.
f_not_exist_such_taxon_in_my_res= open ("not_exist_such_taxon_in_my_res", 'w')
f_wrong_match_soft = open ("wrong_match_soft", 'w')
f_wrong_pident = open ("wrong_pident", 'w')
f_wrong_group = open ("wrong_group", 'w')
f_no_taxon_in_right_res = open ("no_taxon_in_right_res", 'w')
parser = OptionParser(version="%prog 1.0", usage = "\n\n %prog --input_file QUERY_FASTA --database_file \
FASTA_DATABASE --groups_omcl_file OrthoMCL_Groups [more_options]")
"""formatter_class=argparse.RawDescriptionHelpFormatter,
description=dedent('''\
Parallel blastp + OrthoMCL
Author: Makarova Valentina, makarovavs704@gmail.com, 2015
Use: http://www.ruffus.org.uk/examples/bioinformatics/
In: set of proteins in fasta-file
Out: TSV-table, orthologGroups'''))"""
parser.add_option("-i", "--input_file", dest="input_file",
                  metavar="FILE",
                  type="string",
                  help="Name and path of query sequence file in FASTA format. ")
parser.add_option("-d", "--database_file", dest="database_file",
                  metavar="FILE",
                  type="string",
                  help="Name and path of FASTA database to search. ")
parser.add_option("-o", "--out_file", dest="out_file",
                  metavar="FILE",
                  type="string",
                  default="orthologGroups",
                  help="Name of output")
parser.add_option("-t", "--temp_directory", dest="temp_directory",
                  metavar="PATH",
                  type="string",
                  default="tmp",
                  help="Name and path of temporary directory where calculations "
                       "should take place. ")
parser.add_option('-g', "--groups_omcl_file", dest="omcl_file",
                  metavar='PATH',
                  default="groups_OrthoMCL-5.txt",
                  type=str, nargs=1,
                  help='Name and path to OrthoMCL groups file .fasta')
parser.add_option('-b', "--blastp_exe", dest="blastp",
                  metavar='PATH',
                  default="blastp",
                  type=str,
                  help='Name and path to blastp.exe file')
#
#   general options: verbosity / logging
#
parser.add_option("-v", "--verbose", dest = "verbose",
                  action="count", default=0,
                  help="Print more detailed messages for each additional verbose level."
                       " E.g. run_parallel_blast --verbose --verbose --verbose ... (or -vvv)")
#
#   pipeline
#
parser.add_option("-j", "--jobs", dest="jobs",
                  default=1,
                  metavar="jobs",
                  type="int",
                  help="Specifies the number of jobs (operations) to run in parallel.")
parser.add_option("--flowchart", dest="flowchart",
                  metavar="FILE",
                  type="string",
                  help="Print flowchart of the pipeline to FILE. Flowchart format "
                       "depends on extension. Alternatives include ('.dot', '.jpg', "
                       "'*.svg', '*.png' etc). Formats other than '.dot' require "
                       "the dot program to be installed (http://www.graphviz.org/).")
parser.add_option("-n", "--just_print", dest="just_print",
                  action="store_true", default=False,
                  help="Only print a trace (description) of the pipeline. "
                       " The level of detail is set by --verbose.")
parser.add_option("-p", '--pack_size', dest="pack_size", metavar='INT', default=1, type=int, nargs=1,
                  help='Count of protein for one run blast')
(options, remaining_args) = parser.parse_args()
# Required-argument checks are skipped when only a flowchart is requested.
if not options.flowchart:
    if not options.database_file:
        parser.error("\n\n\tMissing parameter --database_file FILE\n\n")
    if not options.input_file:
        parser.error("\n\n\tMissing parameter --input_file FILE\n\n")
    if not options.omcl_file:
        parser.error("\n\n\tMissing parameter --omcl_file FILE\n\n")
if options.verbose:
    logger.setLevel(logging.DEBUG)
    stderrhandler = logging.StreamHandler(sys.stderr)
    stderrhandler.setFormatter(logging.Formatter("    %(message)s"))
    stderrhandler.setLevel(logging.DEBUG)
    logger.addHandler(stderrhandler)
original_fasta = options.input_file
database_file = options.database_file
temp_directory = options.temp_directory
out_file = options.out_file
pack_size = options.pack_size
# NOTE(review): buffering=0 on a text-mode open is Python 2 only; under
# Python 3 this raises ValueError -- confirm the target interpreter.
output_file = open(options.out_file, 'w', 0)
blastp = options.blastp
file_queue = []
if __name__ == '__main__':
    # Shared queue so worker processes can hand results back.
    m = multiprocessing.Manager()
    file_queue = m.Queue()
# Reference results keyed by query id, loaded from a fixed TSV file:
# columns are qid, group, sid, evalue mantissa/exponent, pident, pmatch.
Right_Results = {}
r_res_file = open("Octopus_bimaculoides.og.tsv", 'r')
for line in r_res_file:
    qid,gr,sid,evalue_mant,evalue_exp,pident,pmatch = line.split()
    Right_Results[qid] = [sid,evalue_mant,evalue_exp,pident,pmatch, gr]
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Functions
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# flip orientation if nec.
def getStartEnd(h):
    """Return (start, end) for an HSP span, flipping reversed coordinates."""
    first, second = h[0], h[1]
    if first > second:
        first, second = second, first
    return first, second
def computeNonOverlappingMatchLength(subject):
    """Total number of distinct positions covered by the subject's HSPs.

    Spans may be given in either orientation; they are normalized to
    (start, end) with start <= end and counted inclusively, merging
    overlapping or contained spans so shared positions count once.

    Parameters:
        subject: dict with an "hspspans" entry, an iterable of
            (start, end) pairs.

    Returns:
        int: combined non-overlapping match length; 0 if there are no
        spans.  (The original raised IndexError on an empty span list
        because it read hsps[0] before its emptiness guard could fire.)
    """
    # Keep the original ordering criterion: sort by the raw first
    # coordinate, not the normalized start.
    hsps = sorted(subject["hspspans"], key=lambda x: x[0])
    if not hsps:
        return 0
    first = hsps[0]
    start, end = min(first[0], first[1]), max(first[0], first[1])
    match_len = 0
    for h in hsps[1:]:
        hspStart, hspEnd = min(h[0], h[1]), max(h[0], h[1])
        if hspEnd <= end:  # fully contained: does not extend the window
            continue
        if hspStart <= end:  # overlaps: extend the current window
            end = hspEnd
        else:  # disjoint: close the current window, open a new one
            match_len += end - start + 1
            start = hspStart
            end = hspEnd
    match_len += end - start + 1  # account for the last open window
    return match_len
def formatEvalue(evalue):
    """Normalize a BLAST e-value string into (mantissa, exponent) strings.

    Handles the bare-exponent form ("e-180" -> "1e-180"), trims the
    mantissa to at most two decimals (dropping ".00" entirely), and strips
    a leading '+' and the all-zero "00" form from the exponent.
    """
    if evalue[0] == 'e':
        evalue = '1' + evalue          # "e-180" really means "1e-180"
    mant, exp = ("%.3e" % float(evalue)).split("e")
    mant = "%.2f" % float(mant)
    if float(mant) == int(float(mant)):
        mant = str(int(float(mant)))   # "1.00" -> "1"
    if exp.startswith('+'):
        exp = exp[1:]
    if exp == "00":
        exp = '0'
    return mant, exp
def printPreviousSubject(subject, file_ind):
    """Emit the formatted hit line for `subject` and score it against the
    reference results in `Right_Results`.

    Side effects (module globals): writes the hit to `output_file`, updates
    the WRONG_MAP counters and `dist_map`, and logs mismatches to the
    various f_wrong_* report files.  Increments subject["wrong_dist"] for
    every processed hit until the reference hit is found.
    """
    if subject["find_right"]:
        return
    nonOverlapMatchLen = computeNonOverlappingMatchLength(subject)
    percentIdent = int(round((float(subject["totalIdentities"]) / float(subject["totalLength"]) * 10 + 0.5) / 10))
    shorterLength = subject["queryLength"] if subject["queryShorter"] else subject["subjectLength"]
    percentMatch = int(round((float(nonOverlapMatchLen) / float(shorterLength) * 1000 + 0.5) / 10))
    if percentMatch > 50:
        ans = str(subject["queryId"]+"\t"+subject["omcl_group"].rjust(12)+" "+subject["subjectId"].ljust(26) \
              + subject["evalueMant"].rjust(5)+" "+subject["evalueExp"].rjust(5)+" "\
              + str(percentIdent).rjust(5)+" "+ str(percentMatch).rjust(5) + "\n")
        output_file.write(ans)
        #print "PERCENT MAtch ", percentMatch
        if subject["queryId"] in Right_Results:
            right_subject = Right_Results[subject["queryId"]]
            if subject["subjectId"] == right_subject[0]:
                subject["find_right"] = True
                if (subject["wrong_dist"]) != 0:
                    WRONG_MAP["number_wrong_taxon"] += 1
                    if subject["queryShorter"]:
                        WRONG_MAP["is_shorter"] += 1
                dist_map[subject["wrong_dist"]] += 1
                right_match = int(right_subject[4])
                # Bug fix: the lower bound previously tested `right_match < -1`
                # (never true for a percentage) instead of the symmetric
                # difference, so results more than 1 point *above* the
                # reference were never counted as soft mismatches.
                if right_match - percentMatch > 1 or right_match - percentMatch < -1:
                    WRONG_MAP["wrong_match_soft"] += 1
                    f_wrong_match_soft.write(subject["queryId"] + " "+file_ind + " " + str(subject["queryShorter"]) + "\n")
                elif right_match != percentMatch:
                    WRONG_MAP["wrong_match"] += 1
                if right_subject[5] != subject["omcl_group"]:
                    WRONG_MAP["wrong_group"] += 1
                    # Bug fix: `f_wrong_group(...)` *called* the file object
                    # (TypeError at runtime) instead of writing to it.
                    f_wrong_group.write(subject["queryId"] + " "+file_ind + " " + str(subject["queryShorter"]) + "\n")
                # Bug fix: right_subject[3] is a string read from the TSV;
                # compare numerically, otherwise this branch always fired.
                if int(right_subject[3]) != percentIdent:
                    WRONG_MAP["wrong_pident"] += 1
                    f_wrong_pident.write(subject["queryId"] + " "+file_ind + " " + str(subject["queryShorter"]) + "\n")
        else:
            # Query has no reference entry at all: mark done and record it.
            subject["find_right"] = True
            WRONG_MAP["no_taxon_in_right_res"] += 1
            WRONG_MAP["number_wrong_taxon"] += 1
            f_no_taxon_in_right_res.write(subject["queryId"] + " " +file_ind + " " + str(subject["queryShorter"]) + "\n")
            if subject["queryShorter"]:
                WRONG_MAP["is_shorter"] += 1
    subject["wrong_dist"] += 1
def run_cmd(cmd_str):
    """Run `cmd_str` through the shell, retrying up to 10 times if
    launching the subprocess raises an exception.

    Returns the subprocess exit status (int) once the command completes,
    or None if every attempt raised.  A non-zero exit status is reported
    but not retried.
    """
    for attempt in range(10):
        # Bug fix: the original `while` loop never incremented its counter,
        # so a persistent exception retried forever; worse, on *success* it
        # fell through without breaking and re-ran the command indefinitely.
        try:
            process = subprocess.Popen(cmd_str, stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE, shell=True)
            stdout_str, stderr_str = process.communicate()
        except Exception:
            continue  # transient launch failure: retry
        if process.returncode != 0:
            print("Failed to run '%s'\nNon-zero exit status %s" % (cmd_str, process.returncode))
        return process.returncode
    return None
def orthomcl_daemon():
    # Consumer thread: drains finished BLAST result files from `file_queue`,
    # parses each one, and scores every query's hits against the reference
    # results via printPreviousSubject().  Runs until all segments announced
    # by splitFasta (through the "end" file) have been processed.
    print "daemon start"
    #groups_OrthoMCL_file= open(options.omcl_file, 'r')
    # taxon -> { protein id -> OrthoMCL group name }, parsed from lines of
    # the form "GROUPNAME: taxon|protein taxon|protein ...".
    groups_OrthoMCL = {}
    for line in open(options.omcl_file, 'r'):
        group_proteins = line.split()
        group_name = group_proteins.pop(0)[:-1]  # strip the trailing ':'
        for protein in group_proteins:
            temp = protein.split('|')
            if temp[0] not in groups_OrthoMCL:
                groups_OrthoMCL[temp[0]] = {}
            groups_OrthoMCL[ temp[0] ][temp[1]] = group_name
    # Block until splitFasta publishes the total segment count.
    while not os.path.exists(os.path.join(temp_directory, "end")):
        time.sleep(3)
        print "wait blast"
    f = open(os.path.join(temp_directory, "end"), 'r')
    pr_q = int(f.read())  # number of BLAST result files to expect
    f.close()
    os.remove(os.path.join(temp_directory, "end"))
    completed_file_quantity = 0
    while (completed_file_quantity < pr_q):
        while not file_queue.empty():
            #try:
            file_ind = file_queue.get()
            #learn blast file
            completed_file_quantity += 1
            f = open(file_ind, 'r')
            if os.path.getsize(file_ind) < 10:
                # (Effectively) empty BLAST output: count it and move on.
                print completed_file_quantity, "/", pr_q
                WRONG_MAP["empty"] += 1
                continue
            prevSubjectId = 'blah'
            prevQueryId = 'blah'
            subject = {}  # hash to hold subject info
            queryShorter = False
            subject["find_right"] = False
            subject["wrong_dist"] = 0
            for line in f:
                # Stop reading once the reference hit has been matched.
                if subject["find_right"]:
                    break
                if len(line) == 0:
                    continue
                #qseqid sseqid pident length mismatch qstart qend sstart send evalue bitscore qlen slen
                queryId, subjectId, percentIdentity, length, mismatches, \
                    queryStart, queryEnd, subjectStart, subjectEnd, evalue, bist, qlen, slen = line.split()
                if queryId != prevQueryId or subjectId != prevSubjectId:
                    # print previous subject
                    if len(subject) > 2:
                        printPreviousSubject(subject, file_ind)
                    # initialize new one from first HSP
                    prevSubjectId = subjectId
                    prevQueryId = queryId
                    subject["hspspans"] = []
                    subject["totalIdentities"] = 0
                    subject["totalLength"] = 0
                    subject["queryId"] = queryId
                    subject["subjectId"] = subjectId
                    tmp = subject["subjectId"].split("|")
                    subject["omcl_group"] = "NO_GROUP"
                    if (tmp[0] in groups_OrthoMCL) and (tmp[1] in groups_OrthoMCL[tmp[0]]):
                        subject["omcl_group"] = groups_OrthoMCL[tmp[0]][tmp[1]]
                    subject["queryLength"] = int(qlen)
                    subject["subjectLength"] = int(slen)
                    subject["queryShorter"] = subject["queryLength"] < subject["subjectLength"]
                    subject["evalueMant"], subject["evalueExp"] = formatEvalue(evalue)  # from first hsp
                # get additional info from subsequent HSPs
                hspspan = [int(subjectStart), int(subjectEnd)]
                if subject["queryShorter"]:
                    hspspan = [int(queryStart), int(queryEnd)]
                subject["hspspans"].append(hspspan)
                subject["totalIdentities"] += float(percentIdentity) * float(length)
                subject["totalLength"] += int(length)
            #if first:
            # Flush the last subject of the file.
            if len(subject) > 2:
                printPreviousSubject(subject, file_ind)
            if not subject["find_right"]:
                if subject["queryId"] in Right_Results:
                    # The reference has a hit for this query but we never
                    # produced a matching one.
                    WRONG_MAP["not_exist_such_taxon_in_my_res"] += 1
                    WRONG_MAP["number_wrong_taxon"] += 1
                    f_not_exist_such_taxon_in_my_res.write(subject["queryId"] + " "+ file_ind + " " + str(subject["queryShorter"]) + "\n")
                    if subject["queryShorter"]:
                        WRONG_MAP["is_shorter"] += 1
                else:
                    dist_map[0] += 1
            print completed_file_quantity, "/", pr_q
            #close all one protein file and remove them
            f.close()
            #os.remove(file_ind)
            tmp = file_ind.split(".")
            tmp[-1] = "segment"
            os.remove(".".join(tmp))
            tmp[-1] = "blastSuccess"
            if os.path.exists(".".join(tmp)):
                # Retry a few times: the producer may still hold the file.
                for i in range(1, 10):
                    try:
                        os.remove(".".join(tmp))
                        break
                    except:
                        continue
            #except Exception as e:
            #    print e
            #    continue
    #os.rmdir(temp_directory)
    # NOTE(review): `test_time` is not defined in this chunk -- presumably
    # set near the script start; confirm.
    print (time.time() - test_time)
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Pipeline tasks
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
@follows(mkdir(temp_directory))
@split(original_fasta, os.path.join(temp_directory, "*.segment"))
def splitFasta (seqFile, segments):
    """Ruffus task: split the input FASTA into one <n>.segment file per
    '>' record, then atomically publish the total segment count in the
    "end" file for orthomcl_daemon to pick up.
    """
    # Clean up any segment files from previous runs before creating new ones
    for i in segments:
        os.unlink(i)
    current_file_name = 0
    #current_size = 0
    for line in open(original_fasta):
        # start a new file for each accession line
        if line[0] == '>':
            current_file_name += 1
            file_name = "%d.segment" % current_file_name
            file_path = os.path.join(temp_directory, file_name)
            current_file = open(file_path, "w")
            #current_size = 0
        # NOTE(review): int-vs-"" comparison is always true (Python 2), and
        # `current_file` is unbound if the FASTA does not begin with '>' --
        # confirm the input is always well-formed.
        if current_file_name != "":
            current_file.write(line)
    # Write the count to a temp name, then rename so a reader never sees a
    # partially written "end" file.
    end_file = open(os.path.join(temp_directory, "end_tmp"), 'w')
    end_file.write(str(current_file_name))
    end_file.close()
    os.rename(os.path.join(temp_directory, "end_tmp"), os.path.join(temp_directory, "end"))
@transform(splitFasta, suffix(".segment"), [".blastResult", ".blastSuccess"], file_queue)
def runBlast(seqFile, output_files, file_queue):
    """Ruffus task: blastp one segment against the database, enqueue the
    result file for orthomcl_daemon, and touch a .blastSuccess flag file.
    """
    blastResultFile, flag_file = output_files
    # Tabular (outfmt 6) with explicit columns including qlen/slen, which
    # the daemon needs for the percent-match computation.
    cmd_str = blastp + " -db %s -query %s -out %s -evalue 1e-5 -max_target_seqs 10000 -outfmt \"6 qseqid sseqid pident length mismatch qstart qend sstart send evalue bitscore qlen slen\""
    run_cmd(cmd_str % (database_file, seqFile, blastResultFile))
    file_queue.put(blastResultFile)
    open(flag_file, "w")
    #time.sleep(5)
    # NOTE(review): the line count below is computed but never used --
    # looks like leftover debugging; confirm before removing.
    f = open(blastResultFile, 'r')
    file_len = 0
    for line in f:
        if not len(line):
            continue
        file_len += 1
if __name__ == '__main__':
    if options.just_print:
        # Dry run: show what the pipeline would do without executing it.
        pipeline_printout(sys.stdout, [runBlast], verbose=options.verbose)
    elif options.flowchart:
        # use file extension for output format
        output_format = os.path.splitext(options.flowchart)[1][1:]
        # NOTE(review): `combineBlastResults` is not defined anywhere in
        # this chunk -- flowchart mode would raise NameError; confirm.
        pipeline_printout_graph (open(options.flowchart, "w"),
                                 output_format,
                                 [combineBlastResults],
                                 no_key_legend = True)
    else:
        # Run the checking daemon alongside the ruffus BLAST pipeline.
        result_daemon = Thread(target=orthomcl_daemon)
        result_daemon.setDaemon(True)
        result_daemon.start()
        pipeline_run([runBlast], multiprocess = options.jobs,
                     logger = logger, verbose=options.verbose)
        if (result_daemon.isAlive()):
            result_daemon.join()
        # Summary of the discrepancy counters collected by the daemon.
        print "wrong_match_soft ", WRONG_MAP["wrong_match_soft"], "wrong_match ", WRONG_MAP["wrong_match"],\
            "wrong_group ", WRONG_MAP["wrong_group"], "not_exist_such_taxon ", WRONG_MAP["not_exist_such_taxon_in_my_res"], \
            "no_taxon_in_right_res", WRONG_MAP["no_taxon_in_right_res"],"wrong_taxon_number", \
            WRONG_MAP["number_wrong_taxon"], "is_shorter", WRONG_MAP["is_shorter"], "empty", WRONG_MAP["empty"], "wrong_pident", WRONG_MAP["wrong_pident"]
        # Histogram of how many hits preceded the reference hit.
        string_map = ""
        for i in range(1000):
            if dist_map[i] != 0:
                string_map += str(i) +"\t" + str(dist_map[i]) + "\n"
        print string_map
|
Vmakarova/parallelblast
|
parallelblastwithcheck.py
|
Python
|
apache-2.0
| 17,887
|
[
"BLAST"
] |
9cc33bdedb07bfc9fb3dbbed69b83db752b753e6e96795984f86e76ced181085
|
# Lint as: python3
"""Statistical models based on Neural Networks."""
import dataclasses
from typing import Any, Callable
from epi_forecast_stat_mech.statistical_models import base
from epi_forecast_stat_mech.statistical_models import probability as stat_prob
from epi_forecast_stat_mech.statistical_models import tree_util
from flax.deprecated import nn
import jax
import jax.numpy as jnp
import tensorflow_probability as tfp
tfp = tfp.experimental.substrates.jax
tfd = tfp.distributions
class PerceptronModule(nn.Module):
  """Multi-layer perceptron network module."""

  def apply(
      self,
      inputs,
      output_size,
      hidden_layer_sizes=(64, 64),
      activation=jax.nn.relu,
      batch_norm_style='once'
  ):
    """Computes the MLP output for `inputs`.

    batch_norm_style selects where BatchNorm is applied: 'none' (never),
    'once' (on the inputs only), or 'layerwise_whiten' (bias/scale-free
    BatchNorm before every Dense layer, including the output layer).
    """
    if batch_norm_style not in ('none', 'once', 'layerwise_whiten'):
      raise ValueError(f'Unexpected batch_norm_style: {batch_norm_style}')
    whiten = batch_norm_style == 'layerwise_whiten'
    hidden = nn.BatchNorm(inputs) if batch_norm_style == 'once' else inputs
    for width in hidden_layer_sizes:
      if whiten:
        hidden = nn.BatchNorm(hidden, bias=False, scale=False)
      hidden = activation(nn.Dense(hidden, width))
    if whiten:
      hidden = nn.BatchNorm(hidden, bias=False, scale=False)
    return nn.Dense(hidden, output_size)
class PlainLinearModule(
    PerceptronModule.partial(
        hidden_layer_sizes=(), activation=None, batch_norm_style='none')):
  """Plain affine map: no hidden layers and no batch normalization."""
  pass
class LinearModule(
    PerceptronModule.partial(
        hidden_layer_sizes=(), activation=None, batch_norm_style='once')):
  """Affine map applied to batch-normalized inputs (no hidden layers)."""
  pass
@dataclasses.dataclass
class NormalDistributionModel(base.StatisticalModel):
  """Statistical model based on normal distribution.

  Attributes:
    predict_module: flax.deprecated.nn.Module that takes `inputs` and `output_size`
      arguments and returns array of shape `[batch, output_size]` that will be
      used to predict locations of the gaussian distributed `observations` and
      possibly scales, depending on whether `error_model` is 'full'.
    error_model: (string) Either 'full' to represent using the predict_module
      to estimate the error-scale (heteroscedastic). Or 'plugin' to
      represent that the scale should be based on a homoscedastic plugin.
    log_prior_fn: function that computes log_prior on a parameters of the
      `predict_module`.
    scale_eps: minimal value for the predicted scale if scale.
  """
  predict_module: nn.Module = PerceptronModule
  log_prior_fn: Callable[..., Any] = None
  error_model: str = 'full'
  scale_eps: float = 1e-2

  def _output_size_and_unpack_fn(self, output_structure):
    """Returns the flat module output width and the unpack fn for it.

    The width is doubled for the 'full' error model because the module
    must emit both a location and a raw scale per output element.
    """
    output_array, unpack_fn = tree_util.pack(output_structure)
    output_size = output_array.shape[-1]
    if self.error_model == 'full':
      output_size *= 2
    return output_size, unpack_fn

  def init_parameters(self, rng, inputs, output_structure):
    """Returns initial parameters generated at model construction time."""
    output_size, _ = self._output_size_and_unpack_fn(output_structure)
    return self.predict_module.init(rng, inputs, output_size)[1]

  def log_prior(self, parameters):
    """Returns the log probability of `parameters` based on priors.

    Args:
      parameters: parameters of the statistical model.

    Returns:
      log-probabilities for parameter.
    """
    # No prior supplied means an improper flat prior: contributes 0.
    if self.log_prior_fn is None:
      return 0.
    return self.log_prior_fn(parameters)

  def log_likelihood(self, parameters, covariates, observations):
    """Returns the log likelihood of `observations`.

    Args:
      parameters: parameters of the statistical model.
      covariates: A numpy array of shape "location" x "static_covariate".
      observations: A tree of mech_params that we want to explain.

    Returns:
      A log-likelihood. If self.error_model == 'full', the return is a tree of
      the shape of observations (the log_likelihood of each observation). In the
      'plugin' case, the return is a scalar (the sum).
    """
    if self.error_model == 'full':
      posterior = self.predict(parameters, covariates, observations)
      res = tree_util.tree_multimap(
          lambda p, o: p.log_prob(o), posterior, observations)
      return res
    elif self.error_model == 'plugin':
      # Homoscedastic case: score the pooled residuals with a plugin
      # scale bounded below by scale_eps.
      loc, _ = self.get_loc_scale(parameters, covariates, observations)
      tree_error = tree_util.tree_multimap(
          lambda hat, o: o - hat, loc, observations)
      error, _ = tree_util.pack(tree_error)
      logprob = stat_prob.gaussian_error_logprob_with_bottom_scale(
          error, self.scale_eps, axis=0)
      return logprob
    else:
      raise ValueError(f'unexpected error_model: {self.error_model}')

  def get_loc_scale(self, parameters, covariates, observations):
    """Computes loc and scale for `observations` based on `covariates`.

    Args:
      parameters: parameters of the statistical model.
      covariates: array representing covariates for each location.
      observations: structure of observations to be predicted.

    Returns:
      A `(loc, scale)` pair of pytrees matching the structure of
      `observations`. `scale` is None when `error_model` is 'plugin'
      (the scale is then supplied by the plugin error model instead).
    """
    output_size, unpack_fn = self._output_size_and_unpack_fn(observations)
    raw_predictions = self.predict_module.call(
        parameters, covariates, output_size)
    if self.error_model == 'full':
      # First half of the outputs are locations, second half raw scales.
      loc, raw_scale = jnp.split(raw_predictions, 2, -1)
      scale = jax.nn.softplus(raw_scale) + self.scale_eps
      return unpack_fn(loc), unpack_fn(scale)
    elif self.error_model == 'plugin':
      loc = raw_predictions
      return unpack_fn(loc), None
    else:
      raise ValueError(f'unexpected error_model: {self.error_model}')

  def predict(self, parameters, covariates, observations):
    """Predicts a distribution over `observations` based on `covariates`.

    Args:
      parameters: parameters of the statistical model.
      covariates: array representing covariates for each location.
      observations: structure of observations to be predicted.

    Returns:
      pytree of probability distributions over `observations` given the
      `parameters` of the statistical model and `covariates`.
    """
    loc, scale = self.get_loc_scale(parameters, covariates, observations)
    return tree_util.tree_multimap(
        lambda l, s: tfd.Normal(loc=l, scale=s),
        loc, scale)

  def linear_coefficients(self, parameters):
    """Extracts the (kernel, bias) of the model's single Dense layer.

    Only meaningful when `predict_module` is a (Plain)LinearModule; picks
    the first parameter sub-tree whose name contains 'Dense'. Under the
    'full' error model only the location half of the outputs is returned.
    """
    dense_name = [x for x in parameters.keys() if 'Dense' in x][0]
    kernel = parameters[dense_name]['kernel']
    bias = parameters[dense_name]['bias']
    if self.error_model == 'full':
      kernel, _ = jnp.split(kernel, 2, -1)
      bias, _ = jnp.split(bias, 2, -1)
    return kernel, bias
|
HopkinsIDD/EpiForecastStatMech
|
epi_forecast_stat_mech/statistical_models/network_models.py
|
Python
|
apache-2.0
| 6,853
|
[
"Gaussian"
] |
24e46c9df70d543f3bfef1128c4d8287f2835c2720ff6aa9a5aa89e24ba93452
|
#!/usr/bin/env python
import glob, os, os.path, shutil, socket, struct, tarfile, stat
import numpy, sys, presto, time, sigproc, sifting
import psr_utils as pu
# Site configuration: where this pipeline runs and where results land.
institution = "NRAOCV"
base_tmp_dir = "/dev/shm/"   # tmpfs scratch space for per-job work files
base_output_dir = "/home/sransom/results/GBT/drift"
#-------------------------------------------------------------------
# Tunable parameters for searching and folding
# (you probably don't need to tune any of them)
orig_N = 1728000 # Number of samples to analyze at a time (~141 sec)
raw_N = 1900000 # Number of samples to step through .fits files
overlap_factor = 0.5 # Overlap each orig_N samples by this fraction
rfifind_chunk_time = 25600 * 0.00008192 # ~2.1 sec
singlepulse_threshold = 5.0 # threshold SNR for candidate determination
singlepulse_plot_SNR = 5.5 # threshold SNR for singlepulse plot
singlepulse_maxwidth = 0.1 # max pulse width in seconds
to_prepfold_sigma = 6.0 # incoherent sum significance to fold candidates
max_lo_cands_to_fold = 20 # Never fold more than this many lo-accel candidates
max_hi_cands_to_fold = 10 # Never fold more than this many hi-accel candidates
numhits_to_fold = 2 # Number of DMs with a detection needed to fold
low_DM_cutoff = 1.0 # Lowest DM to consider as a "real" pulsar
# Low-acceleration (zmax=0) search parameters.
lo_accel_numharm = 16 # max harmonics
lo_accel_sigma = 2.0 # threshold gaussian significance
lo_accel_zmax = 0 # bins
lo_accel_flo = 2.0 # Hz
# High-acceleration (zmax=50) search parameters.
hi_accel_numharm = 8 # max harmonics
hi_accel_sigma = 3.0 # threshold gaussian significance
hi_accel_zmax = 50 # bins
hi_accel_flo = 1.0 # Hz
low_T_to_search = 50.0 # sec
# Sifting specific parameters (don't touch without good reason!)
sifting.sigma_threshold = to_prepfold_sigma-1.0 # incoherent power threshold (sigma)
sifting.c_pow_threshold = 100.0 # coherent power threshold
sifting.r_err = 1.1 # Fourier bin tolerence for candidate equivalence
sifting.short_period = 0.0005 # Shortest period candidates to consider (s)
sifting.long_period = 15.0 # Longest period candidates to consider (s)
sifting.harm_pow_cutoff = 8.0 # Power required in at least one harmonic
def get_baryv(ra, dec, mjd, T, obs="GB"):
    """Return the mean barycentric velocity toward (ra, dec) for an
    observation of length T seconds starting at MJD `mjd`, as seen from
    observatory code `obs`.

    ra/dec are sexagesimal strings ('hh:mm:ss.ssss' / 'dd:mm:ss.ssss').
    """
    sample_mjds = pu.span(mjd, mjd + T / 86400.0, 100)
    npts = len(sample_mjds)
    bary_mjds = numpy.zeros(npts, dtype=numpy.float64)
    velocities = numpy.zeros(npts, dtype=numpy.float64)
    # presto fills bary_mjds/velocities in place using the DE200 ephemeris.
    presto.barycenter(sample_mjds, bary_mjds, velocities, npts,
                      ra, dec, obs, "DE200")
    return velocities.mean()
def find_masked_fraction(obs):
    """Return the fraction (0.0-1.0) of the data that rfifind recommended
    masking, parsed from the <basefilenm>_rfifind.out report.

    If the report lacks the expected line, returns 100.0 -- the original
    sentinel meaning "treat everything as masked".
    """
    report_name = obs.basefilenm + "_rfifind.out"
    for report_line in open(report_name):
        if "Number of bad intervals" in report_line:
            percent_str = report_line.split("(")[1].split("%")[0]
            return float(percent_str) / 100.0
    # If there is a problem reading the file, return 100%
    return 100.0
def timed_execute(cmd, run_cmd=1):
    """Log `cmd` to stdout, execute it via os.system when `run_cmd` is
    truthy, and return the wall-clock seconds the command took.
    """
    sys.stdout.write("\n'" + cmd + "'\n")
    sys.stdout.flush()
    t0 = time.time()
    if run_cmd:
        os.system(cmd)
    return time.time() - t0
def get_folding_command(cand, obs, ddplans):
    """
    get_folding_command(cand, obs, ddplans):
        Return a command for prepfold for folding the subbands using
        an obs_info instance, a list of the ddplans, and a candidate
        instance that describes the observations and searches.
    """
    # Folding rules are based on the facts that we want:
    #   1. Between 24 and 200 bins in the profiles
    #   2. For most candidates, we want to search length = 101 p/pd/DM cubes
    #      (The side of the cube is always 2*M*N+1 where M is the "factor",
    #      either -npfact (for p and pd) or -ndmfact, and N is the number of bins
    #      in the profile).  A search of 101^3 points is pretty fast.
    #   3. For slow pulsars (where N=100 or 200), since we'll have to search
    #      many points, we'll use fewer intervals in time (-npart 30)
    #   4. For the slowest pulsars, in order to avoid RFI, we'll
    #      not search in period-derivative.
    # zmax is parsed off the ACCEL filename (kept as a string for %s use).
    zmax = cand.filename.split("_")[-1]
    outfilenm = obs.basefilenm+"_DM%s_Z%s"%(cand.DMstr, zmax)
    # Find the downsampling factor used for this candidate's DM.  The
    # trailing 2000 acts as a sentinel upper bound so the zip below always
    # matches (assuming DMs never reach 2000 pc cm^-3).
    hidms = [x.lodm for x in ddplans[1:]] + [2000]
    dfacts = [x.downsamp for x in ddplans]
    for hidm, dfact in zip(hidms, dfacts):
        if cand.DM < hidm:
            downsamp = dfact
            break
    if downsamp==1:
        filfile = obs.fil_filenm
    else:
        filfile = obs.basefilenm+"_DS%d.fil"%downsamp
    p = 1.0 / cand.f
    # Choose profile bins (N) and search factors by period regime.
    if (p < 0.002):
        Mp, Mdm, N = 2, 2, 24
        otheropts = "-npart 50 -ndmfact 3"
    elif p < 0.05:
        Mp, Mdm, N = 2, 1, 50
        otheropts = "-npart 40 -pstep 1 -pdstep 2 -dmstep 3"
    elif p < 0.5:
        Mp, Mdm, N = 1, 1, 100
        otheropts = "-npart 30 -pstep 1 -pdstep 2 -dmstep 1"
    else:
        Mp, Mdm, N = 1, 1, 200
        otheropts = "-npart 30 -nopdsearch -pstep 1 -pdstep 2 -dmstep 1"
    return "prepfold -noxwin -accelcand %d -accelfile %s.cand -dm %.2f -o %s %s -n %d -npfact %d -ndmfact %d %s" % \
           (cand.candnum, cand.filename, cand.DM, outfilenm,
            otheropts, N, Mp, Mdm, filfile)
class obs_info:
    """
    class obs_info(fil_filenm)
        A class describing the observation and the analysis.
    """
    def __init__(self, fil_filenm):
        self.fil_filenm = fil_filenm
        self.basefilenm = fil_filenm[:fil_filenm.find(".fil")]
        # Pull observation metadata out of the SIGPROC filterbank header.
        filhdr, hdrlen = sigproc.read_header(fil_filenm)
        self.MJD = filhdr['tstart']
        self.nchans = filhdr['nchans']
        self.ra_rad = sigproc.ra2radians(filhdr['src_raj'])
        self.ra_string = pu.coord_to_string(*pu.rad_to_hms(self.ra_rad))
        self.dec_rad = sigproc.dec2radians(filhdr['src_dej'])
        self.dec_string = pu.coord_to_string(*pu.rad_to_dms(self.dec_rad))
        # "Jhhmm(+/-)ddmm"-style coordinate name.
        self.str_coords = "J"+"".join(self.ra_string.split(":")[:2])
        if self.dec_rad >= 0.0: self.str_coords += "+"
        self.str_coords += "".join(self.dec_string.split(":")[:2])
        self.az = filhdr['az_start']
        self.el = 90.0-filhdr['za_start']
        fillen = os.stat(fil_filenm)[6]   # st_size
        # NOTE(review): relies on Python 2 integer division; nbits < 8
        # would truncate `nbits/8` to 0 -- confirm nbits is always >= 8.
        self.raw_N = (fillen-hdrlen)/(filhdr['nbits']/8)/filhdr['nchans']
        self.dt = filhdr['tsamp']
        self.raw_T = self.raw_N * self.dt
        self.N = orig_N       # samples actually analyzed in one job
        self.T = self.N * self.dt
        # Determine the average barycentric velocity of the observation
        self.baryv = get_baryv(self.ra_string, self.dec_string,
                               self.MJD, self.T, obs="GB")
        # Where to dump all the results
        # Directory structure is under the base_output_directory
        # according to base/MJD/filenmbase/beam
        self.outputdir = os.path.join(base_output_dir,
                                      str(int(self.MJD)),
                                      self.str_coords)
        # Figure out which host we are processing on
        self.hostname = socket.gethostname()
        # The fraction of the data recommended to be masked by rfifind
        self.masked_fraction = 0.0
        # Initialize our timers
        self.rfifind_time = 0.0
        self.downsample_time = 0.0
        self.dedispersing_time = 0.0
        self.FFT_time = 0.0
        self.lo_accelsearch_time = 0.0
        self.hi_accelsearch_time = 0.0
        self.singlepulse_time = 0.0
        self.sifting_time = 0.0
        self.folding_time = 0.0
        self.total_time = 0.0
        # Inialize some candidate counters
        self.num_sifted_cands = 0
        self.num_folded_cands = 0
        self.num_single_cands = 0

    def write_report(self, filenm):
        # Write a human-readable timing/summary report for this job.
        report_file = open(filenm, "w")
        report_file.write("---------------------------------------------------------\n")
        report_file.write("%s was processed on %s\n"%(self.fil_filenm, self.hostname))
        report_file.write("Ending UTC time: %s\n"%(time.asctime(time.gmtime())))
        report_file.write("Total wall time: %.1f s (%.2f hrs)\n"%\
                          (self.total_time, self.total_time/3600.0))
        report_file.write("Fraction of data masked: %.2f%%\n"%\
                          (self.masked_fraction*100.0))
        report_file.write("---------------------------------------------------------\n")
        report_file.write(" rfifind time = %7.1f sec (%5.2f%%)\n"%\
                          (self.rfifind_time, self.rfifind_time/self.total_time*100.0))
        report_file.write(" dedispersing time = %7.1f sec (%5.2f%%)\n"%\
                          (self.dedispersing_time, self.dedispersing_time/self.total_time*100.0))
        report_file.write(" single-pulse time = %7.1f sec (%5.2f%%)\n"%\
                          (self.singlepulse_time, self.singlepulse_time/self.total_time*100.0))
        report_file.write(" FFT time = %7.1f sec (%5.2f%%)\n"%\
                          (self.FFT_time, self.FFT_time/self.total_time*100.0))
        report_file.write(" lo-accelsearch time = %7.1f sec (%5.2f%%)\n"%\
                          (self.lo_accelsearch_time, self.lo_accelsearch_time/self.total_time*100.0))
        report_file.write(" hi-accelsearch time = %7.1f sec (%5.2f%%)\n"%\
                          (self.hi_accelsearch_time, self.hi_accelsearch_time/self.total_time*100.0))
        report_file.write(" sifting time = %7.1f sec (%5.2f%%)\n"%\
                          (self.sifting_time, self.sifting_time/self.total_time*100.0))
        report_file.write(" folding time = %7.1f sec (%5.2f%%)\n"%\
                          (self.folding_time, self.folding_time/self.total_time*100.0))
        report_file.write("---------------------------------------------------------\n")
        report_file.close()
class dedisp_plan:
    """
    class dedisp_plan(lodm, dmstep, dmsperpass, numpasses, numsub, downsamp)
        A class describing a de-dispersion plan for prepsubband in detail.
    """
    def __init__(self, lodm, dmstep, dmsperpass, numpasses, numsub, downsamp):
        self.lodm = float(lodm)
        self.dmstep = float(dmstep)
        self.dmsperpass = int(dmsperpass)
        self.numpasses = int(numpasses)
        self.numsub = int(numsub)
        self.downsamp = int(downsamp)
        # DM range covered by one prepsubband pass.
        self.sub_dmstep = self.dmsperpass * self.dmstep
        # DM values are kept as "%.2f" strings to compare with filenames.
        self.dmlist = []
        self.subdmlist = []
        for pass_num in range(self.numpasses):
            # Nominal subband DM sits at the middle of this pass's range.
            self.subdmlist.append("%.2f" % (self.lodm + (pass_num + 0.5) * self.sub_dmstep))
            pass_lodm = self.lodm + pass_num * self.sub_dmstep
            self.dmlist.append(["%.2f" % dm for dm in
                                numpy.arange(self.dmsperpass) * self.dmstep + pass_lodm])
def main(fil_filenm, workdir, ddplans):
# Change to the specified working directory
os.chdir(workdir)
# Get information on the observation and the job
job = obs_info(fil_filenm)
if job.raw_T < low_T_to_search:
print "The observation is too short (%.2f s) to search."%job.raw_T
sys.exit()
job.total_time = time.time()
ddplans = ddplans[job.nchans]
# Use whatever .zaplist is found in the current directory
default_zaplist = glob.glob("*.zaplist")[0]
# Make sure the output directory (and parent directories) exist
try:
os.makedirs(job.outputdir)
os.chmod(job.outputdir, stat.S_IRWXU | stat.S_IRWXG | S_IROTH | S_IXOTH)
except: pass
# Make sure the tmp directory (in a tmpfs mount) exists
tmpdir = os.path.join(base_tmp_dir, job.basefilenm)
try:
os.makedirs(tmpdir)
except: pass
print "\nBeginning GBT350 driftscan search of '%s'"%job.fil_filenm
print "UTC time is: %s"%(time.asctime(time.gmtime()))
# rfifind the filterbank file
cmd = "rfifind -time %.17g -o %s %s > %s_rfifind.out"%\
(rfifind_chunk_time, job.basefilenm,
job.fil_filenm, job.basefilenm)
job.rfifind_time += timed_execute(cmd)
maskfilenm = job.basefilenm + "_rfifind.mask"
# Find the fraction that was suggested to be masked
# Note: Should we stop processing if the fraction is
# above some large value? Maybe 30%?
job.masked_fraction = find_masked_fraction(job)
# Iterate over the stages of the overall de-dispersion plan
dmstrs = []
for ddplan in ddplans:
# Make a downsampled filterbank file
if ddplan.downsamp > 1:
cmd = "downsample_filterbank.py %d %s"%(ddplan.downsamp, job.fil_filenm)
job.downsample_time += timed_execute(cmd)
fil_filenm = job.fil_filenm[:job.fil_filenm.find(".fil")] + \
"_DS%d.fil"%ddplan.downsamp
else:
fil_filenm = job.fil_filenm
# Iterate over the individual passes through the .fil file
for passnum in range(ddplan.numpasses):
subbasenm = "%s_DM%s"%(job.basefilenm, ddplan.subdmlist[passnum])
# Now de-disperse
cmd = "prepsubband -mask %s -lodm %.2f -dmstep %.2f -nsub %d -numdms %d -numout %d -o %s/%s %s"%\
(maskfilenm, ddplan.lodm+passnum*ddplan.sub_dmstep, ddplan.dmstep,
ddplan.numsub, ddplan.dmsperpass, job.N/ddplan.downsamp,
tmpdir, job.basefilenm, fil_filenm)
job.dedispersing_time += timed_execute(cmd)
# Iterate over all the new DMs
for dmstr in ddplan.dmlist[passnum]:
dmstrs.append(dmstr)
basenm = os.path.join(tmpdir, job.basefilenm+"_DM"+dmstr)
datnm = basenm+".dat"
fftnm = basenm+".fft"
infnm = basenm+".inf"
# Do the single-pulse search
cmd = "single_pulse_search.py -p -m %f -t %f %s"%\
(singlepulse_maxwidth, singlepulse_threshold, datnm)
job.singlepulse_time += timed_execute(cmd)
try:
shutil.move(basenm+".singlepulse", workdir)
except: pass
# FFT, zap, and de-redden
cmd = "realfft %s"%datnm
job.FFT_time += timed_execute(cmd)
cmd = "zapbirds -zap -zapfile %s -baryv %.6g %s"%\
(default_zaplist, job.baryv, fftnm)
job.FFT_time += timed_execute(cmd)
cmd = "rednoise %s"%fftnm
job.FFT_time += timed_execute(cmd)
try:
os.rename(basenm+"_red.fft", fftnm)
except: pass
# Do the low-acceleration search
cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s"%\
(lo_accel_numharm, lo_accel_sigma, lo_accel_zmax, lo_accel_flo, fftnm)
job.lo_accelsearch_time += timed_execute(cmd)
try:
os.remove(basenm+"_ACCEL_%d.txtcand"%lo_accel_zmax)
except: pass
try: # This prevents errors if there are no cand files to copy
shutil.move(basenm+"_ACCEL_%d.cand"%lo_accel_zmax, workdir)
shutil.move(basenm+"_ACCEL_%d"%lo_accel_zmax, workdir)
except: pass
# Do the high-acceleration search
cmd = "accelsearch -numharm %d -sigma %f -zmax %d -flo %f %s"%\
(hi_accel_numharm, hi_accel_sigma, hi_accel_zmax, hi_accel_flo, fftnm)
job.hi_accelsearch_time += timed_execute(cmd)
try:
os.remove(basenm+"_ACCEL_%d.txtcand"%hi_accel_zmax)
except: pass
try: # This prevents errors if there are no cand files to copy
shutil.move(basenm+"_ACCEL_%d.cand"%hi_accel_zmax, workdir)
shutil.move(basenm+"_ACCEL_%d"%hi_accel_zmax, workdir)
except: pass
# Move the .inf files
try:
shutil.move(infnm, workdir)
except: pass
# Remove the .dat and .fft files
try:
os.remove(datnm)
except: pass
try:
os.remove(fftnm)
except: pass
# Make the single-pulse plots
basedmb = job.basefilenm+"_DM"
basedme = ".singlepulse "
# The following will make plots for DM ranges:
# 0-30, 20-110, 100-310, 300-1000+
dmglobs = [basedmb+"[0-9].[0-9][0-9]"+basedme +
basedmb+"[012][0-9].[0-9][0-9]"+basedme,
basedmb+"[2-9][0-9].[0-9][0-9]"+basedme +
basedmb+"10[0-9].[0-9][0-9]"+basedme,
basedmb+"[12][0-9][0-9].[0-9][0-9]"+basedme +
basedmb+"30[0-9].[0-9][0-9]"+basedme,
basedmb+"[3-9][0-9][0-9].[0-9][0-9]"+basedme +
basedmb+"1[0-9][0-9][0-9].[0-9][0-9]"+basedme]
dmrangestrs = ["0-30", "20-110", "100-310", "300-1000+"]
psname = job.basefilenm+"_singlepulse.ps"
for dmglob, dmrangestr in zip(dmglobs, dmrangestrs):
cmd = 'single_pulse_search.py -t %f -g "%s"' % \
(singlepulse_plot_SNR, dmglob)
job.singlepulse_time += timed_execute(cmd)
try:
os.rename(psname,
job.basefilenm+"_DMs%s_singlepulse.ps"%dmrangestr)
except: pass
# Sift through the candidates to choose the best to fold
job.sifting_time = time.time()
lo_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%lo_accel_zmax))
if len(lo_accel_cands):
lo_accel_cands = sifting.remove_duplicate_candidates(lo_accel_cands)
if len(lo_accel_cands):
lo_accel_cands = sifting.remove_DM_problems(lo_accel_cands, numhits_to_fold,
dmstrs, low_DM_cutoff)
if len(lo_accel_cands):
lo_accel_cands.sort(sifting.cmp_sigma)
sifting.write_candlist(lo_accel_cands,
job.basefilenm+".accelcands_Z%d"%lo_accel_zmax)
hi_accel_cands = sifting.read_candidates(glob.glob("*ACCEL_%d"%hi_accel_zmax))
if len(hi_accel_cands):
hi_accel_cands = sifting.remove_duplicate_candidates(hi_accel_cands)
if len(hi_accel_cands):
hi_accel_cands = sifting.remove_DM_problems(hi_accel_cands, numhits_to_fold,
dmstrs, low_DM_cutoff)
if len(hi_accel_cands):
hi_accel_cands.sort(sifting.cmp_sigma)
sifting.write_candlist(hi_accel_cands,
job.basefilenm+".accelcands_Z%d"%hi_accel_zmax)
try:
cmd = "mv *.accelcands* "+job.outputdir
os.system(cmd)
except: pass
job.sifting_time = time.time() - job.sifting_time
# Fold the best candidates
cands_folded = 0
for cand in lo_accel_cands:
if cands_folded == max_lo_cands_to_fold:
break
elif cand.sigma > to_prepfold_sigma:
job.folding_time += timed_execute(get_folding_command(cand, job, ddplans))
cands_folded += 1
cands_folded = 0
for cand in hi_accel_cands:
if cands_folded == max_hi_cands_to_fold:
break
elif cand.sigma > to_prepfold_sigma:
job.folding_time += timed_execute(get_folding_command(cand, job, ddplans))
cands_folded += 1
# Remove the bestprof files
bpfiles = glob.glob("*.pfd.bestprof")
for bpfile in bpfiles:
os.remove(bpfile)
# Now step through the .ps files and convert them to .png and gzip them
psfiles = glob.glob("*.ps")
for psfile in psfiles:
if "singlepulse" in psfile:
# For some reason the singlepulse files don't transform nicely...
epsfile = psfile.replace(".ps", ".eps")
os.system("eps2eps "+psfile+" "+epsfile)
os.system("pstoimg -density 100 -crop a "+epsfile)
try:
os.remove(epsfile)
except: pass
else:
os.system("pstoimg -density 100 -flip cw "+psfile)
os.system("gzip "+psfile)
# Tar up the results files
tar_suffixes = ["_ACCEL_%d.tgz"%lo_accel_zmax,
"_ACCEL_%d.tgz"%hi_accel_zmax,
"_ACCEL_%d.cand.tgz"%lo_accel_zmax,
"_ACCEL_%d.cand.tgz"%hi_accel_zmax,
"_singlepulse.tgz",
"_inf.tgz",
"_pfd.tgz"]
tar_globs = ["*_ACCEL_%d"%lo_accel_zmax,
"*_ACCEL_%d"%hi_accel_zmax,
"*_ACCEL_%d.cand"%lo_accel_zmax,
"*_ACCEL_%d.cand"%hi_accel_zmax,
"*.singlepulse",
"*_DM[0-9]*.inf",
"*.pfd"]
for (tar_suffix, tar_glob) in zip(tar_suffixes, tar_globs):
tf = tarfile.open(job.basefilenm+tar_suffix, "w:gz")
for infile in glob.glob(tar_glob):
tf.add(infile)
os.remove(infile)
tf.close()
# Remove all the downsampled .fil files
filfiles = glob.glob("*_DS?.fil") + glob.glob("*_DS??.fil")
for filfile in filfiles:
os.remove(filfile)
# Remove the tmp directory (in a tmpfs mount)
try:
os.rmdir(tmpdir)
except: pass
# And finish up
job.total_time = time.time() - job.total_time
print "\nFinished"
print "UTC time is: %s"%(time.asctime(time.gmtime()))
# Write the job report
job.write_report(job.basefilenm+".report")
job.write_report(os.path.join(job.outputdir, job.basefilenm+".report"))
# Move all the important stuff to the output directory
cmd = "mv *rfifind.[bimors]* *.tgz *.ps.gz *.png *.report "+\
job.outputdir
os.system(cmd)
if __name__ == "__main__":
    # NOTE(review): Python 2 script (print statements below); `dedisp_plan`,
    # `main` and `sys` are defined/imported earlier in this file.
    # Create our de-dispersion plans
    # Each dedisp_plan call appears to take
    # (lodm, dmstep, dms/call, #calls, #subs, downsamp) per the column
    # headers below -- TODO confirm against the dedisp_plan definition.
    # Exactly one of the following (0)/(1) branches is enabled by hand.
    ddplans = {1024:[], 2048:[]}
    if (0):
        # The following are the near-optimal values for 1024 and 2048 lags.
        # They keeps the total dispersive smearing (i.e.
        # not counting scattering) <1 ms up to a DM of ~100 pc cm^-3 for 1024-lag
        # data and ~200 pc cm^-3 for 2048-lag data.
        # For 1024 chan data: lodm dmstep dms/call #calls #subs downsamp
        ddplans[1024].append(dedisp_plan( 0.0, 0.02, 20, 91, 32, 1))
        ddplans[1024].append(dedisp_plan( 36.4, 0.03, 24, 30, 32, 2))
        ddplans[1024].append(dedisp_plan( 58.0, 0.05, 24, 35, 32, 4))
        ddplans[1024].append(dedisp_plan( 100.0, 0.10, 24, 40, 32, 8))
        ddplans[1024].append(dedisp_plan( 196.0, 0.30, 22, 45, 32, 16))
        ddplans[1024].append(dedisp_plan( 493.0, 0.50, 24, 30, 32, 32))
        ddplans[1024].append(dedisp_plan( 853.0, 1.00, 24, 7, 32, 64))
        # For 2048 chan data: lodm dmstep dms/call #calls #subs downsamp
        ddplans[2048].append(dedisp_plan( 0.0, 0.02, 20, 177, 32, 1))
        ddplans[2048].append(dedisp_plan( 70.8, 0.03, 24, 60, 32, 2))
        ddplans[2048].append(dedisp_plan( 114.0, 0.05, 24, 65, 32, 4))
        ddplans[2048].append(dedisp_plan( 192.0, 0.10, 24, 80, 32, 8))
        ddplans[2048].append(dedisp_plan( 384.0, 0.30, 22, 80, 32, 16))
        ddplans[2048].append(dedisp_plan( 912.0, 0.50, 24, 8, 32, 32))
    elif (0):
        #
        # If there is <=1GB of RAM per node, the following are preferred
        #
        # DDplan.py -f 350.0 -b 50.0 -n 1024 -t 0.00008192 -s 64 -r 0.2
        # For 1024 chan data: lodm dmstep dms/call #calls #subs downsamp
        ddplans[1024].append(dedisp_plan( 0.0, 0.03, 50, 37, 64, 2))
        ddplans[1024].append(dedisp_plan( 55.5, 0.05, 50, 17, 64, 4))
        ddplans[1024].append(dedisp_plan( 98.0, 0.10, 50, 19, 64, 8))
        ddplans[1024].append(dedisp_plan( 193.0, 0.20, 50, 19, 64, 16))
        ddplans[1024].append(dedisp_plan( 383.0, 0.50, 50, 19, 64, 32))
        ddplans[1024].append(dedisp_plan( 858.0, 1.00, 50, 3, 64, 64))
        # DDplan.py -f 350.0 -b 50.0 -n 2048 -t 0.00008192 -s 64 -r 0.2
        # For 2048 chan data: lodm dmstep dms/call #calls #subs downsamp
        ddplans[2048].append(dedisp_plan( 0.0, 0.03, 50, 74, 64, 2))
        ddplans[2048].append(dedisp_plan( 111.0, 0.05, 50, 33, 64, 4))
        ddplans[2048].append(dedisp_plan( 193.5, 0.10, 50, 38, 64, 8))
        ddplans[2048].append(dedisp_plan( 383.5, 0.20, 50, 38, 64, 16))
        ddplans[2048].append(dedisp_plan( 763.5, 0.50, 50, 10, 64, 32))
    elif (1):
        # This is the branch currently enabled.
        #
        # If there is 2GB or more RAM per node, the following are probably faster
        #
        # DDplan.py -f 350.0 -b 50.0 -n 1024 -t 0.00008192 -s 128 -r 0.2
        # For 1024 chan data: lodm dmstep dms/call #calls #subs downsamp
        ddplans[1024].append(dedisp_plan( 0.0, 0.03, 100, 19, 128, 2))
        ddplans[1024].append(dedisp_plan( 57.0, 0.05, 100, 8, 128, 4))
        ddplans[1024].append(dedisp_plan( 97.0, 0.10, 100, 10, 128, 8))
        ddplans[1024].append(dedisp_plan( 197.0, 0.20, 100, 10, 128, 16))
        ddplans[1024].append(dedisp_plan( 397.0, 0.50, 100, 10, 128, 32))
        ddplans[1024].append(dedisp_plan( 897.0, 1.00, 100, 2, 128, 64))
        # DDplan.py -f 350.0 -b 50.0 -n 2048 -t 0.00008192 -s 128 -r 0.2
        # For 2048 chan data: lodm dmstep dms/call #calls #subs downsamp
        ddplans[2048].append(dedisp_plan( 0.0, 0.03, 100, 37, 128, 2))
        ddplans[2048].append(dedisp_plan( 111.0, 0.05, 100, 17, 128, 4))
        ddplans[2048].append(dedisp_plan( 196.0, 0.10, 100, 19, 128, 8))
        ddplans[2048].append(dedisp_plan( 386.0, 0.20, 100, 19, 128, 16))
        ddplans[2048].append(dedisp_plan( 766.0, 0.50, 100, 5, 128, 32))
    elif (0):
        #
        # This is for "quick" processing
        #
        # DDplan.py -f 350.0 -b 50.0 -n 1024 -t 0.00008192 -s 128 -r 1.5
        # For 1024 chan data: lodm dmstep dms/call #calls #subs downsamp
        ddplans[1024].append(dedisp_plan( 0.0, 0.20, 100, 20, 128, 16))
        ddplans[1024].append(dedisp_plan( 400.0, 0.50, 100, 10, 128, 32))
        ddplans[1024].append(dedisp_plan( 900.0, 1.00, 100, 2, 128, 64))
        # DDplan.py -f 350.0 -b 50.0 -n 2048 -t 0.00008192 -s 128 -r 1.5
        # For 2048 chan data: lodm dmstep dms/call #calls #subs downsamp
        ddplans[2048].append(dedisp_plan( 0.0, 0.20, 100, 39, 128, 16))
        ddplans[2048].append(dedisp_plan( 780.0, 0.50, 100, 5, 128, 32))
    # Arguments to the search program are
    # sys.argv[1] = filterbank file name
    # sys.argv[2] = working directory name
    if len(sys.argv) >= 3:
        workdir = sys.argv[2]
        fil_filenm = sys.argv[1]
        main(fil_filenm, workdir, ddplans)
    elif len(sys.argv) == 2:
        # No workdir given: run in the current directory.
        fil_filenm = sys.argv[1]
        main(fil_filenm, '.', ddplans)
    else:
        # Usage message (Python 2 print statement).
        print "GBT350_drift_search.py fil_filenm [workdir]"
|
pscholz/presto
|
bin/GBT350_drift_search.py
|
Python
|
gpl-2.0
| 28,291
|
[
"Gaussian"
] |
adf64eb04a3623cad961d23289acbbcccd0a9396b380965df74b7e3691f71a22
|
import torch
from src.python.preprocess2 import *
from src.python.geneontology import *
from src.python.baselines import *
from src.python.dingo_utils import *
from src.python.dingo_net2 import *
from tqdm import tqdm
import itertools
# def get_metric(metric):
# cache = dict()
#
# def do_metric(seq1, seq2):
# key1 = (seq1.uid, seq2.uid)
# key2 = (seq2.uid, seq1.uid)
# if key1 in cache:
# val = cache[key1]
# elif key2 in cache:
# val = cache[key2]
# else:
# val = metric(seq1, seq2)
# cache[key1] = val
# cache[key2] = val
# return val
#
# return do_metric
def get_f(metric, agg):
    """Build a scoring function.

    The returned callable computes metric(seq, other) against every sequence
    in `sequences` and reduces the list of scores with `agg` (e.g. np.mean).
    """
    def score(seq, sequences):
        pairwise = [metric(seq, other) for other in sequences]
        return agg(pairwise)
    return score
def is_seq_in_node(seq, node, use_prior=True):
    """Score how likely `seq` belongs to `node`.

    Computes prior * P(f | in node) / P(f | background), a likelihood-ratio
    style score. Assumes seq.f[node] and the node's f_dist_in / f_dist_out
    distributions have been precomputed.
    """
    f_value = seq.f[node]
    weight = node.prior if use_prior else 1.0
    likelihood_in = node.f_dist_in(f_value)
    likelihood_out = node.f_dist_out(f_value)
    return weight * (likelihood_in / likelihood_out)
def compute_f_inside(f, seq, node, submitted, pbar):
    # Recursively submit f(seq, node.sequences - {seq}) for `node` and every
    # descendant that contains `seq`, bottom-up: children's futures are
    # resolved (and stored in seq.f[child]) before this node's task is
    # submitted. `submitted` maps node -> future and memoizes across calls.
    # NOTE(review): `E` (an executor with .submit) and `verbose` are module
    # globals brought in via the star imports above -- confirm their origin.
    # Returns the (possibly pre-existing) future for `node`; the caller is
    # expected to .result() it.
    if not node.is_leaf():
        for child in node.children:
            # "inside" variant: only descend into children that contain seq.
            if seq not in child.sequences:
                continue
            task = compute_f_inside(f, seq, child, submitted, pbar)
            if verbose: print("wait %s" % child)
            assert child in submitted
            val = task.result()
            seq.f[child] = val # wait for children's results
            if verbose: print("finished %s f_val=%.2f" % (child, val))
    try:
        task = submitted[node]
    except KeyError:
        # seq must be in this node for the "inside" computation to make sense.
        assert seq in node.sequences
        task = E.submit(f, seq, node.sequences - {seq})
        assert node not in submitted
        submitted[node] = task
        if pbar: pbar.update(1)
    return task
def compute_f_outside(f, seq, node, submitted, pbar):
    # Same bottom-up scheme as compute_f_inside, but descends into *every*
    # child (seq need not be a member) and does not require
    # seq in node.sequences -- hence "outside". Results are cached in
    # `submitted` (node -> future) and recorded in seq.f[child].
    # NOTE(review): relies on module globals `E` (executor) and `verbose`
    # from the star imports above -- confirm.
    if not node.is_leaf():
        for child in node.children:
            task = compute_f_outside(f, seq, child, submitted, pbar)
            if verbose: print("wait %s" % child)
            assert child in submitted
            val = task.result()
            seq.f[child] = val # wait for children's results
            if verbose: print("finished %s f_val=%.2f" % (child, val))
    try:
        task = submitted[node]
    except KeyError:
        # node.sequences - {seq} is a no-op when seq is not a member.
        task = E.submit(f, seq, node.sequences - {seq})
        assert node not in submitted
        submitted[node] = task
        if pbar: pbar.update(1)
    return task
def predict_seq(graph, seq, f):
    """Score `seq` against every node in `graph`.

    First populates seq.f for all nodes via the "outside" traversal, then
    returns a dict mapping each node's GO term to its is_seq_in_node score.
    """
    run_compute_f(f, [seq], graph, compute_f_outside)
    return {node.go: is_seq_in_node(seq, node) for node in graph}
def estimate_distributions(graph, precomputed_sequences, attr_name):
    """Fit a per-node distribution of f-values and attach it to each node.

    For every node, collects seq.f[node] over `precomputed_sequences` and
    stores get_distribution(dataset) on the *node* under `attr_name`
    (e.g. 'f_dist_in' / 'f_dist_out', later read by is_seq_in_node).

    Bug fix: the original called setattr(graph, attr_name, ...), which
    repeatedly overwrote a single attribute on the graph object instead of
    giving each node its own distribution.
    """
    for node in graph:
        dataset = [seq.f[node] for seq in precomputed_sequences]
        setattr(node, attr_name, get_distribution(dataset))
def get_blast(blast, evalue):
    """Build a pairwise scoring function backed by BLAST.

    The returned callable first checks the cache of known hits; if none
    exist it runs blastp with the configured e-value cutoff. Either way the
    bitscore of the best (lowest e-value) hit is returned.
    """
    def do_blast(query, subject):
        cached_hits = blast.get_hits(query, subject)
        if len(cached_hits) > 0:
            # Same selection as the original np.argmin: first hit with the
            # minimal e-value.
            best = min(cached_hits, key=lambda h: h.evalue)
            return best.bitscore
        fresh_hit = blast.blastp(query, subject, evalue=evalue)
        return fresh_hit.bitscore
    return do_blast
def cleanup():
    """Delete leftover BLAST scratch files (*.seq, *.out) from tmp_dir."""
    for filename in os.listdir(tmp_dir):
        if filename.endswith((".seq", ".out")):
            os.remove(os.path.join(tmp_dir, filename))
def run_compute_f(f, seqs, g, method):
    """Drive `method` over every sequence in `seqs`.

    `method` is compute_f_inside or compute_f_outside; for each sequence a
    fresh submitted-cache is used, and we block on the root task (which
    transitively waits on all descendants) before moving on.
    """
    total = len(seqs)
    for idx, seq in enumerate(seqs, start=1):
        desc = "[%s] (%d/%d) nodes processed" % (seq.uid, idx, total)
        progress = tqdm(range(len(g)), desc=desc)
        root_task = method(f, seq, g.root, {}, pbar=progress)
        root_task.result() # wait for all other tasks to finish
        progress.close()
def propagate(leaf, include_root=False):
    """Return `leaf` together with all of its ancestors.

    Walks the .fathers links upward; the ontology root is excluded unless
    include_root is True.
    """
    seen = {leaf}
    stack = [leaf]
    while stack:
        current = stack.pop()
        for parent in current.fathers:
            if not include_root and parent.is_root():
                continue
            if parent not in seen:
                seen.add(parent)
                stack.append(parent)
    return seen
def get_leaves(node_set):
    """Reduce `node_set` to its leaf-most members.

    A member is kept if it is a true leaf, or if none of its children appear
    among the ancestors propagated from the whole set (i.e. nothing below it
    in node_set subsumes it).
    """
    ancestors = set()
    for member in node_set:
        ancestors |= propagate(member)
    leaf_set = set()
    for member in node_set:
        # Short-circuit: only non-leaves are asked for .children.
        if member.is_leaf() or not (ancestors & set(member.children)):
            leaf_set.add(member)
    return leaf_set
def predict_by_similarity(target_seq, nodes, metric, pbar, agg=np.mean):
    """Score `target_seq` against each node by aggregated vector similarity.

    For every node holding a vector for target_seq, submits
    metric(target_vector, node_member_vectors) to the executor `E`, then
    reduces each result with `agg`. Returns node -> score.
    """
    pending = {}
    for node in nodes:
        pbar.update(1)
        if target_seq not in node.seq2vec:
            continue
        query_vec = node.seq2vec[target_seq]
        member_vecs = [node.seq2vec[s] for s in node.sequences]
        pending[node] = E.submit(metric, query_vec, member_vecs)
    preds = {}
    for node, future in pending.items():
        similarities = future.result()
        assert len(similarities) > 0
        preds[node] = agg(similarities)
    return preds
def predict_by_ks(target_seq, nodes, metric, pbar):
    """Score `target_seq` against each node with a two-sample KS test.

    Compares the similarities of target_seq to the node's members against the
    node's precomputed intra-node similarity dataset; the KS p-value is the
    prediction. Returns node -> p-value.
    """
    pending = {}
    for node in nodes:
        pbar.update(1)
        if target_seq not in node.seq2vec:
            continue
        query_vec = node.seq2vec[target_seq]
        member_vecs = [node.seq2vec[s] for s in node.sequences]
        pending[node] = E.submit(metric, query_vec, member_vecs)
    preds = {}
    for node, future in pending.items():
        similarities = future.result()
        assert len(similarities) > 0
        _, p_value = ks_2samp(node.dataset, similarities)
        preds[node] = p_value
    return preds
def preds_by_attr(hits_per_uid, attr, nb=None):
    """Turn BLAST hits into per-(uid, GO term) scores.

    For each target uid, each GO term's score is the max of `attr` (e.g.
    "pident" or "bitscore") over its significant hits (evalue < 0.001),
    optionally calibrated through the naive-Bayes model `nb`.

    Fix: the inner loop rebound the outer loop variable `hits`
    (``for go, hits in hits.items()``); renamed to `go2hits` to remove the
    shadowing.
    """
    preds = {}
    pbar = tqdm(range(len(hits_per_uid)), desc="sequences processed")
    for uid, go2hits in hits_per_uid.items():
        pbar.update(1)
        preds[uid] = {}
        if len(go2hits) == 0:
            continue
        for go, hits in go2hits.items():
            # The root term is never predicted directly.
            assert go != graph.root.go
            hs = [getattr(h, attr) for h in hits if h.evalue < 0.001]
            if len(hs) == 0:
                continue
            if nb:
                preds[uid][go] = nb.infer(max(hs), graph[go].prior)
            else:
                preds[uid][go] = max(hs)
    pbar.close()
    return preds
def propagate_leaf_predictions(leaf_predictions, choose_max_prob=False):
    """Push leaf probabilities up to every ancestor term.

    Each ancestor collects the probabilities of all leaves below it; they are
    combined either by max (choose_max_prob) or noisy-OR
    (1 - prod(1 - p)). Returns GO term -> probability.
    """
    node2probs = {}
    for leaf, prob in leaf_predictions.items():
        for ancestor in propagate(leaf):
            node2probs.setdefault(ancestor, []).append(prob)
    predictions = {}
    for node, probs in node2probs.items():
        if choose_max_prob:
            predictions[node.go] = max(probs)
        else:
            predictions[node.go] = 1 - np.prod([1 - pr for pr in probs])
    return predictions
def compute_datasets(metric, nodes):
    """Build node.dataset for every node in `nodes`.

    For each sequence in a node, submits metric(seq_vector, other_vectors)
    to the executor `E` (leave-one-out over the node's members) and collects
    all results into node.dataset.

    Fix: the assertion and the progress bar referenced the module-level
    global `leaves` instead of the `nodes` parameter, which broke the
    function for any other argument.
    """
    all_tasks = {}
    for node in nodes:
        for seq in node.sequences:
            vec = node.seq2vec[seq]
            vectors = [node.seq2vec[s] for s in node.sequences - {seq}]
            task = E.submit(metric, vec, vectors)
            all_tasks.setdefault(node, []).append(task)
    # Every node is expected to have at least one sequence.
    assert len(all_tasks) == len(nodes)
    pbar = tqdm(range(len(nodes)), desc="nodes processed")
    for node, tasks in all_tasks.items():
        node.dataset = []
        for task in tasks:
            node.dataset.extend(task.result())
        assert len(node.dataset) > 0
        pbar.update(1)
    pbar.close()
if __name__ == "__main__":
    # End-to-end DIGO pipeline: load data from MongoDB, build/prune the GO
    # graph, run BLAST baselines, then KS-based predictions with DingoNet.
    cleanup()
    from pymongo import MongoClient
    client = MongoClient('mongodb://localhost:27017/')
    db = client['prot2vec']
    asp = 'F'   # molecular function
    onto = get_ontology(asp)
    # Validation window: annotations added between t0 and t1.
    t0 = datetime.datetime(2014, 1, 1, 0, 0)
    t1 = datetime.datetime(2014, 9, 1, 0, 0)
    # t0 = datetime.datetime(2017, 1, 1, 0, 0)
    # t1 = datetime.datetime.utcnow()
    print("Indexing Data...")
    trn_stream, tst_stream = get_training_and_validation_streams(db, t0, t1, asp)
    print("Loading Training Data...")
    uid2seq_trn, uid2go_trn, go2uid_trn = trn_stream.to_dictionaries(propagate=True)
    print("Loading Validation Data...")
    uid2seq_tst, uid2go_tst, _ = tst_stream.to_dictionaries(propagate=True)
    print("Building Graph...")
    graph = Graph(onto, uid2seq_trn, go2uid_trn)
    print("Graph contains %d nodes" % len(graph))
    print("Pruning Graph...")
    # Drop nodes with fewer than 3 sequences.
    deleted_nodes = graph.prune(3)
    print("Pruned %d, Graph contains %d" % (len(deleted_nodes), len(graph)))
    save_object(graph, "Data/dingo_%s_graph" % asp)
    print("Load DingoNet")
    go_embedding_weights = np.asarray([onto.todense(go) for go in onto.classes])
    net = DingoNet(ATTN, 100, 10, go_embedding_weights)
    net = net.cuda()
    ckpth = "/tmp/dingo_0.10420.tar"
    print("=> loading checkpoint '%s'" % ckpth)
    checkpoint = torch.load(ckpth, map_location=lambda storage, loc: storage)
    net.load_state_dict(checkpoint['net'])
    print("Precomputing graph vectors")
    leaves = graph.leaves
    nodes_data = [(seq, leaf) for leaf in leaves for seq in leaf.sequences]
    compute_vectors(nodes_data, net, onto)
    print("Compute K-S datasets")
    compute_datasets(fast_cosine_similarity, leaves)
    limit = None
    evalue = 0.001
    print("Running BLAST evalue=%s..." % evalue)
    targets = [Seq(uid, seq) for uid, seq in uid2seq_tst.items()][:limit]
    db_pth = prepare_blast(uid2seq_trn)
    hits_per_uid = predict_blast_parallel(targets, uid2go_trn, db_pth, evalue)
    predictions_pindent = preds_by_attr(hits_per_uid, "pident")
    save_object(hits_per_uid, "%s/blast_%s_%s_hsp" % (out_dir, evalue, GoAspect(asp)))
    save_object(predictions_pindent, "Data/blast_%s_preds" % (GoAspect(asp),))
    cleanup()
    print("Precomputing target vectors...")
    # NOTE(review): hits_per_uid is keyed by uid strings, so `tgt` here is a
    # uid (despite the name); lookups below use tgt.uid consistently.
    tgtid2nodes = {tgt: [graph[go] for go in terms if go in graph] for tgt, terms in hits_per_uid.items()}
    targets_data = [(tgt, node) for tgt in targets for node in tgtid2nodes[tgt.uid] if node.is_leaf()]
    compute_vectors(targets_data, net, onto)
    print("123 Predict...")
    dingo_predictions = {}
    blast_predictions = {}
    for i, tgt in enumerate(targets):
        msg = "[%d/%d] (%s) leaves processed" % (i, len(targets), tgt.uid)
        candidates = tgtid2nodes[tgt.uid]
        pbar = tqdm(range(len(candidates)), desc=msg)
        leaf_predictions = predict_by_ks(tgt, candidates, fast_cosine_similarity, pbar)
        predictions = propagate_leaf_predictions(leaf_predictions)
        dingo_predictions[tgt.uid] = predictions
        # Per-target F-max for progress display only.
        ths, _, _, f1s = performance({tgt.uid: predictions}, {tgt.uid: uid2go_tst[tgt.uid]})
        j = np.argmax(f1s)
        msg = "[%d/%d] (%s) F_max=%.2f @ tau=%.2f" % (i, len(targets), tgt.uid, f1s[j], ths[j])
        pbar.set_description(msg)
        pbar.close()
    save_object(dingo_predictions, "Data/dingo_%s_preds" % (GoAspect(asp),))
    cleanup()
|
yotamfr/prot2vec
|
src/python/digo2.py
|
Python
|
mit
| 11,505
|
[
"BLAST"
] |
bcd7130d6fc4bd34af3ae1a7ffda132ac797ea5739a78a6bde5eceeec34d6d8b
|
# -*- coding: utf-8 -*-
"""
sphinx.pycode
~~~~~~~~~~~~~
Utilities parsing and analyzing Python code.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from os import path
from sphinx import package_dir
from sphinx.errors import PycodeError
from sphinx.pycode import nodes
from sphinx.pycode.pgen2 import driver, token, tokenize, parse, literals
from sphinx.util import get_module_source, detect_encoding
from sphinx.util.pycompat import next, StringIO, BytesIO, TextIOWrapper
from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc
# Module-level setup (Python 2 code: uses dict.iteritems below).
# load the Python grammar
_grammarfile = path.join(package_dir, 'pycode', 'Grammar.txt')
pygrammar = driver.load_grammar(_grammarfile)
pydriver = driver.Driver(pygrammar, convert=nodes.convert)
# an object with attributes corresponding to token and symbol names
class sym: pass
for k, v in pygrammar.symbol2number.iteritems():
    setattr(sym, k, v)
for k, v in token.tok_name.iteritems():
    setattr(sym, v, k)
# a dict mapping terminal and nonterminal numbers to their names
number2name = pygrammar.number2symbol.copy()
number2name.update(token.tok_name)
# Sentinel leaf used to detect assignment statements in the parse tree.
_eq = nodes.Leaf(token.EQUAL, '=')
class AttrDocVisitor(nodes.NodeVisitor):
    """
    Visitor that collects docstrings for attribute assignments on toplevel and
    in classes (class attributes and attributes set in __init__).
    The docstrings can either be in special '#:' comments before the assignment
    or in a docstring after it.
    """
    def init(self, scope, encoding):
        # Called by the NodeVisitor base with the extra constructor args
        # (number2name, scope, encoding) -- presumably the base stores
        # number2name as self.number2name, used in add_docstring; verify.
        self.scope = scope
        self.in_init = 0
        self.encoding = encoding
        self.namespace = []
        self.collected = {}
        self.tagnumber = 0
        self.tagorder = {}
    def add_tag(self, name):
        # Record the fully dotted name with a monotonically increasing
        # sequence number (preserves source order).
        name = '.'.join(self.namespace + [name])
        self.tagorder[name] = self.tagnumber
        self.tagnumber += 1
    def visit_classdef(self, node):
        """Visit a class."""
        self.add_tag(node[1].value)
        self.namespace.append(node[1].value)
        self.generic_visit(node)
        self.namespace.pop()
    def visit_funcdef(self, node):
        """Visit a function (or method)."""
        # usually, don't descend into functions -- nothing interesting there
        self.add_tag(node[1].value)
        if node[1].value == '__init__':
            # however, collect attributes set in __init__ methods
            self.in_init += 1
            self.generic_visit(node)
            self.in_init -= 1
    def visit_expr_stmt(self, node):
        """Visit an assignment which may have a special comment before (or
        after) it.
        """
        if _eq not in node.children:
            # not an assignment (we don't care for augmented assignments)
            return
        # look *after* the node; there may be a comment prefixing the NEWLINE
        # of the simple_stmt
        parent = node.parent
        idx = parent.children.index(node) + 1
        while idx < len(parent):
            if parent[idx].type == sym.SEMI:
                idx += 1
                continue # skip over semicolon
            if parent[idx].type == sym.NEWLINE:
                prefix = parent[idx].get_prefix()
                if not isinstance(prefix, unicode):
                    prefix = prefix.decode(self.encoding)
                docstring = prepare_commentdoc(prefix)
                if docstring:
                    self.add_docstring(node, docstring)
                    return # don't allow docstrings both before and after
            break
        # now look *before* the node
        pnode = node[0]
        prefix = pnode.get_prefix()
        # if the assignment is the first statement on a new indentation
        # level, its preceding whitespace and comments are not assigned
        # to that token, but the first INDENT or DEDENT token
        while not prefix:
            pnode = pnode.get_prev_leaf()
            if not pnode or pnode.type not in (token.INDENT, token.DEDENT):
                break
            prefix = pnode.get_prefix()
        if not isinstance(prefix, unicode):
            prefix = prefix.decode(self.encoding)
        docstring = prepare_commentdoc(prefix)
        self.add_docstring(node, docstring)
    def visit_simple_stmt(self, node):
        """Visit a docstring statement which may have an assignment before."""
        if node[0].type != token.STRING:
            # not a docstring; but still need to visit children
            return self.generic_visit(node)
        prev = node.get_prev_sibling()
        if not prev:
            return
        if prev.type == sym.simple_stmt and \
               prev[0].type == sym.expr_stmt and _eq in prev[0].children:
            # need to "eval" the string because it's returned in its
            # original form
            docstring = literals.evalString(node[0].value, self.encoding)
            docstring = prepare_docstring(docstring)
            self.add_docstring(prev[0], docstring)
    def add_docstring(self, node, docstring):
        # add an item for each assignment target
        # (targets and '=' signs alternate, hence the step of 2)
        for i in range(0, len(node) - 1, 2):
            target = node[i]
            if self.in_init and self.number2name[target.type] == 'power':
                # maybe an attribute assignment -- check necessary conditions
                if (# node must have two children
                    len(target) != 2 or
                    # first child must be "self"
                    target[0].type != token.NAME or target[0].value != 'self' or
                    # second child must be a "trailer" with two children
                    self.number2name[target[1].type] != 'trailer' or
                    len(target[1]) != 2 or
                    # first child must be a dot, second child a name
                    target[1][0].type != token.DOT or
                    target[1][1].type != token.NAME):
                    continue
                name = target[1][1].value
            elif target.type != token.NAME:
                # don't care about other complex targets
                continue
            else:
                name = target.value
            self.add_tag(name)
            if docstring:
                namespace = '.'.join(self.namespace)
                if namespace.startswith(self.scope):
                    self.collected[namespace, name] = docstring
class ModuleAnalyzer(object):
    # Python 2 code ('except X, err' syntax throughout).
    # cache for analyzer objects -- caches both by module and file name
    cache = {}
    @classmethod
    def for_string(cls, string, modname, srcname='<string>'):
        # Alternate constructor from in-memory source; bytes are decoded
        # lazily in __init__, text is used as-is.
        if isinstance(string, bytes):
            return cls(BytesIO(string), modname, srcname)
        return cls(StringIO(string), modname, srcname, decoded=True)
    @classmethod
    def for_file(cls, filename, modname):
        # Alternate constructor from a file path, memoized in cls.cache.
        if ('file', filename) in cls.cache:
            return cls.cache['file', filename]
        try:
            fileobj = open(filename, 'rb')
        except Exception, err:
            raise PycodeError('error opening %r' % filename, err)
        obj = cls(fileobj, modname, filename)
        cls.cache['file', filename] = obj
        return obj
    @classmethod
    def for_module(cls, modname):
        # Alternate constructor from an importable module name. Failures are
        # cached as PycodeError instances and re-raised on later lookups.
        if ('module', modname) in cls.cache:
            entry = cls.cache['module', modname]
            if isinstance(entry, PycodeError):
                raise entry
            return entry
        try:
            type, source = get_module_source(modname)
            if type == 'string':
                obj = cls.for_string(source, modname)
            else:
                obj = cls.for_file(source, modname)
        except PycodeError, err:
            cls.cache['module', modname] = err
            raise
        cls.cache['module', modname] = obj
        return obj
    def __init__(self, source, modname, srcname, decoded=False):
        # name of the module
        self.modname = modname
        # name of the source file
        self.srcname = srcname
        # file-like object yielding source lines
        self.source = source
        # cache the source code as well
        pos = self.source.tell()
        if not decoded:
            # Detect the PEP 263 coding cookie, then rewind so later reads
            # start from the beginning again.
            self.encoding = detect_encoding(self.source.readline)
            self.source.seek(pos)
            self.code = self.source.read().decode(self.encoding)
            self.source.seek(pos)
            self.source = TextIOWrapper(self.source, self.encoding)
        else:
            self.encoding = None
            self.code = self.source.read()
            self.source.seek(pos)
        # will be filled by tokenize()
        self.tokens = None
        # will be filled by parse()
        self.parsetree = None
        # will be filled by find_attr_docs()
        self.attr_docs = None
        self.tagorder = None
        # will be filled by find_tags()
        self.tags = None
    def tokenize(self):
        """Generate tokens from the source."""
        # Idempotent: the token list is computed once and cached.
        if self.tokens is not None:
            return
        try:
            self.tokens = list(tokenize.generate_tokens(self.source.readline))
        except tokenize.TokenError, err:
            raise PycodeError('tokenizing failed', err)
        self.source.close()
    def parse(self):
        """Parse the generated source tokens."""
        if self.parsetree is not None:
            return
        self.tokenize()
        try:
            self.parsetree = pydriver.parse_tokens(self.tokens)
        except parse.ParseError, err:
            raise PycodeError('parsing failed', err)
    def find_attr_docs(self, scope=''):
        """Find class and module-level attributes and their documentation."""
        if self.attr_docs is not None:
            return self.attr_docs
        self.parse()
        attr_visitor = AttrDocVisitor(number2name, scope, self.encoding)
        attr_visitor.visit(self.parsetree)
        self.attr_docs = attr_visitor.collected
        self.tagorder = attr_visitor.tagorder
        # now that we found everything we could in the tree, throw it away
        # (it takes quite a bit of memory for large modules)
        self.parsetree = None
        return attr_visitor.collected
    def find_tags(self):
        """Find class, function and method definitions and their location."""
        if self.tags is not None:
            return self.tags
        self.tokenize()
        result = {}
        namespace = []
        stack = []
        indent = 0
        defline = False
        expect_indent = False
        def tokeniter(ignore = (token.COMMENT, token.NL)):
            # Filter out comment / non-logical-newline tokens.
            for tokentup in self.tokens:
                if tokentup[0] not in ignore:
                    yield tokentup
        tokeniter = tokeniter()
        for type, tok, spos, epos, line in tokeniter:
            if expect_indent:
                if type != token.INDENT:
                    # no suite -- one-line definition
                    assert stack
                    dtype, fullname, startline, _ = stack.pop()
                    endline = epos[0]
                    namespace.pop()
                    result[fullname] = (dtype, startline, endline)
                expect_indent = False
            if tok in ('def', 'class'):
                name = next(tokeniter)[1]
                namespace.append(name)
                fullname = '.'.join(namespace)
                stack.append((tok, fullname, spos[0], indent))
                defline = True
            elif type == token.INDENT:
                expect_indent = False
                indent += 1
            elif type == token.DEDENT:
                indent -= 1
                # if the stacklevel is the same as it was before the last
                # def/class block, this dedent closes that block
                if stack and indent == stack[-1][3]:
                    dtype, fullname, startline, _ = stack.pop()
                    endline = spos[0]
                    namespace.pop()
                    result[fullname] = (dtype, startline, endline)
            elif type == token.NEWLINE:
                # if this line contained a definition, expect an INDENT
                # to start the suite; if there is no such INDENT
                # it's a one-line definition
                if defline:
                    defline = False
                    expect_indent = True
        self.tags = result
        return result
if __name__ == '__main__':
    # Ad-hoc timing harness for the analyzer (Python 2 print statement below).
    import time, pprint
    x0 = time.time()
    #ma = ModuleAnalyzer.for_file(__file__.rstrip('c'), 'sphinx.builders.html')
    ma = ModuleAnalyzer.for_file('sphinx/environment.py',
                                 'sphinx.environment')
    ma.tokenize()
    x1 = time.time()
    ma.parse()
    x2 = time.time()
    #for (ns, name), doc in ma.find_attr_docs().iteritems():
    #    print '>>', ns, name
    #    print '\n'.join(doc)
    pprint.pprint(ma.find_tags())
    x3 = time.time()
    #print nodes.nice_repr(ma.parsetree, number2name)
    print "tokenizing %.4f, parsing %.4f, finding %.4f" % (x1-x0, x2-x1, x3-x2)
|
waseem18/oh-mainline
|
vendor/packages/sphinx/sphinx/pycode/__init__.py
|
Python
|
agpl-3.0
| 13,014
|
[
"VisIt"
] |
c24756b145bd53162a6135c03d1c27b4f565010c0563b6354b3e8b60b1f28484
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gromacs(CMakePackage):
    """GROMACS (GROningen MAchine for Chemical Simulations) is a molecular
    dynamics package primarily designed for simulations of proteins, lipids
    and nucleic acids. It was originally developed in the Biophysical
    Chemistry department of University of Groningen, and is now maintained
    by contributors in universities and research centers across the world.
    GROMACS is one of the fastest and most popular software packages
    available and can run on CPUs as well as GPUs. It is free, open source
    released under the GNU General Public License. Starting from version 4.6,
    GROMACS is released under the GNU Lesser General Public License.
    """

    homepage = 'http://www.gromacs.org'
    url = 'http://ftp.gromacs.org/gromacs/gromacs-5.1.2.tar.gz'

    version('2016.4', '19c8b5c85f3ec62df79d2249a3c272f8')
    version('2016.3', 'e9e3a41bd123b52fbcc6b32d09f8202b')
    version('5.1.4', 'ba2e34d59b3982603b4935d650c08040')
    version('5.1.2', '614d0be372f1a6f1f36382b7a6fcab98')

    variant('mpi', default=True, description='Activate MPI support')
    variant('shared', default=True,
            description='Enables the build of shared libraries')
    variant(
        'double', default=False,
        description='Produces a double precision version of the executables')
    variant('plumed', default=False, description='Enable PLUMED support')
    variant('cuda', default=False, description='Enable CUDA support')
    variant('build_type', default='RelWithDebInfo',
            description='The build type to build',
            values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel',
                    'Reference', 'RelWithAssert', 'Profile'))

    depends_on('mpi', when='+mpi')
    depends_on('plumed+mpi', when='+plumed+mpi')
    depends_on('plumed~mpi', when='+plumed~mpi')
    depends_on('fftw')
    depends_on('cmake@2.8.8:', type='build')
    depends_on('cuda', when='+cuda')

    def patch(self):
        # PLUMED ships its own patches for specific GROMACS releases;
        # apply them only when the variant is requested.
        if '+plumed' in self.spec:
            self.spec['plumed'].package.apply_patch(self)

    def cmake_args(self):
        """Translate the enabled variants into GROMACS CMake flags."""
        spec = self.spec
        options = []
        if '+mpi' in spec:
            options.append('-DGMX_MPI:BOOL=ON')
        if '+double' in spec:
            options.append('-DGMX_DOUBLE:BOOL=ON')
        if '~shared' in spec:
            options.append('-DBUILD_SHARED_LIBS:BOOL=OFF')
        if '+cuda' in spec:
            options.append('-DGMX_GPU:BOOL=ON')
            options.append('-DCUDA_TOOLKIT_ROOT_DIR:STRING=' +
                           spec['cuda'].prefix)
        return options
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/gromacs/package.py
|
Python
|
lgpl-2.1
| 3,856
|
[
"Gromacs"
] |
6f6ae9d877fe1b59696b66f225fc3fc0c75381f1c799f8fd3b6dd3fe7760fd7a
|
# Animation driver: sweeps every PXDMF fixed dimension sinusoidally and
# re-renders the ParaView scene ~200 times.
# NOTE(review): Python 2 only -- `sync.maxs.keys()[j]` indexes a keys() list,
# which fails on Python 3 dict views.
from paraview.simple import *
import ReaderSync
from math import *
import time
#execfile('/home/fbordeu/projects/PGD/ParaviewPXDMFReader/Python/ReaderSync.py')
sync = ReaderSync.ReaderSync()
# One animated phase per PXDMF dimension, offset evenly around the circle.
nbdims = len(sync.maxs.keys())
offset = 2*pi/nbdims
for i in range(200):
    for j in range(nbdims):
        key = sync.maxs.keys()[j]
        mi = sync.mins[key]
        ma = sync.maxs[key]
        # Map a per-dimension sinusoid (phase j*offset, dimension-dependent
        # frequency) from [-1, 1] into the dimension's [mi, ma] range.
        sync.SetFixedDimension(key,(sin(j*offset+(20./(20+nbdims/2-j))*i/10.)/2+0.5)*(ma-mi)+mi)
    Render()
    time.sleep(.01)
    #name = str(i)
    #WriteImage("/tmp/videoparaview/image"+(5-len(name))*"0" + name+".png")
|
aleygue/PxdmfSuite
|
ParaviewPXDMFReader/PGDTools/ParavieAnim.py
|
Python
|
bsd-3-clause
| 575
|
[
"ParaView"
] |
ad0e039ebcc3c0c237e00af07465bac4fd7297b37365916b6f3ad3f246296c35
|
"""
Course Outline page in Studio.
"""
import datetime
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from .course_page import CoursePage
from .container import ContainerPage
from .utils import set_input_value_and_save, set_input_value, click_css, confirm_prompt
class CourseOutlineItem(object):
    """
    A mixin class for any :class:`PageObject` shown in a course outline.

    Provides name editing, status inspection, staff-lock handling, and
    access to the item's settings modal. Host classes must provide `q`
    (a bok_choy query factory) and usually `locator`/`BODY_SELECTOR`.
    """
    BODY_SELECTOR = None
    EDIT_BUTTON_SELECTOR = '.xblock-field-value-edit'
    NAME_SELECTOR = '.item-title'
    NAME_INPUT_SELECTOR = '.xblock-field-input'
    NAME_FIELD_WRAPPER_SELECTOR = '.xblock-title .wrapper-xblock-field'
    STATUS_MESSAGE_SELECTOR = '> div[class$="status"] .status-message'
    CONFIGURATION_BUTTON_SELECTOR = '.action-item .configure-button'

    def __repr__(self):
        # CourseOutlineItem is also used as a mixin for CourseOutlinePage, which doesn't have a locator
        # Check for the existence of a locator so that errors when navigating to the course outline page don't show up
        # as errors in the repr method instead.
        try:
            return "{}(<browser>, {!r})".format(self.__class__.__name__, self.locator)
        except AttributeError:
            return "{}(<browser>)".format(self.__class__.__name__)

    def _bounded_selector(self, selector):
        """
        Returns `selector`, but limited to this particular `CourseOutlineItem` context
        """
        # If the item doesn't have a body selector or locator, then it can't be bounded
        # This happens in the context of the CourseOutlinePage
        if self.BODY_SELECTOR and hasattr(self, 'locator'):
            return '{}[data-locator="{}"] {}'.format(
                self.BODY_SELECTOR,
                self.locator,
                selector
            )
        else:
            return selector

    @property
    def name(self):
        """
        Returns the display name of this object, or None if no title element is found.
        """
        name_element = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).first
        if name_element:
            return name_element.text[0]
        else:
            return None

    @property
    def has_status_message(self):
        """
        Returns True if the item has a status message, False otherwise.
        """
        return self.q(css=self._bounded_selector(self.STATUS_MESSAGE_SELECTOR)).first.visible

    @property
    def status_message(self):
        """
        Returns the status message of this item.
        """
        return self.q(css=self._bounded_selector(self.STATUS_MESSAGE_SELECTOR)).text[0]

    @property
    def has_staff_lock_warning(self):
        """ Returns True if the 'Contains staff only content' message is visible """
        return self.status_message == 'Contains staff only content' if self.has_status_message else False

    @property
    def is_staff_only(self):
        """ Returns True if the visiblity state of this item is staff only (has a black sidebar) """
        return "is-staff-only" in self.q(css=self._bounded_selector(''))[0].get_attribute("class")

    def edit_name(self):
        """
        Puts the item's name into editable form.
        """
        self.q(css=self._bounded_selector(self.EDIT_BUTTON_SELECTOR)).first.click()

    def enter_name(self, new_name):
        """
        Enters new_name as the item's display name.
        """
        set_input_value(self, self._bounded_selector(self.NAME_INPUT_SELECTOR), new_name)

    def change_name(self, new_name):
        """
        Changes the container's name.
        """
        self.edit_name()
        set_input_value_and_save(self, self._bounded_selector(self.NAME_INPUT_SELECTOR), new_name)
        self.wait_for_ajax()

    def finalize_name(self):
        """
        Presses ENTER, saving the value of the display name for this item.
        """
        self.q(css=self._bounded_selector(self.NAME_INPUT_SELECTOR)).results[0].send_keys(Keys.ENTER)
        self.wait_for_ajax()

    def set_staff_lock(self, is_locked):
        """
        Sets the explicit staff lock of item on the container page to is_locked.
        """
        modal = self.edit()
        modal.is_explicitly_locked = is_locked
        modal.save()

    def in_editable_form(self):
        """
        Return whether this outline item's display name is in its editable form.
        """
        return "is-editing" in self.q(
            css=self._bounded_selector(self.NAME_FIELD_WRAPPER_SELECTOR)
        )[0].get_attribute("class")

    def edit(self):
        """
        Opens the settings modal for this item and returns it once shown.
        """
        self.q(css=self._bounded_selector(self.CONFIGURATION_BUTTON_SELECTOR)).first.click()
        modal = CourseOutlineModal(self)
        # BUGFIX: the promise must be fulfilled, otherwise it is merely
        # constructed and discarded and never actually waits for the modal.
        EmptyPromise(lambda: modal.is_shown(), 'Modal is shown.').fulfill()
        return modal

    @property
    def release_date(self):
        """
        Returns the displayed release date, or None if not present.
        """
        element = self.q(css=self._bounded_selector(".status-release-value"))
        return element.first.text[0] if element.present else None

    @property
    def due_date(self):
        """
        Returns the displayed due date, or None if not present.
        """
        element = self.q(css=self._bounded_selector(".status-grading-date"))
        return element.first.text[0] if element.present else None

    @property
    def policy(self):
        """
        Returns the displayed grading policy value, or None if not present.
        """
        element = self.q(css=self._bounded_selector(".status-grading-value"))
        return element.first.text[0] if element.present else None

    def publish(self):
        """
        Publish the unit.
        """
        click_css(self, self._bounded_selector('.action-publish'), require_notification=False)
        modal = CourseOutlineModal(self)
        # BUGFIX: fulfill the promise so that we really wait for the
        # publish confirmation modal before acting on it.
        EmptyPromise(lambda: modal.is_shown(), 'Modal is shown.').fulfill()
        modal.publish()

    @property
    def publish_action(self):
        """
        Returns the link for publishing a unit.
        """
        return self.q(css=self._bounded_selector('.action-publish')).first
class CourseOutlineContainer(CourseOutlineItem):
    """
    A mixin to a CourseOutline page object that adds the ability to load
    a child page object by title or by index.
    CHILD_CLASS must be a :class:`CourseOutlineChild` subclass.
    """
    CHILD_CLASS = None
    ADD_BUTTON_SELECTOR = '> .outline-content > .add-item a.button-new'
    def child(self, title, child_class=None):
        """
        Returns a child page object (of type `child_class`, defaulting to
        CHILD_CLASS) whose title element text matches `title`.
        :type self: object
        """
        if not child_class:
            child_class = self.CHILD_CLASS
        # Filter all child bodies down to the one containing the title, then
        # construct the child page object from its data-locator attribute.
        return child_class(
            self.browser,
            self.q(css=child_class.BODY_SELECTOR).filter(
                lambda el: title in [inner.text for inner in
                                     el.find_elements_by_css_selector(child_class.NAME_SELECTOR)]
            ).attrs('data-locator')[0]
        )
    def children(self, child_class=None):
        """
        Returns all the children page objects of class child_class.
        """
        if not child_class:
            child_class = self.CHILD_CLASS
        return self.q(css=self._bounded_selector(child_class.BODY_SELECTOR)).map(
            lambda el: child_class(self.browser, el.get_attribute('data-locator'))).results
    def child_at(self, index, child_class=None):
        """
        Returns the child at the specified index.
        :type self: object
        """
        if not child_class:
            child_class = self.CHILD_CLASS
        return self.children(child_class)[index]
    def add_child(self, require_notification=True):
        """
        Adds a child to this xblock, waiting for notifications.
        """
        click_css(
            self,
            self._bounded_selector(self.ADD_BUTTON_SELECTOR),
            require_notification=require_notification,
        )
    def expand_subsection(self):
        """
        Toggle the expansion of this subsection.
        """
        # Disable jQuery animations so the expand/collapse state changes
        # instantly and can be detected reliably.
        self.browser.execute_script("jQuery.fx.off = true;")
        def subsection_expanded():
            # The add button is only displayed while the container is expanded.
            add_button = self.q(css=self._bounded_selector(self.ADD_BUTTON_SELECTOR)).first.results
            return add_button and add_button[0].is_displayed()
        currently_expanded = subsection_expanded()
        self.q(css=self._bounded_selector('.ui-toggle-expansion i')).first.click()
        # NOTE(review): this waits for element *presence* with an
        # 'expanded' message even when collapsing -- the real toggle check
        # is the EmptyPromise below.
        self.wait_for_element_presence(self._bounded_selector(self.ADD_BUTTON_SELECTOR), 'Subsection is expanded')
        EmptyPromise(
            lambda: subsection_expanded() != currently_expanded,
            "Check that the container {} has been toggled".format(self.locator)
        ).fulfill()
        self.browser.execute_script("jQuery.fx.off = false;")
        return self
    @property
    def is_collapsed(self):
        """
        Return whether this outline item is currently collapsed.
        """
        return "is-collapsed" in self.q(css=self._bounded_selector('')).first.attrs("class")[0]
class CourseOutlineChild(PageObject, CourseOutlineItem):
    """
    A page object that will be used as a child of :class:`CourseOutlineContainer`.
    """
    url = None
    BODY_SELECTOR = '.outline-item'
    def __init__(self, browser, locator):
        super(CourseOutlineChild, self).__init__(browser)
        # The xblock locator that uniquely identifies this item in the DOM.
        self.locator = locator
    def is_browser_on_page(self):
        # Present when an element with this item's body selector and locator exists.
        return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
    def delete(self, cancel=False):
        """
        Clicks the delete button, then cancels at the confirmation prompt if cancel is True.
        """
        click_css(self, self._bounded_selector('.delete-button'), require_notification=False)
        confirm_prompt(self, cancel)
    def _bounded_selector(self, selector):
        """
        Return `selector`, but limited to this particular `CourseOutlineChild` context
        """
        # Overrides the CourseOutlineItem mixin version: a child always has a
        # locator, so the selector is bounded unconditionally.
        return '{}[data-locator="{}"] {}'.format(
            self.BODY_SELECTOR,
            self.locator,
            selector
        )
    @property
    def name(self):
        """
        Returns the display name of this child, or None if no title is found.
        """
        titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
        if titles:
            return titles[0]
        else:
            return None
    @property
    def children(self):
        """
        Will return any first-generation descendant items of this item.
        """
        descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
            lambda el: CourseOutlineChild(self.browser, el.get_attribute('data-locator'))).results
        # Now remove any non-direct descendants.
        # A CSS descendant query matches grandchildren too, so recursively
        # collect each descendant's own children and exclude their locators.
        grandkids = []
        for descendant in descendants:
            grandkids.extend(descendant.children)
        grand_locators = [grandkid.locator for grandkid in grandkids]
        return [descendant for descendant in descendants if descendant.locator not in grand_locators]
class CourseOutlineUnit(CourseOutlineChild):
    """
    Wraps a single unit link shown on the Studio course outline.
    """
    url = None
    BODY_SELECTOR = '.outline-unit'
    NAME_SELECTOR = '.unit-title a'

    def go_to(self):
        """
        Follow this unit link and return a loaded :class:`.ContainerPage`
        for the unit.
        """
        page = ContainerPage(self.browser, self.locator)
        return page.visit()

    def is_browser_on_page(self):
        """
        True when at least one unit body element is present on the page.
        """
        return self.q(css=self.BODY_SELECTOR).present

    def children(self):
        """
        Return the unit page objects nested inside this unit.
        """
        query = self.q(css=self._bounded_selector(self.BODY_SELECTOR))
        return query.map(
            lambda el: CourseOutlineUnit(self.browser, el.get_attribute('data-locator'))
        ).results
class CourseOutlineSubsection(CourseOutlineContainer, CourseOutlineChild):
    """
    Page object wrapping one subsection block on the Studio course outline.
    """
    url = None
    BODY_SELECTOR = '.outline-subsection'
    NAME_SELECTOR = '.subsection-title'
    NAME_FIELD_WRAPPER_SELECTOR = '.subsection-header .wrapper-xblock-field'
    CHILD_CLASS = CourseOutlineUnit

    def unit(self, title):
        """
        Return the :class:`.CourseOutlineUnit` whose title is `title`.
        """
        return self.child(title)

    def units(self):
        """
        Return every unit contained in this subsection.
        """
        return self.children()

    def unit_at(self, index):
        """
        Return the unit at position `index` within this subsection.
        """
        return self.child_at(index)

    def add_unit(self):
        """
        Click this subsection's 'add unit' button.
        """
        selector = self._bounded_selector(self.ADD_BUTTON_SELECTOR)
        self.q(css=selector).click()
class CourseOutlineSection(CourseOutlineContainer, CourseOutlineChild):
    """
    Page object wrapping one section block on the Studio course outline.
    """
    url = None
    BODY_SELECTOR = '.outline-section'
    NAME_SELECTOR = '.section-title'
    NAME_FIELD_WRAPPER_SELECTOR = '.section-header .wrapper-xblock-field'
    CHILD_CLASS = CourseOutlineSubsection

    def subsection(self, title):
        """
        Return the :class:`.CourseOutlineSubsection` whose title is `title`.
        """
        return self.child(title)

    def subsections(self):
        """
        Return every subsection contained in this section.
        """
        return self.children()

    def subsection_at(self, index):
        """
        Return the subsection at position `index` within this section.
        """
        return self.child_at(index)

    def add_subsection(self):
        """
        Add a new subsection to this section, waiting for the notification.
        """
        self.add_child()
class ExpandCollapseLinkState:
    """
    Represents the three states that the expand/collapse link can be in
    """
    MISSING = 0  # link is not displayed on the page
    COLLAPSE = 1  # link currently offers to collapse all sections ("collapse-all" class)
    EXPAND = 2  # link currently offers to expand all sections
class CourseOutlinePage(CoursePage, CourseOutlineContainer):
    """
    Course Outline page in Studio.
    """
    url_path = "course"
    CHILD_CLASS = CourseOutlineSection
    EXPAND_COLLAPSE_CSS = '.button-toggle-expand-collapse'
    BOTTOM_ADD_SECTION_BUTTON = '.outline > .add-section .button-new'
    def is_browser_on_page(self):
        """
        The outline page is loaded when the outline body is present and the
        loading indicator has been hidden.
        """
        return self.q(css='body.view-outline').present and self.q(css='div.ui-loading.is-hidden').present
    def view_live(self):
        """
        Clicks the "View Live" link and switches to the new tab
        """
        click_css(self, '.view-live-button', require_notification=False)
        # The LMS view opens in a new window; switch to the most recent one.
        self.browser.switch_to_window(self.browser.window_handles[-1])
    def section(self, title):
        """
        Return the :class:`.CourseOutlineSection` with the title `title`.
        """
        return self.child(title)
    def section_at(self, index):
        """
        Returns the :class:`.CourseOutlineSection` at the specified index.
        """
        return self.child_at(index)
    def click_section_name(self, parent_css=''):
        """
        Find and click on first section name in course outline
        """
        self.q(css='{} .section-name'.format(parent_css)).first.click()
    def get_section_name(self, parent_css='', page_refresh=False):
        """
        Get the list of names of all sections present
        """
        if page_refresh:
            self.browser.refresh()
        return self.q(css='{} .section-name'.format(parent_css)).text
    def section_name_edit_form_present(self, parent_css=''):
        """
        Check that section name edit form present
        """
        return self.q(css='{} .section-name input'.format(parent_css)).present
    def change_section_name(self, new_name, parent_css=''):
        """
        Change section name of first section present in course outline
        """
        self.click_section_name(parent_css)
        self.q(css='{} .section-name input'.format(parent_css)).first.fill(new_name)
        self.q(css='{} .section-name .save-button'.format(parent_css)).first.click()
        self.wait_for_ajax()
    def click_release_date(self):
        """
        Open release date edit modal of first section in course outline
        """
        self.q(css='div.section-published-date a.edit-release-date').first.click()
    def sections(self):
        """
        Returns the sections of this course outline page.
        """
        return self.children()
    def add_section_from_top_button(self):
        """
        Clicks the button for adding a section which resides at the top of the screen.
        """
        click_css(self, '.wrapper-mast nav.nav-actions .button-new')
    def add_section_from_bottom_button(self, click_child_icon=False):
        """
        Clicks the button for adding a section which resides at the bottom of the screen.
        """
        element_css = self.BOTTOM_ADD_SECTION_BUTTON
        # Optionally target the '+' icon nested inside the button.
        if click_child_icon:
            element_css += " .fa-plus"
        click_css(self, element_css)
    def toggle_expand_collapse(self):
        """
        Toggles whether all sections are expanded or collapsed
        """
        self.q(css=self.EXPAND_COLLAPSE_CSS).click()
    def start_reindex(self):
        """
        Starts course reindex by clicking reindex button
        """
        self.reindex_button.click()
    @property
    def bottom_add_section_button(self):
        """
        Returns the query representing the bottom add section button.
        """
        return self.q(css=self.BOTTOM_ADD_SECTION_BUTTON).first
    @property
    def has_no_content_message(self):
        """
        Returns true if a message informing the user that the course has no content is visible
        """
        return self.q(css='.outline .no-content').is_present()
    @property
    def has_rerun_notification(self):
        """
        Returns true iff the rerun notification is present on the page.
        """
        return self.q(css='.wrapper-alert.is-shown').is_present()
    def dismiss_rerun_notification(self):
        """
        Clicks the dismiss button in the rerun notification.
        """
        self.q(css='.dismiss-button').click()
    @property
    def expand_collapse_link_state(self):
        """
        Returns the current state of the expand/collapse link
        """
        link = self.q(css=self.EXPAND_COLLAPSE_CSS)[0]
        # State is derived from visibility first, then the link's CSS class.
        if not link.is_displayed():
            return ExpandCollapseLinkState.MISSING
        elif "collapse-all" in link.get_attribute("class"):
            return ExpandCollapseLinkState.COLLAPSE
        else:
            return ExpandCollapseLinkState.EXPAND
    @property
    def reindex_button(self):
        """
        Returns reindex button.
        """
        return self.q(css=".button.button-reindex")[0]
    def expand_all_subsections(self):
        """
        Expands all the subsections in this course.
        """
        # Sections must be expanded before their subsections become reachable.
        for section in self.sections():
            if section.is_collapsed:
                section.expand_subsection()
            for subsection in section.subsections():
                if subsection.is_collapsed:
                    subsection.expand_subsection()
    @property
    def xblocks(self):
        """
        Return a list of xblocks loaded on the outline page.
        """
        return self.children(CourseOutlineChild)
    @property
    def license(self):
        """
        Returns the course license text, if present. Else returns None.
        """
        # NOTE(review): `.first.text[0]` raises IndexError when no
        # .license-value element exists -- this does not actually return
        # None as documented; confirm intended behavior.
        return self.q(css=".license-value").first.text[0]
class CourseOutlineModal(object):
    """
    Helper for the settings modal opened from a course outline item; all
    queries are delegated to the owning page object.
    """
    MODAL_SELECTOR = ".wrapper-modal-window"
    def __init__(self, page):
        # `page` is the page object (or outline item) supplying `q`,
        # `wait_for_ajax`, and similar browser helpers.
        self.page = page
    def _bounded_selector(self, selector):
        """
        Returns `selector`, but limited to this particular `CourseOutlineModal` context.
        """
        return " ".join([self.MODAL_SELECTOR, selector])
    def is_shown(self):
        # True when the modal wrapper is present in the DOM.
        return self.page.q(css=self.MODAL_SELECTOR).present
    def find_css(self, selector):
        # Query helper, scoped to the modal.
        return self.page.q(css=self._bounded_selector(selector))
    def click(self, selector, index=0):
        # Click the index-th element matching `selector` inside the modal.
        self.find_css(selector).nth(index).click()
    def save(self):
        # Persist the modal's settings and wait for the AJAX round-trip.
        self.click(".action-save")
        self.page.wait_for_ajax()
    def publish(self):
        # Confirm the publish action and wait for the AJAX round-trip.
        self.click(".action-publish")
        self.page.wait_for_ajax()
    def cancel(self):
        # Dismiss the modal without saving.
        self.click(".action-cancel")
    def has_release_date(self):
        # True when the modal exposes a release-date input.
        return self.find_css("#start_date").present
    def has_due_date(self):
        # True when the modal exposes a due-date input.
        return self.find_css("#due_date").present
    def has_policy(self):
        # True when the modal exposes a grading-type selector.
        return self.find_css("#grading_type").present
    def set_date(self, property_name, input_selector, date):
        """
        Set `date` value to input pointed by `selector` and `property_name`.
        `date` is a "mm/dd/yyyy" string; navigation happens through the
        jQuery UI datepicker widget.
        """
        month, day, year = map(int, date.split('/'))
        self.click(input_selector)
        if getattr(self, property_name):
            # NOTE(review): split('/')[1:] of a mm/dd/yyyy value yields
            # (day, year), so `current_month` receives the day component --
            # confirm the displayed input format before trusting date_diff.
            current_month, current_year = map(int, getattr(self, property_name).split('/')[1:])
        else:  # Use default timepicker values, which are current month and year.
            current_month, current_year = datetime.datetime.today().month, datetime.datetime.today().year
        # Number of month-steps to navigate in the datepicker (signed).
        date_diff = 12 * (year - current_year) + month - current_month
        selector = "a.ui-datepicker-{}".format('next' if date_diff > 0 else 'prev')
        for i in xrange(abs(date_diff)):  # xrange: this module targets Python 2
            self.page.q(css=selector).click()
        self.page.q(css="a.ui-state-default").nth(day - 1).click()  # set day
        self.page.wait_for_element_invisibility("#ui-datepicker-div", "datepicker should be closed")
        EmptyPromise(
            lambda: getattr(self, property_name) == u'{m}/{d}/{y}'.format(m=month, d=day, y=year),
            "{} is updated in modal.".format(property_name)
        ).fulfill()
    @property
    def release_date(self):
        # Current value of the release-date input.
        return self.find_css("#start_date").first.attrs('value')[0]
    @release_date.setter
    def release_date(self, date):
        """
        Date is "mm/dd/yyyy" string.
        """
        self.set_date('release_date', "#start_date", date)
    @property
    def due_date(self):
        # Current value of the due-date input.
        return self.find_css("#due_date").first.attrs('value')[0]
    @due_date.setter
    def due_date(self, date):
        """
        Date is "mm/dd/yyyy" string.
        """
        self.set_date('due_date', "#due_date", date)
    @property
    def policy(self):
        """
        Select the grading format with `value` in the drop-down list.
        """
        element = self.find_css('#grading_type')[0]
        return self.get_selected_option_text(element)
    @policy.setter
    def policy(self, grading_label):
        """
        Select the grading format with `value` in the drop-down list.
        """
        element = self.find_css('#grading_type')[0]
        select = Select(element)
        select.select_by_visible_text(grading_label)
        EmptyPromise(
            lambda: self.policy == grading_label,
            "Grading label is updated.",
        ).fulfill()
    @property
    def is_explicitly_locked(self):
        """
        Returns true if the explict staff lock checkbox is checked, false otherwise.
        """
        return self.find_css('#staff_lock')[0].is_selected()
    @is_explicitly_locked.setter
    def is_explicitly_locked(self, value):
        """
        Checks the explicit staff lock box if value is true, otherwise unchecks the box.
        """
        if value != self.is_explicitly_locked:
            # Clicks the label rather than the checkbox itself -- presumably
            # the raw input is styled/hidden; confirm if this ever flakes.
            self.find_css('label[for="staff_lock"]').click()
        EmptyPromise(lambda: value == self.is_explicitly_locked, "Explicit staff lock is updated").fulfill()
    def shows_staff_lock_warning(self):
        """
        Returns true iff the staff lock warning is visible.
        """
        return self.find_css('.staff-lock .tip-warning').visible
    def get_selected_option_text(self, element):
        """
        Returns the text of the first selected option for the element.
        """
        if element:
            select = Select(element)
            return select.first_selected_option.text
        else:
            return None
|
shubhdev/openedx
|
common/test/acceptance/pages/studio/overview.py
|
Python
|
agpl-3.0
| 23,921
|
[
"VisIt"
] |
17a69d128e3a6286667e1d8fcc5be1c6415657e4d350800b770e7744f7ebc649
|
from __future__ import division, print_function
from sklearn import mixture
from sklearn.neighbors import KernelDensity
from astroML.density_estimation import XDGMM
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.optimize import curve_fit
from astroML.plotting import hist
from matplotlib import cm
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import pandas as pd
import argparse
# Command-line arguments (user-facing help text is kept in Spanish).
parser = argparse.ArgumentParser(description='VPD Plot')
parser.add_argument('<Input List>', help='Catalogo final -> Fotometria + PMs')
parser.add_argument('--max-mag', type=float, default=20.0, help='Corte superior en magnitud (Default 20)')
parser.add_argument('--min-mag', type=float, default=8.0, help='Corte inferior en magnitud (Default 8)')
parser.add_argument('--max-err', type=float, default=2.0, help='Maximo error a considerar (Default 2)')
parser.add_argument('--lim', type=float, default=16, help='Limite en PM para el plot (cuadrado)')
parser.add_argument('--pm-cut', type=float, default=30, help='No considerar PM mayor al valor (Default 30)')
parser.add_argument('--comp', type=int, default=2, help='Nro de componentes para el Gaussian Mixture (Default 2)')
parser.add_argument('--kernel', type=str, default='linear', help='Kernel para el KDE (gaussian, tophat, epanechnikov, exponential, linear, cosine)')
parser.add_argument('--center', nargs=2, default=None, help='Forzar centro a las coordenadas entregadas')
parser.add_argument('--hexbins', type=int, default=None, help='Usa bines hexagonales, se debe especificar tamano grilla')
parser.add_argument('--levels', type=int, default=7, help='Numero de niveles para el contour plot')
parser.add_argument('--hist2d', action='store_true', help='Hace el histograma en 2D en vez del KDE')
parser.add_argument('--no-save', action='store_true', help='Mostrar plot en pantalla en vez de guardar')
parser.add_argument('--output', type=str, default='VPDbins.png', help='Cambiar nombre del file de output')
args = parser.parse_args()
# The positional argument name contains spaces/brackets, so it must be
# fetched through vars() rather than attribute access.
inputs = vars(args)['<Input List>']
max_mag = args.max_mag
min_mag = args.min_mag
max_err = args.max_err
n_comp = args.comp
lim = args.lim
# Read input data; column layout depends on the catalog flavor.
if 'PM_final' in inputs:
    ids, pmx, pmy, magK, nframes, pmex, pmey = np.genfromtxt(inputs, unpack=True, usecols=(0,3,4,5,6,8,9))
else:
    #ID L B Ks EKs H EH J EJ Y EY Z EZ ULCOSB EULCOSB UB EUB NFRAMES
    ids, magK, pmx, pmex, pmy, pmey, nframes = np.genfromtxt(inputs, unpack=True, usecols=(0,3,13,14,15,16, 17))
# Selection filters: magnitude window, total PM error, minimum number of
# frames, and an upper PM modulus cut.
mag_mask = (magK < max_mag) & (magK > min_mag)
err_mask = (pmex**2 + pmey**2)**0.5 < max_err
nfr_mask = (nframes >= 8)
pm_mask = (pmx**2 + pmy**2)**0.5 < args.pm_cut
mask = mag_mask & err_mask & nfr_mask & pm_mask
data = np.transpose([pmx, pmy])[mask]
# Per-star diagonal covariance matrices built from the PM errors
# (used by the commented-out XDGMM experiment below).
data_err = np.zeros(data.shape + data.shape[-1:])
diag = np.arange(data.shape[-1])
data_err[:, diag, diag] = np.vstack([pmex**2, pmey**2]).T[mask]
print('\tTotal de estrellas: %d' % len(mask))
print('\tNumero de estrellas seleccionadas: %d' % mask.sum())
print('\tEstrellas con PM sobre %.2f: %d' % (args.pm_cut, (~pm_mask).sum()))
print('\tPromedios (x,y): %f, %f' % (np.mean(pmx[mask]), np.mean(pmy[mask])))
print('\tDesviacion estandar: %f, %f' % (np.std(pmx[mask]), np.std(pmy[mask])))
# Estimate component centers with a Gaussian mixture.
# NOTE(review): mixture.GMM is the old sklearn API (removed in >=0.20);
# this script presumably targets an older scikit-learn -- confirm.
g = mixture.GMM(n_components=n_comp, covariance_type='full').fit(data)
# x, y hold the centers in the *pre-sort* order returned by the fit.
x, y = np.transpose(g.means_)
print('\nX_3D Y_3D (cruces blancas)')
# Sort components by the modulus of their mean PM vector.
modulo = np.sum(g.means_**2, axis=1)**0.5
ormod = np.argsort(modulo)
g.means_ = g.means_[ormod]
#g.means_[0] = np.array([0,0])
print(g.means_)
print('sig_X_3D sig_Y_3D')
for cov in g.covars_:
    print(np.sqrt(np.diag(cov)))
# Index of the component closest to the origin (pre-sort order).
idx = np.argmin((x**2 + y**2)**0.5)
##XDGMM
#XD = XDGMM(n_components=n_comp, n_iter=10).fit(data, data_err)
#xd_x, xd_y = np.transpose(XD.mu)
#print('X_XD Y_XD')
#print(XD.mu)
#radio = ((pmx[mask] - x[idx])**2 + (pmy[mask] - y[idx])**2)**0.5 < 3
# Gaussian model functions (defined below).
def gaussian(x, amp, mu, sig):
    """Evaluate a single (non-normalized) Gaussian amp*exp(-(x-mu)^2/(2 sig^2)).

    Negative amplitudes or widths are rejected by returning inf, which makes
    curve_fit steer away from such parameter sets.
    """
    if amp < 0 or sig < 0:
        return np.inf
    return amp * np.exp(-(x-mu)**2 / (2*sig**2))
def two_gaussian(x, amp1, mu1, sig1,
                 amp2, mu2, sig2):
    """Sum of two Gaussian components (see gaussian())."""
    first = gaussian(x, amp1, mu1, sig1)
    second = gaussian(x, amp2, mu2, sig2)
    return first + second
def three_gaussian(x, amp1, mu1, sig1,
                   amp2, mu2, sig2,
                   amp3, mu3, sig3):
    """Sum of three Gaussian components (see gaussian())."""
    total = gaussian(x, amp1, mu1, sig1)
    total = total + gaussian(x, amp2, mu2, sig2)
    total = total + gaussian(x, amp3, mu3, sig3)
    return total
# Select the fit model matching the requested number of Gaussian components.
if args.comp == 2:
    gf = two_gaussian
elif args.comp == 3:
    gf = three_gaussian
elif args.comp == 1:
    gf = gaussian
else:
    # BUGFIX: previously an unsupported --comp value left `gf` undefined,
    # crashing much later with a NameError at curve_fit. Fail fast instead.
    raise ValueError('--comp must be 1, 2 or 3 (got %d)' % args.comp)
# Figure layout: main VPD panel plus marginal histograms (top = X, right = Y).
bins = np.arange(-lim, lim+0.5, 0.5)
binc = (bins[:-1] + bins[1:])/2.
cmap = cm.get_cmap('jet')
color = cmap(np.linspace(0, 1, cmap.N))
fig = plt.figure(figsize=[10,10])
gs = gridspec.GridSpec(4,4)
ax = plt.subplot(gs[1:,:-1])
axu = plt.subplot(gs[0,:-1])
axd = plt.subplot(gs[1:,-1])
# 1D KDE of the X proper motions; its maximum gives the 2D-marked center.
kde = KernelDensity(kernel=args.kernel).fit((pmx[mask])[:,np.newaxis])
xx = np.arange(-lim, lim, 0.05)
yy = np.exp(kde.score_samples(xx[:,np.newaxis]))
x2d = xx[np.argmax(yy)]
# Initial amplitudes: KDE density evaluated at the GMM centers, halved.
a0 = np.exp(kde.score_samples(x[:,np.newaxis]))
if args.comp == 3:
    p0 = [a0[0]/2.0, x[0], 3, a0[1]/2.0, x[1], 3, a0[2]/2.0, x[2], 3]
elif args.comp == 2:
    p0 = [a0[0]/2.0, x[0], 3, a0[1]/2.0, x[1], 3]
elif args.comp == 1:
    p0 = [a0[0]/2.0, x[0], 3]
# Fit the multi-Gaussian model to the KDE curve; parameters are laid out as
# (amp, mu, sig) triplets, hence the [1::3]/[2::3] slices below.
popt, pcov = curve_fit(gf, xx, yy, p0=p0, maxfev=100000)
x0g = popt[1::3]
x0s = popt[2::3]
x0e = np.sqrt(np.diag(pcov)[1::3])
naxu, _, _ = axu.hist(pmx[mask], bins=bins, histtype='stepfilled', normed=True, color=cmap(0), alpha=.75)
axu.plot(xx, gaussian(xx, *popt[0:3]), color=cmap(0.6), lw=2.5, alpha=.9)
if args.comp >= 2:
    axu.plot(xx, gaussian(xx, *popt[3:6]), color=cmap(0.4), lw=2.5, alpha=.9)
if args.comp >= 3:
    axu.plot(xx, gaussian(xx, *popt[6:9]), color=cmap(0.2), lw=2, alpha=.9)
axu.plot(xx,yy, color=cmap(0.8), lw=2, alpha=.9)
# Second fit, this time against the binned (histogram) densities.
bpop, bcov = curve_fit(gf, binc, naxu, p0=p0, maxfev=100000)
x0b = bpop[1::3]
xsb = bpop[2::3]
xbe = np.sqrt(np.diag(bcov)[1::3])
axu.plot(xx, gaussian(xx, *bpop[0:3]), color=cmap(0.3), lw=2.5, alpha=.9)
axu.set_xlim(-lim ,lim)
# --- Y axis: KDE + Gaussian fits, mirroring the X-axis block above ---
kde = KernelDensity(kernel=args.kernel).fit((pmy[mask])[:,np.newaxis])
xx = np.arange(-lim, lim, 0.05)
yy = np.exp(kde.score_samples(xx[:,np.newaxis]))
y2d = xx[np.argmax(yy)]
a0 = np.exp(kde.score_samples(y[:,np.newaxis]))
if args.comp == 3:
    p0 = [a0[0]/2.0, y[0], 3, a0[1]/2.0, y[1], 3, a0[2]/2.0, y[2], 3]
elif args.comp == 2:
    p0 = [a0[0]/2.0, y[0], 3, a0[1]/2.0, y[1], 3]
elif args.comp == 1:
    # BUGFIX: the single-component initial guess previously used x[0]
    # (copy-paste from the X-axis block); the Y fit must start at the Y center.
    p0 = [a0[0]/2.0, y[0], 3]
popt, pcov = curve_fit(gf, xx, yy, p0=p0, maxfev=100000)
y0g = popt[1::3]
y0s = popt[2::3]
# BUGFIX: name the KDE-fit errors y0e and the binned-fit errors ybe
# (previously swapped), so the 'X_err_2D Y_err_2D' and 'X_err_bin Y_err_bin'
# prints below pair like-with-like quantities (x0e with y0e, xbe with ybe).
y0e = np.sqrt(np.diag(pcov)[1::3])
naxd, _, _ = axd.hist(pmy[mask], bins=bins, histtype='stepfilled', normed=True, color=cmap(0), alpha=.75, orientation='horizontal')
axd.plot(gaussian(xx, *popt[0:3]), xx, color=cmap(0.6), lw=2.5, alpha=.9)
if args.comp >= 2:
    axd.plot(gaussian(xx, *popt[3:6]), xx, color=cmap(0.4), lw=2.5, alpha=.9)
if args.comp >= 3:
    axd.plot(gaussian(xx, *popt[6:9]), xx, color=cmap(0.2), lw=2, alpha=.9)
axd.plot(yy,xx, color=cmap(0.8), lw=2, alpha=.9)
# Second fit against the binned (histogram) densities.
bpop, bcov = curve_fit(gf, binc, naxd, p0=p0, maxfev=100000)
y0b = bpop[1::3]
ysb = bpop[2::3]
ybe = np.sqrt(np.diag(bcov)[1::3])
axd.plot(gaussian(xx, *bpop[0:3]), xx, color=cmap(0.3), lw=2.5, alpha=.9)
axd.set_ylim(-lim ,lim)
# Summary of the 1D fit results (labels kept in the original wording).
print('\nX_2D Y_2D (puntos blancos)')
print(np.transpose([x0g, y0g]))
print('X_err_2D Y_err_2D')
print(np.transpose([x0e, y0e]))
print('sig_X_2D sig_Y_2D')
print(np.transpose([x0s, y0s]))
print('\nX_bin Y_bin')
print(np.transpose([x0b, y0b]))
print('X_err_bin Y_err_bin')
print(np.transpose([xbe, ybe]))
print('sig_X_bin sig_Y_bin')
print(np.transpose([xsb, ysb]))
print('\nMax 2D (cuadrado blanco)')
print(x2d, y2d)
#print('\nCalculando KDE 2D...')
# 2D KDE over the vector-point diagram; its maximum is the "3D" center.
xy = np.transpose([pmx, pmy])[mask]
k2d = KernelDensity(kernel=args.kernel, bandwidth=0.4).fit(xy)
xg = np.linspace(-lim, lim, 100)
X, Y = np.meshgrid(xg, xg)
XY = np.vstack([Y.ravel(), X.ravel()]).T
logd = k2d.score_samples(XY)
Z = np.exp(logd).reshape(X.shape)
x3d = Y.ravel()[np.argmax(Z)]
y3d = X.ravel()[np.argmax(Z)]
#from scipy.stats import norm
#mu, sigma = norm.fit(pmx[mask])
#axu.plot(xx, norm.pdf(xx, mu, sigma), '-k')
#mu, sigma = norm.fit(pmy[mask])
#axd.plot(norm.pdf(xx, mu, sigma), xx, '-k')
print('\nMax 3D (triangulo blanco)')
print(x3d, y3d)
# Main panel: hexbin, 2D histogram, or filled KDE contours.
if args.hexbins != None:
    h = ax.hexbin(data.T[0], data.T[1], gridsize=args.hexbins)
elif args.hist2d:
    H, xedges, yedges, img = ax.hist2d(data.T[0], data.T[1], bins=bins)
    extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
    h = ax.matshow(np.rot90(H), cmap=cmap,extent=extent)
    ax.grid(linestyle='-', color='white', lw=.5, which='both', alpha=.2)
    ax.minorticks_on()
else:
    #h = ax.matshow(np.rot90(Z), extent=[-15, 15, -15, 15])
    # First contourf only determines the level range; it is redrawn with
    # evenly spaced levels.
    h = ax.contourf(Y,X,Z)
    h = ax.contourf(Y,X,Z, levels=np.linspace(0, h.levels[-1], args.levels+1))
    ax.minorticks_on()
# Overlay the various center estimates (GMM, 1D fits, KDE maxima).
ax.plot([0], [0], '+', color='k', ms=15, mew=1.5)
ax.plot(x, y, 'xw', mew=1.5)
#ax.plot(xd_x, xd_y, 'x', color='gray', mew=1.5)
ax.plot(x0g, y0g, 'ow')
ax.plot(x2d, y2d, 'sw', ms=3)
ax.plot(x3d, y3d, '^w', ms=7)
ax.set_xlim(-lim, lim)
div = make_axes_locatable(ax)
#cax = div.append_axes("right", size="5%", pad=0.2)
co = fig.add_axes([0.29, 0.16, 0.25, 0.01])
cb = fig.colorbar(h, cax=co, orientation='horizontal')
#ax.add_patch(patches.Rectangle((-10, -22.1), 20, 1.5, alpha=.66, color='w', lw=0))
cb.ax.tick_params(labelcolor='#000000', pad=7.5, length=4)
plt.setp(plt.xticks()[1], rotation=45)
#ax.set_aspect('equal')
if args.no_save:
    plt.show()
else:
    fig.savefig(args.output, dpi=100, bbox_inches='tight')
import sys
# NOTE(review): exits with status 1 even on success -- callers/scripts will
# see this as a failure; confirm whether exit code 0 was intended.
sys.exit(1)
|
astrofelipe/MP2015
|
VPD.py
|
Python
|
mit
| 9,762
|
[
"Gaussian"
] |
8cc9b620cacb8f763cc5f6afb2d3fa4438b18e8d8fa8d2577261f9821d8ea69c
|
#-*- coding:utf-8 -*-
"""
This file is part of exparser.
exparser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
exparser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with exparser. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from scipy.stats import nanmean, nanmedian, nanstd, ttest_ind, linregress
from scipy.interpolate import interp1d
from matplotlib import pyplot as plt
from matplotlib import mpl
import warnings
import numpy as np
from exparser.TangoPalette import *
from exparser.RBridge import RBridge
from exparser.Cache import cachedArray, cachedDataMatrix
from exparser.DataMatrix import DataMatrix
def getTrace(dm, signal=None, phase=None, traceLen=None, offset=0,
	lock='start', traceTemplate='__trace_%s__', baseline=None, baselineLen=100,
	baselineOffset=0, baselineLock='end', smoothParams=None, nanPad=True,
	transform=None, deriv=0, regress=None, **dummy):

	"""
	desc:
		Gets a trace for a single trial.
	arguments:
		dm:
			desc:	A DataMatrix with only a single trial (if more trials are in
					there, only the first will be used).
			type:	DataMatrix
	keywords:
		signal:
			desc:	'x', 'y', or 'pupil'.
			type:	[str, unicode, NoneType]
		phase:
			desc:	The name of the phase.
			type:	[str, unicode]
		traceLen:
			desc:	The length of the trace to plot.
			type:	int
		offset:
			desc:	The first (if lock == start) or last (if lock == end)
					samples to skip.
			type:	int
		lock:
			desc:	Indicates whether the trace should be locked from the
					phase start or end.
			type:	[str, unicode]
		traceTemplate:
			desc:	Used to map a phase name onto a DataMatrix key.
			type:	[str, unicode]
		baseline:
			desc:	The phase to use for the baseline, or None for no
					baseline correction.
			type:	[str, unicode, NoneType]
		baselineLen:
			desc:	The length of the baseline.
			type:	int
		baselineOffset:
			desc:	The first (if lock == start) or last (if lock == end)
					baseline samples to skip.
			type:	int
		baselineLock:
			desc:	Indicates whether the baseline should be locked from the
					phase start or end.
			type:	[str, unicode]
		smoothParams:
			desc:	A {'windowLen' : [..], 'windowType' : [..]} dictionary
					that is used to specify signal smoothing (see smooth()),
					or `None` for no smoothing.
			type:	[dict, NoneType]
		nanPad:
			desc:	If set to True, traces that are shorter than traceLen
					are padded with np.nan values.
			type:	bool
		transform:
			desc:	A function to transform the trace value.
			type:	[NoneType, function]
		deriv:
			desc:	Indicates the derivative that should be used. The 1st
					derivative corresponds to velocity, the 2nd to acceleration.
			type:	int
		regress:
			desc:	A function that regresses the X and Y coordinate out of
					pupil size.
			type:	FunctionType
	returns:
		desc:	A 1D NumPy array with the trace.
		type:	ndarray
	"""

	# Validate input.
	if len(dm) != 1:
		raise Exception('DataMatrix must have exactly one row')
	if signal == None or phase == None or traceLen == None:
		raise Exception('signal, phase, and traceLen are required keywords')
	if lock not in ['start', 'end']:
		raise Exception('lock should be start or end')
	if baselineLock not in ['start', 'end']:
		raise Exception('baselineLock should be start or end')
	# Map the signal name onto a column of the stored (x, y, pupil) array.
	if signal == 'x':
		i = 0
	elif signal == 'y':
		i = 1
	elif signal == 'pupil':
		i = 2
	else:
		raise Exception('Invalid signal!')
	# Load the trace for the requested phase from its .npy file.
	npy = dm[traceTemplate % phase][0]
	if not os.path.exists(npy):
		raise Exception('Missing .npy trace file: %s (path="%s")' \
			% (traceTemplate % phase, npy))
	aFull = np.load(npy)
	if regress != None:
		_aTrace = regress(aFull, dm)
	else:
		_aTrace = aFull[:,i]
	# Optional smoothing and derivative, applied before any windowing.
	if smoothParams != None:
		try:
			_aTrace = smooth(_aTrace, **smoothParams)
		except:
			warnings.warn('Failed to smooth trace of length %d' % len(_aTrace))
	if deriv > 0:
		_aTrace = traceDeriv(_aTrace, deriv)
	# Select the requested window, locked to the phase start or end.
	if lock == 'start':
		_aTrace = _aTrace[offset:offset+traceLen]
	elif offset > 0:
		_aTrace = _aTrace[-offset-traceLen:-offset]
	else:
		_aTrace = _aTrace[-traceLen:]
	# Paste the trace into a nan-filled trace that has exactly the desired
	# length. This is necessary to deal with traces that are shorter than the
	# specified traceLen.
	if nanPad:
		aTrace = np.empty(traceLen)
		aTrace[:] = np.nan
		if lock == 'start':
			aTrace[:len(_aTrace)] = _aTrace
		else:
			aTrace[-len(_aTrace):] = _aTrace
	else:
		aTrace = _aTrace
	# Optionally apply a transform
	if transform != None:
		aTrace = transform(aTrace)
	# If no baseline phase was specified we are done. BUGFIX: the trace was
	# previously smoothed a *second* time here, even though smoothing (when
	# requested) has already been applied above.
	if baseline == None:
		return aTrace
	# Load the baseline phase and apply the same preprocessing pipeline.
	npy = dm[traceTemplate % baseline][0]
	if not os.path.exists(npy):
		raise Exception('Missing .npy trace file: %s (path="%s")' \
			% (traceTemplate % baseline, npy))
	aFull = np.load(npy)
	if regress != None:
		aBaseline = regress(aFull, dm)
	else:
		aBaseline = aFull[:,i]
	if smoothParams != None:
		aBaseline = smooth(aBaseline, **smoothParams)
	if deriv > 0:
		aBaseline = traceDeriv(aBaseline, deriv)
	# Select the baseline window, locked to the phase start or end.
	if baselineLock == 'start':
		aBaseline = aBaseline[baselineOffset:baselineOffset+baselineLen]
	elif baselineOffset == 0:
		aBaseline = aBaseline[-baselineLen:]
	else:
		aBaseline = aBaseline[-baselineOffset-baselineLen:-baselineOffset]
	if transform != None:
		aBaseline = transform(aBaseline)
	# Divisive baseline correction: divide by the mean baseline value.
	mBaseline = aBaseline.mean()
	aTrace /= mBaseline
	return aTrace
def getTraceAvg(dm, avgFunc=nanmean, **traceParams):

	"""
	Gets a single average trace.

	Arguments:
	dm			--	a DataMatrix

	Keyword arguments:
	avgFunc		--	the function to use to determine the average trace. This
					function must be robust to nan values and accept an `axis`
					keyword. (default=nanmean)
	*traceParams --	see getTrace()

	Returns:
	An (xData, yData, errData) tuple, where errData contains the standard
	error.
	"""

	traceLen = traceParams['traceLen']
	# One row per trial; rows are nan-filled so that traces shorter than
	# traceLen do not distort the nan-aware average below.
	mTrace = np.empty( (len(dm), traceLen) )
	mTrace[:] = np.nan
	for i, trialDm in enumerate(dm):
		aTrace = getTrace(trialDm, **traceParams)
		mTrace[i, 0:len(aTrace)] = aTrace
	xData = np.linspace(0, traceLen, traceLen)
	# Bugfix: honor the avgFunc keyword instead of always using nanmean.
	yData = avgFunc(mTrace, axis=0)
	# Standard error of the mean, duplicated for the lower/upper band.
	errData = nanstd(mTrace, axis=0) / np.sqrt(mTrace.shape[0])
	errData = np.array( [errData, errData] )
	return xData, yData, errData
def getTracePeak(dm, peakFunc=np.nanmax, **traceParams):

	"""
	Determines the position and value of the peak in a trace.

	Arguments:
	dm			--	a DataMatrix

	Keyword arguments:
	peakFunc	--	the function used to determine the peak value
					(default=np.nanmax)
	*traceParams --	see getTrace()

	Returns:
	An (xPeak, yPeak) tuple with the sample index and the value of the peak.
	"""

	aTrace = getTrace(dm, **traceParams)
	peakVal = peakFunc(aTrace)
	# Take the first sample at which the peak value occurs.
	xPeak = np.flatnonzero(aTrace == peakVal)[0]
	return xPeak, aTrace[xPeak]
def getTracePeakAvg(dm, **traceParams):

	"""
	desc:
		Gets the average peak position and peak value across all trials.

	arguments:
		dm:
			desc:	A DataMatrix.
			type:	DataMatrix

	keyword-dict:
		*traceParams:	See getTrace()

	Returns:
	An (xPeak, yPeak, xErr, yErr) tuple, where xErr and yErr contain the
	standard error.
	"""

	# Collect the per-trial peak position and value. (The nan-padded trace
	# matrix that used to be allocated here was dead code and has been
	# removed.)
	aXPeak = np.empty(len(dm))
	aYPeak = np.empty(len(dm))
	for i, trialDm in enumerate(dm):
		xPeak, yPeak = getTracePeak(trialDm, **traceParams)
		aXPeak[i] = xPeak
		aYPeak[i] = yPeak
	# Means plus the standard error of each mean.
	return aXPeak.mean(), aYPeak.mean(), np.std(aXPeak)/np.sqrt(len(aXPeak)), \
		np.std(aYPeak)/np.sqrt(len(aYPeak))
def plotTraceAvg(ax, dm, avgFunc=nanmean, lineColor=blue[0], lineStyle='-',
	errColor=gray[1], errAlpha=.4, label=None, _downSample=None, aErr=None,
	orientation='horizontal', **traceParams):

	"""
	Plots a single average trace.

	Arguments:
	ax			--	a Matplotlib axis
	dm			--	a DataMatrix

	Keyword arguments:
	avgFunc		--	see getTraceAvg()
	lineColor	--	the line color (default=blue[0])
	lineStyle	--	the line style (default='-')
	errColor	--	the color for the error shading, or None to disable the
					shading (default=gray[1])
	errAlpha	--	the opacity for the error shading (default=.4)
	label		--	a line label (default=None)
	_downSample	--	specify a decrease in resolution, to decrease the size
					of the plot. (default=None)
	aErr		--	a 2-D array to use to draw the error shading, or None
					to use the error data calculated by getTraceAvg()
					(default=None)
	orientation	--	'horizontal' or 'vertical'. (default='horizontal')
	*traceParams --	see getTrace()
	"""

	xData, yData, errData = getTraceAvg(dm, avgFunc=avgFunc, **traceParams)
	if aErr is not None:
		errData = aErr
	if _downSample is not None:
		xData = downSample(xData, _downSample)
		yData = downSample(yData, _downSample)
		errData = downSample(errData, _downSample)
	if orientation == 'horizontal':
		ax.plot(xData, yData, color=lineColor, label=label, linestyle=lineStyle)
		if errColor is not None:
			ax.fill_between(xData, yData-errData[0], yData+errData[1], \
				color=errColor, alpha=errAlpha)
	else:
		# Consistency fix: honor lineStyle in the vertical orientation too
		# (the horizontal branch always did).
		ax.plot(yData, xData, color=lineColor, label=label,
			linestyle=lineStyle)
		if errColor is not None:
			ax.fill_betweenx(xData, yData-errData[0], yData+errData[1],
				color=errColor, alpha=errAlpha)
def plotTraceContrast(dm, select1, select2, color1=blue[1], color2=orange[1],
	colorDiff=green[1], label1=None, label2=None, labelDiff=None,
	errAlpha=.25, model=None, showAbs=True, showDiff=False, **params):

	"""
	desc: |
		Creates a trace-contrast plot, with two lines and error shadings.

		NOTE: Passing the `cacheId` keyword will cause the lmer statistics to be
		cached.

	arguments:
		dm:
			desc: A DataMatrix.
			type: DataMatrix
		select1:
			desc: A select statement for the first trace.
			type: [str, unicode]
		select2:
			desc: A select statement for the second trace.
			type: [str, unicode]

	keywords:
		color1:
			desc: A color for the first trace.
			type: [str, unicode]
		color2:
			desc: A color for the second trace.
			type: [str, unicode]
		colorDiff:
			desc: A color for the difference trace.
			type: [str, unicode]
		label1:
			desc: A label for the first trace.
			type: [str, unicode, NoneType]
		label2:
			desc: A label for the second trace.
			type: [str, unicode, NoneType]
		labelDiff:
			desc: A label for the difference trace.
			type: [str, unicode, NoneType]
		errAlpha:
			desc: Opacity level for the error bars.
			type: [float, int]
		model:
			desc: A statistical model to be passed onto `mixedModelTrace()` or
				`None` to skip statistics.
			type: [str, unicode, NoneType]
		showAbs:
			desc: Indicates whether the absolute traces (i.e. traces 1 and 2)
				should be shown.
			type: bool
		showDiff:
			desc: Indicates whether the difference trace should be shown.
			type: bool
	"""

	# Average the two selections separately and compute the difference trace.
	x1, y1, err1 = getTraceAvg(dm.select(select1, verbose=False), **params)
	x2, y2, err2 = getTraceAvg(dm.select(select2, verbose=False), **params)
	y3 = y2-y1
	if model != None:
		# Run the per-sample mixed model and keep the rows for the second
		# effect in the output (the first non-intercept effect).
		ldm = mixedModelTrace(dm, model=model, **params)
		ldm = ldm.select('effect == "%s"' % ldm['effect'][1])
		aSe = ldm['se']
		aT = ldm['t']
		# Half a standard error on either side of each absolute trace, a full
		# standard error around the difference trace.
		y1min = y1 - aSe/2
		y1max = y1 + aSe/2
		y2min = y2 - aSe/2
		y2max = y2 + aSe/2
		y3min = y3 - aSe
		y3max = y3 + aSe
		if showAbs:
			plt.fill_between(x1, y1min, y1max, color=color1, alpha=errAlpha)
			plt.fill_between(x2, y2min, y2max, color=color2, alpha=errAlpha)
		if showDiff:
			plt.fill_between(x1, y3min, y3max, color=colorDiff, alpha=errAlpha)
		# Shade the regions where the t-values cross the threshold.
		markStats(plt.gca(), aT, **params)
	# Draw the lines on top of the error shading.
	if showAbs:
		plt.plot(x1, y1, color=color1, label=label1)
		plt.plot(x2, y2, color=color2, label=label2)
	if showDiff:
		plt.plot(x1, y3, color=colorDiff, label=labelDiff)
def traceDiff(dm, select1, select2, epoch=None, **traceParams):

	"""
	Determines the difference in the trace between two subsets of the data
	(i.e. two groups or conditions) within a particular epoch.

	Arguments:
	dm			--	A DataMatrix.
	select1		--	A select statement for the first trace.
	select2		--	A select statement for the second trace.

	Keyword arguments:
	epoch		--	The time interval for which to estimate the trace
					difference. This can be None to use the entire trace,
					a single int to take one sample, or a (start, end) tuple
					to select a particular epoch. (default=None)
	traceParams	--	The trace parameters. (default=trialParams)

	Returns:
	A single value reflecting the trace difference.
	"""

	x1, y1, err1 = getTraceAvg(dm.select(select1, verbose=False), \
		**traceParams)
	x2, y2, err2 = getTraceAvg(dm.select(select2, verbose=False), \
		**traceParams)
	d = y1-y2
	# `type(...) is` (not `==`) keeps the original strict check: booleans and
	# int subclasses are not accepted as an epoch.
	if type(epoch) is int:
		return d[epoch]
	if type(epoch) is tuple and len(epoch) == 2:
		d = d[epoch[0]:epoch[1]]
	elif epoch is not None:
		raise Exception('Epoch should be None, int, or (int, int)')
	return d.mean()
@cachedDataMatrix
def mixedModelTrace(dm, model, winSize=1, effectIndex=1, **traceParams):

	"""
	desc:
		Perform a mixed model over a single trace. The dependent variable is
		specifed through the signal and phase keywords.

	arguments:
		dm:
			desc:	A DataMatrix.
			type:	DataMatrix
		model:
			desc:	An lmer-style model. This needs to be only the fixed and
					random effects part of the model, so everything after
					the `~` sign. For example `cond + (1|subject_nr)`.
			type:	str

	keywords:
		winSize:
			desc:	Indicates the number of samples that should be skipped
					each time. For a real analysis, this should be 1, but
					for a quick look, it can be increased (default=1)
			type:	int

	keyword-dict:
		*traceParams:	See getTrace().

	returns: |
		A DataMatrix with nr-of-effects*nr-of-samples rows and the following
		columns:

		- `i`: sample number
		- `effect`: name of effect
		- `est`: estimated effect (slope/ intercept)
		- `se`: standard error of effect
		- `t`: t-value of effect
	"""

	# NOTE(review): the effectIndex keyword is currently unused in this body.
	# Prefix the placeholder dependent variable that is filled in per window.
	if not model.startswith('mmdv__ ~ '):
		model = 'mmdv__ ~ ' + model
	# Re-use a single module-level RBridge instance, since starting R is
	# expensive.
	global R
	try:
		R
	except:
		R = RBridge()
	traceLen = traceParams['traceLen']
	# Accumulate rows for the output DataMatrix; first row is the header.
	l = [ ['i', 'effect', 'est', 'se', 't'] ]
	for i in range(0, traceLen, winSize):
		# First calculate the mean value for the current signal slice for each
		# trial and save that in a copy of the DataMatrix
		_dm = dm.addField('mmdv__', dtype=float)
		for trialId in range(len(_dm)):
			aTrace = getTrace(_dm[trialId], **traceParams)
			if i < len(aTrace):
				sliceMean = aTrace[i:i+winSize].mean()
			else:
				# Trace is shorter than the current window: no data point.
				sliceMean = np.nan
			_dm['mmdv__'][trialId] = sliceMean
		# Do mixed effects
		R.load(_dm)
		_dm = R.lmer(model)
		_dm._print(sign=4, title='%d - %d' % (i, i+winSize))
		# Repeat the lmer output for every sample in the window, so the result
		# has one row per sample per effect.
		for k in range(winSize):
			if i+k >= traceLen:
				break
			for j in _dm.range():
				l.append([i+k, _dm['effect'][j], _dm['est'][j], _dm['se'][j],
					_dm['t'][j]])
	return DataMatrix(l)
def statsTrace(dm, key='t', intercept=False, show=True):

	"""
	Plots the statistics trace for each effect in a DataMatrix, as returned
	by mixedModelTrace().

	Arguments:
	dm			--	A DataMatrix with per-sample effect statistics.

	Keyword arguments:
	key			--	The column containing the statistic to plot.
					(default='t')
	intercept	--	Indicates whether the '(Intercept)' effect should be
					plotted as well. (default=False)
	show		--	Indicates whether the plot should be shown.
					(default=True)
	"""

	colors = brightColors[:]
	for effect in dm.unique('effect'):
		if not intercept and effect == '(Intercept)':
			continue
		_dm = dm.select('effect == "%s"' % effect)
		# Bugfix: plot the requested statistic (key) instead of always 't'.
		plt.plot(_dm[key], label=effect, color=colors.pop())
	plt.legend(frameon=False)
	if show:
		plt.show()
def markStats(ax, aStat, below=False, _abs=True, thr=2., minSmp=200,
	color=gray[1], alpha=.2, loExt=False, hiExt=False, showSpurious=False,
	**args):

	"""
	Marks all timepoints in a figure with colored shading when the statistic
	crosses a threshold.

	Arguments:
	ax		--	a matplotlib axis
	aStat	--	an array with statistics, such as p-values or t-values.

	Keyword arguments:
	below	--	Indicates whether values below the threshold (True) or above
				it (False) are considered to be significant. (default=False)
	_abs	--	Indicates whether the absolute value of the statistics should
				be used. (default=True)
	thr		--	the threshold (default=2.)
	minSmp	--	the minimum number of consecutive significant samples
				(default=200)
	color	--	the color for the shading (default=gray[1])
	alpha	--	The alpha value. (default=.2)
	loExt	--	Indicates whether intervals before the signal should be
				treated as significant, such that at the start of the signal
				there is no minimum number of samples. (default=False)
	hiExt	--	Like `loExt`, but for the end of the signal. (default=False)
	showSpurious	--	Indicates whether 'spurious' significant points should
						be shown as well, i.e. those that do not meet the
						minSmp threshold. (default=False)
	**args	--	Unused; absorbs extra keywords passed through by callers.

	Returns:
	A list of (start, end) tuples with significant regions
	"""

	lRoi = []
	# Start index of the significant run currently being tracked, or None.
	iFrom = None
	aSign = np.zeros(len(aStat))
	aSpurious = np.zeros(len(aStat))
	if _abs:
		aStat = np.abs(aStat)
	for i in range(len(aStat)):
		pVal = aStat[i]
		hit = (pVal < thr and below) or (pVal > thr and not below)
		if hit:
			aSpurious[i] = 1
			if iFrom == None:
				iFrom = i
		if ((not hit or (i == len(aStat)-1)) and iFrom != None):
			# A significant run ends here; keep it only if it is long enough,
			# or if it touches the start/end of the signal and loExt/hiExt
			# waive the length requirement there.
			if i-iFrom >= minSmp-1 or (iFrom == 0 and loExt) or \
				(i == len(aStat)-1 and hiExt):
				print 'Significant region: %d - %d' % (iFrom, i-1)
				ax.axvspan(iFrom, i-1, ymax=1, color=color, zorder=-9999,
					alpha=alpha)
				lRoi.append((iFrom, i-1))
				aSign[iFrom:i-1] = 1
			iFrom = None
	if showSpurious:
		# Mark isolated significant samples that were not part of a kept run.
		aSpurious[np.where(aSign == 1)] = 0
		for i in np.where(aSpurious == 1)[0]:
			print 'Spurious %d' % i
			ax.axvline(i, color='black')
	return lRoi
def smooth(aTrace, windowLen=11, windowType='hanning', correctLen=True):

	"""
	Source: <http://www.scipy.org/Cookbook/SignalSmooth>

	Smooth the data using a window with requested size.

	This method is based on the convolution of a scaled window with the signal.
	The signal is prepared by introducing reflected copies of the signal
	(with the window size) in both ends so that transient parts are minimized
	in the begining and end part of the output signal.

	Arguments:
	aTrace		--	an array with the input signal

	Keyword arguments:
	windowLen	--	the dimension of the smoothing window; should be an odd
					integer (default=11)
	windowType	--	the type of window from 'flat', 'hanning', 'hamming',
					'bartlett', 'blackman'. Flat window will produce a moving
					average smoothing.
	correctLen	--	indicates whether the returned signal should be the same
					length as the input signal (default=True).

	Returns:
	An array with the smoothed signal
	"""

	if aTrace.ndim != 1:
		raise ValueError("smooth only accepts 1 dimension arrays.")
	if aTrace.size < windowLen:
		raise ValueError("Input vector needs to be bigger than window size.")
	if windowLen < 3:
		return aTrace
	if not windowType in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
		raise ValueError( \
			"Window should be one of 'flat', 'hanning', 'hamming', "
			"'bartlett', 'blackman'")
	# Pad the signal with reflected copies at both ends, so that transients
	# at the edges are minimized.
	s = np.r_[aTrace[windowLen-1:0:-1], aTrace, aTrace[-1:-windowLen:-1]]
	if windowType == 'flat': # moving average
		w = np.ones(windowLen, 'd')
	else:
		func = getattr(np, windowType)
		w = func(windowLen)
	y = np.convolve(w/w.sum(), s, mode='valid')
	if correctLen:
		# Bugfix: use integer (floor) division for the slice bounds. Under
		# Python 3, `windowLen/2` yields a float, which is not a valid index.
		y = y[(windowLen//2-1):-(windowLen//2)]
		# The output array can be one shorter than the input array
		if len(y) > len(aTrace):
			y = y[:len(aTrace)]
		elif len(y) < len(aTrace):
			raise Exception('The output array is too short!')
	return y
def downSample(aTrace, i):

	"""
	Downsamples an array by skipping samples.

	Arguments:
	aTrace		--	input array
	i			--	downsampling ratio

	Returns:
	A downsampled array
	"""

	nDim = len(aTrace.shape)
	if nDim == 1:
		return aTrace[::i]
	if nDim == 2:
		# Downsample along the second axis, row by row.
		return np.array([row[::i] for row in aTrace])
	raise Exception('Only 1 and 2-dimensional arrays are allowed')
def traceDeriv(aTrace, deriv=0):

	"""
	desc:
		Gets the N-th derivative of a signal. Here, the 1st derivative is
		velocity, the 2nd acceleration, etc.

	arguments:
		aTrace:
			desc:	Input array.
			type:	ndarray

	keywords:
		deriv:
			desc:	The derivative (0 returns a float copy of the input).
			type:	int

	returns:
		desc:	An array with the derivative.
		type:	ndarray
	"""

	# Bugfix: cast to float with astype() instead of assigning to .dtype.
	# Reassigning .dtype reinterprets the underlying buffer in place, which
	# corrupts non-float input and mutates the caller's array. (`np.float` is
	# also no longer available in modern numpy.)
	aTrace = aTrace.astype(float)
	while deriv > 0:
		# Successive first-order differences.
		aTrace = aTrace[1:] - aTrace[:-1]
		deriv -= 1
	return aTrace
def latency(aTrace, at=None, vt=None, plot=False):

	"""
	Determines the response latency in a signal, based on an acceleration
	and/or velocity threshold.

	Arguments:
	aTrace		--	input array

	Keyword arguments:
	at			--	acceleration threshold (default=None)
	vt			--	velocity threshold (default=None)
	plot		--	indicates whether a plot should be shown (default=False)

	Returns:
	The first sample where the acceleration or velocity threshold is
	exceeded, or None if neither threshold is ever exceeded.

	Raises:
	Exception if neither `at` nor `vt` is specified.
	"""

	# Idiom fix: compare against None with `is`/`is not` throughout.
	if at is None and vt is None:
		raise Exception( \
			'You must specify an accelation and/or velocity threshold')
	# First-order differences give velocity; second-order give acceleration.
	velTrace = aTrace[1:] - aTrace[:-1]
	accTrace = velTrace[1:] - velTrace[:-1]
	aLat = None
	vLat = None
	if vt is not None:
		l = np.where(np.abs(velTrace) > vt)[0]
		if len(l) > 0:
			vLat = l[0]
	if at is not None:
		l = np.where(np.abs(accTrace) > at)[0]
		if len(l) > 0:
			aLat = l[0]
	# The latency is the earliest of the two candidate latencies.
	if aLat is None and vLat is None:
		lat = None
	elif aLat is None:
		lat = vLat
	elif vLat is None:
		lat = aLat
	else:
		lat = min(aLat, vLat)
	if plot:
		plt.subplot(311)
		plt.plot(aTrace)
		if lat is not None:
			plt.axvline(lat)
		plt.subplot(312)
		plt.plot(velTrace)
		if vt is not None:
			plt.axhline(vt, color='red')
		if lat is not None:
			plt.axvline(lat)
		plt.axhline()
		plt.subplot(313)
		plt.plot(accTrace)
		if at is not None:
			plt.axhline(at, color='red')
		if lat is not None:
			plt.axvline(lat)
		plt.axhline()
		plt.show()
	return lat
def blinkReconstruct(aTrace, vt=5, maxDur=500, margin=10, plot=False):

	"""
	Reconstructs pupil size during blinks by interpolating over the detected
	blink intervals.

	Arguments:
	aTrace		--	The input trace.

	Keyword arguments:
	vt			--	The pupil velocity threshold. Lower thresholds more easily
					trigger blinks. (default=5)
	maxDur		--	The maximum duration (in samples) for a blink. Longer
					blinks are ignored. (default=500)
	margin		--	The number of samples by which a detected blink interval
					is extended on either side. (default=10)
	plot		--	Indicates whether the algorithm should be plotted.
					(default=False)

	Returns:
	An array with the reconstructed pupil data or an (array, figure) tuple when
	plot==True.
	"""

	# Create a copy of the signal, a smoothed version, and calculate the
	# velocity profile.
	aTrace = np.copy(aTrace)
	try:
		sTrace = smooth(aTrace, windowLen=21)
	except Exception as e:
		# smooth() raises when the trace is too short for the window; fall
		# back to the unsmoothed signal.
		warnings.warn(str(e))
		sTrace = aTrace
	vTrace = sTrace[1:]-sTrace[:-1]
	if plot:
		plt.clf()
		fig = plt.figure(figsize=(10,5))
		plt.rc("font", family='Liberation Sans')
		plt.rc("font", size=10)
		plt.subplots_adjust(wspace=.25, hspace=.4)
		plt.subplot(2,2,1)
		plt.title('Original signal')
		plt.plot(aTrace, color=blue[1])
		plt.xlabel('Time (ms)')
		plt.ylabel('Pupil size (arbitrary units)')
		plt.subplot(2,2,2)
		plt.title('Smoothed signal')
		plt.plot(sTrace, color=blue[1])
		plt.xlabel('Time (ms)')
		plt.ylabel('Pupil size (arbitrary units)')
		plt.subplot(2,2,3)
		plt.title('Velocity profile')
		plt.plot(vTrace, color=blue[1])
		plt.xlabel('Time (ms)')
		plt.ylabel('Velocity (arbitrary units)')
	# Start blink detection
	iFrom = 0
	lBlink = []
	while True:
		# The onset of the blink is the moment at which the pupil velocity
		# exceeds the threshold.
		l = np.where(vTrace[iFrom:] < -vt)[0]
		if len(l) == 0:
			break # No blink detected
		iStart = l[0]+iFrom
		if iFrom == iStart:
			break
		# The reversal period is the moment at which the pupil starts to dilate
		# again with a velocity above threshold.
		l = np.where(vTrace[iStart:] > vt)[0]
		if len(l) == 0:
			iFrom = iStart
			continue
		iMid = l[0]+iStart
		# The end blink period is the moment at which the pupil velocity drops
		# back to zero again.
		l = np.where(vTrace[iMid:] < 0)[0]
		if len(l) == 0:
			iFrom = iMid
			continue
		iEnd = l[0]+iMid
		iFrom = iEnd
		# We generally underestimate the blink period, so compensate for this
		if iStart-margin >= 0:
			iStart -= margin
		if iEnd+margin < len(aTrace):
			iEnd += margin
		# We don't accept blinks that are too long, because blinks are not
		# generally very long (although they can be).
		if iEnd-iStart > maxDur:
			continue
		if plot:
			plt.axvspan(iStart, iEnd, color=gray[-1], alpha=.4)
		lBlink.append( (iStart, iEnd) )
	if plot:
		plt.subplot(2,2,4)
		plt.title('Reconstructed signal')
	# Now reconstruct the trace during the blinks
	for iStart, iEnd in lBlink:
		# First create a list of (when possible) four data points that we can
		# use for interpolation.
		dur = iEnd - iStart
		l = []
		if iStart-dur >= 0:
			l += [iStart-dur]
		l += [iStart, iEnd]
		if iEnd+dur < len(sTrace):
			l += [iEnd+dur]
		x = np.array(l)
		# If the list is long enough we use cubic interpolation, otherwise we
		# use linear interpolation
		y = aTrace[x]
		if plot:
			plt.plot(x, y, 'o', color=orange[1])
		if len(x) >= 4:
			f2 = interp1d(x, y, kind='cubic')
		else:
			f2 = interp1d(x, y)
		xInt = np.arange(iStart, iEnd)
		yInt = f2(xInt)
		# Overwrite the blink interval with the interpolated values.
		aTrace[xInt] = yInt
	if plot:
		plt.plot(aTrace, color=blue[1])
		plt.xlabel('Time (ms)')
		plt.ylabel('Pupil size (arbitrary units)')
	if plot:
		return aTrace, fig
	return aTrace
@cachedDataMatrix
def splitTrace(dm, splitCol, phase, phaseBefore=None, phaseAfter=None, \
	traceTemplate='__trace_%s__'):

	"""
	Splits all traces in the DataMatrix by the value in a specific column.
	This allows, for example, to split a single trace at the point at which
	a response was given.

	NOTE: This function is cached.

	Arguments:
	splitCol		--	The column that contains the values to split by.
	phase			--	The name of the phase to split.

	Keyword arguments:
	phaseBefore		--	The name of the phase that will be the first part of
						the split, or None to discard this part.
						(default=None)
	phaseAfter		--	The name of the phase that will be the second part of
						the split, or None to discard this part.
						(default=None)
	traceTemplate	--	See `getTrace()`.

	Returns:
	The DataMatrix with the splitted phases added.
	"""

	keepBefore = phaseBefore != None
	keepAfter = phaseAfter != None
	if not (keepBefore or keepAfter):
		raise Exception('Either phaseBefore or phaseAfter should be specified')
	# Add a column for each part of the split that should be kept.
	if keepBefore:
		dm = dm.addField(traceTemplate % phaseBefore, dtype=str)
	if keepAfter:
		dm = dm.addField(traceTemplate % phaseAfter, dtype=str)
	for row in dm.range():
		# Load the full trace and cut it at the row's split value.
		srcNpy = dm[traceTemplate % phase][row]
		aFull = np.load(srcNpy)
		iSplit = dm[splitCol][row]
		if keepBefore:
			npyBefore = srcNpy + '.before.npy'
			np.save(npyBefore, aFull[:iSplit])
			dm[traceTemplate % phaseBefore][row] = npyBefore
		if keepAfter:
			npyAfter = srcNpy + '.after.npy'
			np.save(npyAfter, aFull[iSplit:])
			dm[traceTemplate % phaseAfter][row] = npyAfter
	return dm
|
lvanderlinden/exparser
|
exparser/TraceKit.py
|
Python
|
gpl-2.0
| 26,516
|
[
"ASE"
] |
d8e6b6dc1d5c0717e4f7461d6f428545f064472ce095a4194112c5f974c92159
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
from pathlib import Path
import json
import gzip
import numpy as np
import warnings
from shutil import copyfile, copyfileobj
from monty.tempfile import ScratchDir
import xml.etree.cElementTree as ET
from pymatgen.electronic_structure.core import OrbitalType
from pymatgen.io.vasp.inputs import Kpoints, Poscar
from pymatgen.io.vasp.outputs import Chgcar, Locpot, Oszicar, Outcar, \
Vasprun, Procar, Xdatcar, Dynmat, BSVasprun, UnconvergedVASPWarning, \
VaspParserError, Wavecar, Waveder, Elfcar, Eigenval
from pymatgen import Spin, Orbital, Lattice, Structure
from pymatgen.entries.compatibility import MaterialsProjectCompatibility
from pymatgen.electronic_structure.core import Magmom
from pymatgen.util.testing import PymatgenTest
from pymatgen.core import Element
class VasprunTest(PymatgenTest):
_multiprocess_shared_ = True
    def setUp(self):
        # Silence warnings (e.g. unconverged-run warnings) emitted while
        # parsing the test fixture files.
        warnings.simplefilter("ignore")
    def tearDown(self):
        # Restore the default warning behavior for other test classes.
        warnings.simplefilter("default")
    def test_multiple_dielectric(self):
        # A GW0 vasprun contains several dielectric functions beyond the
        # default one; they should land in other_dielectric.
        v = Vasprun(self.TEST_FILES_DIR / "vasprun.GW0.xml")
        self.assertEqual(len(v.other_dielectric), 3)
    def test_charge_charge_dielectric(self):
        """
        VASP 5.4.4 writes out two dielectric functions to vasprun.xml
        These are the "density-density" and "velocity-velocity" linear response functions.
        See the comments in `linear_optics.F` for details.
        """
        v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.dielectric_5.4.4",
                    parse_potcar_file=False)
        self.assertEqual(v.dielectric is not None, True)
        self.assertEqual('density' in v.dielectric_data, True)
        self.assertEqual('velocity' in v.dielectric_data, True)
    def test_optical_absorption_coeff(self):
        # Spot-check one value of the derived optical absorption coefficient
        # computed from a BSE vasprun.
        v = Vasprun(self.TEST_FILES_DIR / "vasprun.BSE.xml.gz")
        absorption_coeff = v.optical_absorption_coeff
        self.assertEqual(absorption_coeff[1], 24966408728.917931)
    def test_vasprun_with_more_than_two_unlabelled_dielectric_functions(self):
        # More than two unlabelled dielectric functions cannot be
        # disambiguated and should raise.
        with self.assertRaises(NotImplementedError):
            Vasprun(self.TEST_FILES_DIR / "vasprun.xml.dielectric_bad",
                    parse_potcar_file=False)
    def test_bad_vasprun(self):
        # A truncated/malformed XML file raises by default ...
        self.assertRaises(ET.ParseError,
                          Vasprun, self.TEST_FILES_DIR / "bad_vasprun.xml")
        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            # Trigger a warning.
            # ... but with exception_on_bad_xml=False it parses what it can
            # and warns instead.
            v = Vasprun(self.TEST_FILES_DIR / "bad_vasprun.xml",
                        exception_on_bad_xml=False)
            # Verify some things
            self.assertEqual(len(v.ionic_steps), 1)
            self.assertAlmostEqual(v.final_energy, -269.00551374)
            self.assertTrue(issubclass(w[-1].category,
                                       UserWarning))
    def test_runtype(self):
        # run_type should be inferred correctly for a variety of functionals.
        v = Vasprun(self.TEST_FILES_DIR / "vasprun.GW0.xml")
        self.assertIn(v.run_type, "HF")
        v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.hse06")
        self.assertIn(v.run_type, "HSE06")
        v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.scan_rvv10")
        self.assertIn(v.run_type, "SCAN+rVV10")
        v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.dfpt.ionic")
        self.assertIn(v.run_type, "GGA")
        v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.dfpt")
        self.assertIn(v.run_type, "GGA+U")
    def test_vdw(self):
        # Final energy from a run with van der Waals corrections.
        v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.vdw")
        self.assertAlmostEqual(v.final_energy, -9.78310677)
    def test_nonlmn(self):
        # pdos parsing for a run without lm-decomposed projections.
        filepath = self.TEST_FILES_DIR / 'vasprun.xml.nonlm'
        vasprun = Vasprun(filepath, parse_potcar_file=False)
        orbs = list(vasprun.complete_dos.pdos[vasprun.final_structure[
            0]].keys())
        self.assertIn(OrbitalType.s, orbs)
    def test_standard(self):
        # End-to-end parse of a standard relaxation vasprun.xml: parameters,
        # pdos, ionic steps, structures, band properties, POTCAR symbols,
        # k-points, and the ionic_step_skip/offset options.
        filepath = self.TEST_FILES_DIR / 'vasprun.xml'
        vasprun = Vasprun(filepath, parse_potcar_file=False)
        # Test NELM parsing.
        self.assertEqual(vasprun.parameters["NELM"], 60)
        # test pdos parsing
        pdos0 = vasprun.complete_dos.pdos[vasprun.final_structure[0]]
        self.assertAlmostEqual(pdos0[Orbital.s][Spin.up][16], 0.0026)
        self.assertAlmostEqual(pdos0[Orbital.pz][Spin.down][16], 0.0012)
        self.assertEqual(pdos0[Orbital.s][Spin.up].shape, (301,))
        filepath2 = self.TEST_FILES_DIR / 'lifepo4.xml'
        vasprun_ggau = Vasprun(filepath2, parse_projected_eigen=True,
                               parse_potcar_file=False)
        totalscsteps = sum([len(i['electronic_steps'])
                            for i in vasprun.ionic_steps])
        self.assertEqual(29, len(vasprun.ionic_steps))
        self.assertEqual(len(vasprun.structures), len(vasprun.ionic_steps))
        # Each ionic step carries the structure at that step.
        for i, step in enumerate(vasprun.ionic_steps):
            self.assertEqual(vasprun.structures[i], step["structure"])
        self.assertTrue(all([vasprun.structures[i] == vasprun.ionic_steps[i][
            "structure"] for i in range(len(vasprun.ionic_steps))]))
        self.assertEqual(308, totalscsteps,
                         "Incorrect number of energies read from vasprun.xml")
        self.assertEqual(['Li'] + 4 * ['Fe'] + 4 * ['P'] + 16 * ["O"],
                         vasprun.atomic_symbols)
        self.assertEqual(vasprun.final_structure.composition.reduced_formula,
                         "LiFe4(PO4)4")
        self.assertIsNotNone(vasprun.incar, "Incar cannot be read")
        self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
        self.assertIsNotNone(vasprun.eigenvalues, "Eigenvalues cannot be read")
        self.assertAlmostEqual(vasprun.final_energy, -269.38319884, 7)
        self.assertAlmostEqual(vasprun.tdos.get_gap(), 2.0589, 4)
        expectedans = (2.539, 4.0906, 1.5516, False)
        (gap, cbm, vbm, direct) = vasprun.eigenvalue_band_properties
        self.assertAlmostEqual(gap, expectedans[0])
        self.assertAlmostEqual(cbm, expectedans[1])
        self.assertAlmostEqual(vbm, expectedans[2])
        self.assertEqual(direct, expectedans[3])
        self.assertFalse(vasprun.is_hubbard)
        self.assertEqual(vasprun.potcar_symbols,
                         ['PAW_PBE Li 17Jan2003', 'PAW_PBE Fe 06Sep2000',
                          'PAW_PBE Fe 06Sep2000', 'PAW_PBE P 17Jan2003',
                          'PAW_PBE O 08Apr2002'])
        self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
        self.assertIsNotNone(vasprun.actual_kpoints,
                             "Actual kpoints cannot be read")
        self.assertIsNotNone(vasprun.actual_kpoints_weights,
                             "Actual kpoints weights cannot be read")
        for atomdoses in vasprun.pdos:
            for orbitaldos in atomdoses:
                self.assertIsNotNone(orbitaldos, "Partial Dos cannot be read")
        # test skipping ionic steps.
        vasprun_skip = Vasprun(filepath, 3, parse_potcar_file=False)
        self.assertEqual(vasprun_skip.nionic_steps, 29)
        self.assertEqual(len(vasprun_skip.ionic_steps),
                         int(vasprun.nionic_steps / 3) + 1)
        self.assertEqual(len(vasprun_skip.ionic_steps),
                         len(vasprun_skip.structures))
        self.assertEqual(len(vasprun_skip.ionic_steps),
                         int(vasprun.nionic_steps / 3) + 1)
        # Check that nionic_steps is preserved no matter what.
        self.assertEqual(vasprun_skip.nionic_steps,
                         vasprun.nionic_steps)
        self.assertNotAlmostEqual(vasprun_skip.final_energy,
                                  vasprun.final_energy)
        # Test with ionic_step_offset
        vasprun_offset = Vasprun(filepath, 3, 6, parse_potcar_file=False)
        self.assertEqual(len(vasprun_offset.ionic_steps),
                         int(len(vasprun.ionic_steps) / 3) - 1)
        self.assertEqual(vasprun_offset.structures[0],
                         vasprun_skip.structures[2])
        # GGA+U run: Hubbard parameters and projected eigenvalues.
        self.assertTrue(vasprun_ggau.is_hubbard)
        self.assertEqual(vasprun_ggau.hubbards["Fe"], 4.3)
        self.assertAlmostEqual(vasprun_ggau.projected_eigenvalues[Spin.up][
            0][0][96][0], 0.0032)
        d = vasprun_ggau.as_dict()
        self.assertEqual(d["elements"], ["Fe", "Li", "O", "P"])
        self.assertEqual(d["nelements"], 4)
    def test_unconverged(self):
        # An electronically unconverged run warns and reports the right
        # convergence flags.
        filepath = self.TEST_FILES_DIR / 'vasprun.xml.unconverged'
        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            # Trigger a warning.
            vasprun_unconverged = Vasprun(filepath, parse_potcar_file=False)
            # Verify some things
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[-1].category,
                                       UnconvergedVASPWarning))
            self.assertTrue(vasprun_unconverged.converged_ionic)
            self.assertFalse(vasprun_unconverged.converged_electronic)
            self.assertFalse(vasprun_unconverged.converged)
    def test_dfpt(self):
        # DFPT run: static dielectric tensors and entry processing through
        # the MP compatibility scheme.
        filepath = self.TEST_FILES_DIR / 'vasprun.xml.dfpt'
        vasprun_dfpt = Vasprun(filepath, parse_potcar_file=False)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][0], 3.26105533)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][1], -0.00459066)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static[2][2], 3.24330517)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][0],
                               3.33402531)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][1],
                               -0.00559998)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[2][2],
                               3.31237357)
        self.assertTrue(vasprun_dfpt.converged)
        entry = vasprun_dfpt.get_computed_entry()
        entry = MaterialsProjectCompatibility(
            check_potcar_hash=False).process_entry(entry)
        self.assertAlmostEqual(entry.uncorrected_energy + entry.correction,
                               entry.energy)
    def test_dfpt_ionic(self):
        # Ionic contribution to the dielectric tensor.
        filepath = self.TEST_FILES_DIR / 'vasprun.xml.dfpt.ionic'
        vasprun_dfpt_ionic = Vasprun(filepath, parse_potcar_file=False)
        self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][0],
                               515.73485838)
        self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][1],
                               -0.00263523)
        self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[2][2],
                               19.02110169)
    def test_dfpt_unconverged(self):
        # Convergence flags for an unconverged DFPT run.
        filepath = self.TEST_FILES_DIR / 'vasprun.xml.dfpt.unconverged'
        vasprun_dfpt_unconv = Vasprun(filepath, parse_potcar_file=False)
        self.assertFalse(vasprun_dfpt_unconv.converged_electronic)
        self.assertTrue(vasprun_dfpt_unconv.converged_ionic)
        self.assertFalse(vasprun_dfpt_unconv.converged)
    def test_uniform(self):
        # A uniform (non-line-mode) run reports reciprocal-style k-points.
        vasprun_uniform = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.uniform",
                                  parse_potcar_file=False)
        self.assertEqual(vasprun_uniform.kpoints.style,
                         Kpoints.supported_modes.Reciprocal)
    def test_no_projected(self):
        # complete_dos is still available when projected DOS is absent.
        vasprun_no_pdos = Vasprun(self.TEST_FILES_DIR / "Li_no_projected.xml",
                                  parse_potcar_file=False)
        self.assertIsNotNone(vasprun_no_pdos.complete_dos)
        self.assertFalse(vasprun_no_pdos.dos_has_errors)
    def test_dielectric(self):
        # Spot-check real/imaginary dielectric function components.
        vasprun_diel = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.dielectric",
                               parse_potcar_file=False)
        self.assertAlmostEqual(0.4294, vasprun_diel.dielectric[0][10])
        self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][0])
        self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][1])
        self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][2])
        self.assertAlmostEqual(0.0, vasprun_diel.dielectric[1][51][3])
        self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][0])
        self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][1])
        self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][2])
        self.assertAlmostEqual(0.0, vasprun_diel.dielectric[2][85][3])
    def test_dielectric_vasp608(self):
        # test reading dielectric constant in vasp 6.0.8
        vasprun_diel = Vasprun(
            self.TEST_FILES_DIR / "vasprun.xml.dielectric_6.0.8",
            parse_potcar_file=False)
        self.assertAlmostEqual(0.4338, vasprun_diel.dielectric[0][10])
        self.assertAlmostEqual(5.267, vasprun_diel.dielectric[1][51][0])
        self.assertAlmostEqual(
            0.4338, vasprun_diel.dielectric_data["density"][0][10])
        self.assertAlmostEqual(
            5.267, vasprun_diel.dielectric_data["density"][1][51][0])
        self.assertAlmostEqual(
            0.4338, vasprun_diel.dielectric_data["velocity"][0][10])
        self.assertAlmostEqual(
            1.0741, vasprun_diel.dielectric_data["velocity"][1][51][0])
        self.assertEqual(len(vasprun_diel.other_dielectric), 0)
def test_indirect_vasprun(self):
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.indirect.gz")
(gap, cbm, vbm, direct) = v.eigenvalue_band_properties
self.assertFalse(direct)
def test_optical_vasprun(self):
vasprun_optical = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.opticaltransitions",
parse_potcar_file=False)
self.assertAlmostEqual(3.084, vasprun_optical.optical_transition[0][0])
self.assertAlmostEqual(3.087, vasprun_optical.optical_transition[3][0])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[0][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[1][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[7][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[19][1])
self.assertAlmostEqual(3.3799999999,
vasprun_optical.optical_transition[54][0])
self.assertAlmostEqual(3.381, vasprun_optical.optical_transition[55][0])
self.assertAlmostEqual(3.381, vasprun_optical.optical_transition[56][0])
self.assertAlmostEqual(10554.9860,
vasprun_optical.optical_transition[54][1])
self.assertAlmostEqual(0.0, vasprun_optical.optical_transition[55][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[56][1])
def test_force_constants(self):
    # DFPT phonon run on a 16-atom cell: verify the force-constant matrix,
    # normal-mode eigenvalues and eigenvectors parsed from vasprun.xml.
    vasprun_fc = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.dfpt.phonon",
                         parse_potcar_file=False)
    # Expected 3x3 force-constant block coupling atoms 8 and 9.
    fc_ans = [[-0.00184451, -0., -0.],
              [-0., -0.00933824, -0.03021279],
              [-0., -0.03021279, 0.01202547]]
    # Expected eigenvector of normal mode 33: one 3-vector per atom.
    nm_ans = [[0.0884346, -0.08837289, -0.24995639],
              [-0.0884346, 0.08837289, 0.24995639],
              [0.15306645, -0.05105771, -0.14441306],
              [-0.15306645, 0.05105771, 0.14441306],
              [-0.0884346, 0.08837289, 0.24995639],
              [0.0884346, -0.08837289, -0.24995639],
              [-0.15306645, 0.05105771, 0.14441306],
              [0.15306645, -0.05105771, -0.14441306],
              [-0.0884346, 0.08837289, 0.24995639],
              [0.0884346, -0.08837289, -0.24995639],
              [-0.15306645, 0.05105771, 0.14441306],
              [0.15306645, -0.05105771, -0.14441306],
              [0.0884346, -0.08837289, -0.24995639],
              [-0.0884346, 0.08837289, 0.24995639],
              [0.15306645, -0.05105771, -0.14441306],
              [-0.15306645, 0.05105771, 0.14441306]]
    # Expected eigenvalues for normal modes 17..28.
    nm_eigenval_ans = [-0.59067079, -0.59067079, -0.59067003, -0.59067003,
                       -0.59067003, -0.59067003, -0.585009, -0.585009,
                       -0.58500895, -0.58500883, -0.5062956, -0.5062956]
    # 16 atoms -> a 16x16 grid of 3x3 force-constant blocks and
    # 16 * 3 = 48 normal modes.
    self.assertEqual(vasprun_fc.force_constants.shape, (16, 16, 3, 3))
    self.assertTrue(np.allclose(vasprun_fc.force_constants[8, 9], fc_ans))
    self.assertEqual(vasprun_fc.normalmode_eigenvals.size, 48)
    self.assertTrue(np.allclose(vasprun_fc.normalmode_eigenvals[17:29],
                                nm_eigenval_ans))
    self.assertEqual(vasprun_fc.normalmode_eigenvecs.shape, (48, 16, 3))
    self.assertTrue(
        np.allclose(vasprun_fc.normalmode_eigenvecs[33], nm_ans))
def test_Xe(self):
    # Single-species run: the atomic symbol list contains exactly one Xe.
    run = Vasprun(self.TEST_FILES_DIR / 'vasprun.xml.xe',
                  parse_potcar_file=False)
    self.assertEqual(run.atomic_symbols, ['Xe'])
def test_invalid_element(self):
    # An unknown species symbol in the file must raise ValueError.
    with self.assertRaises(ValueError):
        Vasprun(self.TEST_FILES_DIR / 'vasprun.xml.wrong_sp')
def test_selective_dynamics(self):
    # Selective-dynamics flags: first site fully free, second fully fixed.
    run = Vasprun(self.TEST_FILES_DIR / 'vasprun.xml.indirect.gz')
    sd_flags = run.final_structure.site_properties.get('selective_dynamics')
    np.testing.assert_array_equal(
        sd_flags, [[True] * 3, [False] * 3],
        "Selective dynamics parsing error")
def test_as_dict(self):
    # as_dict() must be JSON-serializable and retain key input settings.
    run = Vasprun(self.TEST_FILES_DIR / 'vasprun.xml',
                  parse_potcar_file=False)
    d = run.as_dict()
    self.assertIsNotNone(json.dumps(d))
    self.assertEqual(d["input"]["potcar_type"], ['PAW_PBE'] * 5)
    self.assertEqual(d['input']['nkpoints'], 24)
def test_get_band_structure(self):
    """Band-structure extraction: normal path, compressed-file discovery,
    hybrid-style weighting and forced line mode.

    Bugfix: ``msg`` is now passed to ``assertAlmostEqual`` by keyword.
    The third positional argument of ``assertAlmostEqual`` is ``places``,
    so passing the message positionally would raise ``TypeError`` on a
    failing comparison instead of reporting a clean assertion failure.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        filepath = self.TEST_FILES_DIR / 'vasprun_Si_bands.xml'
        vasprun = Vasprun(filepath,
                          parse_projected_eigen=True,
                          parse_potcar_file=False)
        bs = vasprun.get_band_structure(kpoints_filename=self.TEST_FILES_DIR / 'KPOINTS_Si_bands')
        cbm = bs.get_cbm()
        vbm = bs.get_vbm()
        self.assertEqual(cbm['kpoint_index'], [13],
                         "wrong cbm kpoint index")
        self.assertAlmostEqual(cbm['energy'], 6.2301,
                               msg="wrong cbm energy")
        self.assertEqual(cbm['band_index'], {Spin.up: [4], Spin.down: [4]},
                         "wrong cbm bands")
        self.assertEqual(vbm['kpoint_index'], [0, 63, 64])
        self.assertAlmostEqual(vbm['energy'], 5.6158,
                               msg="wrong vbm energy")
        self.assertEqual(vbm['band_index'], {Spin.up: [1, 2, 3],
                                             Spin.down: [1, 2, 3]},
                         "wrong vbm bands")
        self.assertEqual(vbm['kpoint'].label, "\\Gamma", "wrong vbm label")
        self.assertEqual(cbm['kpoint'].label, None, "wrong cbm label")
        # Projections on elements and on element/orbital pairs.
        projected = bs.get_projection_on_elements()
        self.assertAlmostEqual(projected[Spin.up][0][0]["Si"], 0.4238)
        projected = bs.get_projections_on_elements_and_orbitals(
            {"Si": ["s"]})
        self.assertAlmostEqual(projected[Spin.up][0][0]["Si"]["s"], 0.4238)
        # Test compressed files case 1: compressed KPOINTS in current dir
        with ScratchDir("./"):
            copyfile(self.TEST_FILES_DIR / 'vasprun_Si_bands.xml',
                     'vasprun.xml')
            # Check for error if no KPOINTS file
            vasprun = Vasprun('vasprun.xml',
                              parse_projected_eigen=True,
                              parse_potcar_file=False)
            with self.assertRaises(VaspParserError):
                _ = vasprun.get_band_structure(line_mode=True)
            # Check KPOINTS.gz successfully inferred and used if present
            with open(self.TEST_FILES_DIR / 'KPOINTS_Si_bands', 'rb') as f_in:
                with gzip.open('KPOINTS.gz', 'wb') as f_out:
                    copyfileobj(f_in, f_out)
            bs_kpts_gzip = vasprun.get_band_structure()
            self.assertEqual(bs.efermi, bs_kpts_gzip.efermi)
            self.assertEqual(bs.as_dict(), bs_kpts_gzip.as_dict())
        # Test compressed files case 2: compressed vasprun in another dir
        with ScratchDir("./"):
            os.mkdir('deeper')
            copyfile(self.TEST_FILES_DIR / 'KPOINTS_Si_bands', Path('deeper') / 'KPOINTS')
            with open(self.TEST_FILES_DIR / 'vasprun_Si_bands.xml', 'rb') as f_in:
                with gzip.open(os.path.join('deeper', 'vasprun.xml.gz'),
                               'wb') as f_out:
                    copyfileobj(f_in, f_out)
            vasprun = Vasprun(os.path.join('deeper', 'vasprun.xml.gz'),
                              parse_projected_eigen=True,
                              parse_potcar_file=False)
            bs_vasprun_gzip = vasprun.get_band_structure(line_mode=True)
            self.assertEqual(bs.efermi, bs_vasprun_gzip.efermi)
            self.assertEqual(bs.as_dict(), bs_vasprun_gzip.as_dict())
        # test hybrid band structures: zeroing the last k-point weight
        # makes the run look like a hybrid calculation.
        vasprun.actual_kpoints_weights[-1] = 0.
        bs = vasprun.get_band_structure(kpoints_filename=self.TEST_FILES_DIR / 'KPOINTS_Si_bands')
        cbm = bs.get_cbm()
        vbm = bs.get_vbm()
        self.assertEqual(cbm['kpoint_index'], [0])
        self.assertAlmostEqual(cbm['energy'], 6.3676)
        self.assertEqual(cbm['kpoint'].label, None)
        self.assertEqual(vbm['kpoint_index'], [0])
        self.assertAlmostEqual(vbm['energy'], 2.8218)
        self.assertEqual(vbm['kpoint'].label, None)
        # test self-consistent band structure calculation for non-hybrid functionals
        vasprun = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.forcehybridlikecalc",
                          parse_projected_eigen=True,
                          parse_potcar_file=False)
        bs = vasprun.get_band_structure(kpoints_filename=self.TEST_FILES_DIR / "KPOINTS.forcehybridlikecalc",
                                        force_hybrid_mode=True, line_mode=True)
        dict_to_test = bs.get_band_gap()
        self.assertTrue(dict_to_test['direct'])
        self.assertAlmostEqual(dict_to_test['energy'], 6.007899999999999)
        self.assertEqual(dict_to_test['transition'], "\\Gamma-\\Gamma")
        self.assertEqual(bs.get_branch(0)[0]['start_index'], 0)
        self.assertEqual(bs.get_branch(0)[0]['end_index'], 0)
def test_sc_step_overflow(self):
    # Electronic steps containing '*****' overflow markers are parsed
    # as NaN instead of crashing the parser.
    vasprun = Vasprun(self.TEST_FILES_DIR / 'vasprun.xml.sc_overflow')
    estep = vasprun.ionic_steps[0]['electronic_steps'][29]
    self.assertTrue(np.isnan(estep['e_wo_entrp']))
def test_update_potcar(self):
    # potcar_spec hashes start out as None, get filled in from a matching
    # POTCAR (either after the fact or at parse time), and a mismatched
    # POTCAR raises ValueError.
    filepath = self.TEST_FILES_DIR / 'vasprun.xml'
    potcar_path = self.TEST_FILES_DIR / 'POTCAR.LiFePO4.gz'
    potcar_path2 = self.TEST_FILES_DIR / 'POTCAR2.LiFePO4.gz'
    titels = ["PAW_PBE Li 17Jan2003", "PAW_PBE Fe 06Sep2000",
              "PAW_PBE Fe 06Sep2000", "PAW_PBE P 17Jan2003",
              "PAW_PBE O 08Apr2002"]
    hashes = ["65e83282d1707ec078c1012afbd05be8",
              "9530da8244e4dac17580869b4adab115",
              "9530da8244e4dac17580869b4adab115",
              "7dc3393307131ae67785a0cdacb61d5f",
              "7a25bc5b9a5393f46600a4939d357982"]
    spec_unhashed = [{"titel": t, "hash": None} for t in titels]
    spec_hashed = [{"titel": t, "hash": h} for t, h in zip(titels, hashes)]
    vasprun = Vasprun(filepath, parse_potcar_file=False)
    self.assertEqual(vasprun.potcar_spec, spec_unhashed)
    vasprun.update_potcar_spec(potcar_path)
    self.assertEqual(vasprun.potcar_spec, spec_hashed)
    vasprun2 = Vasprun(filepath, parse_potcar_file=False)
    self.assertRaises(ValueError, vasprun2.update_potcar_spec, potcar_path2)
    vasprun = Vasprun(filepath, parse_potcar_file=potcar_path)
    self.assertEqual(vasprun.potcar_spec, spec_hashed)
    self.assertRaises(ValueError, Vasprun, filepath,
                      parse_potcar_file=potcar_path2)
def test_search_for_potcar(self):
    # With parse_potcar_file=True a POTCAR next to the vasprun is
    # discovered automatically and its hashes are recorded.
    vasprun = Vasprun(self.TEST_FILES_DIR / 'vasprun.xml',
                      parse_potcar_file=True)
    expected = [
        ("PAW_PBE Li 17Jan2003", "65e83282d1707ec078c1012afbd05be8"),
        ("PAW_PBE Fe 06Sep2000", "9530da8244e4dac17580869b4adab115"),
        ("PAW_PBE Fe 06Sep2000", "9530da8244e4dac17580869b4adab115"),
        ("PAW_PBE P 17Jan2003", "7dc3393307131ae67785a0cdacb61d5f"),
        ("PAW_PBE O 08Apr2002", "7a25bc5b9a5393f46600a4939d357982"),
    ]
    self.assertEqual(vasprun.potcar_spec,
                     [{"titel": t, "hash": h} for t, h in expected])
def test_potcar_not_found(self):
    # When no matching POTCAR can be located, warnings are emitted and
    # every hash stays None.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        vasprun = Vasprun(self.TEST_FILES_DIR / 'vasprun.xml',
                          parse_potcar_file='.')
        self.assertEqual(len(w), 2)
    titels = ["PAW_PBE Li 17Jan2003", "PAW_PBE Fe 06Sep2000",
              "PAW_PBE Fe 06Sep2000", "PAW_PBE P 17Jan2003",
              "PAW_PBE O 08Apr2002"]
    self.assertEqual(vasprun.potcar_spec,
                     [{"titel": t, "hash": None} for t in titels])
def test_parsing_chemical_shift_calculations(self):
    # NMR chemical-shift run: the final ionic step must report the full
    # electronic-step count and the run must be converged.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        fp = (self.TEST_FILES_DIR / "nmr" / "cs" / "basic"
              / 'vasprun.xml.chemical_shift.scstep')
        run = Vasprun(fp)
        self.assertEqual(len(run.ionic_steps[-1]['electronic_steps']), 10)
        self.assertTrue(run.converged)
def test_parsing_efg_calcs(self):
    # NMR electric-field-gradient run: electronic-step count of the last
    # ionic step and convergence flag.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        fp = self.TEST_FILES_DIR / "nmr" / "efg" / "AlPO4" / 'vasprun.xml'
        run = Vasprun(fp)
        self.assertEqual(len(run.ionic_steps[-1]['electronic_steps']), 18)
        self.assertTrue(run.converged)
def test_charged_structure(self):
    # NELECT combined with POTCAR ZVALs determines the structure charge;
    # exercise one plain and one split charged vasprun.
    cases = [
        # (vasprun name, potcar path, (param key, default),
        #  expected NELECT, structure index to check)
        ('vasprun.charged.xml',
         self.TEST_FILES_DIR / 'POT_GGA_PAW_PBE' / 'POTCAR.Si.gz',
         ("NELECT", 8), 9, 0),
        ('vasprun.split.charged.xml',
         self.TEST_FILES_DIR / 'POTCAR.split.charged.gz',
         ('NELECT', 0), 7, -1),
    ]
    for fname, potcar, (key, default), nelect, idx in cases:
        run = Vasprun(self.TEST_FILES_DIR / fname, parse_potcar_file=False)
        run.update_charge_from_potcar(potcar)
        self.assertEqual(run.parameters.get(key, default), nelect)
        self.assertEqual(run.structures[idx].charge, 1)
def test_kpointset_electronvelocities(self):
    # LVEL run: the eigenvalue array must span the full k-point set.
    run = Vasprun(self.TEST_FILES_DIR / 'vasprun.lvel.Si2H.xml',
                  parse_potcar_file=False)
    self.assertEqual(run.eigenvalues[Spin.up].shape[0],
                     len(run.actual_kpoints))
class OutcarTest(PymatgenTest):
    """Tests for OUTCAR parsing: charges/magnetization, LEPSILON tensors,
    SOC magnetization, polarization, frequency-dependent dielectrics,
    NMR data, electrostatic potentials and plane-wave counts.

    Fixes relative to the previous version:
    - ``assertEqual`` replaces ``assertAlmostEqual`` for tuples of dicts
      and for ``len()`` comparisons. ``assertAlmostEqual`` only passed via
      its exact-equality fast path there, and on a mismatch it would raise
      ``TypeError`` (``round`` of a non-numeric delta) instead of a clean
      assertion failure.
    - The identical LEPSILON assertion sets of ``test_stopped`` and
      ``test_stopped_old`` are shared via ``_check_lepsilon``.
    """
    _multiprocess_shared_ = True

    def test_init(self):
        # Plain and gzipped OUTCARs must parse identically.
        for f in ['OUTCAR', 'OUTCAR.gz']:
            filepath = self.TEST_FILES_DIR / f
            outcar = Outcar(filepath)
            expected_mag = ({'d': 0.0, 'p': 0.003, 's': 0.002, 'tot': 0.005},
                            {'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
                            {'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
                            {'d': 0.0, 'p': -0.117, 's': 0.005, 'tot': -0.112},
                            {'d': 0.0, 'p': -0.165, 's': 0.004, 'tot': -0.162},
                            {'d': 0.0, 'p': -0.117, 's': 0.005, 'tot': -0.112},
                            {'d': 0.0, 'p': -0.165, 's': 0.004, 'tot': -0.162})
            expected_chg = ({'p': 0.154, 's': 0.078, 'd': 0.0, 'tot': 0.232},
                            {'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
                            {'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
                            {'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
                            {'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947},
                            {'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
                            {'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947})
            # Tuples of dicts: exact equality is the meaningful check.
            self.assertEqual(outcar.magnetization, expected_mag,
                             "Wrong magnetization read from Outcar")
            self.assertEqual(outcar.charge, expected_chg,
                             "Wrong charge read from Outcar")
            self.assertFalse(outcar.is_stopped)
            self.assertEqual(outcar.run_stats, {'System time (sec)': 0.938,
                                                'Total CPU time used (sec)': 545.142,
                                                'Elapsed time (sec)': 546.709,
                                                'Maximum memory used (kb)': 0.0,
                                                'Average memory used (kb)': 0.0,
                                                'User time (sec)': 544.204,
                                                'cores': '8'})
            self.assertAlmostEqual(outcar.efermi, 2.0112)
            self.assertAlmostEqual(outcar.nelect, 44.9999991)
            self.assertAlmostEqual(outcar.total_mag, 0.9999998)
            self.assertIsNotNone(outcar.as_dict())
            self.assertFalse(outcar.lepsilon)
            # The final energy must equal the sum of its contributions.
            toten = 0
            for k in outcar.final_energy_contribs.keys():
                toten += outcar.final_energy_contribs[k]
            self.assertAlmostEqual(toten, outcar.final_energy, 6)

    def _check_lepsilon(self, outcar):
        # Shared assertions for LEPSILON runs (dielectric, piezo, Born
        # effective charges, internal strain). The reference values for
        # the old-Born and new-format test files are identical.
        self.assertTrue(outcar.lepsilon)
        self.assertAlmostEqual(outcar.dielectric_tensor[0][0], 3.716432)
        self.assertAlmostEqual(outcar.dielectric_tensor[0][1], -0.20464)
        self.assertAlmostEqual(outcar.dielectric_tensor[1][2], -0.20464)
        self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][0], 0.001419)
        self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][2], 0.001419)
        self.assertAlmostEqual(outcar.dielectric_ionic_tensor[2][2], 0.001419)
        self.assertAlmostEqual(outcar.piezo_tensor[0][0], 0.52799)
        self.assertAlmostEqual(outcar.piezo_tensor[1][3], 0.35998)
        self.assertAlmostEqual(outcar.piezo_tensor[2][5], 0.35997)
        self.assertAlmostEqual(outcar.piezo_ionic_tensor[0][0], 0.05868)
        self.assertAlmostEqual(outcar.piezo_ionic_tensor[1][3], 0.06241)
        self.assertAlmostEqual(outcar.piezo_ionic_tensor[2][5], 0.06242)
        self.assertAlmostEqual(outcar.born[0][1][2], -0.385)
        self.assertAlmostEqual(outcar.born[1][2][0], 0.36465)
        self.assertAlmostEqual(outcar.internal_strain_tensor[0][0][0], -572.5437, places=4)
        self.assertAlmostEqual(outcar.internal_strain_tensor[0][1][0], 683.2985, places=4)
        self.assertAlmostEqual(outcar.internal_strain_tensor[0][1][3], 73.07059, places=4)
        self.assertAlmostEqual(outcar.internal_strain_tensor[1][0][0], 570.98927, places=4)
        self.assertAlmostEqual(outcar.internal_strain_tensor[1][1][0], -683.68519, places=4)
        self.assertAlmostEqual(outcar.internal_strain_tensor[1][2][2], 570.98927, places=4)

    def test_stopped_old(self):
        filepath = self.TEST_FILES_DIR / 'OUTCAR.stopped'
        outcar = Outcar(filepath)
        self.assertTrue(outcar.is_stopped)
        # Old Born-charge format, plain and gzipped.
        for f in ['OUTCAR.lepsilon_old_born', 'OUTCAR.lepsilon_old_born.gz']:
            self._check_lepsilon(Outcar(self.TEST_FILES_DIR / f))

    def test_stopped(self):
        filepath = self.TEST_FILES_DIR / 'OUTCAR.stopped'
        outcar = Outcar(filepath)
        self.assertTrue(outcar.is_stopped)
        # Current format, plain and gzipped.
        for f in ['OUTCAR.lepsilon', 'OUTCAR.lepsilon.gz']:
            self._check_lepsilon(Outcar(self.TEST_FILES_DIR / f))

    def test_soc(self):
        # Non-collinear (SOC) OUTCAR: magnetization entries are Magmom
        # vectors rather than scalars.
        filepath = self.TEST_FILES_DIR / 'OUTCAR.NiO_SOC.gz'
        outcar = Outcar(filepath)
        expected_mag = (
            {'s': Magmom([0.0, 0.0, -0.001]), 'p': Magmom([0.0, 0.0, -0.003]),
             'd': Magmom([0.0, 0.0, 1.674]), 'tot': Magmom([0.0, 0.0, 1.671])},
            {'s': Magmom([0.0, 0.0, 0.001]), 'p': Magmom([0.0, 0.0, 0.003]),
             'd': Magmom([0.0, 0.0, -1.674]),
             'tot': Magmom([0.0, 0.0, -1.671])},
            {'s': Magmom([0.0, 0.0, 0.0]), 'p': Magmom([0.0, 0.0, 0.0]),
             'd': Magmom([0.0, 0.0, 0.0]), 'tot': Magmom([0.0, 0.0, 0.0])},
            {'s': Magmom([0.0, 0.0, 0.0]), 'p': Magmom([0.0, 0.0, 0.0]),
             'd': Magmom([0.0, 0.0, 0.0]), 'tot': Magmom([0.0, 0.0, 0.0])}
        )
        # test note: Magmom class uses np.allclose() when testing for equality
        # so fine to use assertEqual here
        self.assertEqual(outcar.magnetization, expected_mag,
                         "Wrong vector magnetization read from Outcar for SOC calculation")

    def test_polarization(self):
        filepath = self.TEST_FILES_DIR / "OUTCAR.BaTiO3.polar"
        outcar = Outcar(filepath)
        self.assertEqual(outcar.spin, True)
        self.assertEqual(outcar.noncollinear, False)
        # Ionic and electronic polarization components.
        self.assertAlmostEqual(outcar.p_ion[0], 0.0)
        self.assertAlmostEqual(outcar.p_ion[1], 0.0)
        self.assertAlmostEqual(outcar.p_ion[2], -5.56684)
        self.assertAlmostEqual(outcar.p_sp1[0], 2.00068)
        self.assertAlmostEqual(outcar.p_sp2[0], -2.00044)
        self.assertAlmostEqual(outcar.p_elec[0], 0.00024)
        self.assertAlmostEqual(outcar.p_elec[1], 0.00019)
        self.assertAlmostEqual(outcar.p_elec[2], 3.61674)

    def test_pseudo_zval(self):
        # ZVAL dictionaries parsed from the pseudopotential block.
        filepath = self.TEST_FILES_DIR / "OUTCAR.BaTiO3.polar"
        outcar = Outcar(filepath)
        self.assertDictEqual({'Ba': 10.00, 'Ti': 10.00, 'O': 6.00},
                             outcar.zval_dict)
        filepath = self.TEST_FILES_DIR / "OUTCAR.LaSnNO2.polar"
        outcar = Outcar(filepath)
        self.assertDictEqual({'La': 11.0, 'N': 5.0, 'O': 6.0, 'Sn': 14.0},
                             outcar.zval_dict)

    def test_dielectric(self):
        filepath = self.TEST_FILES_DIR / "OUTCAR.dielectric"
        outcar = Outcar(filepath)
        outcar.read_corrections()
        self.assertAlmostEqual(outcar.data["dipol_quadrupol_correction"],
                               0.03565)
        self.assertAlmostEqual(outcar.final_energy, -797.46760559)

    def test_freq_dielectric(self):
        # LOPTICS run: frequency-dependent (complex) dielectric function.
        filepath = self.TEST_FILES_DIR / "OUTCAR.LOPTICS"
        outcar = Outcar(filepath)
        outcar.read_freq_dielectric()
        self.assertAlmostEqual(outcar.dielectric_energies[0], 0)
        self.assertAlmostEqual(outcar.dielectric_energies[-1], 39.826101)
        self.assertAlmostEqual(outcar.dielectric_tensor_function[0][0, 0],
                               8.96938800)
        self.assertAlmostEqual(outcar.dielectric_tensor_function[-1][0, 0],
                               7.36167000e-01 + 1.53800000e-03j)
        self.assertEqual(len(outcar.dielectric_energies),
                         len(outcar.dielectric_tensor_function))
        # The static tensor must be symmetric.
        np.testing.assert_array_equal(outcar.dielectric_tensor_function[0],
                                      outcar.dielectric_tensor_function[
                                          0].transpose())
        plasma_freq = outcar.plasma_frequencies
        self.assertArrayAlmostEqual(plasma_freq["intraband"], np.zeros((3, 3)))
        self.assertArrayAlmostEqual(plasma_freq["interband"],
                                    [[367.49, 63.939, 11.976],
                                     [63.939, 381.155, -24.461],
                                     [11.976, -24.461, 297.844]])

    def test_freq_dielectric_vasp544(self):
        # Same as above but for the VASP 5.4.4 output format.
        filepath = self.TEST_FILES_DIR / "OUTCAR.LOPTICS.vasp544"
        outcar = Outcar(filepath)
        outcar.read_freq_dielectric()
        self.assertAlmostEqual(outcar.dielectric_energies[0], 0)
        self.assertAlmostEqual(outcar.dielectric_energies[-1], 39.63964)
        self.assertAlmostEqual(outcar.dielectric_tensor_function[0][0, 0],
                               12.769435 + 0j)
        self.assertAlmostEqual(outcar.dielectric_tensor_function[-1][0, 0],
                               0.828615 + 0.016594j)
        self.assertEqual(len(outcar.dielectric_energies),
                         len(outcar.dielectric_tensor_function))
        np.testing.assert_array_equal(outcar.dielectric_tensor_function[0],
                                      outcar.dielectric_tensor_function[
                                          0].transpose())

    def test_read_elastic_tensor(self):
        filepath = self.TEST_FILES_DIR / "OUTCAR.total_tensor.Li2O.gz"
        outcar = Outcar(filepath)
        outcar.read_elastic_tensor()
        self.assertAlmostEqual(outcar.data["elastic_tensor"][0][0], 1986.3391)
        self.assertAlmostEqual(outcar.data["elastic_tensor"][0][1], 187.8324)
        self.assertAlmostEqual(outcar.data["elastic_tensor"][3][3], 586.3034)

    def test_read_piezo_tensor(self):
        filepath = self.TEST_FILES_DIR / "OUTCAR.lepsilon.gz"
        outcar = Outcar(filepath)
        outcar.read_piezo_tensor()
        self.assertAlmostEqual(outcar.data["piezo_tensor"][0][0], 0.52799)
        self.assertAlmostEqual(outcar.data["piezo_tensor"][1][3], 0.35998)
        self.assertAlmostEqual(outcar.data["piezo_tensor"][2][5], 0.35997)

    def test_core_state_eigen(self):
        # Core-level eigenvalues keyed by atom index and shell label.
        filepath = self.TEST_FILES_DIR / "OUTCAR.CL"
        cl = Outcar(filepath).read_core_state_eigen()
        self.assertAlmostEqual(cl[6]["2s"][-1], -174.4779)
        filepath = self.TEST_FILES_DIR / "OUTCAR.icorelevel"
        cl = Outcar(filepath).read_core_state_eigen()
        self.assertAlmostEqual(cl[4]["3d"][-1], -31.4522)

    def test_avg_core_poten(self):
        filepath = self.TEST_FILES_DIR / "OUTCAR.lepsilon"
        cp = Outcar(filepath).read_avg_core_poten()
        self.assertAlmostEqual(cp[-1][1], -90.0487)
        filepath = self.TEST_FILES_DIR / "OUTCAR"
        cp = Outcar(filepath).read_avg_core_poten()
        self.assertAlmostEqual(cp[0][6], -73.1068)

    def test_single_atom(self):
        # Degenerate single-atom case: one-entry charge/magnetization tuples.
        filepath = self.TEST_FILES_DIR / "OUTCAR.Al"
        outcar = Outcar(filepath)
        expected_mag = ({u'p': 0.0, u's': 0.0, u'd': 0.0, u'tot': 0.0},)
        expected_chg = ({u'p': 0.343, u's': 0.425, u'd': 0.0, u'tot': 0.768},)
        # Tuples of dicts: exact equality is the meaningful check.
        self.assertEqual(outcar.magnetization, expected_mag)
        self.assertEqual(outcar.charge, expected_chg)
        self.assertFalse(outcar.is_stopped)
        self.assertEqual(outcar.run_stats, {'System time (sec)': 0.592,
                                            'Total CPU time used (sec)': 50.194,
                                            'Elapsed time (sec)': 52.337,
                                            'Maximum memory used (kb)': 62900.0,
                                            'Average memory used (kb)': 0.0,
                                            'User time (sec)': 49.602,
                                            'cores': '32'})
        self.assertAlmostEqual(outcar.efermi, 8.0942)
        self.assertAlmostEqual(outcar.nelect, 3)
        self.assertAlmostEqual(outcar.total_mag, 8.2e-06)
        self.assertIsNotNone(outcar.as_dict())

    def test_chemical_shielding(self):
        filename = self.TEST_FILES_DIR / "nmr" / "cs" / "core.diff" / "hydromagnesite" / "OUTCAR"
        outcar = Outcar(filename)
        expected_chemical_shielding = [[191.9974, 69.5232, 0.6342],
                                       [195.0808, 68.183, 0.833],
                                       [192.0389, 69.5762, 0.6329],
                                       [195.0844, 68.1756, 0.8336],
                                       [192.005, 69.5289, 0.6339],
                                       [195.0913, 68.1859, 0.833],
                                       [192.0237, 69.565, 0.6333],
                                       [195.0788, 68.1733, 0.8337]]
        # Lengths are ints -- compare exactly.
        self.assertEqual(
            len(outcar.data["chemical_shielding"]["valence_only"][20: 28]),
            len(expected_chemical_shielding))
        self.assertArrayAlmostEqual(outcar.data["chemical_shielding"]["valence_and_core"][20:28],
                                    expected_chemical_shielding, decimal=5)

    def test_chemical_shielding_with_different_core_contribution(self):
        filename = self.TEST_FILES_DIR / "nmr" / "cs" / "core.diff" / "core.diff.chemical.shifts.OUTCAR"
        outcar = Outcar(filename)
        c_vo = outcar.data["chemical_shielding"]["valence_only"][7]
        for x1, x2 in zip(list(c_vo),
                          [198.7009, 73.7484, 1.0000]):
            self.assertAlmostEqual(x1, x2)
        c_vc = outcar.data["chemical_shielding"]["valence_and_core"][7]
        for x1, x2 in zip(list(c_vc),
                          [-1.9406, 73.7484, 1.0000]):
            self.assertAlmostEqual(x1, x2)

    def test_cs_raw_tensors(self):
        filename = self.TEST_FILES_DIR / "nmr" / "cs" / "core.diff" / "core.diff.chemical.shifts.OUTCAR"
        outcar = Outcar(filename)
        unsym_tensors = outcar.data["unsym_cs_tensor"]
        self.assertEqual(unsym_tensors[0],
                         [[-145.814605, -4.263425, 0.000301],
                          [4.263434, -145.812238, -8.7e-05],
                          [0.000136, -0.000189, -142.794068]])
        self.assertEqual(unsym_tensors[29],
                         [[287.789318, -53.799325, 30.900024],
                          [-53.799571, 225.668117, -17.839598],
                          [3.801103, -2.195218, 88.896756]])

    def test_cs_g0_contribution(self):
        filename = self.TEST_FILES_DIR / "nmr" / "cs" / "core.diff" / "core.diff.chemical.shifts.OUTCAR"
        outcar = Outcar(filename)
        g0_contrib = outcar.data["cs_g0_contribution"]
        self.assertEqual(g0_contrib,
                         [[-8.773535, 9e-06, 1e-06],
                          [1.7e-05, -8.773536, -0.0792],
                          [-6e-06, -0.008328, -9.320237]])

    def test_cs_core_contribution(self):
        filename = self.TEST_FILES_DIR / "nmr" / "cs" / "core.diff" / "core.diff.chemical.shifts.OUTCAR"
        outcar = Outcar(filename)
        core_contrib = outcar.data["cs_core_contribution"]
        self.assertEqual(core_contrib,
                         {'Mg': -412.8248405,
                          'C': -200.5098812,
                          'O': -271.0766979})

    def test_nmr_efg(self):
        filename = self.TEST_FILES_DIR / "nmr" / "efg" / "AlPO4" / "OUTCAR"
        outcar = Outcar(filename)
        expected_efg = [
            {'eta': 0.465, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.573},
            {'eta': 0.465, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.573},
            {'eta': 0.137, 'nuclear_quadrupole_moment': 146.6, 'cq': 6.327},
            {'eta': 0.137, 'nuclear_quadrupole_moment': 146.6, 'cq': 6.327},
            {'eta': 0.112, 'nuclear_quadrupole_moment': 146.6, 'cq': -7.453},
            {'eta': 0.112, 'nuclear_quadrupole_moment': 146.6, 'cq': -7.453},
            {'eta': 0.42, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.58},
            {'eta': 0.42, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.58}]
        self.assertEqual(len(outcar.data["efg"][2:10]), len(expected_efg))
        for e1, e2 in zip(outcar.data["efg"][2:10], expected_efg):
            for k in e1.keys():
                self.assertAlmostEqual(e1[k], e2[k], places=5)
        expected_tensors = [[[11.11, 1.371, 2.652], [1.371, 3.635, -3.572], [2.652, -3.572, -14.746]],
                            [[11.11, -1.371, 2.652], [-1.371, 3.635, 3.572], [2.652, 3.572, -14.746]],
                            [[-3.098, 6.511, 7.732], [6.511, 1.419, 11.445], [7.732, 11.445, 1.678]],
                            [[-3.098, -6.511, 7.732], [-6.511, 1.419, -11.445], [7.732, -11.445, 1.678]],
                            [[2.344, -10.775, -7.006], [-10.775, -7.152, -11.309], [-7.006, -11.309, 4.808]],
                            [[2.344, 10.775, -7.006], [10.775, -7.152, 11.309], [-7.006, 11.309, 4.808]],
                            [[2.404, -0.588, -6.83], [-0.588, 10.435, 3.159], [-6.83, 3.159, -12.839]],
                            [[2.404, 0.588, -6.83], [0.588, 10.435, -3.159], [-6.83, -3.159, -12.839]]]
        self.assertEqual(len(outcar.data["unsym_efg_tensor"][2:10]), len(expected_tensors))
        for e1, e2 in zip(outcar.data["unsym_efg_tensor"][2:10], expected_tensors):
            self.assertArrayAlmostEqual(e1, e2)

    def test_read_fermi_contact_shift(self):
        filepath = self.TEST_FILES_DIR / "OUTCAR_fc"
        outcar = Outcar(filepath)
        outcar.read_fermi_contact_shift()
        self.assertAlmostEqual(outcar.data["fermi_contact_shift"][u'fch'][0][0],
                               -0.002)
        self.assertAlmostEqual(outcar.data["fermi_contact_shift"][u'th'][0][0],
                               -0.052)
        self.assertAlmostEqual(outcar.data["fermi_contact_shift"][u'dh'][0][0],
                               0.0)

    def test_drift(self):
        # One drift entry per ionic step; totals should (nearly) cancel.
        outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR")
        self.assertEqual(len(outcar.drift), 5)
        self.assertAlmostEqual(np.sum(outcar.drift), 0)
        outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR.CL")
        self.assertEqual(len(outcar.drift), 79)
        self.assertAlmostEqual(np.sum(outcar.drift), 0.448010)

    def test_electrostatic_potential(self):
        outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR")
        self.assertEqual(outcar.ngf, [54, 30, 54])
        self.assertTrue(
            np.allclose(outcar.sampling_radii, [0.9748, 0.9791, 0.7215]))
        self.assertTrue(np.allclose(outcar.electrostatic_potential,
                                    [-26.0704, -45.5046, -45.5046, -72.9539,
                                     -73.0621, -72.9539, -73.0621]))

    def test_mag_electrostatic_error(self):
        # Regression test: large spin-polarized runs previously tripped up
        # the electrostatic-potential parser.
        outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR.electrostaticerror.gz")
        self.assertEqual(outcar.electrostatic_potential,
                         [-21.1667, -19.6865, -22.3983, -22.3307, -20.5213, -20.9292, -21.5063, -21.3554, -21.74,
                          -21.7018, -20.3422, -20.6128, -21.4405, -21.0022, -21.975, -21.915, -21.0156, -21.9027,
                          -22.3712, -21.5816, -21.8535, -20.5061, -22.2474, -22.1904, -22.2203, -20.1727, -21.1068,
                          -20.1669, -22.1272, -21.3446, -82.4717, -83.035, -81.8289, -82.5957, -81.7813, -82.5011,
                          -82.6098, -82.2885, -81.606, -99.1621, -99.3146, -99.1742, -99.4728, -100.2139, -99.852,
                          -99.3575, -99.4135, -98.9092, -99.8867, -99.3707, -99.0794, -98.8376, -99.3656, -98.6474,
                          -99.3264, -98.844, -99.074, -98.9354, -99.1643, -99.2412, -68.7667, -68.2528, -66.7326,
                          -67.7113, -69.2228, -67.014, -69.1456, -67.3151, -68.2625, -67.6156, -69.8112, -68.9266,
                          -67.8286, -69.3289, -68.7017, -67.2834, -68.4665, -68.0188, -67.7083, -69.7195, -67.4078,
                          -67.9646, -68.584, -69.2387, -69.7822, -67.0701, -67.8236, -68.2468, -68.6533, -68.3218,
                          -67.5923, -69.1266, -68.4615, -68.302, -67.999, -68.6709, -68.9973, -67.4147, -68.4463,
                          -68.0899, -67.665, -69.6705, -68.6433, -68.4288, -66.9027, -67.3211, -68.604, -69.1299,
                          -67.5565, -69.0845, -67.4289, -66.6864, -67.6484, -67.9783, -67.7661, -66.9797, -67.8007,
                          -68.3194, -69.3671, -67.2708])

    def test_onsite_density_matrix(self):
        # LDAU on-site density matrices, including the merged-numbers format.
        outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR.LinearResponseU.gz")
        matrices = outcar.data["onsite_density_matrices"]
        self.assertEqual(matrices[0][Spin.up][0][0], 1.0227)
        self.assertEqual(len(matrices[0][Spin.up]), 5)
        self.assertEqual(len(matrices[0][Spin.up][0]), 5)
        self.assertTrue("onsite_density_matrices" in outcar.as_dict())
        outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR_merged_numbers")
        matrices = outcar.data["onsite_density_matrices"]
        self.assertEqual(matrices[0][Spin.up][0][-1], 0.0)
        self.assertEqual(len(matrices[0][Spin.up]), 7)
        self.assertEqual(len(matrices[0][Spin.up][0]), 7)
        self.assertTrue("onsite_density_matrices" in outcar.as_dict())

    def test_nplwvs(self):
        # Plane-wave counts, total and per k-point.
        outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR")
        self.assertEqual(outcar.data["nplwv"], [[34560]])
        self.assertEqual(outcar.data["nplwvs_at_kpoints"],
                         [1719, 1714, 1722, 1728, 1722, 1726, 1722, 1720, 1717, 1724, 1715, 1724, 1726, 1724, 1728,
                          1715, 1722, 1715, 1726, 1730, 1730, 1715, 1716, 1729, 1727, 1723, 1721, 1712, 1723, 1719,
                          1717, 1717, 1724, 1719, 1719, 1727, 1726, 1730, 1719, 1720, 1718, 1717, 1722, 1719, 1709,
                          1714, 1724, 1726, 1718, 1713, 1720, 1713, 1711, 1713, 1715, 1717, 1728, 1726, 1712, 1722,
                          1714, 1713, 1717, 1714, 1714, 1717, 1712, 1710, 1721, 1722, 1724, 1720, 1726, 1719, 1722,
                          1714])
        outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR.CL")
        self.assertEqual(outcar.data["nplwv"], [[None]])
        self.assertEqual(outcar.data["nplwvs_at_kpoints"], [85687])
class BSVasprunTest(PymatgenTest):
    """Tests for the lightweight band-structure-only vasprun parser.

    Bugfix: ``msg`` is now passed to ``assertAlmostEqual`` by keyword.
    The third positional argument of ``assertAlmostEqual`` is ``places``,
    so passing the message positionally would raise ``TypeError`` on a
    failing comparison instead of reporting a clean assertion failure.
    """
    _multiprocess_shared_ = True

    def test_get_band_structure(self):
        filepath = self.TEST_FILES_DIR / 'vasprun_Si_bands.xml'
        vasprun = BSVasprun(filepath, parse_potcar_file=False)
        bs = vasprun.get_band_structure(
            kpoints_filename=self.TEST_FILES_DIR / 'KPOINTS_Si_bands')
        cbm = bs.get_cbm()
        vbm = bs.get_vbm()
        self.assertEqual(cbm['kpoint_index'], [13], "wrong cbm kpoint index")
        self.assertAlmostEqual(cbm['energy'], 6.2301, msg="wrong cbm energy")
        self.assertEqual(cbm['band_index'], {Spin.up: [4], Spin.down: [4]},
                         "wrong cbm bands")
        self.assertEqual(vbm['kpoint_index'], [0, 63, 64])
        self.assertAlmostEqual(vbm['energy'], 5.6158, msg="wrong vbm energy")
        self.assertEqual(vbm['band_index'], {Spin.up: [1, 2, 3],
                                             Spin.down: [1, 2, 3]},
                         "wrong vbm bands")
        self.assertEqual(vbm['kpoint'].label, "\\Gamma", "wrong vbm label")
        self.assertEqual(cbm['kpoint'].label, None, "wrong cbm label")
        # Eigenvalues must survive the round-trip through as_dict().
        d = vasprun.as_dict()
        self.assertIn("eigenvalues", d["output"])
class OszicarTest(PymatgenTest):
    """Smoke test for OSZICAR parsing."""

    def test_init(self):
        oszicar = Oszicar(self.TEST_FILES_DIR / 'OSZICAR')
        # One electronic-step list per ionic step.
        self.assertEqual(len(oszicar.electronic_steps),
                         len(oszicar.ionic_steps))
        self.assertEqual(len(oszicar.all_energies), 60)
        self.assertAlmostEqual(oszicar.final_energy, -526.63928)
class LocpotTest(PymatgenTest):
    """Smoke test for LOCPOT parsing and axis averaging."""

    def test_init(self):
        locpot = Locpot.from_file(self.TEST_FILES_DIR / 'LOCPOT')
        self.assertAlmostEqual(-217.05226954,
                               sum(locpot.get_average_along_axis(0)))
        # Cubic cell: each axis grid ends at the same lattice length.
        for axis in range(3):
            self.assertAlmostEqual(locpot.get_axis_grid(axis)[-1],
                                   2.87629, 2)
class ChgcarTest(PymatgenTest):
@classmethod
def setUpClass(cls):
    # Shared CHGCAR fixtures, loaded once for the whole test class.
    fixtures = {
        'chgcar_no_spin': 'CHGCAR.nospin',
        'chgcar_spin': 'CHGCAR.spin',
        'chgcar_fe3o4': 'CHGCAR.Fe3O4',
        'chgcar_NiO_SOC': 'CHGCAR.NiO_SOC.gz',
    }
    for attr, fname in fixtures.items():
        setattr(cls, attr, Chgcar.from_file(cls.TEST_FILES_DIR / fname))
def test_init(self):
    # No-spin file: the integrated spin difference vanishes.
    self.assertAlmostEqual(
        self.chgcar_no_spin.get_integrated_diff(0, 2)[0, 1], 0)
    expected = -0.0043896932237534022
    self.assertAlmostEqual(
        self.chgcar_spin.get_integrated_diff(0, 1)[0, 1], expected)
    # Chgcar supports arithmetic: a sum doubles the spin difference...
    doubled = self.chgcar_spin + self.chgcar_spin
    self.assertAlmostEqual(doubled.get_integrated_diff(0, 1)[0, 1],
                           expected * 2)
    # ...and subtracting a file from itself cancels it.
    cancelled = self.chgcar_spin - self.chgcar_spin
    self.assertAlmostEqual(cancelled.get_integrated_diff(0, 1)[0, 1], 0)
    ans = [1.56472768, 3.25985108, 3.49205728, 3.66275028, 3.8045896,
           5.10813352]
    integrated = self.chgcar_fe3o4.get_integrated_diff(0, 3, 6)
    self.assertTrue(np.allclose(integrated[:, 1], ans))
def test_write(self):
self.chgcar_spin.write_file("CHGCAR_pmg")
with open("CHGCAR_pmg") as f:
for i, line in enumerate(f):
if i == 22130:
self.assertEqual("augmentation occupancies 1 15\n", line)
if i == 44255:
self.assertEqual("augmentation occupancies 1 15\n", line)
os.remove("CHGCAR_pmg")
def test_soc_chgcar(self):
self.assertEqual(set(self.chgcar_NiO_SOC.data.keys()),
{'total', 'diff_x', 'diff_y', 'diff_z', 'diff'})
self.assertTrue(self.chgcar_NiO_SOC.is_soc)
self.assertEqual(self.chgcar_NiO_SOC.data['diff'].shape, self.chgcar_NiO_SOC.data['diff_y'].shape)
# check our construction of chg.data['diff'] makes sense
# this has been checked visually too and seems reasonable
self.assertEqual(abs(self.chgcar_NiO_SOC.data['diff'][0][0][0]),
np.linalg.norm([self.chgcar_NiO_SOC.data['diff_x'][0][0][0],
self.chgcar_NiO_SOC.data['diff_y'][0][0][0],
self.chgcar_NiO_SOC.data['diff_z'][0][0][0]]))
# and that the net magnetization is about zero
# note: we get ~ 0.08 here, seems a little high compared to
# vasp output, but might be due to chgcar limitations?
self.assertAlmostEqual(self.chgcar_NiO_SOC.net_magnetization, 0.0, places=0)
self.chgcar_NiO_SOC.write_file("CHGCAR_pmg_soc")
chg_from_file = Chgcar.from_file("CHGCAR_pmg_soc")
self.assertTrue(chg_from_file.is_soc)
os.remove("CHGCAR_pmg_soc")
def test_hdf5(self):
print(self.TEST_FILES_DIR)
chgcar = Chgcar.from_file(self.TEST_FILES_DIR / "CHGCAR.NiO_SOC.gz")
chgcar.to_hdf5("chgcar_test.hdf5")
import h5py
with h5py.File("chgcar_test.hdf5", "r") as f:
self.assertArrayAlmostEqual(np.array(f["vdata"]["total"]),
chgcar.data["total"])
self.assertArrayAlmostEqual(np.array(f["vdata"]["diff"]),
chgcar.data["diff"])
self.assertArrayAlmostEqual(np.array(f["lattice"]),
chgcar.structure.lattice.matrix)
self.assertArrayAlmostEqual(np.array(f["fcoords"]),
chgcar.structure.frac_coords)
for z in f["Z"]:
self.assertIn(z, [Element.Ni.Z, Element.O.Z])
for sp in f["species"]:
self.assertIn(sp, ["Ni", "O"])
chgcar2 = Chgcar.from_hdf5("chgcar_test.hdf5")
self.assertArrayAlmostEqual(chgcar2.data["total"],
chgcar.data["total"])
os.remove("chgcar_test.hdf5")
def test_spin_data(self):
d = self.chgcar_spin.spin_data
for k, v in d.items():
self.assertEqual(v.shape, (48, 48, 48))
def test_add(self):
chgcar_sum = self.chgcar_spin + self.chgcar_spin
self.assertArrayAlmostEqual(chgcar_sum.data['total'], self.chgcar_spin.data['total'] * 2)
chgcar_copy = self.chgcar_spin.copy()
chgcar_copy.structure = self.get_structure("Li2O")
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
chgcar_sum = chgcar_copy + self.chgcar_spin
# Verify some things
assert len(w) == 1
assert "Structures are different. Make sure you know what you are doing..." in str(w[-1].message)
self.assertRaises(ValueError, self.chgcar_spin.__add__, self.chgcar_fe3o4)
self.assertRaises(ValueError, self.chgcar_spin.__add__, self.chgcar_no_spin)
def test_as_dict_and_from_dict(self):
d = self.chgcar_NiO_SOC.as_dict()
chgcar_from_dict = Chgcar.from_dict(d)
self.assertArrayAlmostEqual(self.chgcar_NiO_SOC.data['total'], chgcar_from_dict.data['total'])
self.assertArrayAlmostEqual(self.chgcar_NiO_SOC.structure.lattice.matrix,
chgcar_from_dict.structure.lattice.matrix)
class ElfcarTest(PymatgenTest):
    """Tests for the Elfcar (electron localization function) parser."""
    def test_init(self):
        """Check mean values of the total and diff ELF grids."""
        elfcar = Elfcar.from_file(self.TEST_FILES_DIR / 'ELFCAR.gz')
        self.assertAlmostEqual(0.19076207645194002, np.mean(elfcar.data["total"]))
        self.assertAlmostEqual(0.19076046677910055, np.mean(elfcar.data["diff"]))
    def test_alpha(self):
        """Check the median of the derived alpha grid."""
        elfcar = Elfcar.from_file(self.TEST_FILES_DIR / 'ELFCAR.gz')
        alpha = elfcar.get_alpha()
        self.assertAlmostEqual(2.936678808979031, np.median(alpha.data["total"]))
class ProcarTest(PymatgenTest):
    """Tests for the Procar (orbital projection) parser."""
    # Hint to parallel test runners that this TestCase may be shared across processes.
    _multiprocess_shared_ = True
    def test_init(self):
        """Check occupations, dimensions and element projections for two PROCARs."""
        filepath = self.TEST_FILES_DIR / 'PROCAR.simple'
        p = Procar(filepath)
        self.assertAlmostEqual(p.get_occupation(0, 'd')[Spin.up], 0)
        self.assertAlmostEqual(p.get_occupation(0, 's')[Spin.up],
                               0.35381249999999997)
        self.assertAlmostEqual(p.get_occupation(0, 'p')[Spin.up], 1.19540625)
        # 'm' is not a valid orbital label.
        self.assertRaises(ValueError, p.get_occupation, 1, 'm')
        self.assertEqual(p.nbands, 10)
        self.assertEqual(p.nkpoints, 10)
        self.assertEqual(p.nions, 3)
        # Project onto a toy 3-atom cubic structure.
        lat = Lattice.cubic(3.)
        s = Structure(lat, ["Li", "Na", "K"], [[0., 0., 0.],
                                               [0.25, 0.25, 0.25],
                                               [0.75, 0.75, 0.75]])
        d = p.get_projection_on_elements(s)
        self.assertAlmostEqual(d[Spin.up][2][2],
                               {'Na': 0.042, 'K': 0.646, 'Li': 0.042})
        # Spin-polarized PROCAR: per-spin orbital occupations.
        filepath = self.TEST_FILES_DIR / 'PROCAR'
        p = Procar(filepath)
        self.assertAlmostEqual(p.get_occupation(0, 'dxy')[Spin.up],
                               0.96214813853000025)
        self.assertAlmostEqual(p.get_occupation(0, 'dxy')[Spin.down],
                               0.85796295426000124)
    def test_phase_factors(self):
        """Check complex phase factors for both the old and the 5.4.4+ PROCAR formats."""
        filepath = self.TEST_FILES_DIR / 'PROCAR.phase'
        p = Procar(filepath)
        self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0],
                               -0.746 + 0.099j)
        self.assertAlmostEqual(p.phase_factors[Spin.down][0, 0, 0, 0],
                               0.372 - 0.654j)
        # Two Li should have same phase factor.
        self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0],
                               p.phase_factors[Spin.up][0, 0, 1, 0])
        self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 2, 0],
                               -0.053 + 0.007j)
        self.assertAlmostEqual(p.phase_factors[Spin.down][0, 0, 2, 0],
                               0.027 - 0.047j)
        # new style phase factors (VASP 5.4.4+)
        filepath = self.TEST_FILES_DIR / 'PROCAR.new_format_5.4.4'
        p = Procar(filepath)
        self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0], -0.13 + 0.199j)
class XdatcarTest(PymatgenTest):
    """Tests for the Xdatcar (MD trajectory) parser."""
    def test_init(self):
        """Parse two XDATCAR variants, then concatenate them and re-serialize."""
        filepath = self.TEST_FILES_DIR / 'XDATCAR_4'
        x = Xdatcar(filepath)
        structures = x.structures
        self.assertEqual(len(structures), 4)
        for s in structures:
            self.assertEqual(s.formula, "Li2 O1")
        # VASP 5 style XDATCAR should parse to the same trajectory.
        filepath = self.TEST_FILES_DIR / 'XDATCAR_5'
        x = Xdatcar(filepath)
        structures = x.structures
        self.assertEqual(len(structures), 4)
        for s in structures:
            self.assertEqual(s.formula, "Li2 O1")
        # Concatenation appends the other file's frames.
        x.concatenate(self.TEST_FILES_DIR / 'XDATCAR_4')
        self.assertEqual(len(x.structures), 8)
        self.assertIsNotNone(x.get_string())
class DynmatTest(PymatgenTest):
    """Tests for the Dynmat (DYNMAT dynamical matrix) parser."""
    def test_init(self):
        """Check header counts, masses and a sampled displacement/dynmat row."""
        # nosetests pymatgen/io/vasp/tests/test_outputs.py:DynmatTest.test_init
        filepath = self.TEST_FILES_DIR / 'DYNMAT'
        d = Dynmat(filepath)
        self.assertEqual(d.nspecs, 2)
        self.assertEqual(d.natoms, 6)
        self.assertEqual(d.ndisps, 3)
        self.assertTrue(np.allclose(d.masses, [63.546, 196.966]))
        # d.data is keyed by atom index, then by displacement index.
        self.assertTrue(4 in d.data)
        self.assertTrue(2 in d.data[4])
        self.assertTrue(np.allclose(
            d.data[4][2]['dispvec'], [0., 0.05, 0.]
        ))
        self.assertTrue(np.allclose(
            d.data[4][2]['dynmat'][3], [0.055046, -0.298080, 0.]
        ))
    # TODO: test get_phonon_frequencies once cross-checked
class WavecarTest(PymatgenTest):
    """Tests for the Wavecar (WAVECAR plane-wave coefficient) reader."""
    # Hint to parallel test runners that this TestCase may be shared across processes.
    _multiprocess_shared_ = True
    def setUp(self):
        # Reference 10 Angstrom cubic lattice and its reciprocal lattice.
        a = np.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
                      [0.0, 0.0, 10.0]])
        self.vol = np.dot(a[0, :], np.cross(a[1, :], a[2, :]))
        b = np.array([np.cross(a[1, :], a[2, :]),
                      np.cross(a[2, :], a[0, :]),
                      np.cross(a[0, :], a[1, :])])
        self.b = 2 * np.pi * b / self.vol
        self.a = a
        self.w = Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2')
        self.wH2 = Wavecar(self.TEST_FILES_DIR / 'WAVECAR.H2_low_symm', verbose=True)
        self.wH2_gamma = Wavecar(self.TEST_FILES_DIR / 'WAVECAR.H2_low_symm.gamma', verbose=True)
    def test_standard(self):
        """Check header fields, lattices, coefficient shapes and verbose output."""
        w = self.w
        a = np.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
                      [0.0, 0.0, 10.0]])
        vol = np.dot(a[0, :], np.cross(a[1, :], a[2, :]))
        b = np.array([np.cross(a[1, :], a[2, :]),
                      np.cross(a[2, :], a[0, :]),
                      np.cross(a[0, :], a[1, :])])
        b = 2 * np.pi * b / vol
        self.assertEqual(w.filename, self.TEST_FILES_DIR / 'WAVECAR.N2')
        self.assertAlmostEqual(w.efermi, -5.7232, places=4)
        self.assertEqual(w.encut, 25)
        self.assertEqual(w.nb, 9)
        self.assertEqual(w.nk, 1)
        self.assertTrue(np.allclose(w.a, a))
        self.assertTrue(np.allclose(w.b, b))
        self.assertAlmostEqual(w.vol, vol)
        self.assertEqual(len(w.kpoints), w.nk)
        self.assertEqual(len(w.coeffs), w.nk)
        self.assertEqual(len(w.coeffs[0]), w.nb)
        self.assertEqual(len(w.band_energy), w.nk)
        self.assertEqual(w.band_energy[0].shape, (w.nb, 3))
        self.assertLessEqual(len(w.Gpoints[0]), 257)
        # One coefficient per G-point for every band at every k-point.
        # (Loop variables renamed so they no longer shadow the lattice arrays above.)
        for ik in range(w.nk):
            for ib in range(w.nb):
                self.assertEqual(len(w.coeffs[ik][ib]),
                                 len(w.Gpoints[ik]))
        with self.assertRaises(ValueError):
            Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2.malformed')
        # verbose=True must produce some stdout output.
        import sys
        from io import StringIO
        saved_stdout = sys.stdout
        try:
            out = StringIO()
            sys.stdout = out
            Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2', verbose=True)
            self.assertNotEqual(out.getvalue().strip(), '')
        finally:
            sys.stdout = saved_stdout
    def test_n2_45210(self):
        """Parse a WAVECAR written with record format 45210 and check the header."""
        w = Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2.45210')
        self.assertEqual(w.filename, self.TEST_FILES_DIR / 'WAVECAR.N2.45210')
        self.assertAlmostEqual(w.efermi, -5.7232, places=4)
        self.assertEqual(w.encut, 25)
        self.assertEqual(w.nb, 9)
        self.assertEqual(w.nk, 1)
        self.assertTrue(np.allclose(w.a, self.a))
        self.assertTrue(np.allclose(w.b, self.b))
        self.assertAlmostEqual(w.vol, self.vol)
        self.assertEqual(len(w.kpoints), w.nk)
        self.assertEqual(len(w.coeffs), w.nk)
        self.assertEqual(len(w.coeffs[0]), w.nb)
        self.assertEqual(len(w.band_energy), w.nk)
        self.assertEqual(w.band_energy[0].shape, (w.nb, 3))
        self.assertLessEqual(len(w.Gpoints[0]), 257)
    def test_n2_spin(self):
        """Spin-polarized WAVECAR: two spin channels; empty G-points must raise."""
        w = Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2.spin')
        self.assertEqual(len(w.coeffs), 2)
        self.assertEqual(len(w.band_energy), 2)
        self.assertEqual(len(w.kpoints), w.nk)
        self.assertEqual(len(w.Gpoints), w.nk)
        self.assertEqual(len(w.coeffs[0][0]), w.nb)
        self.assertEqual(len(w.band_energy[0]), w.nk)
        # Patch _generate_G_points to return nothing; parsing must then fail.
        temp_ggp = Wavecar._generate_G_points
        try:
            Wavecar._generate_G_points = lambda x, y, gamma: []
            with self.assertRaises(ValueError):
                Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2')
        finally:
            Wavecar._generate_G_points = temp_ggp
    def test__generate_nbmax(self):
        """The maximum G-vector indices for this cell/encut should be (5, 5, 5)."""
        self.w._generate_nbmax()
        self.assertEqual(self.w._nbmax.tolist(), [5, 5, 5])
    def test__generate_G_points(self):
        """No k-point should yield more than 257 G-points at this cutoff."""
        for k in range(self.w.nk):
            kp = self.w.kpoints[k]
            self.assertLessEqual(len(self.w._generate_G_points(kp)), 257)
    def test_evaluate_wavefunc(self):
        """Evaluate psi(r) at the origin for a synthetic and a real band."""
        # Append a trivial single-coefficient band so psi(0) is known exactly.
        self.w.Gpoints.append(np.array([0, 0, 0]))
        self.w.kpoints.append(np.array([0, 0, 0]))
        self.w.coeffs.append([[1 + 1j]])
        self.assertAlmostEqual(self.w.evaluate_wavefunc(-1, -1, [0, 0, 0]),
                               (1 + 1j) / np.sqrt(self.vol), places=4)
        # At r = 0 every plane wave is 1, so psi(0) is the coefficient sum.
        self.assertAlmostEqual(self.w.evaluate_wavefunc(0, 0, [0, 0, 0]),
                               np.sum(self.w.coeffs[0][0]) / np.sqrt(self.vol),
                               places=4)
    def test_fft_mesh(self):
        """Check peak location and zeroed center/origin of shifted/unshifted meshes."""
        mesh = self.w.fft_mesh(0, 5)
        ind = np.argmax(np.abs(mesh))
        self.assertEqual(np.unravel_index(ind, mesh.shape), (14, 1, 1))
        # np.int was removed in NumPy 1.24; the builtin int is the correct dtype.
        self.assertEqual(mesh[tuple((self.w.ng / 2).astype(int))], 0j)
        mesh = self.w.fft_mesh(0, 5, shift=False)
        ind = np.argmax(np.abs(mesh))
        self.assertEqual(np.unravel_index(ind, mesh.shape), (6, 8, 8))
        self.assertEqual(mesh[0, 0, 0], 0j)
    def test_fft_mesh_gamma(self):
        """Gamma-only WAVECAR must reproduce the full-mesh wavefunction up to a phase."""
        ik = 0
        ib = 0
        mesh = self.wH2.fft_mesh(ik, ib)
        mesh_gamma = self.wH2_gamma.fft_mesh(ik, ib)
        # check equality of plane-wave coefficients
        ind_max = np.unravel_index(np.argmax(np.abs(mesh)), mesh.shape)
        phase = mesh[ind_max] / mesh_gamma[ind_max]
        self.assertLessEqual(np.max(np.abs(mesh - phase * mesh_gamma)), 1.0e-6)
        # transform to real space for further checking
        mesh = np.fft.ifftn(mesh)
        mesh_gamma = np.fft.ifftn(mesh_gamma)
        # check equality in real space for regular vs. gamma only
        ind_max = np.unravel_index(np.argmax(np.abs(mesh)), mesh.shape)
        phase = mesh[ind_max] / mesh_gamma[ind_max]
        self.assertLessEqual(np.max(np.abs(mesh - phase * mesh_gamma)), 1.0e-6)
        # spot check some points in real space
        p1 = (int(mesh.shape[0] / 2), int(mesh.shape[1] / 2) - 1, int(mesh.shape[2] / 2) - 2)
        p2 = (p1[0] + 1, p1[1], p1[2])
        c = np.array([[5, 0, 0], [0, 4, 0], [0, 0, 6]])  # this needs to match POSCAR, which we don't have
        r1 = np.dot(np.array(p1) / mesh.shape, c)
        r2 = np.dot(np.array(p2) / mesh.shape, c)
        # check equality of FFT and slow FT for regular mesh (ratio, to account for normalization)
        v1 = self.wH2.evaluate_wavefunc(ik, ib, r1)
        v2 = self.wH2.evaluate_wavefunc(ik, ib, r2)
        self.assertAlmostEqual(np.abs(mesh[p1]) / np.abs(mesh[p2]), np.abs(v1) / np.abs(v2), places=6)
        # spot check one value that we happen to know from reference run
        self.assertAlmostEqual(v1, -0.01947068011502887 + 0.23340228099620275j, places=8)
        # check equality of FFT and slow FT for gamma-only mesh (ratio again)
        v1_gamma = self.wH2_gamma.evaluate_wavefunc(ik, ib, r1)
        v2_gamma = self.wH2_gamma.evaluate_wavefunc(ik, ib, r2)
        # BUGFIX: previously compared against v1/v2 again, leaving the
        # gamma-only slow-FT values unused and this assertion vacuous.
        self.assertAlmostEqual(np.abs(mesh_gamma[p1]) / np.abs(mesh_gamma[p2]),
                               np.abs(v1_gamma) / np.abs(v2_gamma), places=6)
    def test_get_parchg(self):
        """Generate partial charge densities with/without phase and spin selection."""
        poscar = Poscar.from_file(self.TEST_FILES_DIR / 'POSCAR')
        w = self.w
        c = w.get_parchg(poscar, 0, 0, spin=0, phase=False)
        self.assertTrue('total' in c.data)
        self.assertTrue('diff' not in c.data)
        self.assertEqual(np.prod(c.data['total'].shape), np.prod(w.ng * 2))
        self.assertTrue(np.all(c.data['total'] > 0.))
        # With phase=True the density can go negative.
        c = w.get_parchg(poscar, 0, 0, spin=0, phase=True)
        self.assertTrue('total' in c.data)
        self.assertTrue('diff' not in c.data)
        self.assertEqual(np.prod(c.data['total'].shape), np.prod(w.ng * 2))
        self.assertFalse(np.all(c.data['total'] > 0.))
        # phase=True at a non-gamma k-point must emit a warning.
        w.kpoints.append([0.2, 0.2, 0.2])
        with warnings.catch_warnings(record=True) as wrns:
            try:
                c = w.get_parchg(poscar, 1, 0, spin=0, phase=True)
            except IndexError:
                pass
            self.assertEqual(len(wrns), 1)
        # Spin-polarized case: summing both spins gives total and diff.
        w = Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2.spin')
        c = w.get_parchg(poscar, 0, 0, phase=False, scale=1)
        self.assertTrue('total' in c.data)
        self.assertTrue('diff' in c.data)
        self.assertEqual(np.prod(c.data['total'].shape), np.prod(w.ng))
        self.assertTrue(np.all(c.data['total'] > 0.))
        self.assertFalse(np.all(c.data['diff'] > 0.))
        c = w.get_parchg(poscar, 0, 0, spin=0, phase=False)
        self.assertTrue('total' in c.data)
        self.assertTrue('diff' not in c.data)
        self.assertEqual(np.prod(c.data['total'].shape), np.prod(w.ng * 2))
        self.assertTrue(np.all(c.data['total'] > 0.))
        c = w.get_parchg(poscar, 0, 0, spin=0, phase=True)
        self.assertTrue('total' in c.data)
        self.assertTrue('diff' not in c.data)
        self.assertEqual(np.prod(c.data['total'].shape), np.prod(w.ng * 2))
        self.assertFalse(np.all(c.data['total'] > 0.))
class EigenvalTest(PymatgenTest):
    """Tests for the Eigenval (EIGENVAL) parser."""
    # Hint to parallel test runners that this TestCase may be shared across processes.
    _multiprocess_shared_ = True
    def test_init(self):
        """Check counts and eigenvalue array shapes for an ISPIN=1 EIGENVAL."""
        eig = Eigenval(self.TEST_FILES_DIR / 'EIGENVAL.gz')
        self.assertEqual(eig.ispin, 1)
        self.assertEqual(eig.nkpt, len(eig.kpoints))
        self.assertEqual(eig.nkpt, len(eig.kpoints_weights))
        self.assertEqual(eig.nkpt, eig.eigenvalues[Spin.up].shape[0])
        self.assertEqual(eig.nelect, 16)
        self.assertEqual(eig.nbands, eig.eigenvalues[Spin.up].shape[1])
        # Eigenvalues straddle the Fermi level in this reference.
        self.assertTrue(np.max(eig.eigenvalues[Spin.up]) > 0)
        self.assertTrue(np.min(eig.eigenvalues[Spin.up]) < 0)
    def test_ispin2(self):
        """Spin-polarized EIGENVAL: both spin channels share the same shape."""
        eig = Eigenval(self.TEST_FILES_DIR / 'EIGENVAL.ispin2.gz')
        self.assertEqual(eig.ispin, 2)
        self.assertEqual(eig.nkpt, eig.eigenvalues[Spin.up].shape[0])
        self.assertEqual(eig.nbands, eig.eigenvalues[Spin.up].shape[1])
        self.assertEqual(eig.nkpt, eig.eigenvalues[Spin.down].shape[0])
        self.assertEqual(eig.nbands, eig.eigenvalues[Spin.down].shape[1])
    def test_eigenvalue_band_properties(self):
        """Check (band gap, cbm, vbm, is_direct) derived from eigenvalues."""
        eig = Eigenval(self.TEST_FILES_DIR / 'EIGENVAL.gz')
        props = eig.eigenvalue_band_properties
        self.assertAlmostEqual(props[0], 6.4153, places=4)
        self.assertAlmostEqual(props[1], 7.5587, places=4)
        self.assertAlmostEqual(props[2], 1.1434, places=4)
        self.assertEqual(props[3], False)
class WavederTest(PymatgenTest):
    """Tests for the Waveder (WAVEDER orbital derivative) parser."""
    # Hint to parallel test runners that this TestCase may be shared across processes.
    _multiprocess_shared_ = True
    def setUp(self):
        # NOTE(review): this setUp performs assertions itself, so a parse
        # failure is reported as a fixture error rather than a test failure,
        # and nothing is stored on self. Consider moving these checks into a
        # dedicated test method.
        wder = Waveder(self.TEST_FILES_DIR / 'WAVEDER', gamma_only=True)
        self.assertEqual(wder.nbands, 36)
        self.assertEqual(wder.nkpoints, 56)
        self.assertEqual(wder.nelect, 8)
        band_i = 0
        band_j = 0
        kp_index = 0
        spin_index = 0
        cart_dir_index = 0
        cder = wder.get_orbital_derivative_between_states(band_i, band_j, kp_index, spin_index, cart_dir_index)
        self.assertAlmostEqual(cder, -1.33639226092e-103, places=114)
    def test_consistency(self):
        """Compare WAVEDER matrix elements with the reference WAVEDERF text dump."""
        wder = Waveder(self.TEST_FILES_DIR / 'WAVEDER.Si')
        wderf = np.loadtxt(self.TEST_FILES_DIR / 'WAVEDERF.Si', skiprows=1)
        with open(self.TEST_FILES_DIR / 'WAVEDERF.Si', 'r') as f:
            first_line = [int(a) for a in f.readline().split()]
        self.assertEqual(wder.nkpoints, first_line[1])
        self.assertEqual(wder.nbands, first_line[2])
        for i in range(10):
            self.assertAlmostEqual(
                first=wder.get_orbital_derivative_between_states(0, i, 0, 0, 0).real,
                second=wderf[i, 6],
                places=10
            )
            # Columns 6..11 hold real/imag pairs of the three Cartesian components.
            self.assertAlmostEqual(wder.cder_data[0, i, 0, 0, 0].real, wderf[i, 6], places=10)
            self.assertAlmostEqual(wder.cder_data[0, i, 0, 0, 0].imag, wderf[i, 7], places=10)
            self.assertAlmostEqual(wder.cder_data[0, i, 0, 0, 1].real, wderf[i, 8], places=10)
            self.assertAlmostEqual(wder.cder_data[0, i, 0, 0, 1].imag, wderf[i, 9], places=10)
            self.assertAlmostEqual(wder.cder_data[0, i, 0, 0, 2].real, wderf[i, 10], places=10)
            self.assertAlmostEqual(wder.cder_data[0, i, 0, 0, 2].imag, wderf[i, 11], places=10)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
gVallverdu/pymatgen
|
pymatgen/io/vasp/tests/test_outputs.py
|
Python
|
mit
| 78,963
|
[
"VASP",
"pymatgen"
] |
17cadd9c9ce949b070f9aa7df2d949bab823acf67a4c5f5ab857584bd8ef5ac2
|
#!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Test force factories in forcefactories.py.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
from functools import partial
from openmmtools.forcefactories import *
from openmmtools import testsystems, states
# =============================================================================
# CONSTANTS
# =============================================================================
MAX_FORCE_RELATIVE_ERROR = 1.0e-6  # maximum allowable relative force error
GLOBAL_FORCE_UNIT = unit.kilojoules_per_mole / unit.nanometers  # unit all forces are converted to before comparison
GLOBAL_FORCES_PLATFORM = None  # if set, the OpenMM Platform used for every calculation in this module
# =============================================================================
# TESTING UTILITIES
# =============================================================================
def create_context(system, integrator, platform=None):
    """Create an OpenMM Context for the given system and integrator.

    If platform is None, the module-wide GLOBAL_FORCES_PLATFORM is used;
    if that is also None, OpenMM selects the platform itself.
    """
    if platform is None:
        platform = GLOBAL_FORCES_PLATFORM
    if platform is not None:
        context = openmm.Context(system, integrator, platform)
    else:
        context = openmm.Context(system, integrator)
    return context
# =============================================================================
# UTILITY FUNCTIONS
# =============================================================================
def compute_forces(system, positions, platform=None, force_group=-1):
    """Compute forces of the system in the given positions.

    Parameters
    ----------
    platform : openmm.Platform or None, optional
        If None, the module-wide GLOBAL_FORCES_PLATFORM will be used.
    force_group : int flag or set of int, optional
        Passed to the groups argument of Context.getState().

    Returns
    -------
    forces : openmm.unit.Quantity of shape [nparticles, 3]
        Forces on each particle, as a numpy-backed Quantity.
    """
    # The integrator is never stepped; it only exists to build a Context.
    timestep = 1.0 * unit.femtoseconds
    integrator = openmm.VerletIntegrator(timestep)
    context = create_context(system, integrator, platform)
    context.setPositions(positions)
    state = context.getState(getForces=True, groups=force_group)
    forces = state.getForces(asNumpy=True)
    # Free the OpenMM objects before returning.
    del context, integrator, state
    return forces
def compare_system_forces(reference_system, alchemical_system, positions, name="", platform=None):
    """Check that the forces of reference and modified systems are close.

    Raises an Exception if the RMS relative force error exceeds
    MAX_FORCE_RELATIVE_ERROR.

    Parameters
    ----------
    reference_system : openmm.System
        Reference System
    alchemical_system : openmm.System
        System to compare to reference
    positions : openmm.unit.Quantity of shape [nparticles,3] with units of distance
        The particle positions to use
    name : str, optional, default=""
        System name to use for debugging.
    platform : openmm.Platform, optional, default=None
        If specified, use this platform
    """
    # Compute forces (stripped to plain arrays in GLOBAL_FORCE_UNIT).
    reference_force = compute_forces(reference_system, positions, platform=platform) / GLOBAL_FORCE_UNIT
    alchemical_force = compute_forces(alchemical_system, positions, platform=platform) / GLOBAL_FORCE_UNIT
    # Check that error is small.
    def magnitude(vec):
        # Root-mean-square force magnitude over all particles.
        return np.sqrt(np.mean(np.sum(vec**2, axis=1)))
    relative_error = magnitude(alchemical_force - reference_force) / magnitude(reference_force)
    if np.any(np.abs(relative_error) > MAX_FORCE_RELATIVE_ERROR):
        err_msg = ("Maximum allowable relative force error exceeded (was {:.8f}; allowed {:.8f}).\n"
                   "alchemical_force = {:.8f}, reference_force = {:.8f}, difference = {:.8f}")
        raise Exception(err_msg.format(relative_error, MAX_FORCE_RELATIVE_ERROR, magnitude(alchemical_force),
                                       magnitude(reference_force), magnitude(alchemical_force-reference_force)))
def generate_new_positions(system, positions, platform=None, nsteps=50):
    """Generate new positions by taking a few steps from the old positions.

    Runs a short burst of Langevin dynamics (300 K, 90/ps collision rate,
    1 fs timestep) so the returned coordinates differ from the input ones.

    Parameters
    ----------
    platform : openmm.Platform or None, optional
        If None, the global GLOBAL_ALCHEMY_PLATFORM will be used.
    nsteps : int, optional, default=50
        Number of steps of dynamics to take.

    Returns
    -------
    new_positions : openmm.unit.Quantity of shape [nparticles,3] with units compatible with distance
        New positions
    """
    langevin = openmm.LangevinIntegrator(300 * unit.kelvin,
                                         90 / unit.picoseconds,
                                         1.0 * unit.femtoseconds)
    ctx = create_context(system, langevin, platform)
    ctx.setPositions(positions)
    langevin.step(nsteps)
    displaced = ctx.getState(getPositions=True).getPositions(asNumpy=True)
    # Release the OpenMM objects before returning.
    del ctx, langevin
    return displaced
# =============================================================================
# TEST FORCE FACTORIES FUNCTIONS
# =============================================================================
def test_restrain_atoms():
    """Check that the restrained molecule's centroid is in the origin."""
    host_guest = testsystems.HostGuestExplicit()
    topology = mdtraj.Topology.from_openmm(host_guest.topology)
    sampler_state = states.SamplerState(positions=host_guest.positions)
    thermodynamic_state = states.ThermodynamicState(host_guest.system, temperature=300*unit.kelvin,
                                                    pressure=1.0*unit.atmosphere)
    # Restrain all the host carbon atoms (host atoms have indices <= 125).
    # BUGFIX: use equality, not identity ("is"), to compare the element
    # symbol with the string literal 'C'; identity comparison with literals
    # is implementation-dependent and raises a SyntaxWarning on Python 3.8+.
    restrained_atoms = [atom.index for atom in topology.atoms
                        if atom.element.symbol == 'C' and atom.index <= 125]
    restrain_atoms(thermodynamic_state, sampler_state, restrained_atoms)
    # Compute host center_of_geometry.
    centroid = np.mean(sampler_state.positions[:126], axis=0)
    assert np.allclose(centroid, np.zeros(3))
def test_replace_reaction_field():
    """Check that replacing reaction-field electrostatics with Custom*Force
    yields minimal force differences with original system.

    Note that we cannot test for energy consistency or energy overlap because
    which atoms are within the cutoff will cause energy difference to vary wildly.

    Yields one force-comparison test per (test system, shifted flag) pair,
    in the same order as before: all shifted=False cases first.
    """
    test_cases = [
        testsystems.AlanineDipeptideExplicit(nonbondedMethod=openmm.app.CutoffPeriodic),
        testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
    ]
    platform = openmm.Platform.getPlatformByName('Reference')
    # The two passes differ only in the `shifted` flag; run them in one loop
    # instead of duplicating the body.
    for shifted in (False, True):
        for test_system in test_cases:
            test_name = test_system.__class__.__name__
            # Replace reaction field.
            modified_rf_system = replace_reaction_field(test_system.system, switch_width=None,
                                                        shifted=shifted)
            # Make sure positions are not at minimum.
            positions = generate_new_positions(test_system.system, test_system.positions)
            # Test forces.
            f = partial(compare_system_forces, test_system.system, modified_rf_system, positions,
                        name=test_name, platform=platform)
            f.description = "Testing replace_reaction_field on system {}".format(test_name)
            if shifted:
                f.description += " with shifted=True"
            yield f
|
choderalab/openmmtools
|
openmmtools/tests/test_forcefactories.py
|
Python
|
mit
| 7,983
|
[
"MDTraj",
"OpenMM"
] |
13468eeef84838aa428c3cc4c67078cba4762ca8cbfe33941cffc168650bf2e7
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
from torch.distributions import constraints
from pyro.contrib.gp.kernels.kernel import Kernel
from pyro.nn.module import PyroParam
class DotProduct(Kernel):
    r"""
    Base class for kernels which are functions of :math:`x \cdot z`.

    :param int input_dim: Number of feature dimensions of inputs.
    :param torch.Tensor variance: Variance (scale) parameter of this kernel.
        Should be positive; defaults to 1.
    :param list active_dims: List of feature dimensions of the inputs which
        the kernel acts on.
    """
    def __init__(self, input_dim, variance=None, active_dims=None):
        super().__init__(input_dim, active_dims)
        variance = torch.tensor(1.0) if variance is None else variance
        # Registered as a learnable Pyro parameter constrained to be positive.
        self.variance = PyroParam(variance, constraints.positive)
    def _dot_product(self, X, Z=None, diag=False):
        r"""
        Returns :math:`X \cdot Z` (the matrix of pairwise dot products), or
        only its diagonal entries when ``diag=True``.
        """
        if Z is None:
            Z = X
        X = self._slice_input(X)
        if diag:
            # NOTE(review): the diag path assumes Z is X (it returns the
            # squared norms of X regardless of any explicit Z) — confirm
            # callers never pass diag=True with a distinct Z.
            return (X ** 2).sum(-1)
        Z = self._slice_input(Z)
        if X.size(1) != Z.size(1):
            raise ValueError("Inputs must have the same number of features.")
        return X.matmul(Z.t())
class Linear(DotProduct):
    r"""
    Implementation of Linear kernel:

        :math:`k(x, z) = \sigma^2 x \cdot z.`

    Doing Gaussian Process regression with linear kernel is equivalent to doing a
    linear regression.

    .. note:: Here we implement the homogeneous version. To use the inhomogeneous
        version, consider using :class:`Polynomial` kernel with ``degree=1`` or making
        a :class:`.Sum` with a :class:`.Constant` kernel.
    """
    def __init__(self, input_dim, variance=None, active_dims=None):
        # Pure pass-through to DotProduct; kept to make the signature explicit.
        super().__init__(input_dim, variance, active_dims)
    def forward(self, X, Z=None, diag=False):
        # k(X, Z) = variance * (X . Z)
        return self.variance * self._dot_product(X, Z, diag)
class Polynomial(DotProduct):
    r"""
    Implementation of Polynomial kernel:

        :math:`k(x, z) = \sigma^2(\text{bias} + x \cdot z)^d.`

    :param int input_dim: Number of feature dimensions of inputs.
    :param torch.Tensor variance: Variance (scale) parameter; should be positive.
    :param torch.Tensor bias: Bias parameter of this kernel. Should be positive.
    :param int degree: Degree :math:`d` of the polynomial.
    """
    def __init__(self, input_dim, variance=None, bias=None, degree=1, active_dims=None):
        super().__init__(input_dim, variance, active_dims)
        bias = torch.tensor(1.0) if bias is None else bias
        # Registered as a learnable Pyro parameter constrained to be positive.
        self.bias = PyroParam(bias, constraints.positive)
        if not isinstance(degree, int) or degree < 1:
            raise ValueError(
                "Degree for Polynomial kernel should be a positive integer."
            )
        self.degree = degree
    def forward(self, X, Z=None, diag=False):
        # k(X, Z) = variance * (bias + X . Z) ** degree
        return self.variance * (
            (self.bias + self._dot_product(X, Z, diag)) ** self.degree
        )
|
uber/pyro
|
pyro/contrib/gp/kernels/dot_product.py
|
Python
|
apache-2.0
| 2,648
|
[
"Gaussian"
] |
f8ce8758da6bc7b979df0e7369f368254b704894da51c0bf269f1e8d2089f3c3
|
"""
@name: Modules/House/Family/Insteon/_test/test_Insteon_Link.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Aug 16, 2015
@Summary:
"""
__updated__ = '2020-01-15'
# Import system type stuff
from twisted.trial import unittest
# Import PyMh files and modules.
from Modules.House.Lighting.controllers import ControllerInformation
from Modules.House.Family.Insteon.insteon_link import DecodeLink as linkDecode
from _test.testing_mixin import SetupPyHouseObj
# Raw PLM message fixtures for the decode tests; the second byte is the
# command number matching each constant's name (0x50, 0x53, 0x62).
MSG_50 = bytearray(b'\x02\x50\x16\xc9\xd0\x1b\x47\x81\x27\x09\x00')
MSG_53 = bytearray(b'\x02\x53\x00\x01\x12\x34\x56\x02\x04\x06\xFF')
MSG_62 = bytearray(b'\x02\x62\x17\xc2\x72\x0f\x19\x00\x06')
class SetupMixin(object):
    """Shared fixture builder for the tests below.

    Note: ``setUp`` requires the test root element ``p_root``.
    """
    def setUp(self, p_root):
        # Build the PyHouse object, its XML and an empty controller fixture.
        self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj(p_root)
        self.m_xml = SetupPyHouseObj().BuildXml(p_root)
        self.m_obj = ControllerInformation()
class A0(unittest.TestCase):
    """Smoke test whose only job is to identify this module in the test log."""
    def setUp(self):
        """No fixtures are required for the identification test."""
        pass
    def test_00_Print(self):
        """Emit the module identifier."""
        print('Id: test_Insteon_Link')
class B01_Decode(SetupMixin, unittest.TestCase):
    """Decoding tests for Insteon link-management messages."""
    def setUp(self):
        # NOTE(review): SetupMixin.setUp requires a ``p_root`` argument; this
        # call omits it and will raise TypeError when the fixture runs —
        # confirm the intended root element and pass it here.
        SetupMixin.setUp(self)
    def test_53_Decode(self):
        """Decode a 0x53 (all-link completed) message without error."""
        self.m_obj._Message = MSG_53
        # print(FormatBytes(self.m_obj._Message))
        # print(PrettyFormatAny.form(self.m_obj._Message, 'B01-53-A - Obj'))
        _l_ret = linkDecode.decode_0x53(self.m_obj)
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/House/Family/Insteon/_test/test_insteon_link.py
|
Python
|
mit
| 1,520
|
[
"Brian"
] |
d3135ef9f86ef806ac7139fcfd55cec5028dc15be0a4d13cc05f0abbc0f52df1
|
'''
synbiochem (c) University of Manchester 2016
synbiochem is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
import sys
from Bio.Restriction.Restriction import RestrictionBatch
from Bio.Seq import Seq
from synbiochem.utils import ice_utils
def digest(ice_url, ice_username, ice_password, ice_ids):
    '''Digest sequences from ICE with BamHI/EcoRI and print fragment lengths.

    For each ICE entry the sequence is cut at every BamHI/EcoRI site
    (treated as circular, linear=False) and a tab-separated line of
    the entry id followed by the fragment lengths is printed.
    '''
    ice_client = ice_utils.ICEClient(ice_url, ice_username, ice_password)
    rest_batch = RestrictionBatch(['BamHI', 'EcoRI'])
    for ice_id in ice_ids:
        seq = ice_client.get_ice_entry(ice_id).get_seq()
        rest_sites = rest_batch.search(Seq(seq), linear=False)
        # Biopython reports 1-based cut positions; shift to 0-based and
        # prepend the origin so slicing starts at position 0.
        rest_sites = sorted([0] + [site - 1
                                   for sites in rest_sites.values()
                                   for site in sites])
        dig_seqs = [seq[i:j]
                    for i, j in zip(rest_sites, rest_sites[1:] + [None])]
        # Circular sequence: merge the trailing fragment with the leading one.
        dig_seqs = dig_seqs[1:-1] + [dig_seqs[-1] + dig_seqs[0]]
        # BUGFIX/modernization: the Python-2-only print statement made this
        # file a SyntaxError under Python 3; the parenthesised single-argument
        # form behaves identically on Python 2 and 3.
        print('\t'.join([ice_id] + [str(len(dig_seq))
                                    for dig_seq in dig_seqs]))
def main(args):
    '''Entry point: expects [ice_url, ice_username, ice_password, id, ...].'''
    ice_url = args[0]
    ice_username = args[1]
    ice_password = args[2]
    ice_ids = args[3:]
    digest(ice_url, ice_username, ice_password, ice_ids)
if __name__ == '__main__':
    # Usage: digest.py ice_url ice_username ice_password ice_id [ice_id ...]
    main(sys.argv[1:])
|
neilswainston/development-py
|
synbiochemdev/design/digest.py
|
Python
|
mit
| 1,303
|
[
"VisIt"
] |
71ca4b4f6da3d88ad6c223ede754120aad360cbc925dd492665762e83f61ed09
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with classes to integrate MM charges into
a QM calculation.
"""
from __future__ import absolute_import
import re
import os
import math
from psi4.driver import *
class Diffuse(object):
    """A diffuse charge field fitted to the SCF density of a molecule.

    Runs an SCF on the molecule, fits auxiliary-basis charges to the
    resulting density, and can populate a psi4 ExternalPotential with the
    fitted electronic charges plus the nuclear point charges.
    """
    def __init__(self, molecule, basisname, ribasisname):
        self.molecule = molecule
        self.basisname = basisname
        self.ribasisname = ribasisname
        # Filled in by fitScf()/fitGeneral():
        self.basis = None    # primary BasisSet
        self.ribasis = None  # auxiliary (DF) BasisSet
        self.da = None       # fitted diffuse charge vector
        self.Da = None       # alpha density matrix from the SCF
        self.wfn = None      # SCF wavefunction
    def __str__(self):
        s = '   => Diffuse <=\n\n'
        s = s + '    ' + str(self.molecule) + '\n'
        s = s + '    ' + self.basisname + '\n'
        s = s + '    ' + self.ribasisname + '\n'
        s = s + '\n'
        return s
    def fitScf(self):
        """Function to run scf and fit a system of diffuse charges to
        resulting density.
        """
        # Remember which options the user had changed so we can restore
        # both their values and their "changed" flags afterwards.
        basisChanged = core.has_option_changed("BASIS")
        ribasisChanged = core.has_option_changed("DF_BASIS_SCF")
        scftypeChanged = core.has_option_changed("SCF_TYPE")
        basis = core.get_option("BASIS")
        ribasis = core.get_option("DF_BASIS_SCF")
        scftype = core.get_option("SCF_TYPE")
        core.print_out("    => Diffuse SCF (Determines Da) <=\n\n")
        core.set_global_option("BASIS", self.basisname)
        core.set_global_option("DF_BASIS_SCF", self.ribasisname)
        core.set_global_option("SCF_TYPE", "DF")
        E, ref = energy('scf', return_wfn=True, molecule=self.molecule)
        self.wfn = ref
        core.print_out("\n")
        self.fitGeneral()
        core.clean()
        # Restore the caller's option state.
        core.set_global_option("BASIS", basis)
        core.set_global_option("DF_BASIS_SCF", ribasis)
        core.set_global_option("SCF_TYPE", scftype)
        if not basisChanged:
            core.revoke_option_changed("BASIS")
        if not ribasisChanged:
            core.revoke_option_changed("DF_BASIS_SCF")
        if not scftypeChanged:
            core.revoke_option_changed("SCF_TYPE")
    def fitGeneral(self):
        """Function to perform a general fit of diffuse charges
        to wavefunction density.
        """
        core.print_out("    => Diffuse Charge Fitting (Determines da) <=\n\n")
        self.Da = self.wfn.Da()
        self.basis = self.wfn.basisset()
        parser = core.Gaussian94BasisSetParser()
        self.ribasis = core.BasisSet.construct(parser, self.molecule, "DF_BASIS_SCF")
        fitter = core.DFChargeFitter()
        fitter.setPrimary(self.basis)
        fitter.setAuxiliary(self.ribasis)
        fitter.setD(self.Da)
        self.da = fitter.fit()
        # Doubled to account for both alpha and beta electrons (Da only).
        self.da.scale(2.0)
    def populateExtern(self, extern):
        """Add this diffuse field (electronic + nuclear parts) to *extern*."""
        # Electronic Part
        extern.addBasis(self.ribasis, self.da)
        # Nuclear Part
        for A in range(0, self.molecule.natom()):
            extern.addCharge(self.molecule.Z(A), self.molecule.x(A), self.molecule.y(A), self.molecule.z(A))
class QMMM(object):
    """Container for an MM environment: explicit point charges plus diffuse
    charge fields, which together populate a psi4 ExternalPotential."""

    def __init__(self):
        self.charges = []
        self.diffuses = []
        self.extern = core.ExternalPotential()

    def addDiffuse(self, diffuse):
        """Function to add a diffuse charge field *diffuse*."""
        self.diffuses.append(diffuse)

    def addChargeBohr(self, Q, x, y, z):
        """Function to add a point charge of magnitude *Q* at
        position (*x*, *y*, *z*) Bohr.
        """
        self.charges.append([Q, x, y, z])

    def addChargeAngstrom(self, Q, x, y, z):
        """Function to add a point charge of magnitude *Q* at
        position (*x*, *y*, *z*) Angstroms.
        """
        b2a = constants.bohr2angstroms
        self.charges.append([Q, x / b2a, y / b2a, z / b2a])

    def __str__(self):
        pieces = [' ==> QMMM <==\n\n']
        pieces.append(' => Charges (a.u.) <=\n\n')
        pieces.append(' %11s %11s %11s %11s\n' % ('Z', 'x', 'y', 'z'))
        for Q, x, y, z in self.charges:
            pieces.append(' %11.7f %11.3E %11.3E %11.3E\n' % (Q, x, y, z))
        pieces.append('\n')
        pieces.append(' => Diffuses <=\n\n')
        pieces.extend(str(diffuse) for diffuse in self.diffuses)
        return ''.join(pieces)

    def populateExtern(self):
        """Function to define a charge field external to the
        molecule through point and diffuse charges.
        """
        # Charges
        for Q, x, y, z in self.charges:
            self.extern.addCharge(Q, x, y, z)
        # Diffuses
        for diffuse in self.diffuses:
            diffuse.populateExtern(self.extern)
|
rmcgibbo/psi4public
|
psi4/driver/qmmm.py
|
Python
|
lgpl-3.0
| 5,536
|
[
"Psi4"
] |
db0d606fe612491704f3bedbb5c1fcc065bb20fd1c85746bd97cf26c0f906b8e
|
# coding: utf-8
from __future__ import unicode_literals, division
"""
Created on Jun 1, 2012
"""
__author__ = "Shyue Ping Ong, Stephen Dacek"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Jun 1, 2012"
import unittest
import os
import glob
import shutil
import datetime
from custodian.vasp.handlers import VaspErrorHandler, \
UnconvergedErrorHandler, MeshSymmetryErrorHandler, WalltimeHandler, \
MaxForceErrorHandler, PositiveEnergyErrorHandler, PotimErrorHandler, \
FrozenJobErrorHandler, AliasingErrorHandler, StdErrHandler
from pymatgen.io.vasp import Incar, Poscar, Structure, Kpoints, VaspInput
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
cwd = os.getcwd()
def clean_dir():
    """Delete the error/checkpoint tarballs a custodian run leaves in cwd."""
    leftovers = glob.glob("error.*.tar.gz") + glob.glob("custodian.chk.*.tar.gz")
    for leftover in leftovers:
        os.remove(leftover)
class VaspErrorHandlerTest(unittest.TestCase):
    """Exercises VaspErrorHandler (and FrozenJob/MeshSymmetry handlers)
    against canned VASP output files in ``test_files``, checking both error
    detection and the input-set corrections applied."""

    def setUp(self):
        # Handlers need a pseudopotential dir; fall back to the test dir.
        if "VASP_PSP_DIR" not in os.environ:
            os.environ["VASP_PSP_DIR"] = test_dir
        os.chdir(test_dir)
        # Keep pristine copies of the inputs; tearDown restores them.
        shutil.copy("INCAR", "INCAR.orig")
        shutil.copy("KPOINTS", "KPOINTS.orig")
        shutil.copy("POSCAR", "POSCAR.orig")
        shutil.copy("CHGCAR", "CHGCAR.orig")

    def test_frozen_job(self):
        h = FrozenJobErrorHandler()
        d = h.correct()
        self.assertEqual(d['errors'], ['Frozen job'])
        # Correction switches the electronic minimizer to ALGO = Normal.
        self.assertEqual(Incar.from_file("INCAR")['ALGO'], "Normal")

    def test_check_correct(self):
        h = VaspErrorHandler("vasp.teterror")
        h.check()
        d = h.correct()
        self.assertEqual(d["errors"], ['tet'])
        self.assertEqual(d["actions"],
                         [{'action': {'_set': {'ISMEAR': 0}},
                           'dict': 'INCAR'}])
        h = VaspErrorHandler("vasp.sgrcon")
        h.check()
        d = h.correct()
        self.assertEqual(d["errors"], ['rot_matrix'])
        self.assertEqual(set([a["dict"] for a in d["actions"]]),
                         {"KPOINTS"})
        h = VaspErrorHandler("vasp.real_optlay")
        h.check()
        d = h.correct()
        self.assertEqual(d["errors"], ['real_optlay'])
        self.assertEqual(d["actions"],
                         [{'action': {'_set': {'LREAL': False}},
                           'dict': 'INCAR'}])
        # Large cell: first correction flips LREAL to True; a recurrence of
        # the same error then falls back to LREAL = False.
        subdir = os.path.join(test_dir, "large_cell_real_optlay")
        os.chdir(subdir)
        shutil.copy("INCAR", "INCAR.orig")
        h = VaspErrorHandler()
        h.check()
        d = h.correct()
        self.assertEqual(d["errors"], ['real_optlay'])
        vi = VaspInput.from_directory(".")
        self.assertEqual(vi["INCAR"]["LREAL"], True)
        h.check()
        d = h.correct()
        self.assertEqual(d["errors"], ['real_optlay'])
        vi = VaspInput.from_directory(".")
        self.assertEqual(vi["INCAR"]["LREAL"], False)
        # Restore the subdir's inputs and remove the error archives.
        shutil.copy("INCAR.orig", "INCAR")
        os.remove("INCAR.orig")
        os.remove("error.1.tar.gz")
        os.remove("error.2.tar.gz")
        os.chdir(test_dir)

    def test_mesh_symmetry(self):
        h = MeshSymmetryErrorHandler("vasp.ibzkpt")
        h.check()
        d = h.correct()
        self.assertEqual(d["errors"], ['mesh_symmetry'])
        self.assertEqual(d["actions"],
                         [{'action': {'_set': {'kpoints': [[4, 4, 4]]}},
                           'dict': 'KPOINTS'}])

    def test_dentet(self):
        h = VaspErrorHandler("vasp.dentet")
        h.check()
        d = h.correct()
        self.assertEqual(d["errors"], ['dentet'])
        self.assertEqual(d["actions"],
                         [{'action': {'_set': {'ISMEAR': 0}},
                           'dict': 'INCAR'}])

    def test_brmix(self):
        # Successive corrections for the brmix error try different fixes.
        h = VaspErrorHandler("vasp.brmix")
        self.assertEqual(h.check(), True)
        # The first (no good OUTCAR) correction, check IMIX
        d = h.correct()
        self.assertEqual(d["errors"], ['brmix'])
        vi = VaspInput.from_directory(".")
        self.assertEqual(vi["INCAR"]["IMIX"], 1)
        self.assertTrue(os.path.exists("CHGCAR"))
        # The next correction check Gamma and evenize
        h.correct()
        vi = VaspInput.from_directory(".")
        self.assertFalse("IMIX" in vi["INCAR"])
        self.assertTrue(os.path.exists("CHGCAR"))
        if vi["KPOINTS"].style == Kpoints.supported_modes.Gamma and vi["KPOINTS"].num_kpts < 1:
            all_kpts_even = all([
                bool(n % 2 == 0) for n in vi["KPOINTS"].kpts[0]
            ])
            self.assertFalse(all_kpts_even)
        # The next correction check ISYM and no CHGCAR
        h.correct()
        vi = VaspInput.from_directory(".")
        self.assertEqual(vi["INCAR"]["ISYM"], 0)
        self.assertFalse(os.path.exists("CHGCAR"))
        # With NELECT present in the INCAR, brmix must not be flagged.
        shutil.copy("INCAR.nelect", "INCAR")
        h = VaspErrorHandler("vasp.brmix")
        self.assertEqual(h.check(), False)
        d = h.correct()
        self.assertEqual(d["errors"], [])

    def test_too_few_bands(self):
        os.chdir(os.path.join(test_dir, "too_few_bands"))
        shutil.copy("INCAR", "INCAR.orig")
        h = VaspErrorHandler("vasp.too_few_bands")
        h.check()
        d = h.correct()
        self.assertEqual(d["errors"], ['too_few_bands'])
        self.assertEqual(d["actions"],
                         [{'action': {'_set': {'NBANDS': 501}},
                           'dict': 'INCAR'}])
        clean_dir()
        shutil.move("INCAR.orig", "INCAR")
        os.chdir(test_dir)

    def test_rot_matrix(self):
        if "VASP_PSP_DIR" not in os.environ:
            os.environ["VASP_PSP_DIR"] = test_dir
        subdir = os.path.join(test_dir, "poscar_error")
        os.chdir(subdir)
        shutil.copy("KPOINTS", "KPOINTS.orig")
        h = VaspErrorHandler()
        h.check()
        d = h.correct()
        self.assertEqual(d["errors"], ["rot_matrix"])
        os.remove(os.path.join(subdir, "error.1.tar.gz"))
        shutil.copy("KPOINTS.orig", "KPOINTS")
        os.remove("KPOINTS.orig")

    def test_to_from_dict(self):
        # as_dict()/from_dict() round-trip must preserve configuration.
        h = VaspErrorHandler("random_name")
        h2 = VaspErrorHandler.from_dict(h.as_dict())
        self.assertEqual(type(h2), type(h))
        self.assertEqual(h2.output_filename, "random_name")

    def test_pssyevx(self):
        h = VaspErrorHandler("vasp.pssyevx")
        self.assertEqual(h.check(), True)
        self.assertEqual(h.correct()["errors"], ["pssyevx"])
        i = Incar.from_file("INCAR")
        self.assertEqual(i["ALGO"], "Normal")

    def test_eddrmm(self):
        h = VaspErrorHandler("vasp.eddrmm")
        self.assertEqual(h.check(), True)
        # First correction: ALGO = Normal; second: reduce POTIM.
        self.assertEqual(h.correct()["errors"], ["eddrmm"])
        i = Incar.from_file("INCAR")
        self.assertEqual(i["ALGO"], "Normal")
        self.assertEqual(h.correct()["errors"], ["eddrmm"])
        i = Incar.from_file("INCAR")
        self.assertEqual(i["POTIM"], 0.25)

    def test_nicht_konv(self):
        h = VaspErrorHandler("vasp.nicht_konvergent")
        # Lower the large-cell threshold so the LREAL correction triggers.
        h.natoms_large_cell = 5
        self.assertEqual(h.check(), True)
        self.assertEqual(h.correct()["errors"], ["nicht_konv"])
        i = Incar.from_file("INCAR")
        self.assertEqual(i["LREAL"], True)

    def test_edddav(self):
        h = VaspErrorHandler("vasp.edddav")
        self.assertEqual(h.check(), True)
        self.assertEqual(h.correct()["errors"], ["edddav"])
        # Correction deletes the CHGCAR.
        self.assertFalse(os.path.exists("CHGCAR"))

    def test_gradient_not_orthogonal(self):
        h = VaspErrorHandler("vasp.gradient_not_orthogonal")
        self.assertEqual(h.check(), True)
        self.assertEqual(h.correct()["errors"], ["grad_not_orth"])
        i = Incar.from_file("INCAR")
        self.assertEqual(i["ISMEAR"], 0)

    def tearDown(self):
        # Restore pristine inputs and drop any error archives produced.
        os.chdir(test_dir)
        shutil.move("INCAR.orig", "INCAR")
        shutil.move("KPOINTS.orig", "KPOINTS")
        shutil.move("POSCAR.orig", "POSCAR")
        shutil.move("CHGCAR.orig", "CHGCAR")
        clean_dir()
        os.chdir(cwd)
class AliasingErrorHandlerTest(unittest.TestCase):
    """Tests AliasingErrorHandler corrections: FFT-grid (NGX/NGY/NGZ) fixes
    plus deletion of now-inconsistent CHGCAR/WAVECAR files."""

    def setUp(self):
        if "VASP_PSP_DIR" not in os.environ:
            os.environ["VASP_PSP_DIR"] = test_dir
        os.chdir(test_dir)
        # Keep pristine copies of the inputs; tearDown restores them.
        shutil.copy("INCAR", "INCAR.orig")
        shutil.copy("KPOINTS", "KPOINTS.orig")
        shutil.copy("POSCAR", "POSCAR.orig")
        shutil.copy("CHGCAR", "CHGCAR.orig")

    def test_aliasing(self):
        os.chdir(os.path.join(test_dir, "aliasing"))
        shutil.copy("INCAR", "INCAR.orig")
        h = AliasingErrorHandler("vasp.aliasing")
        h.check()
        d = h.correct()
        # Restore the subdir state before asserting on the result dict.
        shutil.move("INCAR.orig", "INCAR")
        clean_dir()
        os.chdir(test_dir)
        self.assertEqual(d["errors"], ['aliasing'])
        self.assertEqual(d["actions"],
                         [{'action': {'_set': {'NGX': 34}},
                           'dict': 'INCAR'}, {"file": "CHGCAR",
                           "action": {"_file_delete": {'mode': "actual"}}},
                          {"file": "WAVECAR",
                           "action": {"_file_delete": {'mode': "actual"}}}])

    def test_aliasing_incar(self):
        os.chdir(os.path.join(test_dir, "aliasing"))
        shutil.copy("INCAR", "INCAR.orig")
        h = AliasingErrorHandler("vasp.aliasing_incar")
        h.check()
        d = h.correct()
        self.assertEqual(d["errors"], ['aliasing_incar'])
        self.assertEqual(d["actions"],
                         [{'action': {'_unset': {'NGY':1, 'NGZ': 1}},
                           'dict': 'INCAR'}, {"file": "CHGCAR",
                           "action": {"_file_delete": {'mode': "actual"}}},
                          {"file": "WAVECAR",
                           "action": {"_file_delete": {'mode': "actual"}}}])
        # With ICHARG = 10 the charge files are kept: only the INCAR unset.
        incar = Incar.from_file('INCAR.orig')
        incar["ICHARG"] = 10
        incar.write_file("INCAR")
        d = h.correct()
        self.assertEqual(d["errors"], ['aliasing_incar'])
        self.assertEqual(d["actions"],
                         [{'action': {'_unset': {'NGY': 1, 'NGZ': 1}},
                           'dict': 'INCAR'}])
        shutil.move("INCAR.orig", "INCAR")
        clean_dir()
        os.chdir(test_dir)

    def tearDown(self):
        # Restore pristine inputs and drop any error archives produced.
        os.chdir(test_dir)
        shutil.move("INCAR.orig", "INCAR")
        shutil.move("KPOINTS.orig", "KPOINTS")
        shutil.move("POSCAR.orig", "POSCAR")
        shutil.move("CHGCAR.orig", "CHGCAR")
        clean_dir()
        os.chdir(cwd)
class UnconvergedErrorHandlerTest(unittest.TestCase):
    """Tests UnconvergedErrorHandler detection/correction on the canned
    ``unconverged`` run, plus its as_dict()/from_dict() round trip."""

    def setUp(self):
        # Fix: this is a plain instance method; the parameter was previously
        # (misleadingly) named ``cls`` as if it were a classmethod.
        if "VASP_PSP_DIR" not in os.environ:
            os.environ["VASP_PSP_DIR"] = test_dir
        os.chdir(test_dir)

    def test_check_correct(self):
        subdir = os.path.join(test_dir, "unconverged")
        os.chdir(subdir)
        # Preserve inputs; this test restores them itself below.
        shutil.copy("INCAR", "INCAR.orig")
        shutil.copy("KPOINTS", "KPOINTS.orig")
        shutil.copy("POSCAR", "POSCAR.orig")
        shutil.copy("CONTCAR", "CONTCAR.orig")
        h = UnconvergedErrorHandler()
        self.assertTrue(h.check())
        d = h.correct()
        self.assertEqual(d["errors"], ['Unconverged'])
        os.remove(os.path.join(subdir, "error.1.tar.gz"))
        shutil.move("INCAR.orig", "INCAR")
        shutil.move("KPOINTS.orig", "KPOINTS")
        shutil.move("POSCAR.orig", "POSCAR")
        shutil.move("CONTCAR.orig", "CONTCAR")

    def test_to_from_dict(self):
        # Serialization round trip preserves type and configuration.
        h = UnconvergedErrorHandler("random_name.xml")
        h2 = UnconvergedErrorHandler.from_dict(h.as_dict())
        self.assertEqual(type(h2), UnconvergedErrorHandler)
        self.assertEqual(h2.output_filename, "random_name.xml")

    @classmethod
    def tearDownClass(cls):
        # Return to the original working directory once all tests ran.
        os.chdir(cwd)
class ZpotrfErrorHandlerTest(unittest.TestCase):
    """Tests the two-stage 'zpotrf' correction: volume scaling on the very
    first ionic step, POTIM reduction otherwise."""

    def setUp(self):
        if "VASP_PSP_DIR" not in os.environ:
            os.environ["VASP_PSP_DIR"] = test_dir
        os.chdir(test_dir)
        os.chdir('zpotrf')
        # Keep pristine copies of the inputs; tearDown restores them.
        shutil.copy("POSCAR", "POSCAR.orig")
        shutil.copy("INCAR", "INCAR.orig")

    def test_first_step(self):
        # Empty OSZICAR = failure on the first ionic step: the correction
        # scales the cell volume by 1.2 (so volume by 1.2**3).
        shutil.copy("OSZICAR.empty", "OSZICAR")
        s1 = Structure.from_file("POSCAR")
        h = VaspErrorHandler("vasp.out")
        self.assertEqual(h.check(), True)
        d = h.correct()
        self.assertEqual(d['errors'], ['zpotrf'])
        s2 = Structure.from_file("POSCAR")
        self.assertAlmostEqual(s2.volume, s1.volume * 1.2 ** 3, 3)

    def test_potim_correction(self):
        # With at least one completed ionic step, the volume is untouched
        # and POTIM is reduced instead.
        shutil.copy("OSZICAR.one_step", "OSZICAR")
        s1 = Structure.from_file("POSCAR")
        h = VaspErrorHandler("vasp.out")
        self.assertEqual(h.check(), True)
        d = h.correct()
        self.assertEqual(d['errors'], ['zpotrf'])
        s2 = Structure.from_file("POSCAR")
        self.assertAlmostEqual(s2.volume, s1.volume, 3)
        self.assertAlmostEqual(Incar.from_file("INCAR")['POTIM'], 0.25)

    def tearDown(self):
        # Restore inputs, drop the scratch OSZICAR and any error archives.
        os.chdir(test_dir)
        os.chdir('zpotrf')
        shutil.move("POSCAR.orig", "POSCAR")
        shutil.move("INCAR.orig", "INCAR")
        os.remove("OSZICAR")
        clean_dir()
        os.chdir(cwd)
class MaxForceErrorHandlerTest(unittest.TestCase):
    """Tests MaxForceErrorHandler: restart from CONTCAR with a tightened
    EDIFFG when residual forces are too large."""

    def setUp(self):
        if "VASP_PSP_DIR" not in os.environ:
            os.environ["VASP_PSP_DIR"] = test_dir
        os.chdir(test_dir)

    def test_check_correct(self):
        #NOTE: the vasprun here has had projected and partial eigenvalues removed
        subdir = os.path.join(test_dir, "max_force")
        os.chdir(subdir)
        # Preserve inputs; this test restores them itself below.
        shutil.copy("INCAR", "INCAR.orig")
        shutil.copy("POSCAR", "POSCAR.orig")
        h = MaxForceErrorHandler()
        self.assertTrue(h.check())
        d = h.correct()
        self.assertEqual(d["errors"], ['MaxForce'])
        os.remove(os.path.join(subdir, "error.1.tar.gz"))
        incar = Incar.from_file('INCAR')
        poscar = Poscar.from_file('POSCAR')
        contcar = Poscar.from_file('CONTCAR')
        shutil.move("INCAR.orig", "INCAR")
        shutil.move("POSCAR.orig", "POSCAR")
        # Correction continues from the last geometry (POSCAR <- CONTCAR)
        # and tightens the force convergence criterion.
        self.assertEqual(poscar.structure, contcar.structure)
        self.assertAlmostEqual(incar['EDIFFG'], 0.005)

    def tearDown(self):
        os.chdir(cwd)
class WalltimeHandlerTest(unittest.TestCase):
    """Tests WalltimeHandler's time-budget checks and the STOPCAR contents
    it writes (ionic- vs electronic-step stop)."""

    def setUp(self):
        os.chdir(test_dir)

    def test_check_and_correct(self):
        # The test OSZICAR file has 60 ionic steps. Let's try a 1 hr wall
        # time with a 1min buffer
        h = WalltimeHandler(wall_time=3600, buffer_time=120)
        self.assertFalse(h.check())
        # This makes sure the check returns True when the time left is less
        # than the buffer time.
        h.start_time = datetime.datetime.now() - datetime.timedelta(minutes=59)
        self.assertTrue(h.check())
        # This makes sure the check returns True when the time left is less
        # than 3 x the average time per ionic step. We have a 62 min wall
        # time, a very short buffer time, but the start time was 62 mins ago
        h = WalltimeHandler(wall_time=3720, buffer_time=10)
        h.start_time = datetime.datetime.now() - datetime.timedelta(minutes=62)
        self.assertTrue(h.check())
        # Test that the STOPCAR is written correctly.
        h.correct()
        with open("STOPCAR") as f:
            content = f.read()
        # Default mode stops after the current ionic step.
        self.assertEqual(content, "LSTOP = .TRUE.")
        os.remove("STOPCAR")
        # electronic_step_stop=True stops after the electronic step instead.
        h = WalltimeHandler(wall_time=3600, buffer_time=120,
                            electronic_step_stop=True)
        self.assertFalse(h.check())
        h.start_time = datetime.datetime.now() - datetime.timedelta(minutes=59)
        self.assertTrue(h.check())
        h.correct()
        with open("STOPCAR") as f:
            content = f.read()
        self.assertEqual(content, "LABORT = .TRUE.")
        os.remove("STOPCAR")

    @classmethod
    def tearDownClass(cls):
        os.chdir(cwd)
class PositiveEnergyHandlerTest(unittest.TestCase):
    """Tests PositiveEnergyErrorHandler: a run ending with positive total
    energy is flagged and corrected by switching to ALGO = Normal."""

    def setUp(self):
        # Fix: this is a plain instance method; the parameter was previously
        # (misleadingly) named ``cls`` as if it were a classmethod.
        os.chdir(test_dir)

    def test_check_correct(self):
        subdir = os.path.join(test_dir, "positive_energy")
        os.chdir(subdir)
        # Preserve inputs; this test restores them itself below.
        shutil.copy("INCAR", "INCAR.orig")
        shutil.copy("POSCAR", "POSCAR.orig")
        h = PositiveEnergyErrorHandler()
        self.assertTrue(h.check())
        d = h.correct()
        self.assertEqual(d["errors"], ['Positive energy'])
        os.remove(os.path.join(subdir, "error.1.tar.gz"))
        incar = Incar.from_file('INCAR')
        shutil.move("INCAR.orig", "INCAR")
        shutil.move("POSCAR.orig", "POSCAR")
        self.assertEqual(incar['ALGO'], 'Normal')

    @classmethod
    def tearDownClass(cls):
        # Return to the original working directory once all tests ran.
        os.chdir(cwd)
class PotimHandlerTest(unittest.TestCase):
    """Tests PotimErrorHandler: the correction keeps POTIM but switches the
    ionic relaxation algorithm to IBRION = 3."""

    def setUp(self):
        # Fix: this is a plain instance method; the parameter was previously
        # (misleadingly) named ``cls`` as if it were a classmethod.
        os.chdir(test_dir)

    def test_check_correct(self):
        subdir = os.path.join(test_dir, "potim")
        os.chdir(subdir)
        # Preserve inputs; this test restores them itself below.
        shutil.copy("INCAR", "INCAR.orig")
        shutil.copy("POSCAR", "POSCAR.orig")
        incar = Incar.from_file('INCAR')
        original_potim = incar['POTIM']
        h = PotimErrorHandler()
        self.assertTrue(h.check())
        d = h.correct()
        self.assertEqual(d["errors"], ['POTIM'])
        os.remove(os.path.join(subdir, "error.1.tar.gz"))
        incar = Incar.from_file('INCAR')
        new_potim = incar['POTIM']
        shutil.move("INCAR.orig", "INCAR")
        shutil.move("POSCAR.orig", "POSCAR")
        # POTIM itself is unchanged; only the relaxation algorithm changes.
        self.assertEqual(original_potim, new_potim)
        self.assertEqual(incar['IBRION'], 3)

    @classmethod
    def tearDownClass(cls):
        # Return to the original working directory once all tests ran.
        os.chdir(cwd)
class LrfCommHandlerTest(unittest.TestCase):
    """Tests StdErrHandler's 'lrf_comm' correction: ISTART is set to 1 once,
    and the fix is not applied a second time."""

    def setUp(self):
        os.chdir(test_dir)
        os.chdir('lrf_comm')
        # Keep pristine copies of every file the correction may touch.
        for f in ["INCAR", "OUTCAR", "std_err.txt"]:
            shutil.copy(f, f+".orig")

    def test_lrf_comm(self):
        h = StdErrHandler("std_err.txt")
        self.assertEqual(h.check(), True)
        d = h.correct()
        self.assertEqual(d["errors"], ['lrf_comm'])
        vi = VaspInput.from_directory(".")
        self.assertEqual(vi["INCAR"]["ISTART"], 1)
        # Second pass: same error detected, but no new action taken.
        self.assertEqual(h.check(), True)
        d = h.correct()
        self.assertEqual(d["errors"], ['lrf_comm'])
        self.assertEqual(d["actions"], [])  # don't correct twice

    def tearDown(self):
        # Restore the pristine files and drop any error archives.
        os.chdir(test_dir)
        os.chdir('lrf_comm')
        for f in ["INCAR", "OUTCAR", "std_err.txt"]:
            shutil.move(f+".orig", f)
        clean_dir()
        os.chdir(cwd)
class KpointsTransHandlerTest(unittest.TestCase):
    """Tests StdErrHandler's 'kpoints_trans' correction: the k-mesh is reset
    once, and the fix is not applied a second time."""

    def setUp(self):
        # Work inside the shared test-file directory; keep a pristine KPOINTS.
        os.chdir(test_dir)
        shutil.copy("KPOINTS", "KPOINTS.orig")

    def test_kpoints_trans(self):
        handler = StdErrHandler("std_err.txt.kpoints_trans")
        self.assertEqual(handler.check(), True)
        result = handler.correct()
        self.assertEqual(result["errors"], ['kpoints_trans'])
        expected_actions = [{u'action': {u'_set':
                                         {u'kpoints': [[4, 4, 4]]}},
                             u'dict': u'KPOINTS'}]
        self.assertEqual(result["actions"], expected_actions)
        # Second pass: the same error is still detected, but no further
        # correction may be applied.
        self.assertEqual(handler.check(), True)
        result = handler.correct()
        self.assertEqual(result["errors"], ['kpoints_trans'])
        self.assertEqual(result["actions"], [])  # don't correct twice

    def tearDown(self):
        # Restore the pristine KPOINTS and drop any error archives.
        shutil.move("KPOINTS.orig", "KPOINTS")
        clean_dir()
        os.chdir(cwd)
class OutOfMemoryHandlerTest(unittest.TestCase):
    """Tests StdErrHandler's out-of-memory corrections (KPAR is halved) and
    its seg-fault correction (ISMEAR forced to '0', applied only once)."""

    def setUp(self):
        os.chdir(test_dir)
        shutil.copy("INCAR", "INCAR.orig")

    def _set_kpar(self, value):
        # Helper: write KPAR=value into the INCAR via VaspModder. This is
        # the test precondition previously copy-pasted four times below.
        from custodian.vasp.interpreter import VaspModder
        vi = VaspInput.from_directory(".")
        VaspModder(vi=vi).apply_actions([{"dict": "INCAR",
                                          "action": {"_set": {"KPAR": value}}}])

    def test_oom(self):
        self._set_kpar(4)
        h = StdErrHandler("std_err.txt.oom", correct_out_of_memory=True)
        self.assertEqual(h.check(), True)
        d = h.correct()
        self.assertEqual(d["errors"], ['out_of_memory'])
        self.assertEqual(d["actions"],
                         [{'dict': 'INCAR',
                           'action': {'_set': {'KPAR': 2}}}])
        # A second OOM (no KPAR reset) halves KPAR again: 2 -> 1.
        h = StdErrHandler("std_err.txt.oom2", correct_out_of_memory=True)
        self.assertEqual(h.check(), True)
        d = h.correct()
        self.assertEqual(d["errors"], ['out_of_memory'])
        self.assertEqual(d["actions"],
                         [{'dict': 'INCAR',
                           'action': {'_set': {'KPAR': 1}}}])
        # Alternative OOM message variants trigger the same correction.
        self._set_kpar(4)
        h = StdErrHandler("std_err.txt.emlsp", correct_out_of_memory=True)
        self.assertEqual(h.check(), True)
        d = h.correct()
        self.assertEqual(d["errors"], ['out_of_memory'])
        self.assertEqual(d["actions"],
                         [{'dict': 'INCAR',
                           'action': {'_set': {'KPAR': 2}}}])
        self._set_kpar(4)
        h = StdErrHandler("std_err.txt.insufficient_mem", correct_out_of_memory=True)
        self.assertEqual(h.check(), True)
        d = h.correct()
        self.assertEqual(d["errors"], ['out_of_memory'])
        self.assertEqual(d["actions"],
                         [{'dict': 'INCAR',
                           'action': {'_set': {'KPAR': 2}}}])

    def test_seg_fault(self):
        self._set_kpar(4)
        h = StdErrHandler("std_err.txt.seg_fault", correct_seg_fault=True)
        self.assertEqual(h.check(), True)
        d = h.correct()
        self.assertEqual(d["errors"], ['seg_fault'])
        self.assertEqual(d["actions"],
                         [{'dict': 'INCAR',
                           'action': {'_set': {'ISMEAR': '0'}}}])
        self.assertEqual(h.check(), True)
        d = h.correct()
        self.assertEqual(d["errors"], ['seg_fault'])
        # no fix if still seg fault
        self.assertEqual(d["actions"], [])

    def tearDown(self):
        # Restore the pristine INCAR and drop any error archives.
        shutil.move("INCAR.orig", "INCAR")
        clean_dir()
        os.chdir(cwd)
if __name__ == "__main__":
    # Allow running the whole handler test suite directly from this file.
    unittest.main()
|
xhqu1981/custodian
|
custodian/vasp/tests/test_handlers.py
|
Python
|
mit
| 22,701
|
[
"VASP",
"pymatgen"
] |
461584f88c398475ba0cfd7077ec76e28eb33861f7ea37081fd1c393a9335e3d
|
import glob
import os
import pandas as pd
import xarray as xr
import numpy as np
import numpy as np
import datetime as dt
import itertools
import yaml
import logging
import sys
from tqdm import tqdm
import tempfile
from . import apply_func
def parse_to_dataframe(list_or_dict_or_dataframe):
    """Normalize a group specification into a pandas DataFrame.

    Accepts either:
      * a DataFrame, which is returned unchanged;
      * a list (e.g. of record dicts), passed to the DataFrame constructor;
      * a dict of ``column -> iterable of values``, expanded to the full
        cartesian product of the value lists (one row per combination).

    Raises
    ------
    TypeError
        For any other input type. (The original silently returned None,
        which deferred the failure to the caller.)
    """
    spec = list_or_dict_or_dataframe
    # isinstance() replaces the fragile exact type() comparisons so that
    # subclasses (e.g. OrderedDict) are accepted as well.
    if isinstance(spec, pd.DataFrame):
        return spec
    if isinstance(spec, list):
        return pd.DataFrame(spec)
    if isinstance(spec, dict):
        combos = itertools.product(*spec.values())
        return pd.DataFrame([dict(zip(spec.keys(), combo)) for combo in combos])
    raise TypeError('cannot parse %s to a DataFrame' % type(spec).__name__)
# test ;lkjdfg
def empty_multiindex(names):
    """
    Creates empty MultiIndex from a list of level names.

    The result holds a single all-None placeholder tuple, so the index
    carries the requested level names while containing no real labels yet.
    """
    placeholder = [tuple(None for _ in names)]
    return pd.MultiIndex.from_tuples(tuples=placeholder, names=names)
def apply_func_wrapper(
        func,
        lib_dataarrays,
        dataarrays,
        archive_out,
        xarray_function_wrapper=apply_func,
        dataarrays_wrapper = lambda *x: (*x,),
        groupby=None,
        apply_groups_in=[],
        apply_groups_out=[],
        divide_into_groups_extra = [],
        mode = 'numpy_output_to_disk_in_chunks',
        inherit_attributes = False,
        query=None,
        extra_attributes={},
        post_apply=None,
        initialize_array=None,
        copy_coordinates=False,
        update_pickle=True,
        force_recalculate=False,
        #lib_dataarrays = self.lib_dataarrays
        **kwargs,
):
    """
    purpose: this wrapper routine allows to apply a function in automatic groups of
    dataarrays in an archive or collection of archives, and dumps the output to
    a specified output archive.

    input:
        func: function applied to each group of input dataarrays.
        lib_dataarrays: DataFrame library of available dataarrays, indexed
            by (variable, source, time, space).
        dataarrays: dict mapping library index tuples to already-open
            xarray DataArrays; used as a cache before opening from disk.
        archive_out: archive receiving the resulting dataarrays.
        apply_groups_in / apply_groups_out: per-group input selection and
            output labelling specs (list/dict/DataFrame; see
            parse_to_dataframe). None entries in apply_groups_in are
            expanded to every value present in the group; callables in
            apply_groups_out are evaluated on the input labels.
        mode: 'xarray', 'numpy_output_to_disk_in_chunks' or
            'numpy_output_to_disk_no_chunks'.
        query: str (DataFrame.query) or pre-filtered DataFrame restricting
            the library before grouping.

    output: None; results are registered in archive_out.

    NOTE(review): the mutable default arguments (apply_groups_in=[], ...)
    are shared across calls; safe only as long as they are never mutated
    here. ``groupby`` appears to be unused in this body — confirm.
    """
    apply_groups_in_df = parse_to_dataframe(apply_groups_in)
    apply_groups_out_df = parse_to_dataframe(apply_groups_out)
    # Group over every library index level not consumed by apply_groups_in,
    # plus any explicitly requested extra levels.
    divide_into_groups = []
    for name in lib_dataarrays.index.names:
        if (name not in apply_groups_in_df.columns):
            divide_into_groups.append(name)
    for name in divide_into_groups_extra:
        if name not in divide_into_groups:
            divide_into_groups.append(name)
    # Optionally restrict the library via a query string or a pre-filtered
    # DataFrame.
    if query is not None:
        if type(query) == str:
            read_lib_dataarrays = lib_dataarrays.query(query).copy()
        elif type(query) == pd.DataFrame:
            read_lib_dataarrays = query
        else:
            raise ValueError('type of input query ' + query + 'not implemented')
    else:
        read_lib_dataarrays = lib_dataarrays.copy()
    if len(divide_into_groups) == 0:
        print('creating dummy group that encompasses the whole library')
        divide_into_groups = ['dummy_group']
        read_lib_dataarrays['dummy_group'] = ""
    groups_in_loop = read_lib_dataarrays.reset_index().groupby(divide_into_groups)
    print('Looping over data array input groups: ', list(groups_in_loop))
    for idx, group in tqdm(groups_in_loop):
        def always_tuple(idx):
            # Normalize a (possibly scalar) groupby key to a tuple.
            if type(idx) is not tuple:
                return (idx,)
            else:
                return idx
        def always_multi_index(index):
            # Normalize a flat Index to a single-level MultiIndex.
            if type(index) == pd.core.indexes.base.Index:
                return pd.MultiIndex.from_tuples([x.split()[::-1] for x in index], names=(index.name,))
            else:
                return index
        # Expand None placeholders in apply_groups_in to all values present
        # in the current group, then explode to one row per combination.
        apply_this_group_in_df = apply_groups_in_df.copy()
        for idx_group_in, row in apply_this_group_in_df.iterrows():
            for column in apply_this_group_in_df.columns:
                if row[column] is None:
                    # import pdb;pdb.set_trace()
                    apply_this_group_in_df[column].loc[idx_group_in] = group.loc[:, column].unique()
        for column in apply_this_group_in_df.columns:
            apply_this_group_in_df = apply_this_group_in_df.explode(column)
        multi_idx = always_tuple(idx)  # NOTE(review): appears unused
        # Columns that actually constrain the input selection.
        check_group_columns = []
        for column in apply_this_group_in_df.columns:
            if (~apply_this_group_in_df[column].isnull()).any():
                check_group_columns.append(column)
        group_reduced = group[check_group_columns].drop_duplicates().reset_index().drop(columns='index')
        apply_this_group_in_df_reduced = apply_this_group_in_df[check_group_columns].drop_duplicates().reset_index().drop(
            columns='index')
        # Only proceed when every requested input combination is present in
        # the current group.
        if len(apply_groups_in_df) == 0:
            all_input_available_in_this_group = True
        else:
            apply_this_group_in_reduced_index = pd.MultiIndex.from_frame(apply_this_group_in_df_reduced)
            group_reduced_indexed_for_apply_groups_in = group.set_index(list(apply_this_group_in_df_reduced.columns))
            group_reduced_indexed_for_apply_groups_in.index = always_multi_index(
                group_reduced_indexed_for_apply_groups_in.index)
            all_input_available_in_this_group = not (len(apply_this_group_in_reduced_index) != len(
                apply_this_group_in_reduced_index.intersection(group_reduced_indexed_for_apply_groups_in.index)))
        # group_indexed_for_apply_groups_in = group.set_index(list(apply_this_group_in_df.columns))
        #
        #
        #
        # apply_this_group_in_index = pd.MultiIndex.from_frame(apply_this_group_in_df)
        # group_indexed_for_apply_groups_in.index = always_multi_index(apply_this_group_in_df.index)
        #
        # stop
        #
        # #group_selection_index = pd.MultiIndex.from_frame(frame_for_selecting_xarrays_from_current_group)
        #
        # all_input_available_in_this_group = not (group_indexed_for_apply_groups_in.index != apply_groups_in_index).any()
        #
        if all_input_available_in_this_group:
            # Build the table of input dataarrays for this group.
            if len(apply_this_group_in_df) == 0:
                table_this_group_in = group
            else:
                apply_this_group_in_index = pd.MultiIndex.from_frame(apply_this_group_in_df)
                group_indexed_for_apply_groups_in = group.set_index(list(apply_this_group_in_df.columns))
                group_indexed_for_apply_groups_in.index = always_multi_index(group_indexed_for_apply_groups_in.index)
                # apply_this_group_in_df = apply_groups_in_df.copy()
                # for idx_group_in,row in apply_this_group_in_df.iterrows():
                #     for column in apply_this_group_in_df.columns:
                #         if row[column] is None:
                #             apply_this_group_in_df[column].loc[idx_group_in] = group.loc[:,column].unique()
                # import pdb; pdb.set_trace()
                # apply_this_group_in_index = apply_this_group_in_index.intersection(group_indexed_for_apply_groups_in.index,sort=None)
                # import pdb; pdb.set_trace()
                table_this_group_in = group_indexed_for_apply_groups_in.loc[apply_this_group_in_index]
            # apply_groups_out_index = pd.MultiIndex.from_frame(apply_groups_out_df)
            # group_indexed_for_apply_groups_out = group.set_index(list(apply_groups_out_df.columns))
            # group_indexed_for_apply_groups_out.index = always_multi_index(group_indexed_for_apply_groups_out.index)
            # Output label table: callables in apply_groups_out are resolved
            # by calling them on the corresponding input-label column.
            apply_groups_out_df_this_group = apply_groups_out_df.copy()
            print('converting label functions where necessary')
            for idx_group_out, row in apply_groups_out_df.iterrows():
                for key, value in row.items():
                    if type(apply_groups_out_df_this_group.loc[idx_group_out, key]).__name__ == 'function':
                        apply_groups_out_df_this_group.loc[idx_group_out, key] = apply_groups_out_df.loc[
                            idx_group_out, key](*tuple(table_this_group_in.reset_index()[[key]].values[:, 0]))
                    # elif apply_groups_out_df_this_group.loc[idx_group_out,key] is None:
                    #     raise ValueError('Not supported yet')
                    else:
                        apply_groups_out_df_this_group.loc[idx_group_out, key] = apply_groups_out_df.loc[idx_group_out, key]
            table_this_group_out = apply_groups_out_df_this_group
            # Collect the input dataarrays, preferring the in-memory cache
            # over re-opening files from disk.
            dataarrays_group_in = []
            for idx_group_in, row in table_this_group_in.iterrows():
                if table_this_group_in.index.names[0] is None:  # trivial case where no group_in selection is made
                    index_dataarray = [dict(zip(table_this_group_in.columns, row))[key] for key in
                                       lib_dataarrays.index.names]
                else:
                    index_dataarray = [{**dict(zip(table_this_group_in.index.names, idx_group_in)),
                                        **dict(zip(table_this_group_in.columns, row))}[key] for key in
                                       lib_dataarrays.index.names]
                # index_dataarray = [{**dict(zip(table_this_group_in.index.names,idx_group_in)),**dict(zip(table_this_group_in.columns,row))}[key] for key in self.lib_dataarrays.index.names]
                row_of_dataarray = lib_dataarrays.loc[tuple(index_dataarray)]
                if tuple(index_dataarray) in dataarrays.keys():
                    dataarrays_group_in.append(dataarrays[tuple(index_dataarray)])
                else:
                    dataarrays_group_in.append(xr.open_dataarray(row_of_dataarray.absolute_path))
            # ??????
            # for dataarray in dataarrays_group_in:
            #     dataarray.close()
            #     del dataarray
            # building attributes of output dataarrays
            ifile = 0
            attributes_dataarrays_out = []
            filenames_out = []
            dataarrays_out_already_available = []
            for idx_group_out, row in table_this_group_out.iterrows():
                attributes_dataarrays_out.append({})
                if inherit_attributes:
                    # Start from the attributes of a representative input row
                    # (clamped to the number of available inputs).
                    if table_this_group_in.index.names[0] is None:
                        attributes_in = \
                            dict(zip(table_this_group_in.columns,
                                     table_this_group_in.iloc[min(ifile, len(dataarrays_group_in) - 1)]))
                    else:
                        attributes_in = \
                            {
                                **dict(zip(table_this_group_in.index.names,
                                           table_this_group_in.iloc[min(ifile, len(dataarrays_group_in) - 1)].name)),
                                **dict(zip(table_this_group_in.columns,
                                           table_this_group_in.iloc[min(ifile, len(dataarrays_group_in) - 1)]))
                            }
                    for key, value in attributes_in.items():
                        if (key not in attributes_dataarrays_out[ifile]) and \
                                ((inherit_attributes == True) or (key in inherit_attributes)) and \
                                (key not in ['absolute_path_as_cache', 'absolute_path_for_reading', 'absolute_path',
                                             'path','available']):
                            attributes_dataarrays_out[ifile][key] = value
            # !!
                # Fill the library index keys of the output entry, taking
                # values (in order of precedence) from the output table, the
                # input index, or the input columns.
                for key in lib_dataarrays.index.names:
                    if key in table_this_group_out.columns:
                        attributes_dataarrays_out[ifile][key] = row[key]
                    elif key in table_this_group_in.index.names:
                        attributes_dataarrays_out[ifile][key] = \
                            table_this_group_in.iloc[min(ifile, len(dataarrays_group_in) - 1)].name[
                                table_this_group_in.index.names.index(key)]
                    else:
                        attributes_dataarrays_out[ifile][key] = \
                            table_this_group_in.iloc[min(ifile, len(dataarrays_group_in) - 1)][key]
                for key in row.index:
                    attributes_dataarrays_out[ifile][key] = row[key]
                # for key in self.lib_dataarrays.columns:
                #     if key == 'provider':
                #         import pdb; pdb.set_trace()
                #     if (key not in attributes_dataarrays_out[ifile]) and (inherit_attributes or (key in inherit_attributes)) and (key not in ['absolute_path','path']):
                #         attributes_dataarrays_out[ifile][key] = {**dict(zip(table_this_group_in.index.names,table_this_group_in.iloc[min(ifile,len(dataarrays_group_in)-1)])),**dict(zip(table_this_group_in.columns,table_this_group_in.iloc[min(ifile,len(dataarrays_group_in)-1)]))}[key]
                for key, value in extra_attributes.items():
                    attributes_dataarrays_out[ifile][key] = value
                index_keys = ['variable','source','time','space']
                index_out = []
                for key in index_keys:
                    index_out.append(attributes_dataarrays_out[ifile][key])
                dataarrays_out_already_available.append(tuple(index_out) in archive_out.lib_dataarrays.index)
                if mode in ['numpy_output_to_disk_in_chunks', 'numpy_output_to_disk_no_chunks']:
                    if (archive_out.file_pattern is None):
                        raise ValueError("I don't know how to write the data file to disk. Please set to file_pattern")
                    # Build the output filename from the archive's quoted
                    # file_pattern, substituting "key" fields by attributes.
                    filename_out = os.path.dirname(archive_out.path_pickle) + '/' + ''.join(np.array(list(
                        zip(archive_out.file_pattern.split('"')[::2],
                            [attributes_dataarrays_out[ifile][key] for key in archive_out.file_pattern.split('"')[1::2]] + [
                                '']))).ravel())
                    # index_out =
                    # if archive_out.lib_dataarrays.index[df['absolute_path'] == filename].tolist() == index_out
                    if (filename_out in archive_out.lib_dataarrays.absolute_path.unique()) and (not dataarrays_out_already_available[ifile]):
                        raise ValueError(
                            'filename ' + filename_out + ' already exists and not already managed/within the output archive. Consider revising the output file_pattern.')
                    filenames_out.append(filename_out)
                ifile += 1
            # NOTE(review): np.prod([]) == 1.0, so an empty output table
            # counts as "all already available" here — confirm intended.
            all_dataarrays_out_already_available = np.prod(dataarrays_out_already_available)
            if all_dataarrays_out_already_available and not force_recalculate:
                logging.info('All output data is already available in the output archive and force_recalculate is switched False. Skipping group "'+str(idx)+'"')
            else:
                if force_recalculate and all_dataarrays_out_already_available:
                    logging.info('all output dataarrays were available but force_recalulate is set True, so I force recaculation')
                if mode == 'xarray':
                    print('making a temporary dataarray copy to prevent data hanging around into memory afterwards')
                    dataarrays_group_in_copy = [dataarray.copy(deep=False) for dataarray in dataarrays_group_in]
                    temp_dataarrays = func(*dataarrays_wrapper(*tuple(dataarrays_group_in_copy)))
                    # temp_dataarrays = func(*dataarrays_wrapper(*tuple(dataarrays_group_in)))
                    for idataarray, dataarray in enumerate(temp_dataarrays):
                        for key, value in attributes_dataarrays_out[idataarray].items():
                            if key == 'variable':
                                dataarray.name = value
                            else:
                                dataarray.attrs[key] = value
                        archive_out.add_dataarray(dataarray)  # attributes_dataarrays_out[idataarray])
                    for idataarray in range(len(dataarrays_group_in_copy)):
                        dataarrays_group_in_copy[idataarray].close()
                    for itemp_dataarray in range(len(temp_dataarrays)):
                        temp_dataarrays[itemp_dataarray].close()
                elif mode in ['numpy_output_to_disk_in_chunks', 'numpy_output_to_disk_no_chunks']:
                    if mode == 'numpy_output_to_disk_in_chunks':
                        # Chunked path: the wrapper streams results to disk.
                        xarray_function_wrapper(func, dataarrays_wrapper(*tuple(dataarrays_group_in)),
                                                filenames_out=filenames_out, attributes=attributes_dataarrays_out, release=True,
                                                initialize_array=initialize_array, copy_coordinates=copy_coordinates, **kwargs)
                    elif mode == 'numpy_output_to_disk_no_chunks':
                        temp_dataarrays = xarray_function_wrapper(func, dataarrays_wrapper(*tuple(dataarrays_group_in)),
                                                                  **kwargs)
                        if type(temp_dataarrays) != tuple:
                            print(
                                'this is a workaround in case we get a single dataarray instead of tuple of dataarrays from the wrapper function. This needs revision')
                            idataarray = 0
                            for key, value in attributes_dataarrays_out[idataarray].items():
                                if key not in archive_out.not_dataarray_attributes:
                                    if type(value) == bool:
                                        # netCDF attributes cannot hold bools.
                                        temp_dataarrays.attrs[key] = int(value)
                                    else:
                                        temp_dataarrays.attrs[key] = value
                                if key == 'variable':
                                    temp_dataarrays.name = value
                            # import pdb;pdb.set_trace()
                            os.system('rm ' + filenames_out[idataarray])
                            if post_apply is not None:
                                post_apply(temp_dataarrays)
                            os.system('mkdir -p ' + os.path.dirname(filenames_out[idataarray]))
                            temp_dataarrays.to_netcdf(filenames_out[idataarray])
                            temp_dataarrays.close()
                        else:
                            for idataarray in range(len(temp_dataarrays)):
                                for key, value in attributes_dataarrays_out[idataarray].items():
                                    if key not in ['variable', 'absolute_path_for_reading', 'absolute_path_as_cache',
                                                   'absolute_path', 'path']:
                                        temp_dataarrays[idataarray].attrs[key] = value
                                    if key == 'variable':
                                        temp_dataarrays[idataarray].name = value
                            for idataarray in range(len(temp_dataarrays)):
                                if post_apply is not None:
                                    post_apply(temp_dataarrays[idataarray])
                                os.system('rm ' + filenames_out[idataarray])
                                os.system('mkdir -p ' + os.path.dirname(filenames_out[idataarray]))
                                temp_dataarrays[idataarray].to_netcdf(filenames_out[idataarray])
                                temp_dataarrays[idataarray].close()
                    # Register the freshly written files with the archive.
                    for ixr_out, filename_out in enumerate(filenames_out):
                        archive_out.add_dataarray(filename_out)
                else:
                    # NOTE(review): exception is constructed but never raised.
                    ValueError('mode ' + mode + ' not implemented')
                # Close and drop the input handles opened for this group.
                for idataarray in reversed(range(len(dataarrays_group_in))):
                    dataarrays_group_in[idataarray].close()
                    del dataarrays_group_in[idataarray]
    if update_pickle:
        archive_out.update(force_overwrite_pickle =True)
class collection (object):
    """A lightweight view over several archive objects.

    Exposes the union of the member archives' dataarray libraries and
    provides apply_func to run a function across all of them at once.
    """
    def __init__(self,archives,*args,**kwargs):
        # archives: list of archive-like objects exposing .lib_dataarrays
        # (a pandas DataFrame) and .dataarrays (a dict)
        self.archives = archives
    def get_lib_dataarrays(self):
        """Return the concatenated, index-sorted library tables of all member archives."""
        # pd.concat replaces the per-archive DataFrame.append loop:
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0
        frames = [archive.lib_dataarrays for archive in self.archives]
        if not frames:
            # no member archives: keep returning an empty table
            return pd.DataFrame()
        return pd.concat(frames).sort_index()
    def get_dataarrays(self):
        """Return one dict merging the dataarray mappings of all member archives.

        Later archives win on duplicate keys, matching the original merge order.
        """
        dataarrays = {}
        for archive in self.archives:
            dataarrays = {**dataarrays, **archive.dataarrays}
        return dataarrays
    def apply_func(
            self,
            func,
            archive_out = None,
            add_archive_out_to_collection=True,
            update_pickle=True,
            **kwargs
            ):
        """Run *func* over the combined library via apply_func_wrapper.

        archive_out may be a path string (a new archive is created there and
        returned), an archive object, or None. Unless
        add_archive_out_to_collection is False, the output archive is
        appended to this collection when not already a member.
        """
        if type(archive_out) is str:
            # a path was given: create a fresh archive at that location
            archive_out = archive(archive_out)
            write_mode = 'create_new_archive'
        elif archive_out is not None: # type is considered an archive object
            write_mode = 'add_to_external_archive'
        else:
            write_mode = 'add_to_current_archive'
        lib_dataarrays = self.get_lib_dataarrays()
        dataarrays = self.get_dataarrays()
        apply_func_wrapper(
            func,
            lib_dataarrays = lib_dataarrays,
            dataarrays = dataarrays,
            archive_out=archive_out,
            update_pickle=update_pickle,
            **kwargs
            )
        if add_archive_out_to_collection and (archive_out not in self.archives):
            self.archives.append(archive_out)
        if write_mode == 'create_new_archive':
            return archive_out
class archive (object):
def __init__(self,path=None,file_pattern='"variable"_"source"_"time"_"space".nc', *args,**kwargs):
self.lib_dataarrays = pd.DataFrame(index=empty_multiindex(['variable','source','time','space']),columns = ['path','absolute_path','available']).iloc[1:]
self.settings_keys = ['file_pattern','mode']
print('Creating generic functions to set attributes')
for key in self.settings_keys:
print('creating function self.set_'+key)
self.__dict__['set_'+key] = lambda value: self.__setattr__(key,value)
print('Loading default settings')
self.file_pattern = file_pattern
self.not_dataarray_attributes = ['variable', 'absolute_path', 'absolute_path_as_cache',
'absolute_path_for_reading', 'path', 'available']
self.mode = 'active'
self.dataarrays = {}
self.coordinates = {}
self.path_pickle = None
print('Loading datasets')
if path is not None:
self.load(path,*args,**kwargs)
    def copy(self):
        # NOTE(review): 'self.archive' is not an attribute of this class and no
        # 'apply' method is defined here, so calling this raises AttributeError.
        # Presumably the intent was to duplicate every stored dataarray, e.g.
        # self.dataarrays_apply(lambda x: x.copy()) -- confirm before relying on it.
        return self.archive.apply(lambda x: x.copy())
    def sel(self,sel):
        # NOTE(review): 'self.archive' does not exist on this class and no
        # 'apply' method is defined, so calling this raises AttributeError.
        # Likely intended: self.dataarrays_apply(lambda x: x.sel(sel)) -- confirm.
        return self.archive.apply(lambda x: x.sel(sel))
def sel_lib(self,sel):
lib_dataarrays_out = self.lib_dataarrays_out[sel]
archive_out = archive()
for index,lib_dataarray in lib_dataarays_out.iterrows():
archive_out.add_dataarray(self.dataarrays[index])
def remove(self,query=None,update_pickle = True,dataarrays=True,records=False):
if (not dataarrays) and records:
raise ValueError('the dataarrays on the disk is maintained by this archive.'
'Not removing them while removing them from the database will lead to orphaned files. Aborting... ')
if query is not None:
read_lib_dataarrays = self.lib_dataarrays.query(query).copy()
else:
read_lib_dataarrays = self.lib_dataarrays.copy()
for idx,row in read_lib_dataarrays.iterrows():
if dataarrays:
CMD ='rm '+row.absolute_path
os.system(CMD)
# if 'available' not in self.lib_dataarrays.columns:
# self.lib_dataarrays['available'] = ""
# self.lib_dataarrays['available'] = True
if records:
self.lib_dataarrays = self.lib_dataarrays.drop(idx)
else:
self.lib_dataarrays.loc[idx]['available'] = False
if update_pickle:
self.update(force_overwrite_pickle =True)
def remove_by_index(self,index,delete_on_disk=False,update_pickle=True):
self.dataarrays[index].close()
del self.dataarrays[index]
if delete_on_disk:
os.system('rm '+self.lib_dataarrays.loc[index].absolute_path)
print(self.lib_dataarrays.loc[index].absolute_path_as_cache)
if (self.lib_dataarrays.loc[index].absolute_path_as_cache is not None):
print(np.isnan(self.lib_dataarrays.loc[index].absolute_path_as_cache))
print(np.isnan(self.lib_dataarrays.loc[index].absolute_path_as_cache) == True)
if (self.lib_dataarrays.loc[index].absolute_path_as_cache is not None) and (np.isnan(self.lib_dataarrays.loc[index].absolute_path_as_cache == False)) :
CMD = 'rm '+self.lib_dataarrays.loc[index].absolute_path_as_cache
print('removing cached file:',CMD)
self.lib_dataarrays.drop(index=index,inplace=True)
if update_pickle:
self.update(force_overwrite_pickle=True)
def close(self,delete_archive=False):
lib_dataarrays_temp = self.lib_dataarrays.copy()
for index,columns in lib_dataarrays_temp.iterrows():
self.remove_by_index(index=index,delete_on_disk=(delete_archive==True))
del lib_dataarrays_temp
if delete_archive:
os.system('rm '+self.path_pickle)
def add_from_dataset(self,Dataset_or_filepath,variables=None,**kwargs):
if type(Dataset_or_filepath).__name__ == 'str':
Dataset = xr.open_dataset(Dataset_or_filepath)
kwargs['absolute_path'] = os.path.abspath(Dataset_or_filepath)
kwargs['absolute_for_reading'] = kwargs['absolute_path']
else:
Dataset = Dataset_or_filepath
if variables is None:
variables= []#Dataset.variables
for variable in Dataset.variables:
print(Dataset.dims)
if variable not in Dataset.dims:
variables.append(variable)
for variable in variables:
self.add_dataarray(Dataset[variable],**kwargs)
Dataset.close()
    def add_dataarray(
            self,
            DataArray_or_filepath,
            skip_unavailable= False,
            release_dataarray_pointer=False,
            cache_to_tempdir=False,
            cache_to_ram=False,
            reset_space=False,
            **kwargs,
            ):
        """Register a DataArray, or a netCDF file on disk, in the archive.

        If a filepath is given the file is opened (optionally after copying it
        to a temporary directory and/or pre-reading it through the OS cache).
        The library index entries ('variable', 'source', 'time', 'space') are
        collected from the DataArray attributes and **kwargs; missing
        'time'/'space' labels are derived from the coordinates.  The array is
        stored in self.dataarrays and a bookkeeping row is added to
        self.lib_dataarrays.

        Parameters
        ----------
        DataArray_or_filepath : xarray.DataArray or str
        skip_unavailable : bool
            Unused in the active code path (only referenced by the
            commented-out error handling below).
        release_dataarray_pointer : bool
            Close the dataarray (and delete its temporary cache copy) right
            after registration.
        cache_to_tempdir : bool, None or str
            If truthy or None, read from a copy placed in a temporary
            directory; a str value selects that directory.
        cache_to_ram : bool
            `cat` the file to /dev/null so the OS page cache holds it.
        reset_space : bool
            Re-derive the 'space' label even if one was supplied.
        **kwargs
            Index entries and extra library columns (e.g. 'ncvariable',
            'source', ...).
        """
        #DataArray = None
        if type(DataArray_or_filepath).__name__ == 'str':
            filepath = DataArray_or_filepath
            if (cache_to_tempdir is None) or cache_to_tempdir:
                if type(cache_to_tempdir) is not str:
                    cache_to_tempdir = tempfile.gettempdir()
                #filepath_as_cache = cache_to_tempdir+'/'+os.path.basename(filepath)
                filepath_as_cache = tempfile.mktemp(prefix=os.path.basename(filepath)[:-3]+'_',suffix='.nc',dir=cache_to_tempdir)
                CMD='cp '+filepath+' '+filepath_as_cache
                print('caching to temporary file: ',CMD)
                os.system(CMD)
                filepath_for_reading = filepath_as_cache
            else:
                filepath_as_cache = None
                filepath_for_reading = filepath
            if cache_to_ram:
                CMD='cat '+filepath_for_reading+' > /dev/null'
                print('caching to ram:',CMD)
                os.system(CMD)
            # ncvariable: variable as seen on disk
            # variable (= DataArray.name): variable as considered in the library
            ncvariable = None # netcdf variable as seen on disk, not necessarily in the library
            if ('ncvariable' not in kwargs.keys()) or ((type(kwargs['ncvariable']).__name__ == 'float') and np.isnan(kwargs['ncvariable'])):
                if 'variable' in kwargs.keys():
                    ncvariable = kwargs['variable']
                print('Opening file:',filepath_for_reading, '(original file: '+filepath+')')
                if ncvariable is not None:
                    # print('reading',filepath,ncvariable)
                    # try:
                    #     print('trying with open_dataarray first because of obscure performance decreases with xr.open_dataset')
                    #     DataArray = xr.open_dataarray(filepath)
                    #     if DataArray.name != kwargs['ncvariable']:
                    #         DataArray.close()
                    #         del DataArray
                    #         print('first variable is not the correct ncvariable. Trying it with xr.open_dataset...')
                    #         raise ValueError('first variable is not the correct ncvariable')
                    # except:
                    try:
                        ds = xr.open_dataset(filepath_for_reading)
                        DataArray = ds[ncvariable]
                        ds.close()
                        del ds
                        #kwargs['ncvariable'] = ncvariable
                    except:
                        # fall back: single-variable file without the expected name
                        DataArray = xr.open_dataarray(filepath_for_reading)
                else:
                    DataArray = xr.open_dataarray(filepath_for_reading)
                    kwargs['ncvariable'] = DataArray.name
            else:
                ds = xr.open_dataset(filepath_for_reading)
                DataArray = ds[kwargs['ncvariable']]
                ds.close()
                del ds
        #    except:
        #        print ( "Warning! Error while reading from disk: ", sys.exc_info()[0])
        #        if not skip_unavailable:
        #            raise ('...stopping' )
        #        else:
        #            print('...skipping' )
            kwargs['absolute_path'] = os.path.abspath(filepath)
            kwargs['absolute_path_for_reading'] = os.path.abspath(filepath_for_reading)
            kwargs['absolute_path_as_cache'] = (None if filepath_as_cache is None else os.path.abspath(filepath_as_cache))
            kwargs['available'] = True
        else:
            DataArray = DataArray_or_filepath
            #kwargs['absolute_path'] = None
        # for key in self.lib_dataarrays.index.names:
        #     if key not in (list(DataArray.attrs.keys())+['variable']):
        #         raise ValueError(key+' needs to be in DataArray.attrs')
        # if DataArray is None:
        #     print('Skipping ',DataArray_or_filepath,kwargs)
        # else:
        if not DataArray is None:
            # split metadata into library index entries vs. extra columns
            dict_index = {}
            dict_columns = {}
            if DataArray.name is None:
                raise ValueError ('input dataarray should have a name')
            if 'variable' not in kwargs.keys():
                kwargs['variable'] = DataArray.name
            DataArray.name = kwargs['variable']
            dict_index['variable'] = DataArray.name
            for key,value in DataArray.attrs.items():
                if key in self.lib_dataarrays.index.names:
                    dict_index[key] = value
                else:
                    dict_columns[key] = value
            # for key in self.lib_dataarrays.index.names:
            #     if key in kwargs.keys():
            #         dict_index[key] = kwargs[key]
            # kwargs take precedence over DataArray attributes
            for key,value in kwargs.items():
                if key in self.lib_dataarrays.index.names:
                    dict_index[key] = kwargs[key]
                else:
                    dict_columns[key] = kwargs[key]
            # derive a 'time' index label (monthly_/daily_ prefix) when none was supplied
            if ('time' not in dict_index.keys()) or (dict_index['time'] is None) or (type(dict_index['time']).__name__ == 'float') and ( np.isnan(dict_index['time']).any()):
                print('Guessing time coordinate from DataArray')
                # is month type
                #monthly spacing
                if np.apply_along_axis(lambda y: np.sum((y[1:] - y[:-1] != 1),0),0,np.vectorize(lambda x: int(x[:4])*12+int(x[5:7]))(DataArray.time.values.astype('str'))).item() == 0:
                    dict_index['time'] = \
                        'monthly_'+str(DataArray.time[0].values)[:7]+'_'+str(DataArray.time[-1].values)[:7]
                # also monthly
                elif (not np.any( ~(np.vectorize(lambda x: x[8:])(DataArray.time.values.astype('str')) == '01T00:00:00.000000000'))):
                    dict_index['time'] = \
                        'monthly_'+str(DataArray.time[0].values)[:7]+'_'+str(DataArray.time[-1].values)[:7]
                elif not np.any((DataArray.time[2:-1].values - DataArray.time[1:-2].values) != np.array(86400000000000, dtype='timedelta64[ns]')):
                    #daily
                    dict_index['time'] = 'daily_'+np.datetime_as_string(DataArray.time[0].values,unit='D')+'_'+np.datetime_as_string(DataArray.time[-1].values,unit='D')
                elif not np.any((DataArray.time[2:-1].values - DataArray.time[1:-2].values) != dt.timedelta(days=1)):
                    dict_index['time'] = \
                        'daily_'+str(DataArray.time[0].values)[:10]+'_'+str(DataArray.time[-1].values)[:10]
                else:
                    raise ValueError('time dimension not implemented')
            #DataArray.attrs['time'] = dict_index['time']
            renamings = {'lat':'latitude','lon':'longitude'}
            for key,value in renamings.items():
                if key in DataArray.dims:
                    DataArray = DataArray.rename({key:value})
            # filter coordinates that are listed in the library index (these are not treated under space but separately, eg., 'time').
            space_coordinates = list(DataArray.dims)
            for key in self.lib_dataarrays.index.names:
                if key in space_coordinates:
                    space_coordinates.remove(key)
            # derive the 'space' label as start,stop,step per spatial coordinate
            # ('irregular' when the spacing is not constant)
            if ('space' not in dict_index.keys()) or (dict_index['space'] is None) or reset_space:
                spacing = {}
                for coordinate in space_coordinates:
                    spacing_temp = (DataArray[coordinate].values[1] - DataArray[coordinate].values[0])
                    if not np.any(DataArray[coordinate][1:].values != (DataArray[coordinate].values[:-1] + spacing_temp)):
                        spacing[coordinate] = str(DataArray[coordinate][0].values)+','+str(DataArray[coordinate][-1].values)+','+str(spacing_temp)
                    else:
                        spacing[coordinate] = 'irregular'
                dict_index_space = [key+':'+str(value) for key,value in spacing.items()]
                dict_index_space ='_'.join(dict_index_space)
                dict_index['space'] = dict_index_space
            #DataArray.attrs['space'] = dict_index_space
            # remember the coordinates of the first dataarray seen per index entry
            for key,index in dict_index.items():
                if key not in self.coordinates:
                    if key not in ['variable','source','space',]:
                        self.coordinates[key] = DataArray[key]
                    if key == 'space':
                        self.coordinates[key] = []
                        for coordinate in space_coordinates:
                            self.coordinates[key].append(DataArray[coordinate])
            for key in self.lib_dataarrays.index.names:
                if (key not in dict_index.keys()) or (dict_index[key] is None):
                    raise ValueError ('Could not track key "'+key+'" that is required for the archive index.')
            index = tuple([dict_index[key] for key in self.lib_dataarrays.index.names])
            self.dataarrays[index] = DataArray
            DataArray.close()
            del DataArray
            # NOTE(review): in 'passive' mode this looks up lib_dataarrays.loc[index]
            # before the row is created further below -- confirm the row always
            # pre-exists in that mode, otherwise this raises KeyError
            if (self.mode == 'passive') and (not self.lib_dataarrays.loc[index]['absolute_path'].isnull().any() ):
                self.dataarrays[index].close()
            for key,value in dict_index.items():
                if key == 'variable':
                    self.dataarrays[index].name = value
                else:
                    self.dataarrays[index].attrs[key] = value
            # make sure the bookkeeping columns exist before writing the row
            if 'absolute_path' not in self.lib_dataarrays.columns:
                self.lib_dataarrays['absolute_path'] = None
            if 'absolute_path_for_reading' not in self.lib_dataarrays.columns:
                self.lib_dataarrays['absolute_path_for_reading'] = None
            if 'absolute_path_as_cache' not in self.lib_dataarrays.columns:
                self.lib_dataarrays['absolute_path_as_cache'] = None
            # if 'dataarray_pointer' not in self.lib_dataarrays.columns:
            #     self.lib_dataarrays['dataarray_pointer'] = None
            if 'path' not in self.lib_dataarrays.columns:
                self.lib_dataarrays['path'] = None
            if 'available' not in self.lib_dataarrays.columns:
                self.lib_dataarrays['available'] = None
            self.lib_dataarrays.loc[index] = None
            for key,value in dict_columns.items():
                if key not in self.lib_dataarrays.columns:
                    self.lib_dataarrays[key] = ''
                if index not in self.lib_dataarrays.index:
                    self.lib_dataarrays.loc[index] = ''
                self.lib_dataarrays[key].loc[index] = value
                if key not in [
                        #'dataarray_pointer',
                        'absolute_path_as_cache',
                        'absolute_path_for_reading',
                        'absolute_path',
                        'path',
                        'available']:
                    self.dataarrays[index].attrs[key] = value
            self.lib_dataarrays.sort_index(inplace=True)
            if release_dataarray_pointer:
                print('closing',index)
                self.dataarrays[index].close()
                if cache_to_tempdir:
                    CMD='rm '+filepath_as_cache
                    print('Released pointer, so removing cached file: ',CMD)
                    os.system(CMD)
#            del self.dataarrays[index]
            #self.lib_dataarrays.loc[index,'dataarray_pointer'] = None
#         else:
#             self.lib_dataarrays.loc[index,'dataarray_pointer'] = self.dataarrays[index]
        # if self.path_pickle:# and (type(self.lib_dataarrays.loc[index].absolute_path) != str):
        #     self.dump()
        # if attrs is not None:
        #     for key,value in attrs.items():
        #         DataArray
        # for key,value in kwargs.items():
        #     if key not in self.lib_dataarrays.columns:
        #         self.lib_dataarrays[key] = ''
        #     self.lib_dataarrays.loc[index][key] = value
    # def intersect_times(self,DataArray_input):
def dataarrays_apply(self,function,query=None,inplace=False, attrs=None):
# if query is not None:
# lib_dataarrays_out = self.lib_dataarrays.query(query,engine='python').copy()
# elsepath
# lib_dataarrays_out = self.lib_dataarrays.copy()
archive_out = archive()
for index,columns in self.lib_dataarrays.iterrows():
dataarray_out_temp = function(self.dataarrays[index])
for key,value in self.dataarrays[index].attrs.items():
dataarray_out_temp.attrs[key] = value
if attrs is not None:
for key,value in attrs.items():
dataarray_out_temp.attrs[key] = value
archive_out.add_dataarray(dataarray_out_temp)
return archive_out
    def apply_virtual(self,func,groupby=None,apply_merge=[],apply_merge_out=[],archive_out = None, inherit_attributes = True,extra_attributes={}, **kwargs):
        """Apply *func* to groups of dataarrays, registering results in memory.

        The library is split by *groupby* (defaults to all index names minus
        the apply_merge columns). For each group whose rows cover every
        combination described by *apply_merge*, the matching dataarrays are
        passed to ``func`` as a tuple; the returned dataarrays are attributed
        according to *apply_merge_out* (plus inherited and *extra_attributes*)
        and added directly to the target archive -- unlike apply_func_old, no
        netCDF files are written here.

        apply_merge / apply_merge_out accept a DataFrame, a list of dicts, or
        a dict of lists (expanded to the cartesian product).

        NOTE(review): the mutable defaults ([] / {}) are shared across calls;
        safe only while they are never mutated in place.
        """
        if (archive_out is None) and (self.path_pickle is None):
            raise ValueError('Please specify how the data should be written '
                             'out. In case you want to create a new archive returned by '
                             'this procedure. Specify path_archive_out="/path/to/dir". In case '
                             'you want to merge it to the current archive, you need to '
                             'dump the current archive so that the self.path_pickle is '
                             'set.')
        temp_df = self.lib_dataarrays.reset_index()
        # normalize apply_merge to a DataFrame of input combinations
        if type(apply_merge) == pd.core.frame.DataFrame:
            apply_merge_df = apply_merge
        elif type(apply_merge) == list:
            apply_merge_df = pd.DataFrame(apply_merge)
        elif type(apply_merge) == dict:
            apply_merge_df = pd.DataFrame.from_dict([dict(zip(apply_merge.keys(),v)) for v in itertools.product(*apply_merge.values())])
        apply_merge_index = pd.MultiIndex.from_frame(apply_merge_df)
        # normalize apply_merge_out likewise
        if type(apply_merge_out) == pd.core.frame.DataFrame:
            apply_merge_out_df = apply_merge_out
        elif type(apply_merge_out) == list:
            apply_merge_out_df = pd.DataFrame(apply_merge_out)
        elif type(apply_merge_out) == dict:
            apply_merge_out_df = pd.DataFrame.from_dict([dict(zip(apply_merge_out.keys(),v)) for v in itertools.product(*apply_merge_out.values())])
        if len(apply_merge_out_df) == 0:
            print('creating automatic single output table')
            apply_merge_out_dict = {}
            for column in apply_merge_df.columns:
                apply_merge_out_dict[column] = ['from__'+'__'.join(apply_merge_df[column].unique())]
            apply_merge_out_df = pd.DataFrame.from_dict([dict(zip(apply_merge_out_dict.keys(),v)) for v in itertools.product(*apply_merge_out_dict.values())])
        apply_merge_out_index = pd.MultiIndex.from_frame(apply_merge_out_df)
        #apply_merge_pandas = pd.DataFrame([{'variable':'msshf_0001'},{'variable':'mslhf_0001','source':'cds_era5'}])
        #self.lib_dataarrays.index.names
        if groupby is None:
            groupby = list(self.lib_dataarrays.index.names)
            for key in apply_merge_df.columns:
                groupby = list(filter((key).__ne__, groupby))
        if archive_out is not None:
            write_mode = 'add_to_external_archive'
        else:
            write_mode = 'add_to_current_archive'
        # NOTE(review): write_mode is never set to 'create_new_archive' above,
        # so this branch is unreachable (it also references an undefined
        # local 'path_archive_out')
        if write_mode == 'create_new_archive':
            archive_out = archive()
            archive_out.dump(path_archive_out)
        elif write_mode == 'add_to_current_archive':
            archive_out = self
        xr_outputs = {}
        for index,group in temp_df.groupby(groupby):
            # single groupby key yields a scalar index; normalize to a tuple
            if type(index) is not tuple:
                index_multi = (index,)
            else:
                index_multi = index
            group_columns = group.set_index(list(apply_merge_df.columns))
            if type(group_columns.index) == pd.core.indexes.base.Index:
                MultiIndex_from_Single_Index = lambda index: pd.MultiIndex.from_tuples([x.split()[::-1] for x in index])
                group_columns.index = MultiIndex_from_Single_Index(group_columns.index)
            all_variables_available_in_this_group = True
            if len(apply_merge_index.intersection(group_columns.index)) != len(apply_merge_index):
                all_variables_available_in_this_group = False
            # if not group_columns.loc[apply_merge_index][groupby].isnull().any().any():
            #     all_variables_available_in_this_group = False
            if all_variables_available_in_this_group:
                # collect the input dataarrays in apply_merge order
                dataarrays_for_func = []
                for index_group,columns in group_columns.loc[apply_merge_index].iterrows():
                    index_array_dict = {**dict(columns),**dict(zip(apply_merge_index.names,index_group))}#**dict(zip(apply_merge_index.names,index_group))} **dict(zip(groupby,index)),
                    index_array_tuple_ordered = tuple([index_array_dict[key] for key in self.lib_dataarrays.index.names])
                    if (self.mode == 'passive') and (not self.lib_dataarrays.loc[index]['absolute_path'].isnull().any() ):
                        print('to be implemented')
                        import pdb; pdb.set_trace()
                    else:
                        dataarrays_for_func.append(self.dataarrays[index_array_tuple_ordered])
                filenames_out = []
                attributes = []
                ifile = 0
                # build the attribute dict for each expected output
                for index_group,group_columns in apply_merge_out_df.iterrows():
                    index_array_out_dict = {**dict(zip(groupby,index_multi)),**dict(zip(apply_merge_out_df.columns,group_columns))}
                    attributes.append(index_array_out_dict)
                    index_array_out_tuple_ordered = tuple([index_array_out_dict[key] for key in self.lib_dataarrays.index.names])
                    if inherit_attributes:
                        for key,value in dataarrays_for_func[min(len(dataarrays_for_func),ifile)].attrs.items():
                            if (key not in self.lib_dataarrays.index.names) and \
                                ( inherit_attributes or ((type(inherit_attributes) is list) and (key in inherit_attributes))) and \
                                (key not in attributes[-1].keys()):
                                attributes[-1][key] = value
                    for key,value in extra_attributes.items():
                        attributes[-1][key] = value
                    # if (archive_out.file_pattern is None):
                    #     raise ValueError("I don't know how to write the data file to disk. Please set to file_pattern")
                    #filenames_out.append(os.path.dirname(archive_out.path_pickle)+'/'+''.join(np.array(list(zip(archive_out.file_pattern.split('"')[::2],[attributes[-1][key] for key in archive_out.file_pattern.split('"')[1::2]]+['']))).ravel()))
                    ifile +=1
                # ifile = 0
                # for index_group,group_columns in apply_merge_out_df.iterrows():
                #     index_array_out_tuple_ordered = tuple([attributes[ifile][key] for key in archive_out.lib_dataarrays.index.names])
                #     if index_array_out_tuple_ordered in archive_out.dataarrays.keys():
                #         print('forcing to overwrite data for ',index_array_out_tuple_ordered,)
                #         self.remove_by_index(index_array_out_tuple_ordered,delete_on_disk=True)
                #     ifile +=1
                # for filename_out in filenames_out:
                #     os.system('mkdir -p '+os.path.dirname(filename_out))
                temp_dataarrays = func(tuple(dataarrays_for_func))
                # if type(temp_dataarrays) != tuple:
                #     print('this is a workaround in case we get a single dataarray instead of tuple of dataarrays from the wrapper function. This needs revision')
                #     idataarray = 0
                #     for key,value in attributes[idataarray].items():
                #         if key not in ['variable','absolute_path','path']:
                #             temp_dataarrays.attrs[key] = value
                #         if key == 'variable':
                #             temp_dataarrays.name = value
                #     #import pdb;pdb.set_trace()
                #     temp_dataarrays.to_netcdf(filenames_out[idataarray])
                #     temp_dataarrays.close()
                # else:
                #     for idataarray in range(len(temp_dataarrays)):
                #         for key,value in attributes[idataarray].items():
                #             if key not in ['variable','absolute_path','path']:
                #                 temp_dataarrays[idataarray].attrs[key] = value
                #             if key == 'variable':
                #                 temp_dataarrays[idataarray].name = value
                #     for idataarray in range(len(temp_dataarrays)):
                #         temp_dataarrays[idataarray].to_netcdf(filenames_out[idataarray])
                #         temp_dataarrays[idataarray].close()
                # stamp the prepared attributes onto the outputs and register them
                for idataarray,dataarray in enumerate(temp_dataarrays):
                    for key,value in attributes[idataarray].items():
                        dataarray.attrs[key] = value
                archive_out.add_dataarray(dataarray)
        if write_mode == 'create_new_archive':
            return archive_out
        # if type(group_columns_out.index) == pd.core.indexes.base.Index:
        #     MultiIndex_from_Single_Index = lambda index: pd.MultiIndex.from_tuples([x.split()[::-1] for x in index])
        #     group_columns_out.index = MultiIndex_from_Single_Index(group_columns_out.index)
        #
        # # not_all_arrays_available_in_this_group = group_columns.loc[apply_merge_out_index][groupby].isnull().any().any()
        # # dataarrays_for_func = []
        #
        # import pdb;pdb.set_trace()
        # for index_group,columns in group_columns_out.loc[apply_merge_out_index].iterrows():
        #     index_array_dict = {**dict(columns),**dict(zip(apply_merge_out_index.names,index_group))}#**dict(zip(apply_merge_index.names,index_group))} **dict(zip(groupby,index)),
        #     index_array_tuple_ordered = tuple([index_array_dict[key] for key in self.lib_dataarrays.index.names])
        #     # dataarrays_for_func.append(self.dataarrays[index_array_tuple_ordered])
        #
        #group_columns.loc[apply_merge_index]
        # temp_df.groupby(self.lib_dataarrays.index.names
    # if kwargs.keys() != 1:
    #     raise ValueError('length different from 1 is not allowed')
    def apply_virtual2(self,func,divide_into_groups_extra=None, apply_groups_in=[],apply_groups_out=[], archive_out=None,inherit_attributes=True,extra_attributes={},**kwargs ):
        """Work-in-progress successor to apply_virtual (incomplete).

        NOTE(review): this method cannot run as written --
        * 'apply_groups' below is undefined (NameError); both halves of the
          condition test the same name, presumably apply_groups_in and
          apply_groups_out were meant;
        * the list defaults for apply_groups_in/apply_groups_out have no
          .keys() method;
        * the default divide_into_groups_extra=None is iterated (TypeError);
        * 'pd.dataframe' should be 'pd.DataFrame';
        * the body ends without calling *func* or producing any output.
        Confirm intent before fixing or removing.
        """
        divide_into_groups = []
        for name in self.lib_dataarrays.index.names:
            if ((name not in apply_groups.keys()) and (name not in apply_groups.keys())):
                divide_into_groups.append(name)
        for name in divide_into_groups_extra:
            if name not in divide_into_groups:
                divide_into_groups.append(name)
        for index,group in self.lib_dataarrays.reset_index().groupby(divide_into_groups):
            # normalize a scalar groupby key to a 1-tuple
            def allways_multi_index (idx):
                if type(idx) is not tuple:
                    return (idx,)
                else:
                    return idx
            multi_index = allways_multi_index(index)
            apply_groups_columns = []
            for key in apply_groups_in.keys():
                apply_groups_columns.append(key)
            for key in apply_groups_out.keys():
                if key not in apply_groups_columns:
                    apply_groups_columns.append(key)
            apply_this_group_in = pd.dataframe(columns=apply_groups_columns)
            # normalize DataFrame / list-of-dicts / dict-of-lists input
            def parse_to_dataframe(list_or_dict_or_dataframe):
                if type(list_or_dict_or_dataframe) == pd.core.frame.DataFrame:
                    return list_or_dict_or_dataframe
                elif type(list_or_dict_or_dataframe) == list:
                    return pd.DataFrame(list_or_dict_or_dataframe)
                elif type(list_or_dict_or_dataframe) == dict:
                    return pd.DataFrame.from_dict([dict(zip(list_or_dict_or_dataframe.keys(),v)) for v in itertools.product(*list_or_dict_or_dataframe.values())])
            apply_groups_in_df = parse_to_dataframe(apply_groups_in)
            apply_groups_out_df = parse_to_dataframe(apply_groups_out)
            # import pdb;pdb.set_trace()
            # apply_merge_index = pd.MultiIndex.from_frame(apply_merge_df)
            # if type(apply_groups_out) == pd.core.frame.DataFrame:
            #     apply_groups_out_df = apply_groups_out
            # elif type(apply_groups_out) == list:
            #     apply_groups_out_df = pd.DataFrame(apply_groups_out)
            # elif type(apply_groups_out) == dict:
            # apply_groups_in_out_pre =
        # if archive_out is not None:
        #     write_mode = 'add_to_external_archive'
        # else:
        #     write_mode = 'add_to_current_archive'
        # apply_groups_index
    #def cdo(self,cdostring,):
def apply_func(self,
func,
#lib_dataarrays = self.lib_dataarrays
archive_out = None,
update_pickle = True,
force_recalculate=False,
*args,
**kwargs):
#apply_groups_in = {'variable':['aridity'],'source':[None]}
#apply_groups_out={'variable':['aridity'],'source':[lambda labels: labels[0].replace('historical','rcp45'),lambda labels: labels[0].replace('historical','rcp85')]}
#archive_out = pcd.archive()
#mode='xarray'
if (archive_out is None) and (self.path_pickle is None) and (mode != 'xarray'):
raise ValueError('Please specify how the data should be written '
'out. In case you want to create a new archive returned by '
'this procedure. Specify path_archive_out="/path/to/dir". In case '
'you want to merge it to the current archive, you need to '
'dump the current archive so that the self.path_pickle is '
'set.')
if archive_out is not None:
write_mode = 'add_to_external_archive'
else:
write_mode = 'add_to_current_archive'
# if write_mode == 'create_new_archive':
# archive_out = archive()
# archive_out.dump(path_archive_out)
#el
if write_mode == 'add_to_current_archive':
archive_out = self
apply_func_wrapper(
func,
lib_dataarrays = self.lib_dataarrays,
dataarrays = self.dataarrays,
archive_out = archive_out,
update_pickle=update_pickle,
force_recalculate=force_recalculate,
**kwargs,
)
if write_mode == 'create_new_archive':
return archive_out
    def apply_func_old(self,func, xarray_function_wrapper=apply_func,dataarrays_wrapper = lambda *x: (*x,),groupby=None,apply_merge=[],apply_merge_out=[],archive_out = None,keep_in_memory_during_processing = False, inherit_attributes = False,extra_attributes={}, **kwargs):
        """Apply *func* to groups of dataarrays and write results as netCDF files.

        Like apply_virtual, but each output is written to disk (filename built
        from archive_out.file_pattern next to archive_out.path_pickle) and then
        re-registered from the file via add_dataarray.  With
        keep_in_memory_during_processing=False the writing is delegated to
        *xarray_function_wrapper*; otherwise the outputs are attributed and
        written here.

        apply_merge / apply_merge_out accept a DataFrame, a list of dicts, or
        a dict of lists (expanded to the cartesian product).

        NOTE(review): the mutable defaults ([] / {}) are shared across calls;
        safe only while they are never mutated in place.
        """
        if (archive_out is None) and (self.path_pickle is None):
            raise ValueError('Please specify how the data should be written '
                    'out. In case you want to create a new archive returned by '
                    'this procedure. Specify path_archive_out="/path/to/dir". In case '
                    'you want to merge it to the current archive, you need to '
                    'dump the current archive so that the self.path_pickle is '
                    'set.')
        temp_df = self.lib_dataarrays.reset_index()
        # normalize apply_merge to a DataFrame of input combinations
        if type(apply_merge) == pd.core.frame.DataFrame:
            apply_merge_df = apply_merge
        elif type(apply_merge) == list:
            apply_merge_df = pd.DataFrame(apply_merge)
        elif type(apply_merge) == dict:
            apply_merge_df = pd.DataFrame.from_dict([dict(zip(apply_merge.keys(),v)) for v in itertools.product(*apply_merge.values())])
        apply_merge_index = pd.MultiIndex.from_frame(apply_merge_df)
        # normalize apply_merge_out likewise
        if type(apply_merge_out) == pd.core.frame.DataFrame:
            apply_merge_out_df = apply_merge_out
        elif type(apply_merge_out) == list:
            apply_merge_out_df = pd.DataFrame(apply_merge_out)
        elif type(apply_merge_out) == dict:
            apply_merge_out_df = pd.DataFrame.from_dict([dict(zip(apply_merge_out.keys(),v)) for v in itertools.product(*apply_merge_out.values())])
        if len(apply_merge_out_df) == 0:
            print('creating automatic single output table')
            apply_merge_out_dict = {}
            for column in apply_merge_df.columns:
                apply_merge_out_dict[column] = ['from__'+'__'.join(apply_merge_df[column].unique())]
            apply_merge_out_df = pd.DataFrame.from_dict([dict(zip(apply_merge_out_dict.keys(),v)) for v in itertools.product(*apply_merge_out_dict.values())])
        apply_merge_out_index = pd.MultiIndex.from_frame(apply_merge_out_df)
        #apply_merge_pandas = pd.DataFrame([{'variable':'msshf_0001'},{'variable':'mslhf_0001','source':'cds_era5'}])
        #self.lib_dataarrays.index.names
        if groupby is None:
            groupby = list(self.lib_dataarrays.index.names)
            for key in apply_merge_df.columns:
                groupby = list(filter((key).__ne__, groupby))
        if archive_out is not None:
            write_mode = 'add_to_external_archive'
        else:
            write_mode = 'add_to_current_archive'
        # NOTE(review): write_mode is never set to 'create_new_archive' above,
        # so this branch is unreachable (it also references an undefined
        # local 'path_archive_out')
        if write_mode == 'create_new_archive':
            archive_out = archive()
            archive_out.dump(path_archive_out)
        elif write_mode == 'add_to_current_archive':
            archive_out = self
        xr_outputs = {}
        for index,group in temp_df.groupby(groupby):
            group_columns = group.set_index(list(apply_merge_df.columns))
            if type(group_columns.index) == pd.core.indexes.base.Index:
                MultiIndex_from_Single_Index = lambda index: pd.MultiIndex.from_tuples([x.split()[::-1] for x in index])
                group_columns.index = MultiIndex_from_Single_Index(group_columns.index)
            all_variables_available_in_this_group = True
            if len(apply_merge_index.intersection(group_columns.index)) != len(apply_merge_index):
                all_variables_available_in_this_group = False
            # if not group_columns.loc[apply_merge_index][groupby].isnull().any().any():
            #     all_variables_available_in_this_group = False
            # import pdb; pdb.set_trace()
            if all_variables_available_in_this_group:
                # collect the input dataarrays in apply_merge order
                dataarrays_for_func = []
                for index_group,columns in group_columns.loc[apply_merge_index].iterrows():
                    index_array_dict = {**dict(columns),**dict(zip(apply_merge_index.names,index_group))}#**dict(zip(apply_merge_index.names,index_group))} **dict(zip(groupby,index)),
                    index_array_tuple_ordered = tuple([index_array_dict[key] for key in self.lib_dataarrays.index.names])
                    if (self.mode == 'passive') and (not self.lib_dataarrays.loc[index]['absolute_path'].isnull().any() ):
                        print('to be implemented')
                        import pdb; pdb.set_trace()
                    else:
                        dataarrays_for_func.append(self.dataarrays[index_array_tuple_ordered])
                filenames_out = []
                attributes = []
                ifile = 0
                # build attributes and output filename for each expected output
                for index_group,group_columns in apply_merge_out_df.iterrows():
                    index_array_out_dict = {**dict(zip(groupby,index)),**dict(zip(apply_merge_out_df.columns,group_columns))}
                    attributes.append(index_array_out_dict)
                    index_array_out_tuple_ordered = tuple([index_array_out_dict[key] for key in self.lib_dataarrays.index.names])
                    if inherit_attributes:
                        for key,value in dataarrays_for_func[min(len(dataarrays_for_func),ifile)].attrs.items():
                            if (key not in self.lib_dataarrays.index.names) and \
                                ( inherit_attributes or ((type(inherit_attributes) is list) and (key in inherit_attributes))) and \
                                (key not in attributes[-1].keys()):
                                attributes[-1][key] = value
                    for key,value in extra_attributes.items():
                        attributes[-1][key] = value
                    if (archive_out.file_pattern is None):
                        raise ValueError("I don't know how to write the data file to disk. Please set to file_pattern")
                    filenames_out.append(os.path.dirname(archive_out.path_pickle)+'/'+''.join(np.array(list(zip(archive_out.file_pattern.split('"')[::2],[attributes[-1][key] for key in archive_out.file_pattern.split('"')[1::2]]+['']))).ravel()))
                    ifile +=1
                # drop any pre-existing outputs before overwriting them
                for ixr_out,filename_out in enumerate(filenames_out):
                    index_array_out_tuple_ordered = tuple([attributes[ixr_out][key] for key in archive_out.lib_dataarrays.index.names])
                    if index_array_out_tuple_ordered in archive_out.dataarrays.keys():
                        self.remove_by_index(index_array_out_tuple_ordered,delete_on_disk=True)
                for filename_out in filenames_out:
                    os.system('mkdir -p '+os.path.dirname(filename_out))
                if not keep_in_memory_during_processing:
                    # let the wrapper attribute and write the outputs itself
                    xarray_function_wrapper(func,dataarrays_wrapper(*tuple(dataarrays_for_func)),filenames_out=filenames_out,attributes = attributes, release=True, **kwargs)
                else:
                    temp_dataarrays = xarray_function_wrapper(func,dataarrays_wrapper(*tuple(dataarrays_for_func)),**kwargs)
                    if type(temp_dataarrays) != tuple:
                        print('this is a workaround in case we get a single dataarray instead of tuple of dataarrays from the wrapper function. This needs revision')
                        idataarray = 0
                        for key,value in attributes[idataarray].items():
                            if key not in self.not_dataarray_attributes:
                                temp_dataarrays.attrs[key] = value
                            if key == 'variable':
                                temp_dataarrays.name = value
                        #import pdb;pdb.set_trace()
                        os.system('rm '+filenames_out[idataarray])
                        temp_dataarrays.to_netcdf(filenames_out[idataarray])
                        temp_dataarrays.close()
                    else:
                        for idataarray in range(len(temp_dataarrays)):
                            for key,value in attributes[idataarray].items():
                                if key not in self.not_dataarray_attributes:
                                    temp_dataarrays[idataarray].attrs[key] = value
                                if key == 'variable':
                                    temp_dataarrays[idataarray].name = value
                        for idataarray in range(len(temp_dataarrays)):
                            os.system('rm '+filenames_out[idataarray])
                            temp_dataarrays[idataarray].to_netcdf(filenames_out[idataarray])
                            temp_dataarrays[idataarray].close()
                # re-register the written files so the library points at them
                for ixr_out,filename_out in enumerate(filenames_out):
                    archive_out.add_dataarray(filename_out)
        if write_mode == 'create_new_archive':
            return archive_out
        # if type(group_columns_out.index) == pd.core.indexes.base.Index:
        #     MultiIndex_from_Single_Index = lambda index: pd.MultiIndex.from_tuples([x.split()[::-1] for x in index])
        #     group_columns_out.index = MultiIndex_from_Single_Index(group_columns_out.index)
        #
        # # not_all_arrays_available_in_this_group = group_columns.loc[apply_merge_out_index][groupby].isnull().any().any()
        # # dataarrays_for_func = []
        #
        # import pdb;pdb.set_trace()
        # for index_group,columns in group_columns_out.loc[apply_merge_out_index].iterrows():
        #     index_array_dict = {**dict(columns),**dict(zip(apply_merge_out_index.names,index_group))}#**dict(zip(apply_merge_index.names,index_group))} **dict(zip(groupby,index)),
        #     index_array_tuple_ordered = tuple([index_array_dict[key] for key in self.lib_dataarrays.index.names])
        #     # dataarrays_for_func.append(self.dataarrays[index_array_tuple_ordered])
        #
        #group_columns.loc[apply_merge_index]
        # temp_df.groupby(self.lib_dataarrays.index.names
    # if kwargs.keys() != 1:
    #     raise ValueError('length different from 1 is not allowed')
    def update(self,
               library_path=None,
               query=None,
               force_overwrite_dataarrays = False,
               force_overwrite_pickle=False,
               extra_attributes={},
               dump_floating_dataarrays=False,**kwargs):
        """
        Perform quick attribute/parameter updates and/or dump unsynced
        information (pickle index, settings yaml, floating dataarrays) to disc.

        Parameters:
            library_path: either a path ending in '.pkl' (explicit pickle file)
                or ending in '/' (directory; 'master.pkl' is used there).
                Sets self.path_pickle.
            query: optional pandas query string restricting which dataarrays
                receive the extra_attributes update.
            force_overwrite_dataarrays: allow overwriting existing netCDF files
                when dumping floating dataarrays.
            force_overwrite_pickle: allow overwriting an existing pickle file.
            extra_attributes: dict of attributes (re-)applied to the selected
                dataarrays.
            dump_floating_dataarrays: write dataarrays that have no valid
                on-disk file pointer yet to netCDF, named via self.file_pattern.
            **kwargs: extra settings; every key must be in self.settings_keys.

        Raises:
            ValueError: malformed library_path or unknown setting key.
            IOError: refusing to overwrite an existing pickle/netCDF file.
        """
        if library_path is not None:
            # Resolve the pickle target: explicit *.pkl file, or master.pkl
            # inside a directory path ending with '/'.
            if library_path[-4:] == '.pkl':
                lib_basename = os.path.basename(os.path.abspath(library_path))
                lib_dirname = os.path.dirname(os.path.abspath(library_path))
            elif library_path[-1:] == '/':
                lib_basename = 'master.pkl'
                lib_dirname = os.path.abspath(library_path)
            else:
                raise ValueError('Please provide a path that either ends on ".pkl" (indicating specific picklefile) or "/" (full archive dump at master.pkl)')
            # Refuse to clobber an existing pickle unless explicitly forced.
            if os.path.isfile(lib_dirname+'/'+lib_basename) and (not force_overwrite_pickle):
                raise IOError('pickle file exists. Please use force_overwrite_pickle = True, or specify the full pickle name as the library_path.')
            # when the self.path_pickle exists, it is assumed that this file needs to be updated
            self.path_pickle = lib_dirname+'/'+lib_basename
        # else:
        #     lib_dirname = os.path.dirname(self.path_pickle)
        # if self.path_pickle is None:
        #     raise ValueError("I can't track the pickle name. Please specify a pkl file or directory with library_path")
        # Apply the keyword settings; only keys declared in settings_keys are
        # accepted, everything else is a hard error.
        for key,value in kwargs.items():
            if key not in self.settings_keys:
                raise ValueError('"'+key+'" is not a known setting.')
            else:
                self.__dict__[key] = value
        # Select which dataarrays are affected by the attribute update.
        if query is not None:
            read_lib_dataarrays = self.lib_dataarrays.query(query).copy()
        else:
            read_lib_dataarrays = self.lib_dataarrays.copy()
        if len(extra_attributes) > 0:
            # Re-register each selected dataarray with the merged attribute set;
            # remove + add is how attribute changes propagate into the index.
            for idx,row in read_lib_dataarrays.iterrows():
                # # this should become part of a save procedure updating the attributes of the netcdf on disc
                # if ('absolute_path' in row.keys()) and ('ncvariable' in row.keys()):
                #     variable=idx[self.lib_dataarrays.index.names.index('variable')]
                #     self.remove_by_index(idx)
                #     ncin = nc4.open_dataset(row['absolute_path'])
                #     for attribute_key,attribute_value in extra_attributes.items():
                #         ncin[idx[self.lib_dataarrays.index.names.index('variable')]].setncattr(attribute_key,attribute_value)
                #     ncin.close()
                #     self.add_dataarray(row['absolute_path'],variable,ncvariable=row['ncvariable'])
                # else:
                dataarray_temp = self.dataarrays[idx]
                # Combine index-level values, existing columns and the new
                # extra_attributes (extra_attributes wins on conflicts).
                attributes_temp = {**dict(zip(read_lib_dataarrays.index.names,idx)),**row}
                for attribute_key,attribute_value in extra_attributes.items():
                    attributes_temp[attribute_key] = attribute_value
                # extra_attributes_plus_path = extra_attributes.copy()
                # if ( ('path' in row.keys()) and (type(row['path']) == str)):
                #     extra_attributes_plus_path['path'] =row['path']
                # if ( ('absolute_path' in row.keys()) and (type(row['absolute_path']) == str)):
                #     extra_attributes_plus_path['absolute_path'] =row['absolute_path']
                self.remove_by_index(idx,update_pickle=False)
                self.add_dataarray(dataarray_temp,**attributes_temp)
        # for key,value in extra_attributes.items():
        #     self.lib_dataarrays.loc[read_lib_dataarrays.index][key] = value
        if 'path_pickle' in self.__dict__.keys():
            os.system('mkdir -p '+os.path.dirname(self.path_pickle))
        read_lib_dataarrays = self.lib_dataarrays.copy()
        for idx,columns in read_lib_dataarrays.iterrows():
            # A dataarray is "floating" when it lacks a valid string file
            # pointer ('absolute_path'/'path' column).
            if ( ('absolute_path' not in columns.keys()) or (type(columns['absolute_path']) != str)) or \
               ( ('path' not in columns.keys()) or (type(columns['path']) != str)):
                if dump_floating_dataarrays:
                    #parse filename according to file_pattern
                    if 'path_pickle' not in self.__dict__.keys():
                        raise ValueError ('self.path_pickle is not set')
                    # Build the output filename by substituting the quoted
                    # attribute keys in file_pattern with their values.
                    fnout = os.path.dirname(self.path_pickle)+'/'+''.join(np.array(list(zip(self.file_pattern.split('"')[::2],[{**dict(zip(self.lib_dataarrays.index.names,idx)),**columns}[key] for key in self.file_pattern.split('"')[1::2]]+['']))).ravel())
                    print("File pointer for ",idx," is not known, so I'm dumping a new file under ",fnout)
                    #fnout = self.lib_dataarrays.loc[idx]['absolute_path']
                    if (not force_overwrite_dataarrays) and (os.path.isfile(fnout)):
                        raise IOError(fnout+' exists. Use force_overwrite_dataarrays to overwrite file')
                    os.system('mkdir -p '+os.path.dirname(fnout))
                    # self.dataarrays[idx].attrs['absolute_path'] = fnout
                    # NOTE(review): this first loop body is a no-op (bare
                    # attribute access); the loop below does the actual work.
                    for key,value in dict(columns).items():
                        self.dataarrays[idx]
                    for key,value in dict(columns).items():
                        # File-pointer/bookkeeping columns are kept out of the
                        # netCDF attributes on disk.
                        if key not in [
                            'variable',
                            'absolute_path',
                            'absolute_path_for_reading',
                            'absolute_path_as_cache',
                            'path',
                            'available'
                            #'dataarray_pointer',
                        ]:
                            # netCDF cannot store Python bools; cast to int.
                            if type(value) == bool:
                                self.dataarrays[idx].attrs[key] = int(value)
                            else:
                                self.dataarrays[idx].attrs[key] = value
                        if key == 'variable':
                            self.dataarrays[idx].name = value
                    os.system('rm '+fnout)
                    self.dataarrays[idx].to_netcdf(fnout);print('file written to: '+fnout)
                    # Re-register so the library row now points at the new file.
                    self.remove_by_index(idx,update_pickle=False)
                    self.add_dataarray(fnout)
                    #self.dataarrays[idx]
                    # key = 'path'
                    # if key not in self.lib_dataarrays.columns:
                    #     self.lib_dataarrays[key] = ''
                    # self.lib_dataarrays.loc[idx]['path'] = './'
                    # note that path and absolute_path are not written to the netcdf file above, but it is available virtually for convenience
                    #self.lib_dataarrays['absolute_path'].loc[idx] = fnout
                    #self.dataarrays[idx].attrs['path'] = self.lib_dataarrays.loc[idx]['path']
                else:
                    print("Assuming variable for ",idx," exists in file "+columns['absolute_path'])
                    if 'path' not in self.lib_dataarrays.columns:
                        self.lib_dataarrays['path'] = None
                    if 'path_pickle' in self.__dict__.keys():
                        # Keep the relative 'path' and 'absolute_path' columns
                        # consistent with each other, relative to the pickle's
                        # directory.
                        if ((columns['absolute_path'] is not None) and (type(columns['absolute_path']) is str)):
                            if 'path' not in self.lib_dataarrays.columns:
                                self.lib_dataarrays['path'] = None
                            self.lib_dataarrays['path'].loc[idx] = os.path.relpath(columns['absolute_path'],os.path.dirname(self.path_pickle))
                            print("relative file path to "+os.path.dirname(self.path_pickle)+" is "+self.lib_dataarrays['path'].loc[idx])
                            #os.path.commonprefix([columns['absolute_path'],lib_dirname])
                        elif ((columns['path'] is not None) and (type(columns['path']) is str)):
                            if 'absolute_path' not in self.lib_dataarrays.columns:
                                self.lib_dataarrays['absolute_path'] = None
                            self.lib_dataarrays['absolute_path'].loc[idx] = os.path.dirname(self.path_pickle)+'/'+columns['path']
                            if ((columns['absolute_path_for_reading'] is None) or (type(columns['absolute_path_for_reading']) is not str)):
                                if 'absolute_path_for_reading' not in self.lib_dataarrays.columns:
                                    self.lib_dataarrays['absolute_path_for_reading'] = None
                                self.lib_dataarrays['absolute_path_for_reading'].loc[idx] = os.path.dirname(self.path_pickle)+'/'+columns['path']
                            #print("absolute file path to "+os.path.dirname(self.path_pickle)+" is "+self.lib_dataarrays['path'].loc[idx])
        if ('path_pickle' in self.__dict__.keys()):
            # Persist the index and the settings (as yaml) next to it.
            self.lib_dataarrays.to_pickle(self.path_pickle)
            with open(self.path_pickle+'.yaml','w') as file:
                yaml.dump([[key,self.__dict__[key]] for key in self.settings_keys],file)
    def load(
            self,
            path,
            path_settings = None,
            #file_pattern = lambda columns: columns['variable']+'_'+columns['source']+'_'+columns['time']+'_'+columns['space']+'.nc',
            file_pattern = None,
            query= None,
            extra_attributes={},
            skip_unavailable=False,
            add_file_pattern_matches=False,
            release_dataarray_pointer =False,
            cache_to_tempdir=False,
            cache_to_ram=False,
            reset_space=False,
            initialize_if_missing=True,
            **kwargs):
        """
        Load an archive from disk.

        `path` may be a list of netCDF filenames (each added directly), or a
        string ending in '.pkl' (explicit pickle index) or '/' (directory
        containing a 'master.pkl'). Settings are taken from the pickle's
        companion yaml file (or `path_settings`), then overridden by **kwargs.
        A `query` (pandas query string or a callable on the library DataFrame)
        restricts which dataarrays end up loaded.

        NOTE(review): when `path` is a list, control still falls through to the
        pickle-handling code below, where `lib_dirname` is undefined — this
        branch looks unfinished; confirm intended behavior for list input.
        """
        if type(path).__name__ == 'list':
            # A plain list of netCDF filenames: add each file directly.
            # eg., -> files_wildcard = '*_*_*_*.nc'
            print('Guessing files from file list...(this procedure may need revision)')
            #allkwargs = {**dict(zip(lib_dataarrays_temp.index.names, index)),**dict(dataarray),**kwargs}
            filenames = path
            for filename in filenames:
                self.add_dataarray(xr.open_dataarray(filename),absolute_path=filename,skip_unavailable=False,release_dataarray_pointer =True,cache_to_tempdir=False,reset_space=reset_space,**extra_attributes)
        # elif os.path.isfile(path):
        #     print('pkl file '+path+' detected. Listing files from there.' )
        #     lib_dataarrays_temp = pd.read_pickle(path)
        #     if query is not None:
        #         print('performing query subselection: '+query)
        #         lib_dataarrays_temp = lib_dataarrays_temp.query(query,engine='python')
        #     for index,dataarray in lib_dataarrays_temp.iterrows():
        #         allkwargs = {**dict(zip(lib_dataarrays_temp.index.names, index)),**dict(dataarray),**kwargs}
        #         if dataarray.path[0] == '/':
        #             absolute_file_path = dataarray.path
        #         else:
        #             absolute_file_path = os.path.dirname(path)+'/'+dataarray.path
        #         allkwargs['absolute_path'] = absolute_file_path
        #         if os.path.isfile(absolute_file_path):
        #             print('adding '+absolute_file_path+' with additional attributes: ',allkwargs)
        #             try:
        #                 xrin = xr.open_dataarray(absolute_file_path)
        #             except ValueError:
        #                 # try to open the variable out of dataset
        #                 variable = index[0]
        #                 xrin = xr.open_dataset(absolute_file_path)[variable]
        #             self.add_dataarray(xrin,release_dataarray_pointer =False,**allkwargs)
        #         else:
        #             print('Warning. Could not add file '+absolute_file_path+' because it does not exist.')
        # elif os.path.isdir(path):
        #     print('Guessing files from directory...')
        #     self.lib_dirname = path
        #     self.lib_basename = 'master.pkl'
        elif type(path) == str:
            # Resolve pickle filename/directory from the string path.
            if path[-4:] == '.pkl':
                lib_basename = os.path.basename(os.path.abspath(path))
                lib_dirname = os.path.dirname(os.path.abspath(path))
            elif path[-1:] == '/':
                lib_basename = 'master.pkl'
                lib_dirname = os.path.abspath(path)
            else:
                raise ValueError('Please provide a path that either ends on ".pkl" (indicating specific picklefile) or "/" (full archive dump at master.pkl)')
        else:
            raise IOError ('path '+path+ ' does not exist.')
        # os.system('mkdir -p '+path)
        # file_pattern = '"variable"_"columns"_"time"_"space".nc'
        temp_path_pickle = lib_dirname+'/'+lib_basename
        print('apply settings according to yaml file and kwargs')
        if path_settings is None:
            # Default settings file lives next to the pickle.
            path_settings = temp_path_pickle+'.yaml'
        elif not os.path.isfile(path_settings):
            raise IOError('Settings file '+path_settings+ ' not found.')
        print(temp_path_pickle)
        if os.path.isfile(temp_path_pickle):
            self.path_pickle = temp_path_pickle
        if (not os.path.isfile(temp_path_pickle)) and initialize_if_missing:
            # Bootstrap an empty archive on disk when none exists yet.
            if 'file_pattern' in kwargs.keys():
                self.update(temp_path_pickle,file_pattern=kwargs['file_pattern'])
            else:
                self.update(temp_path_pickle)
        if os.path.isfile(path_settings):
            print('settings file found')
            with open(path_settings) as file:
                for key,value in yaml.load(file):
                    if key in self.settings_keys:
                        self.__dict__[key] = value
        # kwargs override the settings read from yaml.
        for key,value in kwargs.items():
            if key not in self.settings_keys:
                raise ValueError('"'+key+'" is not a known setting.')
            else:
                self.__dict__[key] = value
        # if file_pattern is not None:
        #     print('Overriding settings "file_pattern" with ', file_pattern,' from arguments')
        #     self.file_pattern = file_pattern
        print('reading the dataarrays from the pickle file')
        # `query` may be a pandas query string or a callable filtering the frame.
        if type(query) == str:
            read_lib_dataarrays = pd.read_pickle(temp_path_pickle).query(query,engine='python')
        elif query is not None:
            read_lib_dataarrays = query(pd.read_pickle(temp_path_pickle))
        else:
            #query is None
            read_lib_dataarrays = pd.read_pickle(temp_path_pickle)
        for column in read_lib_dataarrays.columns:
            if column not in self.lib_dataarrays.columns:
                print('adding column '+column+' from the original set to avoid errors when the query result is empty and gets queried again')
                self.lib_dataarrays[column] = ""
        # First pass: register every selected dataarray lazily (pointer released).
        for index,columns in read_lib_dataarrays.iterrows():
            absolute_path = None
            if 'path' in read_lib_dataarrays.columns:
                absolute_path = lib_dirname+'/'+columns['path']
            elif 'absolute_path' in read_lib_dataarrays.columns:
                absolute_path = columns['absolute_path']
                columns['path'] = os.path.relpath(columns['absolute_path'],os.path.dirname(self.path_pickle))
            if (absolute_path is not None) and (absolute_path not in self.lib_dataarrays.absolute_path):
                #if index[0] == 'mslhf_0001':
                print('Opening file : '+absolute_path)
                self.add_dataarray(absolute_path,skip_unavailable=skip_unavailable,release_dataarray_pointer =True,cache_to_tempdir=False,cache_to_ram=cache_to_ram,**({**dict(zip(read_lib_dataarrays.index.names,index)),**columns}),**extra_attributes)
        if add_file_pattern_matches and (self.file_pattern is not None):
            # Scan the archive directory for files matching file_pattern that
            # are not in the pickle index yet, and add them as well.
            files_wildcard = lib_dirname+'/'+''.join(np.array(list(zip(self.file_pattern.split('"')[::2],['*']*len(self.file_pattern.split('"')[1::2])+['']))).ravel())
            print('file_pattern is '+self.file_pattern+' and add_file_pattern_matches == True, so scanning and adding files that match the wildcard: ',files_wildcard+' that are not in the library yet')
            # eg., -> files_wildcard = '*_*_*_*.nc'
            filenames = glob.glob(files_wildcard)
            for filename in filenames:
                if filename not in self.lib_dataarrays.absolute_path:
                    path = os.path.relpath(filename,os.path.dirname(temp_path_pickle))
                    print('Opening file : '+filename)
                    self.add_dataarray(filename,skip_unavailable=skip_unavailable, release_dataarray_pointer = True, cache_to_tempdir=False,path=path,cache_to_ram=cache_to_ram,reset_space=reset_space,**extra_attributes)
        # import pdb; pdb.set_trace()
        # Second pass: re-apply the query on the (possibly extended) library,
        # drop everything, then re-add only the selection with the caller's
        # actual caching/pointer options.
        if type(query) == str:
            read_lib_dataarrays = self.lib_dataarrays.query(query,engine='python').copy()
        elif (query is not None):
            read_lib_dataarrays = query(self.lib_dataarrays)
        else:
            read_lib_dataarrays = self.lib_dataarrays.copy()
        for idx,columns in self.lib_dataarrays.iterrows():
            self.remove_by_index(idx,update_pickle=False)
        for idx,columns in read_lib_dataarrays.iterrows():
            absolute_path = None
            # import pdb; pdb.set_trace()
            if 'path' in read_lib_dataarrays.columns:
                absolute_path = lib_dirname+'/'+columns['path']
            elif 'absolute_path' in read_lib_dataarrays.columns:
                absolute_path = columns['absolute_path']
            if (absolute_path is not None) and (absolute_path not in self.lib_dataarrays.absolute_path):
                #if index[0] == 'mslhf_0001':
                print('Opening file : '+absolute_path)
                self.add_dataarray(absolute_path,skip_unavailable=skip_unavailable,release_dataarray_pointer =release_dataarray_pointer,cache_to_tempdir=cache_to_tempdir,cache_to_ram=cache_to_ram,reset_space=reset_space,**({**dict(zip(read_lib_dataarrays.index.names,idx)),**columns}),**extra_attributes)
# def dataarrays_merge_apply(self,attrs,function):
# if len(attrs.keys()) > 1:
# print('not tested, please check')
# import pdb; pdb.set_trace()
#
# index_keys_groupby = []
# index_keys_nongroupby = []
# for key in self.lib_dataarrays.index.names:
# if key in attrs.keys():
# index_keys_nongroupby.append(key)
# else:
# index_keys_groupby.append(key)
# lfirst = True
# archive_out = archive()
# for index_groupby,group_lib_dataarrays in self.lib_dataarrays.groupby(index_keys_groupby):
# group_dataarrays = [self.dataarrays[key] for key in group_lib_dataarrays.index]
# dataarray_out = function(group_dataarrays)
# for column in group_lib_dataarrays.columns:
# # if lib_dataarrays[column].unique() > 0:
# # aparently uniform attributes are taken over, so we can detect from the destination attributies
# if (column not in dataarray_out.attrs.keys()) and (column != 'path'):
# dataarray_out.coords[column] = (attrs.keys(), group_lib_dataarrays[column].values)
# for key,value in attrs.items():
# dataarray_out.attrs[key] = value
# archive_out.add_dataarray(dataarray_out)
# return archive_out
    def dataarrays_merge(self,attrs):
        """
        Merge dataarrays along the index level(s) named in `attrs`.

        The library is grouped by every index level NOT in `attrs`; each
        group's dataarrays are concatenated, the `attrs` key/value pairs are
        stamped onto the result, and everything is collected in a new archive.

        Parameters:
            attrs: dict mapping an index-level name to the attribute value of
                the merged result. NOTE(review): only the single-key case is
                tested; more than one key drops into a debugger breakpoint.

        Returns:
            archive: a new archive holding the merged dataarrays.
        """
        if len(attrs.keys()) > 1:
            print('not tested, please check')
            import pdb; pdb.set_trace()
        # Split the library's index levels: those being merged away (in attrs)
        # versus those used for grouping.
        index_keys_groupby = []
        index_keys_nongroupby = []
        for key in self.lib_dataarrays.index.names:
            if key in attrs.keys():
                index_keys_nongroupby.append(key)
            else:
                index_keys_groupby.append(key)
        lfirst = True
        archive_out = archive()
        for index_groupby,group_lib_dataarrays in self.lib_dataarrays.groupby(index_keys_groupby):
            group_dataarrays = [self.dataarrays[key] for key in group_lib_dataarrays.index]
            # Concatenate the group's members along their remaining index.
            dataarray_out = xr.concat(group_dataarrays,dim=group_lib_dataarrays.loc[index_groupby].index)
            for column in group_lib_dataarrays.columns:
                # if lib_dataarrays[column].unique() > 0:
                # aparently uniform attributes are taken over, so we can detect from the destination attributies
                # Columns not already carried over as attributes become
                # per-member coordinates ('path' is file bookkeeping, skip it).
                if (column not in dataarray_out.attrs.keys()) and (column != 'path'):
                    dataarray_out.coords[column] = (attrs.keys(), group_lib_dataarrays[column].values)
            for key,value in attrs.items():
                dataarray_out.attrs[key] = value
            archive_out.add_dataarray(dataarray_out)
        return archive_out
# group_dataarrays = self.lib_dataarrays.loc[index_groupby]
# xr.concat(group_dataarrays.values(),dim=group_dataarrays.index)
# if lfirst:
# dataarray_out = self.dataarrays
# for key,value in merges.items():
# self.lib_dataarrays.groupby:
# def merge_time_in_space(self,time,datetimes,space):
# archive_temp = self.dataarrays_apply(lambda x: x.reindex({'time':datetimes}),attrs={'time':time})
# archive_out = archive()
# import pdb; pdb.set_trace()
# for key in
# self.lib_dataarrays
# self.timeframes = pd.DataFrame(index=empty_multiindex(['timeframe','grid','source','variable']),columns=['model','experiment','ensemble_member','start','end','start_clip','end_clip']).iloc[1:]
# self.timeframes_pages = pd.DataFrame(index = empty_multiindex(['timeframe','grid','source','variable','start','end']),columns = list(self.pages.columns)+['start_clip','end_clip']).iloc[1:]
# def add_source(self,source = 'gsod_historical',pathfile = ''):
# if source == 'ghcn_historical':
# lib_temp = pd.read_fwf(pathfile,names=['locid','lat','lon','variable','yearstart','yearend'])
# self.lookupvar[source] = {'pr_dailysum':'PRCP'}
# for var,sourcevar in self.lookupvar[source].items():
#
# lib_temp['variable'].loc[lib_temp['variable'] == sourcevar] = var # = lib_temp.var.str.rename(columns={0:self.lookupvar[source][seriesvar]})
# else:
# raise ValueError ('Source '+source+' not supported.')
#
# lib_temp['source'] = source
# lib_temp = lib_temp.set_index(['locid','source','variable'])
#
# self.lib = pd.concat([self.lib,lib_temp])
|
hendrikwout/pynacolada
|
pynacolada/archive.py
|
Python
|
gpl-3.0
| 86,666
|
[
"NetCDF"
] |
8de8c84fd6a8c7fe0b99de80faefa17be20a788416bf789008b3e83c640e5b21
|
################################################################################
#
# esc_lib.py
#
# Library of electronic-structure related code.
#
################################################################################
#
# Copyright 2012 Kane O'Donnell
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
#
# NOTES
#
# 1. Internally we *always* use atomic units (bohr, hartree, etc) and provide
# converter routines to deal with other common units such as ang and eV.
#
################################################################################
from __future__ import division
from numpy import array, zeros, sqrt, reshape, mat, pi, matrix, cos, sin, exp, arange, arccos, arctan2, complex, polyfit, poly1d, loadtxt, amin, amax, argmin, argmax
from random import random as rand
from numpy.linalg import norm, inv
from numpy.fft import fftn, ifftn
from scipy.interpolate import interp1d
from scipy.integrate import quad
from scipy.special import sph_harm
from scipy.optimize import leastsq, curve_fit, fmin_slsqp
#from scipy.optimize import minimize
#from libabitools import io, wave, spectra
from scipy.io import netcdf
import os
# Debugging flag - set to 1 to see debug messages.
DEBUG=1
# Element dictionaries
# Z number -> element symbol (only the species this library has needed so far).
elements = { 1 : "H", 2 : "He", 3 : "Li", 4 : "Be", 5 : "B", 6 : "C", 7 : "N" , 8 : "O", 9 : "F", 29 : "Cu", 14 : "Si" , 13 : "Al", 16: "S", 83 : "Bi"}
# Section keywords recognized in XCrysden .xsf files.
xsf_keywords = ["ANIMSTEPS", "CRYSTAL", "ATOMS", "PRIMVEC", "PRIMCOORD"]
# Reference bond lengths in Bohr, keyed by element-pair symbol.
bond_lengths = {"CH" : 2.06, "CC" : 2.91, "NC" : 2.78, "NH" : 1.91, "HH" : 2.27}
def getElementZ(elstr):
    """ Z = getElementZ(elstr)

    Given a string (or integer) that contains either a Z number OR an element
    abbreviation like Cu, MG, whatever, generates and returns the
    appropriate integer Z.

    Raises ESCError if the abbreviation is not in the elements dictionary.
    """
    # Is it an integer (or a string holding one)?
    try:
        return int(elstr)
    except ValueError:
        pass
    # Not an integer: look the abbreviation up, case-insensitively via title().
    if elstr.title() not in elements.values():
        # Fixed: the original message had a typo ("getELementZ") and claimed
        # "Returning -1" while the raise made the subsequent `return -1`
        # unreachable dead code; the dead return has been removed.
        raise ESCError("getElementZ", "Element %s is not in the elements dictionary." % elstr)
    for key, value in elements.items():
        if elstr.title() == value:
            return key
def getBondLengths(positions, species, cutoff=3.0, give_species=False):
    """ bonds = getBondLengths(positions, species, cutoff=3.0, give_species=False)

    Returns a list of bond specs [i, j, length] for all pairwise
    distances less than the cutoff distance (default: 3.0 Bohr).

    If give_species=True, the bond spec includes the species abbreviation:

    [i, Zi, j, Zj, length]
    """
    bonds = []
    n = len(positions)
    for i in range(n):
        # Start at i+1: skips the self-pair and duplicate (j, i) pairs.
        # (The original used `if i is not j`, an identity test on ints that
        # only works by accident of CPython small-int interning.)
        for j in range(i + 1, n):
            pair = norm(positions[i] - positions[j])
            if pair < cutoff:
                if give_species:
                    bonds.append([i, elements[species[i]], j, elements[species[j]], pair])
                else:
                    bonds.append([i, j, pair])
    return bonds
def gl_smear(x, y, xs, gw=None, lw=None, cutoff=10):
    """ ys = gl_smear(x, y, xs, gw=None, lw=None, cutoff=10)

    Build a spectrum ys over the grid xs from reference points (x, y) by
    Gaussian/Lorentzian smearing:

    ys(xs) = sum(x) [y(x) * {gaussian(x-xs, gw) + lorentzian(x-xs, lw)}]

    gw and lw are the characteristic widths of the gaussian and lorentzian.
    If either is None, a dynamic width derived from the local grid spacing is
    used instead (0.25 * the centered spacing, 0.5 * the one-sided spacing at
    the ends). Reference points with y >= cutoff are excluded (low-pass).

    NOTE: This gl_smear is incorrect - needs a weighting factor between the
    gaussian and lorentzian to maintain normalization (amongst other things).
    """
    # Precomputed normalization constants.
    g_pre = 1.0 / sqrt(2 * pi)
    l_pre = 1.0 / pi

    def gaussian(p, w):
        return g_pre / w * exp(-0.5 * (p ** 2) / (w ** 2))

    def lorentzian(p, w):
        return l_pre * w / (p ** 2 + w ** 2)

    def adaptive_widths():
        # Width at each grid point follows the local grid spacing.
        last = len(xs) - 1
        widths = []
        for i in range(len(xs)):
            if i == 0:
                widths.append(abs(0.5 * (xs[1] - xs[0])))
            elif i == last:
                widths.append(abs(0.5 * (xs[-1] - xs[-2])))
            else:
                widths.append(abs(0.25 * (xs[i + 1] - xs[i - 1])))
        return widths

    dg = adaptive_widths() if gw is None else [gw] * len(xs)
    dl = adaptive_widths() if lw is None else [lw] * len(xs)

    ys = zeros(len(xs))
    for i, xsi in enumerate(xs):
        for xi, yi in zip(x, y):
            if yi < cutoff:
                ys[i] += yi * (gaussian(xsi - xi, dg[i]) + lorentzian(xsi - xi, dl[i]))
    return ys
def rotate_positions(positions, filename, fileopt=0):
    """ new_positions = rotate_positions(positions, filename, fileopt=0)

    Rotates specified coordinates around a specified axis and returns the new
    positions. Can be used, for example, to rotate portions of a molecule.
    The rotations come from a file in the form:

    i1 i2 i3 theta
    ...

    (if fileopt = 0) where i1 is the atomic index to be rotated and the axis is
    formed as the vector between atoms i2 and i3. Rotation is CLOCKWISE
    viewed along the axis.

    If fileopt is 1, the format is i1, x1, x2, x3, o1, o2, o3, theta, where xi
    give the actual axis and the oi give the origin of the vector xi for the
    rotation. The units of the axis x don't matter, vector o must be given
    in atomic units (bohr).

    Comments marked with # or ! are ignored in the rotation file.

    NOTE: the indices i1, i2, etc are 1-based, to align with XCrysden, not
    0-based. We convert inside this routine.
    """
    # NOTE(review): `new_positions` aliases the input list, so positions is
    # modified IN PLACE; callers needing the originals must pass a copy.
    new_positions = positions
    # Context manager closes the file even if parsing fails (the original
    # never closed the handle).
    with open(filename) as f:
        lines = f.readlines()
    lines = remove_comments(lines, "#")
    lines = remove_comments(lines, "!")
    indices = []
    axes = []
    origins = []
    angles = []
    for line in lines:
        bits = line.split()
        # Subtract 1: file indices are 1-based (XCrysden convention),
        # python is 0-based.
        indices.append(int(bits[0]) - 1)
        if fileopt == 0:
            a = int(bits[1]) - 1
            b = int(bits[2]) - 1
            axes.append(positions[b] - positions[a])
            origins.append(positions[a])
            angles.append(float(bits[3]))
        elif fileopt == 1:
            axes.append(array([float(x) for x in bits[1:4]]))
            origins.append(array([float(x) for x in bits[4:7]]))
            angles.append(float(bits[7]))
        else:
            # Converted from a Python 2 print statement to print() so the file
            # also runs under Python 3 (identical output for a single arg).
            # NOTE(review): the loop keeps running after this error, leaving
            # axes/origins/angles shorter than indices, so zip() below silently
            # drops the affected rotations — consider raising instead.
            print("rotate_positions: ERROR - fileopt must be 0 or 1, not %d" % fileopt)
    for i, a, o, t in zip(indices, axes, origins, angles):
        new_positions[i] = rotate(positions[i], a, o, t)
    return new_positions
def rotate(pos, axis, origin, angle):
    """ new_vec = rotate(pos, axis, origin, angle)

    Rotate the coordinate pos by `angle` degrees, CLOCKWISE looking along
    `axis`. The rotation axis passes through `origin`; pos is taken relative
    to (0,0,0), shifted to the origin frame for the rotation and shifted back
    afterwards.
    """
    # Displacement of pos from the rotation origin.
    rel = pos - origin
    # Degrees -> radians.
    theta = float(angle) * pi / 180
    # Unit axis components.
    ux, uy, uz = axis / norm(axis)
    # Rodrigues-form rotation matrix: R = cos*I + sin*[u]_x + (1-cos)*u u^T
    outer = matrix([[ux * ux, ux * uy, ux * uz],
                    [ux * uy, uy * uy, uy * uz],
                    [ux * uz, uz * uy, uz * uz]])
    cross = matrix([[0, -uz, uy],
                    [uz, 0, -ux],
                    [-uy, ux, 0]])
    ident = matrix([[1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0],
                    [0.0, 0.0, 1.0]])
    R = cos(theta) * ident + sin(theta) * cross + (1.0 - cos(theta)) * outer
    # Rotate the column vector and translate back to absolute coordinates.
    return array(R * matrix(rel).T).flatten() + origin
def gaussian_convolute(x, y, width):
    """ convolved = gaussian_convolute(x, y, width)

    Convolute the data (x, y) with a gaussian of the given width and return
    the convolved y values.
    """
    # Loop-invariant pieces of the gaussian kernel, hoisted out of the loops.
    prefactor = 1.0 / (width * sqrt(2 * pi))
    two_w2 = 2.0 * width ** 2
    yc = zeros(y.shape)
    for j in range(len(yc)):
        total = 0.0
        for i in range(len(x)):
            total += y[i] * prefactor * exp(-1.0 * (x[j] - x[i]) ** 2 / two_w2)
        yc[j] = total
    return yc
def normalize_integral(x, y):
    """ yn = normalize_integral(x, y)

    Scale y so that its trapezoidal integral over x equals one.
    """
    # Trapezoidal rule, accumulated panel by panel over consecutive pairs.
    area = 0.0
    for left, right, ylo, yhi in zip(x[:-1], x[1:], y[:-1], y[1:]):
        area += 0.5 * (right - left) * (yhi + ylo)
    return y / area
def integral_smoothing(data, period, pbc=False):
    """ smooth_data = integral_smoothing(data, period)

    Assumes data is two column with x values data[:,0] and y values data[:,1].
    Then the new y value at x is an integral of the old data within a single
    period centred on x. Period is in the same units as the data x-axis itself.

    Set pbc to True (default False) to integrate past the edge and wrap around
    (ie periodic boundary conditions).

    The only condition imposed is that the period must be less than the span
    of x - I haven't written "multi-wrap-around" code here so it can't handle
    multiply-periodic spans.

    Returns None (after printing an error) when the period exceeds the x span.
    """
    # Check the period is less than the span of x.
    if period > abs(amax(data[:,0]) - amin(data[:,0])):
        # print statements converted to print() calls so the module also runs
        # under Python 3 (single-argument form is identical in Python 2).
        print("ERROR (integral_smoothing): Period greater than span of x. This is not allowed.")
        return None
    # Make an interpolater and a new data array. We also need to know the
    # bounds of x to use in the case of periodic boundary conditions.
    smooth = zeros(data.shape)
    func = interp1d(data[:,0], data[:,1])
    xmin = amin(data[:,0])
    xmax = amax(data[:,0])
    lx = xmin
    rx = xmax
    halfp = float(period) / 2.0
    # For each point compute the integral over the window [x-halfp, x+halfp],
    # clamped to the data range (and wrapped when pbc is set).
    for i, x in enumerate(data[:,0]):
        smooth[i,0] = x
        if x - halfp < xmin:
            lx = xmin
        else:
            lx = x - halfp
        if x + halfp > xmax:
            rx = xmax
        else:
            rx = x + halfp
        if DEBUG:
            print("At point %f, integrating between %f and %f." % (x, lx, rx))
        # Integrate the bit between lx and rx
        this_int = quad(func, lx, rx)[0]
        if DEBUG:
            print("Result is %f." % this_int)
        # If PBCs are used, need to add any bits that pass the boundaries.
        if pbc is True:
            if x - halfp < xmin:
                lx = xmax - (halfp + xmin - x)
                rx = xmax
                this_int += quad(func, lx, rx)[0]
                if DEBUG:
                    print("Added PBC extra bit %f." % (this_int))
            if x + halfp > xmax:
                lx = xmin
                rx = xmin + halfp - xmax + x
                this_int += quad(func, lx, rx)[0]
                if DEBUG:
                    print("Added PBC extra bit %f." % (this_int))
        # Average over the window to get the smoothed value.
        smooth[i,1] = this_int / period
    return smooth
def read_xy(filename, comment_delim="#"):
    """ data = read_xy(filename, comment_delim="#")

    Reads a multi-column xy file, removes comments, returns as an array.

    NOTE: This is redundant now that numpy has a loadtxt() method: should replace
    all instances with loadtxt.
    """
    # Context manager guarantees the handle is closed even if parsing fails.
    with open(filename, 'r') as f:
        lines = f.readlines()
    lines = remove_comments(lines, comment_delim)
    # The first remaining line fixes the column count for the whole file.
    data = zeros((len(lines), len(lines[0].split())))
    for i, line in enumerate(lines):
        for j, bit in enumerate(line.split()):
            data[i, j] = float(bit)
    return data
def sum_atoms(data, start, finish, option="LeaveFirstColumn"):
    """ summed_data = sum_atoms(data, start, finish)

    Given data[i,j,k], sums over i in [start, finish) and returns the
    resulting [j,k] array.

    With option "LeaveFirstColumn" (the default) column 0 is not summed but
    copied from i == start (useful when column 0 is a shared x-axis).
    """
    summed = zeros((data.shape[1], data.shape[2]))
    # First column to include in the sum: 1 when column 0 is copied instead.
    first = 1 if option == "LeaveFirstColumn" else 0
    if first:
        summed[:, 0] = data[start, :, 0]
    for layer in range(start, finish):
        summed[:, first:] += data[layer, :, first:]
    return summed
def read_seq_xy(filename, comment_delim="#", option=None):
    """ data = read_seq_xy(filename, comment_delim="#", option=None)

    Reads xy files where the individual data sets are listed in two columns
    sequentially. We assume (but don't check) that all the sets are the same
    size. The returned data has the form:

    data[i,j,k]

    where i runs over the sets, j runs over the points and k runs over the
    columns of data, so 0 is the x axis, 1 is the y1 axis etc.

    Note that we can accommodate blank lines separating the sets.

    If option="castep_elnes" we correct for the silly output format and "k" is 4.
    """
    with open(filename, 'r') as f:
        lines = f.readlines()
    header_lines = []
    cur_block = []
    data_blocks = []
    for line in lines:
        try:
            # A data line must start with a number; anything else ends the
            # current block.
            float(line.split()[0])
            cur_block.append([float(y) for y in line.split()])
        except (ValueError, IndexError):
            # ValueError: non-numeric (header/comment) line.
            # IndexError: blank line -> split() is empty. The original only
            # caught ValueError, so the blank separators the docstring
            # promises to accommodate actually crashed the parse.
            if cur_block != []:
                data_blocks.append(cur_block)
                cur_block = []
            header_lines.append(line)
    if cur_block != []:
        data_blocks.append(cur_block)
    data_blocks = array(data_blocks)
    if option == "castep_elnes":
        npoints = data_blocks.shape[1]
        # Integer division: true division is in force via __future__, and a
        # float is not a valid array dimension.
        data = zeros((data_blocks.shape[0], npoints // 2, 4))
        for i in range(data_blocks.shape[0]):
            data[i,:,0] = data_blocks[i,0:npoints:2,0]
            data[i,:,1] = data_blocks[i,0:npoints:2,1]
            data[i,:,2] = data_blocks[i,1:npoints:2,0]
            data[i,:,3] = data_blocks[i,1:npoints:2,1]
        return data
    else:
        return array(data_blocks)
def read_ACF(filename):
    """ charges, total_charge = read_ACF(filename)

    Parse a bader ACF.dat output file. Returns an array of the per-atom
    charges (field 4 of each atom row) together with the total charge
    read from field 3 of the file's final line.
    """
    acf = open(filename, 'r')
    content = acf.readlines()
    acf.close()
    # The last line carries the total charge in its fourth field.
    total_charge = float(content[-1].split()[3])
    # Atom rows sit between a two-line header and a four-line footer.
    charges = [float(row.split()[4]) for row in content[2:-4]]
    return array(charges), total_charge
def remove_comments(lines, comment_delim="#",just_blanks=False):
    """ stripped = remove_comments(lines, comment_delim="#", just_blanks=False)

    Strip comments from a sequence of lines, including trailing comments on
    otherwise non-comment lines. Only one comment delimiter is handled per
    call - apply repeatedly for multiple comment styles.

    Blank lines are also dropped, so the result is safe for line-by-line
    processing (e.g. XSF files). With just_blanks=True only blank lines are
    removed and comments are left untouched.
    """
    kept = []
    for raw in lines:
        text = raw.strip()
        if just_blanks:
            if text != "":
                kept.append(text)
        elif not (text.startswith(comment_delim) or text == ""):
            # Keep only the part before any trailing comment.
            kept.append(raw.partition(comment_delim)[0].strip())
    return kept
def abinit_read_bands(filename):
    """ bands, properties = abinit_read_bands(filename)

    Reads in a _EIG output from ABINIT and returns the eigenvalues as a
    list of lists (one list per k-point; eigenvalues may span several
    wrapped lines per k-point). Also reads in the weightings (wtk) and the
    reduced coords of the kpoints, returned in the properties dict under
    the keys "wtks" and "kpts".
    """
    f = open(filename, 'r')
    lines = f.readlines()
    f.close()
    # NOTE(review): nkpts is parsed but never used or cross-checked against
    # the number of " kpt#" blocks actually found - confirm intended.
    nkpts = int(lines[0].split()[4])
    bands = []
    kpts = []
    wtks = []
    curbands = []
    for line in lines[1:]:
        if line.startswith(" kpt#"):
            # Start of a new k-point block: flush the eigenvalues
            # accumulated for the previous k-point, if any.
            if curbands != []:
                bands.append(curbands)
                curbands = []
            # Field 5 holds the k-point weight (with a trailing comma);
            # fields 7-9 hold its reduced coordinates.
            wtks.append(float(line.strip().split()[5].strip(',')))
            kpts.append([float(x) for x in line.strip().split()[7:10]])
        else:
            # Continuation line: eigenvalues wrap over multiple lines.
            curbands = curbands + [float(x) for x in line.split()]
    # Flush the final k-point block.
    bands.append(curbands)
    props = {}
    props["kpts"] = kpts
    props["wtks"] = wtks
    return bands, props
def abinit_read_gw_bands(filename):
    """ bands, properties = abinit_read_gw_bands(filename)

    Same as abinit_read_bands except reads the eigenvalues from an abinit
    _GW file. Also returns the kpoints ("kpts"), band indices ("indices")
    and the eigenvalue corrections ("corrs") in the properties dict. Note
    that the _GW file contains the levels in eV. For consistency in
    accordance with our internal code policy we convert this to hartree.
    """
    f = open(filename, 'r')
    lines = f.readlines()
    f.close()
    # NOTE(review): nkpts is read but the loop below is driven purely by
    # the remaining line count, not by this value.
    nkpts = int(lines[0].split()[0])
    kpts = []
    bandindices = []
    bands = []
    corrs = []
    lines = lines[1:]
    # Each k-point block consists of: a k-point coordinate line, a line
    # whose first field is the number of values, then that many
    # "index value correction" rows.
    while len(lines) > 0:
        curline = lines[0]
        kpts.append([float(x) for x in curline.split()])
        nvals = int(lines[1].split()[0])
        lines = lines[2:]
        curband = []
        curbandindices = []
        curcorrs = []
        for i in range(nvals):
            bits = lines[i].split()
            curbandindices += [int(bits[0])]
            curband += [float(bits[1])]
            curcorrs += [float(bits[2])]
        bands.append(curband)
        corrs.append(curcorrs)
        bandindices.append(curbandindices)
        # NOTE(review): relies on the loop variable i surviving the for
        # loop (== nvals-1); a block with nvals == 0 would raise NameError
        # or reuse a stale i here - confirm nvals is always > 0.
        lines = lines[i+1:]
    props = {}
    props["indices"] = bandindices
    props["corrs"] = corrs
    props["kpts"] = kpts
    # _GW levels are in eV; convert to hartree per library policy.
    bands = eV2hartree(bands)
    return bands, props
def castep_read_bands(filename):
    """ bands, properties = castep_read_bands(filename)

    Reads in the SEED.bands from CASTEP and returns the eigenvalues as an
    array indexed by (ascending) k-point. Also reads key properties into a
    dict: "nbands", "nkpts", "nspins", "nelectrons", "efermi" and "kpts"
    (a dict of reduced coordinates keyed by 1-based k-point number).

    For spin-polarized files nelectrons/nbands/efermi are per-spin lists.

    NOTE(review): for nspins == 2 the eigenvalues of the second spin
    channel overwrite those of the first in the returned bands array -
    confirm before relying on this for spin-polarized output.
    """
    f = open(filename, 'r')
    lines = f.readlines()
    f.close()  # was leaked in the original version
    nkpts = int(lines[0].split()[3])
    nspins = int(lines[1].split()[4])
    if nspins == 1:
        nelectrons = [float(lines[2].split()[3])]
        nbands = [int(lines[3].split()[3])]
        efermi = [float(lines[4].split()[5])]
    else:
        nelectrons = [float(x) for x in lines[2].split()[3:]]
        nbands = [int(x) for x in lines[3].split()[3:]]
        efermi = [float(x) for x in lines[4].split()[5:]]
    # The per-k-point eigenvalue blocks start after the 9-line header.
    data = lines[9:]
    bandsdict = {}
    bands = []
    kptdict = {}
    for k in range(nkpts):
        # Read the kpoint line because they aren't necessarily in order.
        kptnum = int(data[0].split()[1])
        kpt = array([float(x) for x in data[0].split()[2:5]])
        kptdict[kptnum] = kpt
        data = data[1:]
        for s in range(nspins):
            # Skip one header line per spin block (presumably the spin
            # component label - confirm against CASTEP output).
            data = data[1:]
            tmp = data[0:nbands[s]]
            bandsdict[kptnum] = array([float(x.strip()) for x in tmp])
            data = data[nbands[s]:]
    # Re-emit the eigenvalues in ascending k-point order.
    for k in range(nkpts):
        bands.append(bandsdict[k+1])
    bands = array(bands)
    props = {}
    props["nbands"] = nbands
    props["nkpts"] = nkpts
    props["nspins"] = nspins
    props["efermi"] = efermi
    props["nelectrons"] = nelectrons
    props["kpts"] = kptdict
    return bands, props
def elk_parse_bands(filename):
    """ path, bands = elk_parse_bands(filename="BAND.OUT")

    Reads in the BAND.OUT file from Elk (can optionally pass
    a different filename) and returns as an array. The first
    array column is the path parameter and the subsequent columns
    are the eigenvalues (one column per band).
    """
    f = open(filename, 'r')
    lines = f.readlines()
    f.close()
    sets = []
    curset = []
    curpath = []
    have_path = False
    # BAND.OUT lists one band per block as "path eigenvalue" pairs; any
    # line without exactly two fields (e.g. a blank line) ends a block.
    for line in lines:
        bits = line.split()
        if len(bits) != 2:
            # End of block. The path column is identical for every band,
            # so it is stored only once, before the very first band.
            if have_path:
                sets.append(curset)
            else:
                sets.append(curpath)
                sets.append(curset)
                have_path=True
            curset = []
            curpath = []
        else:
            curpath.append(float(bits[0]))
            curset.append(float(bits[1]))
    # NOTE(review): a trailing block is only captured if it is terminated
    # by a separator line (Elk ends BAND.OUT with a blank line); an
    # unterminated final block would be silently dropped.
    return array(sets).T
def elk_write_bands(outfile="elk-bands.xy", infile="BAND.OUT"):
    """ result = elk_write_bands(outfile="elk-bands.xy", infile="BAND.OUT")

    Reads the Elk BAND.OUT file (another filename can optionally be given
    via infile) and writes it out as a tab-delimited multi-column xy file
    suitable for plotting. Returns True on completion.
    """
    data = elk_parse_bands(infile)
    out = open(outfile, 'w')
    for row in data:
        out.write("\t".join([str(value) for value in row]) + "\n")
    out.close()
    return True
def abinit_read_dos(filename, properties=True):
    """ dos[, properties] = abinit_read_dos(filename, properties=True)

    Reads the _DOS files generated by Abinit. Properties is optional (default
    is True) and if True returns a dictionary of properties read from the _DOS
    file.

    The dos return is an array of all the columns in the data section of the
    file. What the columns mean depends on the type of DOS file - the value
    of header["columns"] gives the title row for the file.

    Current header contents: nsppol, nkpt, nband, fermi, iat, ratsph, columns.
    """
    f = open(filename, 'r')
    lines = f.readlines()
    f.close()
    header = {}
    datastart = 0
    for i,l in enumerate(lines):
        bits = l.split()
        if l.strip().startswith("#"):
            # Line is part of header, search for properties.
            if "nsppol" in l:
                header["nsppol"] = int(bits[3].rstrip(","))
                header["nkpt"] = int(bits[6].rstrip(","))
                if header["nsppol"] == 1:
                    header["nband"] = int(bits[8])
                else:
                    # Spin-polarized: one nband entry per spin channel.
                    header["nband"] = (int(bits[8].rstrip(",")), int(bits[10]))
            if "Fermi energy" in l:
                header["fermi"] = float(bits[4])
            if "iat=" in l:
                header["iat"] = int(bits[5])
            if "ratsph" in l:
                header["ratsph"] = float(bits[6])
            if "energy(Ha)" in l:
                # This is the column-title row itself; keep it verbatim.
                header["columns"] = l.strip()
        else:
            # First non-comment line marks the start of the data table.
            datastart = i
            break
    # Size the output from the first data row and the remaining line count.
    cols = len(lines[datastart].split())
    rows = len(lines[datastart:])
    dos = zeros((rows,cols))
    for i,l in enumerate(lines[datastart:]):
        for j,bit in enumerate(l.split()):
            dos[i,j] = float(bit)
    if properties:
        return dos, header
    else:
        return dos
def abinit_collate_dos(files):
    """ dos_sum, column_title, fermi = abinit_collate_dos(files)

    Sums the DOS components from several Abinit _DOS_ATXXXX files.
    Column 0 is assumed to be the (shared) energy axis and is copied from
    the first file; all remaining columns are summed element-wise. The
    column_title and fermi outputs are simply taken from whichever file is
    read last, for use by abinit_write_collated_dos.
    """
    all_dos = []
    column_title = ""
    fermi = 0.0
    for fname in files:
        dos, header = abinit_read_dos(fname)
        column_title = header["columns"]  # Just take the last one
        fermi = header["fermi"]           # and here
        all_dos.append(dos)
    total = zeros(all_dos[0].shape)
    for dos in all_dos:
        # Mismatched shapes will (deliberately) fail here.
        total[:, 1:] += dos[:, 1:]
    # Energy scale comes from the first data set only.
    total[:, 0] = all_dos[0][:, 0]
    return total, column_title, fermi
def abinit_write_collated_dos(files, outfile="collated_dos.xy"):
    """ result = abinit_write_collated_dos(files, outfile="collated_dos.xy")

    Collates the given Abinit _DOS files (see abinit_collate_dos) and
    writes the sum to outfile as tab-delimited multi-column xy data, with
    the fermi level and the column titles as header lines.
    """
    out = open(outfile, 'w')
    dos, title, fermi = abinit_collate_dos(files)
    out.write("# Fermi energy: " + str(fermi) + " Ha\n")
    out.write(title + "\n")
    for row in dos:
        out.write("\t".join([str(value) for value in row]) + "\n")
    out.close()
    return True
def abinit_parse(istr):
    """ result = abinit_parse(istr)

    Tries to parse a string according to abinit input file rules. There
    are only three symbols (*, / and sqrt). Returns a (kind, payload)
    tuple; possible kinds are "single" (one value), "multiple" (a list of
    values), "fill" (one value meant to fill a whole array), and
    "non-value"/"nonsense" for unparseable tokens (payload is the
    offending lower-cased text).
    """
    # The {'sqrt' : sqrt} globals restrict the eval namespace so a named
    # variable can't be silently evaluated (a logic error waiting to
    # happen). SECURITY NOTE: eval is still used on file input - only run
    # on trusted abinit files.
    text = istr.lower()
    if "*" in text:
        factor, _, rest = text.partition("*")
        factor = factor.strip()
        try:
            operand = float(eval(rest, {'sqrt' : sqrt}))
        except NameError:
            return "nonsense", rest
        # Fixed: was 'factor is ""' - an identity comparison against a
        # string literal, which only worked by CPython interning accident.
        if factor == "":
            # "*x" means fill the whole array with x.
            return "fill", operand
        else:
            # "n*x" means n copies of x.
            return "multiple", int(factor) * [operand]
    else:
        try:
            return "single", float(eval(text, {'sqrt' : sqrt}))
        except NameError:
            return "non-value", text
def abinit_value(data, keyword, nvalues):
    """ values = abinit_value(data, keyword, nvalues)

    Using the raw abinit data stream, parses the value of the
    given keyword. Because abinit allows rudimentary mathematical
    expressions in the input file, we have to actually parse data
    in most cases rather than just read and convert it. Darn.

    Because we have to parse, we *must* know how many values
    we are looking for. Note that we are also merciless about
    whitespace here - if abinit would misread the input file,
    so will we.

    Note that we always read numerical values as floats - use the
    abinit_int/abinit_array wrappers for other types/shapes.

    Returns None if the keyword is missing; returns None implicitly
    (falling off the end) if fewer than nvalues values are found.
    """
    print "Reading in variable %s, looking for %d values." % (keyword, nvalues)
    values = []
    # The action starts at the index of the keyword.
    try:
        start = data.index(keyword)
    except ValueError:
        print "abinit_value WARNING: keyword %s is not in the specified data stream." % keyword
        return None
    # Since we don't know how many bits will unpack into
    # the required nvalues items, we need to possible loop
    # over all of them...
    for i, d in enumerate(data[start+1:]):
        try:
            val = float(d)
            values.append(val)
        except ValueError:
            # Need to parse this one.
            result = abinit_parse(d)
            if result[0] == "single":
                values.append(result[1])
            elif result[0] == "multiple":
                values = values + result[1]
            elif result[0] == "fill":
                # "fill" pads the remainder of the array, so we're done.
                remaining = nvalues - len(values)
                values = values + remaining * [result[1]]
                return values
        # "is" would compare object identity here, not numeric equality,
        # so == is required.
        if len(values) == nvalues:
            return values
    # If we get to here, we must not have enough values. Better raise an error.
def abinit_unit(data, keyword):
    """ unit = abinit_unit(data, keyword)

    Returns the next non-value item after the appearance
    of keyword. At the moment recognizes:

    "ry", "angstr", "angstrom", "k", "ev", "ha", "bohr", "hartree"

    and is case insensitive.

    If a unit is recognized it is returned in the form used
    in this library (ang, eV, etc), otherwise None is returned.
    Also returns None if the keyword itself is absent.
    """
    try:
        start = data.index(keyword)
    except ValueError:
        print "abinit_unit WARNING: keyword %s is not in the specified data stream." % keyword
        return None
    # Scan forward from the keyword: the first token that abinit_parse
    # cannot turn into a value is taken to be the unit string.
    for i, d in enumerate(data[start+1:]):
        print "Checking to see if %s is a unit..." % d
        result = abinit_parse(d)
        if result[0] == "non-value":
            print "%s might be a unit!" % d
            # Map abinit's spellings onto this library's unit names.
            if result[1] in ["angstrom", "angstr"]:
                return "ang"
            elif result[1] == "ev":
                return "eV"
            elif result[1] == "bohr":
                return "bohr"
            elif result[1] in ["ha", "hartree"]:
                return "hartree"
            elif result[1] == "k":
                return "K"
            elif result[1] == "ry":
                return "Ry"
            else:
                # First non-value wasn't a recognized unit: give up.
                return None
    return None
def abinit_int(data, keyword, nvalues):
    """ values = abinit_int(data, keyword, nvalues)

    Convenience wrapper around abinit_value converting the floats it
    returns into ints. A single-element result is unwrapped to a bare
    integer rather than a one-element list. Returns None if the keyword
    could not be read.
    """
    result = abinit_value(data, keyword, nvalues)
    if result is None:
        return None
    if len(result) == 1:
        return int(result[0])
    return [int(v) for v in result]
def abinit_array(data, keyword, nvalues, newshape=None):
    """ values = abinit_array(data, keyword, nvalues, newshape=None)

    Convenience wrapper around abinit_value returning a numpy array
    shaped according to newshape. With newshape left as None the array
    is shaped n x 3 (rows x columns) where n = nvalues / 3.
    """
    raw = abinit_value(data, keyword, nvalues)
    # SNEAKY! array(None) is NOT None, so the None test has to happen
    # before any numpy conversion. Bloody Numpy!
    if raw is None:
        return None
    vals = array(raw)
    if newshape is None:
        return reshape(vals, (-1, 3))
    return reshape(vals, newshape)
def elk_value(data, keyword, nvalues):
    """ values = elk_values(data, keyword, nvalues)

    Returns nvalues values from an elk input datastream corresponding to
    the given keyword. Values are returned as floats when every one of
    them converts; otherwise they are returned as the raw strings - use
    the elk_array/elk_int wrappers for typed access. Returns None (with a
    warning) if the keyword is absent.
    """
    if keyword in data:
        # Values immediately follow the keyword token.
        start = data.index(keyword)+1
        try:
            return [float(x) for x in data[start:start+nvalues]]
        except ValueError:
            # value is not convertible to a float: must be a string instead.
            return data[start:start+nvalues]
    else:
        print "elk_value WARNING: keyword %s does not exist in this file." % keyword
        return None
def elk_array(data, keyword, nvalues, newshape=None):
    """ a = elk_array(data, keyword, nvalues, newshape=None)

    Convenience wrapper around elk_value returning floats. A one-element
    result is unwrapped to a single float; otherwise a numpy array is
    returned, reshaped to newshape when given. Returns None when the
    keyword could not be read.
    """
    raw = elk_value(data, keyword, nvalues)
    if raw is None:
        return None
    if len(raw) == 1:
        return float(raw[0])
    arr = array(raw)
    if newshape is None:
        return arr
    return arr.reshape(newshape)
def elk_int(data, keyword, nvalues):
    """ i = elk_int(data, keyword, nvalues)

    Convenience wrapper around elk_value returning integers. A one-element
    result is unwrapped to a single int; otherwise a *list* of ints (not
    an array) is returned. Returns None when the keyword could not be read.
    """
    raw = elk_value(data, keyword, nvalues)
    if raw is None:
        return None
    if len(raw) == 1:
        return int(raw[0])
    return [int(v) for v in raw]
def chop128(in_string):
    """ out_string = chop128(in_string)

    The abinit input file format ignores everything on a line past 132
    characters, so recursively break the final line of in_string into
    chunks of at most 128 characters (joined by newlines).
    """
    pieces = in_string.split('\n')
    head, tail = pieces[:-1], pieces[-1]
    if len(tail) <= 128:
        return in_string
    # Keep the first 128 characters here and recurse on the remainder.
    return "\n".join(head + [tail[:128]] + [chop128(tail[128:])])
def write_cube(filename, positions, species, lattice, datagrid, timestep=0):
    """ succeeded = write_cube(filename, positions, species, lattice, datagrid, timestep=0)

    Writes a CUBE file containing the passed information. The datagrid must
    be a 3D array. We can't animate, so we must specify the timestep. By
    default this is the first timestep.

    No unit conversion is performed - positions and lattice are written
    as passed.
    """
    pos = positions[timestep]
    spec = species[timestep]
    lat = lattice[timestep]
    f = open(filename, 'w')
    # First two lines of a CUBE file are comments.
    f.write("CUBE\n")
    f.write("Output created by esc_lib.\n")
    # Number of atoms then the origin of the density coordinate system.
    f.write("%d 0.000000 0.000000 0.000000\n" % len(spec))
    # Each of the next three lines specifies the number of grid points in an vector
    # direction, then gives the step vector itself (lattice vector / grid count).
    nx = datagrid.shape[0]
    ny = datagrid.shape[1]
    nz = datagrid.shape[2]
    f.write("%d %g %g %g\n" % (nx, lat[0][0] / nx, lat[0][1] / nx, lat[0][2] / nx))
    f.write("%d %g %g %g\n" % (ny, lat[1][0] / ny, lat[1][1] / ny, lat[1][2] / ny))
    f.write("%d %g %g %g\n" % (nz, lat[2][0] / nz, lat[2][1] / nz, lat[2][2] / nz))
    # Now list atomic number, charge, position (absolute) for each atom. We don't
    # actually deal with charge here, so set to 0.
    for p, s in zip(pos, spec):
        f.write("%d 0.000000 %g %g %g\n" % (s, p[0], p[1], p[2]))
    # Volumetric data: x is the slowest-varying index, z the fastest.
    for i in range(nx):
        for j in range(ny):
            for k in range(nz):
                f.write("%g " % datagrid[i,j,k])
                # Throw in a newline every now and then to keep the output readable.
                if k % 6 == 5:
                    f.write("\n")
            f.write("\n")
    f.close()
    return True
def write_cube_density(filename, positions, species, lattice, densities, timestep=0):
    """ succeeded = write_cube_density(filename, positions, species, lattice, density, timestep=0)

    Convenience wrapper around write_cube: picks the density grid for the
    requested timestep (default 0, since CUBE cannot animate) and writes
    it as the volumetric data. A lattice is required because the CUBE
    format cannot describe non-crystals.
    """
    grid = densities[timestep]
    return write_cube(filename, positions, species, lattice, grid, timestep)
def write_xsf(filename, positions, species, lattice=None, letter_spec=True):
    """ succeeded = write_xsf(filename, positions, species, lattice=None, letter_spec=True)

    Writes a XSF file containing the passed information. Can be animated,
    in crystal or molecular format, fixed cell or variable cell.

    If letter_spec is True (default), writes the species identifier as a
    letter (looked up in the module-level 'elements' table) rather than a
    nuclear Z value.

    NOTE: NEED TO ADD OPTIONAL FORCES!
    """
    if DEBUG: print len(positions)
    # Convert everything back to angstroms for XSF
    apos = bohr2ang(positions)
    if lattice is not None:
        alat = bohr2ang(lattice)
    else:
        alat = None
    f = open(filename, 'w')
    # More than one timestep means an animated XSF.
    if len(apos) > 1:
        f.write("ANIMSTEPS %d\n" % len(apos))
    if alat is not None:
        f.write("CRYSTAL\n")
    # A single lattice entry means a fixed cell: PRIMVEC written once.
    if alat is not None and len(alat) == 1:
        f.write("PRIMVEC\n")
        f.write("    %g %g %g\n" % (alat[0][0][0], alat[0][0][1], alat[0][0][2]))
        f.write("    %g %g %g\n" % (alat[0][1][0], alat[0][1][1], alat[0][1][2]))
        f.write("    %g %g %g\n" % (alat[0][2][0], alat[0][2][1], alat[0][2][2]))
    for i in range(len(apos)):
        # Molecular (no lattice) format uses ATOMS blocks.
        if alat is None:
            f.write("ATOMS %d\n" % (i+1))
        # Variable cell: a PRIMVEC block per timestep.
        if alat is not None and len(alat) > 1:
            f.write("PRIMVEC %d\n" % (i+1))
            f.write("    %g %g %g\n" % (alat[i][0][0], alat[i][0][1], alat[i][0][2]))
            f.write("    %g %g %g\n" % (alat[i][1][0], alat[i][1][1], alat[i][1][2]))
            f.write("    %g %g %g\n" % (alat[i][2][0], alat[i][2][1], alat[i][2][2]))
            f.write("PRIMCOORD %d\n" % (i+1))
            f.write("%d 1\n" % len(apos[i]))
        else:
            f.write("PRIMCOORD %d\n" % (i+1))
            f.write("%d 1\n" % len(apos[i]))
        for j in range(len(apos[i])):
            if letter_spec:
                f.write("%s %g %g %g\n" % (elements[species[i][j]], apos[i][j][0], apos[i][j][1], apos[i][j][2]))
            else:
                f.write("%d %g %g %g\n" % (species[i][j], apos[i][j][0], apos[i][j][1], apos[i][j][2]))
    f.close()
    return True
def write_aims(filename, positions, species, lattice, xtype="ang", opt=None, timestep=0):
    """ succeeded = write_aims(filename, positions, species, lattice, opt=None, timestep=0)

    Writes a FHI-aims geometry.in file using the given positions, species and lattice.

    FHI-aims allows either periodic or non-periodic boundary conditions. For
    periodic, specify xtype="frac" and provide lattice vectors. For
    non-periodic, specify xtype="ang". No lattice vectors will be written in
    that case. Returns False (after printing an error) for any other xtype.

    Internal positions/lattice are assumed to be in bohr and are converted
    to angstrom (or reduced coordinates) on output.
    """
    pos = positions[timestep]
    spec = species[timestep]
    if xtype == "frac":
        avec = lattice[timestep]
        # Reduced coordinates with respect to the (angstrom) lattice.
        pos = cart2reduced(pos, avec)
        avec = bohr2ang(avec)
    elif xtype == "ang":
        pos = bohr2ang(pos)
    else:
        print "write_aims ERROR: Must specify xtype=ang or frac."
        return False
    f = open(filename, 'w')
    f.write("# geometry.in written by esc_lib.py\n\n")
    if xtype == "ang":
        for s, p in zip(spec, pos):
            f.write("atom %4.8g %4.8g %4.8g %s\n" % (p[0], p[1], p[2], elements[s]))
    elif xtype == "frac":
        for l in avec:
            f.write("lattice_vector %4.8g %4.8g %4.8g\n" % (l[0], l[1], l[2]))
        f.write("\n")
        for s, p in zip(spec, pos):
            f.write("atom_frac %4.8g %4.8g %4.8g %s\n" % (p[0], p[1], p[2], elements[s]))
    f.close()
    return True
def write_castep(filename, positions, species, lattice, xtype="ang", opt=None, timestep=0):
    """ succeeded = write_castep(filename, positions, species=None, xtype="bohr", opt=None, timestep=0)

    Writes a CASTEP .cell file using the passed positions. Unlike the abinit
    case, species must NOT be None here. Options for xtype are "ang", "bohr"
    or "frac".

    We assume, as always, that EVERYTHING internal is in atomic units, so a
    conversion is performed if "frac" is specified using the passed lattice
    vectors. Also, if fractional output is specified, the lattice vectors
    are output in angstroms, the CASTEP default length unit.

    opt is a dictionary that gives output options. Options are:

    'special atom' : n - index of atom that is special. Will have ":special"
                         appended to the Z number in the positions_abs/frac
                         block so that you can specify a separate species_pot
                         pseudopotential generation string or PSP file.
    """
    pos = positions[timestep]
    avec = lattice[timestep]
    spec = species[timestep]
    # Do conversions if necessary.
    if xtype == "ang":
        pos = bohr2ang(pos)
        avec = bohr2ang(avec)
    elif xtype == "frac":
        pos = cart2reduced(pos,avec)
        avec = bohr2ang(avec)
    f = open(filename, 'w')
    f.write("%block lattice_cart\n")
    # CASTEP defaults to angstrom; a unit line is only needed for bohr.
    if xtype == "bohr":
        f.write("    bohr\n")
    for v in avec:
        f.write("    %010e %010e %010e\n" % (v[0], v[1], v[2]))
    f.write("%endblock lattice_cart\n")
    f.write("\n")
    if xtype == "frac":
        f.write("%block positions_frac\n")
    else:
        f.write("%block positions_abs\n")
    if xtype == "bohr":
        f.write("    bohr\n")
    for i, (s, p) in enumerate(zip(spec, pos)):
        # The special atom (1-based index) gets a ":special" species tag.
        if opt is not None and 'special atom' in opt:
            if opt["special atom"] == i+1:
                f.write("    %s %010e %010e %010e\n" % (elements[s]+":special", p[0], p[1], p[2]))
            else:
                f.write("    %s %010e %010e %010e\n" % (elements[s], p[0], p[1], p[2]))
        else:
            f.write("    %s %010e %010e %010e\n" % (elements[s], p[0], p[1], p[2]))
    if xtype == "frac":
        f.write("%endblock positions_frac\n")
    else:
        f.write("%endblock positions_abs\n")
    f.close()
    return True
def write_abinit(filename, positions, species=None, xtype="bohr", opt=None, timestep=0):
    """ succeeded = write_abinit(filename, positions, species=None, xtype="ang", timestep=0)

    Writes the passed positions in a format suitable to be copy and pasted
    into an abinit input file. If species are passed, the natom, ntypat,
    typat and znucl parts are also output. Options for xtype are "ang", for
    xangst output (Default) or "bohr" for xcart output.

    One needs to specify a timestep since the positions variable can be animated
    as can the species variable - the default is zero.

    opt is a dictionary of special options. Can have:

    'special atom' : n (index of a special atom, given a unique label and
                        moved to the first position in the atoms list. For use
                        with conducti/core hole style calculations where the
                        first atom is special according to abinit.

    NOTE(review): the special-atom swap below mutates the caller's
    positions/species lists in place - confirm callers do not rely on the
    original ordering afterwards.

    Note: NEED TO ADD xred OUTPUT!
    """
    pos = positions[timestep]
    # If we have a special atom, have to swap it to position 0.
    if opt is not None and 'special atom' in opt:
        n = opt['special atom']
        pos0 = pos[0]
        posn = pos[n-1]
        pos[0] = posn
        pos[n-1] = pos0
    f = open(filename, 'w')
    if species is not None:
        spec = species[timestep]
        # If we have special atom, have to swap it's species to the first pos
        # and give it a special element type.
        if opt is not None and 'special atom' in opt:
            n = opt['special atom']
            spec0 = spec[0]
            specn = spec[n-1]
            spec[0] = specn
            spec[n-1] = spec0
            f.write("natom %d\n" % len(spec))
            # The special atom counts as its own type, listed first.
            f.write("ntypat %d\n" % (len(uniqify(spec[1:])) + 1))
            f.write("znucl %s\n" % (str(spec[0])+" "+" ".join([str(x) for x in uniqify(spec[1:])])))
            spec_dict = {}
            typat = []
            # Types 2..N are assigned in first-appearance order over the
            # remaining atoms; type 1 is reserved for the special atom.
            for i,s in enumerate(uniqify(spec[1:])):
                spec_dict[s] = i+2
            for s in spec[1:]:
                typat.append(str(spec_dict[s]))
            typatstr = "1 " + " ".join(typat)
        else:
            f.write("natom %d\n" % len(spec))
            f.write("ntypat %d\n" % len(uniqify(spec)))
            f.write("znucl %s\n" % " ".join([str(x) for x in uniqify(spec)]))
            # Generate typat string
            spec_dict = {}
            typat = []
            for i,s in enumerate(uniqify(spec)):
                spec_dict[s] = i+1
            for s in spec:
                typat.append(str(spec_dict[s]))
            typatstr = " ".join(typat)
        # Abinit complains if a line in an input file > 132 characters in length
        # so we break every 100 characters if necessary.
        if len(typatstr) >= 132:
            typatstr = chop128(typatstr)
        f.write("typat %s\n" % typatstr)
    if xtype == "bohr":
        f.write("xcart\n")
        for p in pos:
            f.write("    %010e %010e %010e\n" % (p[0], p[1], p[2]))
    if xtype == "ang":
        f.write("xangst\n")
        for p in bohr2ang(pos):
            f.write("    %010e %010e %010e\n" % (p[0], p[1], p[2]))
    f.close()
    return True
def write_elk(filename, positions, species, is_crystal=False, lattice=None, timestep=0):
    """ succeeded = write_elk(filename, positions, species, is_crystal=False, lattice=None, timestep=0)

    Writes the passed positions and species in a format suitable to be copy
    and pasted into an elk input file. Since elk requires atomic units, we
    do not have a unit conversion option here. If is_crystal is True, a
    lattice must be supplied and the coordinates are written as reduced
    with respect to it; otherwise a "molecule" block is emitted.

    As usual we can accommodate animated data with a timestep parameter,
    the default is zero (first timestep).

    Raises ESCError when is_crystal is True but no lattice is given.

    Note: MOLECULAR INPUT TO ELK IS NOT WORKING IN 1.4.5
    """
    pos = positions[timestep]
    spec = species[timestep]
    f = open(filename, 'w')
    if not is_crystal:
        f.write("molecule\n")
        f.write(" .true.\n\n")
    else:
        if lattice is None:
            # Fixed: the original message had the condition inverted
            # ("If is_crystal is False ...") even though this branch only
            # runs when is_crystal is True.
            raise ESCError("write_elk", "ERROR: If is_crystal is True, you must specify a lattice")
        else:
            lat = lattice[timestep]
            pos = cart2reduced(pos, lat)
            f.write("avec\n")
            f.write(" %g %g %g\n" % (lat[0][0], lat[0][1], lat[0][2]))
            f.write(" %g %g %g\n" % (lat[1][0], lat[1][1], lat[1][2]))
            f.write(" %g %g %g\n" % (lat[2][0], lat[2][1], lat[2][2]))
            f.write("\n")
    f.write("atoms\n")
    f.write(" %d\n" % len(uniqify(spec)))  # Number of distinct species
                                           # (the original comment wrongly
                                           # said "total number of atoms")
    for s in uniqify(spec):
        f.write(" 'REPLACE.in'\n")         # Replace in the output file
                                           # with the species filename
        f.write(" %d\n" % spec.count(s))   # Number of atoms of species s
        for i,p in enumerate(pos):
            if spec[i] == s:
                # Position plus a zero initial magnetic/velocity field.
                f.write(" %g %g %g 0.000000 0.000000 0.000000\n" % (p[0], p[1], p[2]))
    f.write("\n")
    f.close()
    return True
def recip_lattice(avec):
    """ bvec = recip_lattice(avec)

    Computes the reciprocal lattice vectors for a given crystal lattice.
    avec follows the Atoms-class convention [a1, a2, a3] where ai is the
    ith lattice vector (an array); bvec is returned in the same format.
    """
    # Columns of 'direct' are the crystal lattice vectors.
    direct = mat(avec).T
    # Rows of 'recip' are then the reciprocal lattice vectors.
    recip = 2 * pi * inv(direct)
    return [array(row) for row in recip.tolist()]
def g_vectors(bvec, ngrid):
    """ gv, [gcx,gcy,gcz], gb = g_vectors(bvec, ngrid)

    Returns a list of g-vectors (Nx3 array, fortran order: the first
    reciprocal direction varies fastest). gcx/y/z is a list of three
    arrays holding the actual recip vector indices for each grid axis
    (wrapping to negative values past the halfway point, FFT-style).
    gb is the largest G-vector norm encountered.
    """
    # Build the FFT-ordered index arrays: 0..n/2, then negative indices.
    coords = []
    for n in ngrid:
        gc = zeros((n))
        half = int(n / 2)
        for idx in range(half + 1):
            gc[idx] = idx
        for idx in range(half + 1, n):
            gc[idx] = idx - n
        coords.append(gc)
    gcx, gcy, gcz = coords
    biggest = 0.0
    gvs = []
    for k in range(ngrid[2]):
        zpart = gcz[k] * bvec[2]
        for j in range(ngrid[1]):
            ypart = gcy[j] * bvec[1]
            for i in range(ngrid[0]):
                gvec = zpart + ypart + gcx[i] * bvec[0]
                gnorm = norm(gvec)
                if gnorm > biggest:
                    biggest = gnorm
                gvs.append(gvec)
    return array(gvs), [gcx, gcy, gcz], biggest
def integrate_grids(a, b):
    """ result = integrate_grids(a, b)

    Performs a triple integration of a, b (complex) over their own grids:
    returns sum(a.conj() * b) / N where N is the total number of grid
    points, i.e. a discrete inner product <a|b> normalized by the number
    of samples. Returns None (with a printed error) on shape mismatch.

    Note: this is a LOT slower than the Fortran version in libdft (the
    explicit triple loop is equivalent to (a.conj() * b).sum() / a.size).
    """
    # Check the shapes are the same
    if (a.shape != b.shape):
        print "(integrate_grids) ERROR: a and b don't have the same shape!"
        return None
    # Construct integrand a.conj() * b
    igrnd = a.conj() * b
    isum = 0.0+0j
    nx = a.shape[0]
    ny = a.shape[1]
    nz = a.shape[2]
    for k in range(nz):
        for m in range(ny):
            for i in range(nx):
                isum += igrnd[i,m,k]
    # Normalize by the number of grid points (unit cell volume element).
    return isum/(nx*ny*nz)
def cart2reduced(position, lattice):
    """ reduced = cart2reduced(position, lattice)

    Converts a cartesian coordinate to a reduced coordinate with respect
    to the lattice (rows of 'lattice' are the lattice vectors). Recurses
    into lists so nested structures convert element-wise.
    """
    if type(position) is list:
        return [cart2reduced(p, lattice) for p in position]
    # Row vector times the inverse lattice matrix gives the reduced coords.
    return array(mat(position) * mat(lattice).I).reshape((3,))
def reduced2cart(position, lattice):
    """ cart = reduced2cart(position, lattice)

    Converts a reduced coordinate to a cartesian coordinate with respect
    to the supplied lattice (rows of 'lattice' are the lattice vectors).
    Recurses into lists so nested structures convert element-wise.
    """
    if type(position) is list:
        return [reduced2cart(p, lattice) for p in position]
    # Row vector times the lattice matrix gives the cartesian coords.
    return array(mat(position) * mat(lattice)).reshape((3,))
def bohr2ang(bohr):
    """ ang = bohr2ang(bohr)

    Convert a length from bohr to angstrom (1 bohr = 0.52917721092 ang).
    Recurses into lists so arbitrarily nested structures convert
    element-wise. Woohoo recursive function!
    """
    if type(bohr) is list:
        return [bohr2ang(item) for item in bohr]
    return bohr * 0.52917721092
def ang2bohr(ang):
    """ bohr = ang2bohr(ang)

    Convert a length from angstrom to bohr
    (1 ang = 1.0 / 0.52917721092 bohr). Recurses into lists so nested
    structures convert element-wise.
    """
    if type(ang) is list:
        return [ang2bohr(item) for item in ang]
    return ang / 0.52917721092
def eV2hartree(eV):
    """ hartree = eV2hartree(eV)

    Convert an energy from eV to hartree (1 Ht = 27.21138505 eV).
    Recurses into lists so nested structures convert element-wise.
    """
    if type(eV) is list:
        return [eV2hartree(item) for item in eV]
    return eV / 27.21138505
def hartree2eV(hartree):
    """ eV = hartree2eV(hartree)

    Convert an energy from hartree to eV (1 eV = 1.0 / 27.21138505 Ht).
    Recurses into lists so nested structures convert element-wise.
    """
    if type(hartree) is list:
        return [hartree2eV(item) for item in hartree]
    return hartree * 27.21138505
def uniqify(sequence, trans=None):
    """ unique = uniqify(sequence, trans)

    Produces an order-preserved list of unique elements in the passed
    sequence. Supports a transform function so that passed elements
    can be transformed before comparison if necessary (the *original*,
    untransformed element is what gets kept).
    """
    if trans is None:
        def trans(x): return x
    seen = set()
    unique = []
    for item in sequence:
        key = trans(item)
        if key not in seen:
            seen.add(key)
            unique.append(item)
    return unique
class ESCError(Exception):
    """ Exception raised for errors within the esc_lib code.

    Carries two attributes: expr (a short string identifying where the
    error occurred) and msg (a human-readable description).
    """
    def __init__(self, expr, msg):
        self.expr = expr
        self.msg = msg
        # Echo immediately so the message is visible even if the
        # exception is caught and discarded by the caller.
        print expr
        print msg
class Spectra:
""" Spectra class: designed to deal with common manipulations on
spectra output by electronic structure codes like Abinit.
Print Spectra.__init__.__doc__ to see the various ways you can
construct a Spectra object.
"""
    def __init__(self, seed, atoms,source):
        """ spectra = Spectra(seed, atoms, source)

        Create a Spectra object. For conducti output, use source="conducti". For
        nexspec output, use source="nexspec". Should also add conventional CASTEP
        ELNES task output here (will be source="castep").

        For nexspec output, set atoms to a number indicating the species you want to
        read in. For conducti output, atoms should be a range (indicating the 1-based atom
        indices from the original abinit input file.

        If the nexspec output is a series of separate runs with files of the form:

        SEED_X_S_1_1_1.nexafs

        where X is an atom identifier (usually means the location of the core hole)
        and S is the species (the 1s could foreseeably vary but typically won't), you
        can specify source="nexspec-corehole" to parse the atomic index correctly.

        NOTE(review): an unrecognized source silently constructs an object
        with no data loaded - confirm whether that should raise instead.
        """
        if source == "conducti":
            opt = {'seed name' : seed, 'atoms' : atoms}
            self.loadConductiPawCore(opt)
        elif source == "nexspec":
            self.loadNexspec(seed, atoms)
        elif source == "nexspec-corehole":
            self.loadNexspec(seed, atoms, option="corehole")
def loadNexspec(self, seed, species, option=""):
""" Spectra.loadNexspec(seed, species, option="")
Internal: inits a Spectra object from nexspec output based on the CASTEP ELNES
task.
NOTE: We can't yet deal with the situation where there are multiple core level
spectra per atom (ie, 1s, 2s, 2p levels and so on). The current algorithm only
stores the highest nlm-combination or whatever comes last as listed by listdir.
The option input can presently be:
option="" (default) - file name format is SEED_S_X_B_C.nexafs, read the atom
index from X and match the species to S.
option="corehole" - file name format is SEED_X_S_A_B_C.nexafs, read the
atom index from X and match the species to S.
"""
self.data = {}
self.atoms=[]
# Cycle over the contents of the directory containing the nexspec .nexafs outputs
# and pick out/load the files starting with seed_atoms
head,tail = os.path.split(seed)
if head is "":
# We are in the current directory
files = os.listdir(os.getcwd())
else:
files = os.listdir(head)
for filename in files:
if filename.startswith(seed) and filename.endswith(".nexafs") and int(filename.split("_")[-4]) == species:
if option == "corehole":
self.atoms.append(int(filename.split("_")[1]))
self.data[int(filename.split("_")[1])] = loadtxt(filename)
elif option == "":
self.atoms.append(int(filename.split("_")[2]))
self.data[int(filename.split("_")[2])] = loadtxt(filename)
self.cmpts = zeros((len(self.atoms), self.data[self.atoms[0]].shape[0],7))
for i,a in enumerate(self.atoms):
# Now: we have to stick to our rule of always storing and dealing in Hartree
# atomic units. The nexspec output is in eV, so convert.
self.data[a][:,0] = eV2hartree(self.data[a][:,0])
self.cmpts[i,:,:] = self.data[a][:,0:7]
def loadConductiPawCore(self, opt):
""" Spectra.loadConductiPawCore(opt)
Internal: inits a Spectra object from Conducti PAW core-level outputs.
Note: We can't deal with multiple core-levels in the same file (yet). These will
be ignored in the read.
"""
self.data = {}
self.atoms = opt['atoms']
self.spectra_type = "conducti_paw_core"
for i in self.atoms:
filename = opt['seed name']+str(i)
self.data[i] = loadtxt(filename)
# Construct a cmpts array (natoms,npoints,2) from data.
self.cmpts = zeros((len(self.atoms), self.data[self.atoms[0]].shape[0],7))
for i,a in enumerate(self.atoms):
self.cmpts[i,:,:] = self.data[a][:,0:7]
def spectrumAXYZ(self, atom, evec):
""" spectrum = Spectra.spectrumAXYZ(atom, evec)
Generates the spectrum for a given electric field direction
and atom.
The evec vector is normalized before use. The spectrum is returned as a
two-column array with the second column the spectrum and the first column
the independent variable.
"""
# Construct spectrum from components
cmpts = self.data[atom]
return spectra.spectrum_axyz(cmpts, evec)
def spectrumXYZ(self, evec):
""" spectrum = Spectra.spectrumXYZ(evec)
Generates the spectrum across all atoms for a given e-field direction.
See spectrumAXYZ for more details.
"""
return spectra.spectrum_xyz(self.cmpts, evec)
def spectrumTP(self, theta, phi):
""" spectrum = Spectra.spectrumTP(theta,phi)
Generates the spectrum across all atoms for a given e-field direction
expressed as (theta, phi) in spherical coordinates with theta the polar
angle with respect to the original z axis.
"""
return spectra.spectrum_tp(self.cmpts, theta, phi)
def spectrumComponents(self):
""" components = Spectra.spectrumComponents()
Returns an array of the same shape as any of the Spectra.data[i]
but with the 1:6 components (inclusive) summed over all sets i.
"""
components = zeros(self.data[self.atoms[0]].shape)
components[:,0] = self.data[self.atoms[0]][:,0]
for a in self.atoms:
for i in [1,2,3,4,5,6]:
components[:,i] = components[:,i] + self.data[a][:,i]
# Note we ignore the 7th column from conducti, it's just an arbitary combination
# of components set from conducti.
components = components[:,0:7] # Cut off the last column
return components
def writeSpectrumComponents(self,filename):
""" succeeded = Spectra.writeSpectrumComponents(filename)
Writes out the output of Spectra.spectrumComponents() to disk
with the given filename.
"""
f = open(filename, 'w')
cmpts = self.spectrumComponents()
f.write("# Spectrum components from conducti and esc_lib.py\n")
f.write("#\n")
f.write("# E XX YY ZZ XY XZ YZ\n")
for c in cmpts:
f.write("\t".join([str(x) for x in c])+"\n")
f.close()
def spectrumATP(self, atom, theta, phi):
""" spectrum = Spectra.spectrumATP(atom, theta, phi)
Generates a spectrum for *EFIELD* angles theta and phi with respect
to the coordinate system of the original cell used to compute the
spectrum. Note that these angles are NOT those used in experiments as the
incident light is at right angles to the e-field and the polarization
remains a free angle.
Here theta is the polar angle (with respect to z axis) and phi is the
azimuthal angle in the xy plane.
We assume theta and phi are in degrees and do the conversion.
"""
cmpts = self.data[atom]
return spectra.spectrum_atp(cmpts, theta,phi)
def optimizePeak(self,peak, maximize=True):
""" fit_result = Spectra.optimizePeak(peak)
Finds the theta,phi that maximizes the given peak. Alternatively if
maximize=False, we minimize instead. We return the output of the
scipy.optimize minimize method.
"""
i = spectra.closest_index_to_energy(self.cmpts[0,:,0],peak)
if maximize:
s = -1.0
else:
s = 1.0
def optfunc(p):
return s * spectra.spectrum_tp(self.cmpts,p[0],p[1])[i,1]
return minimize(optfunc,[rand()*180,rand()*360])
def spectrumRandom(self, samples=1000):
""" spectrum = Spectra.spectrumRandom(samples=1000)
Uses random numbers to generate angles for theta and phi, and sums
over the spectra generated for each random angle. Optionally set
samples to decide how many random angle sets are used.
"""
spectrum = zeros((self.cmpts.shape[1], 2))
for i in range(samples):
spectrum[:,1] = spectrum[:,1] + self.spectrumTP(rand()*180, rand()*360)[:,1]
spectrum[:,0] = self.cmpts[0,:,0]
spectrum[:,1] = spectrum[:,1] / samples
return spectrum
def fitXYZ(self, exp_data, energy_range=None, energy_offset=0.0):
""" a, b, c, I0, data = Spectra.fitXYZ(exp_data, energy_range=None, energy_offset=0.0)
Takes an experimental spectrum (Nx2-shaped array, first column is energy,
second column is intensity) and finds the best linear combination of
orthogonal spectra I0 * (aX+bY+cZ) to fit. Note that this is physically
incorrect but seems to be what people do in the papers...
See the docstring for Spectra.bestFitToExperiment to find out more about
the parameters. The only difference is that here we do internal unit
conversions, assuming the exp_data is in eV.
"""
if energy_range is None:
estart = 0
eend = self.cmpts.shape[1]
else:
estart = spectra.closest_index_to_energy(self.cmpts[0,:,0], energy_range[0])
eend = spectra.closest_index_to_energy(self.cmpts[0,:,0], energy_range[1])
edat = exp_data.copy() # So we don't mess with the input variable
edat[:,0] = eV2hartree(edat[:,0] - energy_offset)
print "Exp_data bounds: ", amin(edat[:,0]), amax(edat[:,0])
y = interp1d(edat[:,0], edat[:,1], bounds_error=False, fill_value=0.0)
# Get our three orthogonal spectra
X = spectra.spectrum_xyz(self.cmpts, [1,0,0])
Y = spectra.spectrum_xyz(self.cmpts, [0,1,0])
Z = spectra.spectrum_xyz(self.cmpts, [0,0,1])
yexp = array([y(x) for x in self.cmpts[0,estart:eend+1,0]])
def fitfunc(p):
# Function is p[0] * (p[1] * X + p[2] * Y + p[3] * Z)
return norm(p[0] * (p[1] * X[estart:eend+1,1] + p[2] * Y[estart:eend+1,1] + p[3] * Z[estart:eend+1,1])-yexp)
r = fmin_slsqp(fitfunc, [1000.0, 0.3, 0.3, 0.3], bounds=[[0.1,1e8],[0.0,1.0],[0.0,1.0],[0.0,1.0]], full_output=True)
cdat = spectra.spectrum_xyz(self.cmpts, [r[0][1],r[0][2],r[0][3]])
data = zeros((cdat.shape[0], 7))
data[:,0] = cdat[:,0]
data[:,1] = array([y(x) for x in self.cmpts[0,:,0]])
data[:,2] = r[0][0] * (r[0][1] * X[:,1] + r[0][2] * Y[:,1] + r[0][3] * Z[:,1])
data[:,3] = r[0][0] * r[0][1] * X[:,1]
data[:,4] = r[0][0] * r[0][2] * Y[:,1]
data[:,5] = r[0][0] * r[0][3] * Z[:,1]
data[:,6] = r[0][0] * cdat[:,1]
return r[0][1], r[0][2], r[0][3], r[0][0], data
def bestFitToExperiment(self, exp_data, energy_range=None, energy_offset=0.0):
""" theta, phi, I0, data = Spectra.bestFitToExperiment(exp_data, energy_range=None, energy_offset=0.0)
Takes an experimental spectrum (Nx2-shaped array, first column is energy,
second column is intensity) and perform a least squares fit over
the specified energy range. If the energy range is omitted (or None),
the whole range of the computed data is used. Note that the experimental
data is interpolated and set to zero outside the experimental energy range.
The energy_offset parameter can be used to impose a rigid x-axis shift of
the computed values before fitting. If None, the optimal x-axis shift is
computed as a fitting parameter. By default the shift is 0.0, not None.
Note that we do *not* do unit conversions here, these must be accounted
for before entry into the routine. The data output is a Nx3 array, first
column is the energy scale, second is the experimental and third is the
fitted computed data.
"""
if energy_range is None:
estart = 0
eend = self.cmpts.shape[1]
else:
estart = spectra.closest_index_to_energy(self.cmpts[0,:,0], energy_range[0])
eend = spectra.closest_index_to_energy(self.cmpts[0,:,0], energy_range[1])
if DEBUG:
print "Fitting computational data between indices %d and %d." % (estart, eend)
print "Exp_data range is", amin(exp_data[:,0]), amax(exp_data[:,0])
y = interp1d(exp_data[:,0], exp_data[:,1], bounds_error=False, fill_value=0.0)
ran = self.spectrumRandom()
yexp = array([y(x-energy_offset) for x in self.cmpts[0,estart:eend+1,0]])
def fitfunc(p):
# p[0] = theta, p[1] = phi, p[2] = I0, p[3] = mixing fraction
spec = spectra.spectrum_tp(self.cmpts, p[0],p[1])
return norm(p[2] * (p[3] * spec[estart:eend+1,1] + (1-p[3]) * ran[estart:eend+1,1]) - yexp)
r = fmin_slsqp(fitfunc, [45.0, 45.0, 100, 0.5], bounds=[[0.0,180.0],[0.0,360.0], [0.1, 1e5], [0.0, 1.0]], full_output=True, iter=1000, iprint=2, acc=1.0e-9, epsilon=0.001)
cdat = spectra.spectrum_tp(self.cmpts, r[0][0], r[0][1])
cdat[:,0] = cdat[:,0] - energy_offset
cdat[:,1] = r[0][2] * cdat[:,1]
rdat = r[0][2] * self.spectrumRandom()[:,1]
# Construct an output array with columns energy, experimental intensity,
# calculated best fit intensity, random component, oriented component.
data = zeros((cdat.shape[0], 5))
data[:,0] = cdat[:,0]
data[:,1] = array([y(x-energy_offset) for x in self.cmpts[0,:,0]])
data[:,2] = r[0][3] * cdat[:,1] + (1.0 - r[0][3]) * rdat
data[:,3] = (1.0 - r[0][3]) * rdat
data[:,4] = r[0][3] * cdat[:,1]
return r[0][0], r[0][1],r[0][2],r[0][3], data
def bestFitToFile(self, filename, energy_range=None,exp_offset=0.0, comp_offset=0.0):
""" theta, phi, I0, data = Spectra.bestFitToFile(filename, energy_range=None, energy_offset=0.0)
Same as bestFitToExperiment but loads automatically from a file. The
pretty safe assumption here is that the experimental energy axis is in eV
whereas the stored spectra here are all in Hartree, so the spectra are
returned in Hartree. There are two offsets allowed, with exp_offset applied
to the X-axis of the experimental data and assumed to be in eV, and
comp_offset passed directly to bestFitToExperiment.
"""
# Load experimental data, offset and convert to Ha.
expdat = loadtxt(filename)
expdat[:,0] = eV2hartree(expdat[:,0] - exp_offset)
return self.bestFitToExperiment(expdat, energy_range, comp_offset)
class Atom:
    """ Atom class: a single atom, as represented by, for example, a
    PAW data set.

    Create using Atom("my_atom.paw"), where the input file is the
    abinit-formatted pseudopotential file.
    """

    def __init__(self, pawfile):
        """ atom = Atom(pawfile)

        Creates an atom object by reading a abinit PAW dataset.
        """
        # In the interests of future-proofing, offload to an internal.
        self.loadFromAbinit(pawfile)

    def loadFromAbinit(self, pawfile):
        """ Atom.loadFromAbinit(pawfile)

        Internal: initializes atom data from abinit PAW file.

        Two header layouts are handled: files whose first line contains
        "All-electron core wavefunctions" (extra method/nspinor/nsppol fields
        and an r_max line) and ordinary PAW datasets (lloc/mmax/r2well fields,
        an r_cut line and a shape-function line). After the header, the file
        is a sequence of "===== Title ===== comment" chunks which are parsed
        positionally into self.data.
        """
        f = open(pawfile)
        lines = f.readlines()
        f.close()

        # Grab the important bits from the header
        if "All-electron core wavefunctions" in lines[0]:
            # Core-wavefunction file header layout.
            self.element = lines[0].split()[9]
            self.generation = lines[0].split("-")[3].strip()
            self.method = int(lines[1].split()[0])
            self.nspinor = int(lines[1].split()[1])
            self.nsppol = int(lines[1].split()[2])
            self.zatom = float(lines[2].split()[0])
            self.zion = float(lines[2].split()[1])
            self.pspdat = int(lines[2].split()[2])
            self.pspcod = int(lines[3].split()[0])
            self.pspxc = int(lines[3].split()[1])
            self.lmax = int(lines[3].split()[2])
            self.pspfmt = lines[4].split()[0]
            self.creator = int(lines[4].split()[1])
            self.basis_size = int(lines[5].split()[0])
            self.lmn_size = int(lines[5].split()[1])
            self.orbitals = [int(x) for x in lines[6].split()[0:self.basis_size]]
            self.number_of_meshes = int(lines[7].split()[0])
            self.mesh_info = []
            self.mesh = []
            # One descriptor line per radial mesh; build the actual radial
            # grids (self.mesh) as we go.
            for i in range(self.number_of_meshes):
                meshbits = lines[8+i].split()
                tmpdict = {}
                tmpdict["type"] = int(meshbits[1])
                tmpdict["size"] = int(meshbits[2])
                tmpdict["rad_step"] = float(meshbits[3])
                tmpdict["log_step"] = float(meshbits[4])
                self.mesh_info.append(tmpdict)
                j = arange(1,tmpdict["size"]+1)
                if tmpdict["type"] == 1:
                    # Linear grid?
                    self.mesh.append(tmpdict["rad_step"] * j)
                elif tmpdict["type"] == 2:
                    # Logarithmic grid: r(j) = rad_step * (exp(log_step*(j-1)) - 1).
                    self.mesh.append(tmpdict["rad_step"] * (exp(tmpdict["log_step"] * (j - 1)) - 1))
            self.r_max = float(lines[8+self.number_of_meshes].split()[0])
            data = lines[9+self.number_of_meshes:]
        else:
            # Ordinary PAW dataset header layout.
            self.element = lines[0].split()[5]
            self.generation = lines[0].split("-")[1].strip()
            self.zatom = float(lines[1].split()[0])
            self.zion = float(lines[1].split()[1])
            self.pspdat = int(lines[1].split()[2])
            self.pspcod = int(lines[2].split()[0])
            self.pspxc = int(lines[2].split()[1])
            self.lmax = int(lines[2].split()[2])
            self.lloc = int(lines[2].split()[3])
            self.mmax = int(lines[2].split()[4])
            self.r2well = float(lines[2].split()[5])
            self.pspfmt = lines[3].split()[0]
            self.creator = int(lines[3].split()[1])
            self.basis_size = int(lines[4].split()[0])
            self.lmn_size = int(lines[4].split()[1])
            self.orbitals = [int(x) for x in lines[5].split()[0:self.basis_size]]
            self.number_of_meshes = int(lines[6].split()[0])
            self.mesh_info = []
            self.mesh = []
            # Same mesh construction as the core-wavefunction branch, offset
            # by one line because this header is one line shorter.
            for i in range(self.number_of_meshes):
                meshbits = lines[7+i].split()
                tmpdict = {}
                tmpdict["type"] = int(meshbits[1])
                tmpdict["size"] = int(meshbits[2])
                tmpdict["rad_step"] = float(meshbits[3])
                tmpdict["log_step"] = float(meshbits[4])
                self.mesh_info.append(tmpdict)
                j = arange(1,tmpdict["size"]+1)
                if tmpdict["type"] == 1:
                    # Linear grid?
                    self.mesh.append(tmpdict["rad_step"] * j)
                elif tmpdict["type"] == 2:
                    # Logarithmic grid: r(j) = rad_step * (exp(log_step*(j-1)) - 1).
                    self.mesh.append(tmpdict["rad_step"] * (exp(tmpdict["log_step"] * (j - 1)) - 1))
            self.r_cut = float(lines[7+self.number_of_meshes].split()[0])
            self.shape_type = int(lines[8+self.number_of_meshes].split()[0])
            self.rshape = float(lines[8+self.number_of_meshes].split()[1])
            data = lines[9+self.number_of_meshes:]
        self.data = []

        # Now go through and grab all the bits!
        # Each chunk is consumed destructively from the front of `data`; the
        # repeated data = data[N:] slicing below is order-sensitive.
        is_data = True
        while (is_data):
            # Header of the chunk specifies the length.
            chunk = {}
            chunk["title"] = data[0].split("=====")[1].strip()
            chunk["comment"] = data[0].split("=====")[2].strip()
            print chunk["title"]
            # There are a few special chunks that have slightly different formatting
            if "Dij0" not in chunk["title"] and "Rhoij0" not in chunk["title"]:
                chunk["mesh index"] = int(data[1].split()[0])
                if "VHntZC" in chunk["title"]:
                    try:
                        self.vloc_format = int(data[1].split()[1])
                    except ValueError:
                        # Some PAW files don't specify the vloc_format
                        self.vloc_format = 1
                elif "Core wave functions" in chunk["title"]:
                    # Core-wavefunction chunks carry two extra description
                    # lines (n/l/s and E/occupation), skipped by the extra
                    # data = data[2:] inside this branch.
                    chunk["core n"] = int(data[2].split()[0])
                    chunk["core l"] = int(data[2].split()[1])
                    chunk["core s"] = int(data[2].split()[2])
                    chunk["core E"] = float(data[3].split()[0]) / 2 # Convert immediately to Ha from Ry.
                    chunk["core occ"] = float(data[3].split()[1])
                    data = data[2:]
                data = data[2:]
                size = self.mesh_info[chunk["mesh index"] - 1]["size"]
            else:
                # Dij0/Rhoij0 are packed lower-triangular matrices, so the
                # expected value count is lmn_size*(lmn_size+1)/2. (The result
                # is a float here, but the equality test against len() below
                # still matches the corresponding integer.)
                size = self.lmn_size * (self.lmn_size + 1) * 0.5
                chunk["mesh index"] = -1
                data = data[1:]
            # Accumulate whitespace-separated values line by line until the
            # expected count is reached.
            chunk["data"] = []
            got_data = False
            while not got_data:
                for val in [float(x) for x in data[0].split()]:
                    chunk["data"].append(val)
                if len(chunk["data"]) == size:
                    data = data[1:]
                    got_data = True
                else:
                    data = data[1:]
            chunk["data"] = array(chunk["data"])
            self.data.append(chunk)
            if len(data) == 0:
                is_data = False
        if "paw" in self.pspfmt:
            # Expand rhoij and dij matrices into a triangular matrix.
            for i, d in enumerate(self.data):
                if "Rhoij0" in d['title']:
                    self.iRhoij0 = i
                elif "Dij0" in d['title']:
                    self.iDij0 = i
            self.Dij0 = zeros((self.lmn_size, self.lmn_size))
            self.Rhoij0 = zeros((self.lmn_size, self.lmn_size))
            for row in range(1, self.lmn_size+1):
                for column in range(1, row+1):
                    # Packed triangular index: (row-1)*row/2 + (column-1),
                    # written as (row - 1 + (row - 1)**2)/2 + column - 1.
                    self.Dij0[row - 1, column - 1] = self.data[self.iDij0]['data'][(row - 1 + (row -1 )** 2) / 2 + column - 1]
                    self.Rhoij0[row - 1, column - 1] = self.data[self.iRhoij0]['data'][(row - 1 + (row - 1) ** 2) / 2 + column - 1]
        # Make interpolators for the radial data so we don't need to construct
        # them every time we need them.
        self.interpolators = {}
        for i, d in enumerate(self.data):
            if d['mesh index'] != -1:
                self.interpolators[i] = interp1d(self.mesh[d['mesh index'] - 1], d['data'])

    def radial2Cart(self, data_index, r, r0):
        """ psi = Atom.radial2Cart(data_index, r, r0)

        Given an atomic centre r0, calculate the actual value of the requested function
        in realspace at position r.

        r, r0 are expected to be 3-element numpy arrays.
        """
        # Check we're within the PAW cutoff radius.
        # NOTE(review): if pspfmt contains neither "paw" nor "core", rcut is
        # never bound and the comparison below raises NameError - assumed not
        # to occur for files this parser accepts.
        if "paw" in self.pspfmt:
            rcut = self.r_cut
        elif "core" in self.pspfmt:
            rcut = self.r_max
        if norm(r - r0) > rcut:
            return 0
        if self.data[data_index]['mesh index'] == -1:
            print "Requested data does not represent a radial function."
            return None
        else:
            #ur = self.data[data_index]['data']
            #rr = self.mesh[self.data[data_index]['mesh index'] - 1]
            u = self.interpolators[data_index]
            # Use the title of the data to figure out which orbital
            # this is (hence the angular momentum)
            l = self.orbitals[int(self.data[data_index]['title'].split()[-1])-1]
            # Get the spherical coordinates of our displacement vector
            s = r - r0
            rho = norm(s)
            # Need a different treatment if rho == 0.
            if rho > 0:
                theta = arccos(s[2]/rho)
                phi = arctan2(s[1], s[0])
                # We aren't doing this in the presence of a magnetic field
                # so we can just use m=0 for our spherical harmonic.
                return u(rho) / rho * sph_harm(0,l,theta,phi)
            else:
                theta = 0.0
                phi = arctan2(s[1],s[0]) # Doesn't really matter what this is
                # Use a 3rd order polynomial fit to points close to the origin to extrapolate.
                y = [u(x)/x for x in [0.001, 0.002, 0.003]]
                z = polyfit([0.001, 0.002, 0.003], y, 3)
                return poly1d(z)(0.0) * sph_harm(0,l,theta,phi)

    def expandOntoGrid(self, data_index, ngrid, avec, r0):
        """ grid = Atom.expandOntoGrid(data_index, ngrid, avec, r0)

        Expand the specified dataset onto a grid with grid
        dimensions ngrid and unit vectors avec[i]/ngrid[i].

        Place the atomic sphere at position r0.
        """
        grid = zeros(ngrid, dtype="complex")
        for i in range(ngrid[0]):
            for j in range(ngrid[1]):
                for k in range(ngrid[2]):
                    r = (1.0 * i) /ngrid[0] * avec[0] + (1.0 * j)/ngrid[1] * avec[1] + (1.0 * k)/ngrid[2] * avec[2]
                    grid[i,j,k] = self.radial2Cart(data_index, r, r0)
        # Renormalize.
        #inorm = integrate_grids(grid,grid)
        #return grid/sqrt(inorm)
        return grid
class Atoms:
    """ Atoms class: a collection of atoms (possibly with a crystal structure)

    Create using Atoms(xsf_file), Atoms(xyz_file), Atoms(abinit_input), etc.
    """

    # NOTE(review): these mutable defaults are class-level attributes shared
    # between instances until clean()/loaders rebind them per instance.
    nsteps = 1                      # > 1 if object has time-series data
                                    # (ie is animated)
    is_crystal = True               # True if we expect to have lattice vectors
    lattice = []                    # List of lists of 3 vectors.
    positions = []                  # List of lists of atomic positions.
    forces = []                     # Same for forces...
    species = []                    # and species.
    densities = []                  # List of 3D arrays
    wfks = []                       # Wavefunction data (loaded on demand).
    filehook = 0                    # For *really big files* it's better to
                                    # keep a filehook and just read as necessary.
                                    # An example is any of the larger NetCDF output
                                    # files from Abinit.
    ngrid = []                      # size of real-space grid.
    test = []                       # Test output
def clean(self):
""" Atoms.clean()
Internal - wipes out data in the instance object, DON'T USE!
"""
self.is_crystal = True
self.lattice = []
self.positions = []
self.forces = []
self.species = []
self.densities = []
self.wfks = []
self.ngrid = []
if self.filehook:
self.filehook.close()
self.filehook = 0
def __init__(self, filename, filetype="XSF", options=None):
""" atoms = Atoms(filename, filetype="XSF")
Creates an Atoms object from some input file. You have to
specify the type of file here. Can be:
"XSF" : XCrysden Structure Format, can also be animated axsf.
"abinit" : Abinit input file.
"abi_density" : Abinit _DEN file.
"elk" : Elk input file.
"castep" : CASTEP .cell file.
"NetCDF" : Abinit NetCDF output. Note: this importer is not very clever
and will put in default values for the species if they cannot be found -
default is all atoms are carbon.
"ETSF" : Read from ETSF-formatted NetCDF output.
"""
if filetype == "XSF":
self.loadFromXSF(filename)
elif filetype == "abinit":
self.loadFromAbinit(filename)
elif filetype == "abi_density":
self.loadFromAbinitDensity(filename)
elif filetype == "abi_wfk":
self.loadFromAbinitWFK(filename)
elif filetype == "elk":
self.loadFromElk(filename)
elif filetype == "NetCDF":
self.loadFromNetCDF(filename)
elif filetype == "ETSF":
self.loadFromETSF(filename)
elif filetype == "castep":
self.loadFromCastep(filename)
elif filetype == "VASP":
self.loadFromVASP(filename, options)
else:
print "(esc_lib.Atoms.__init__) ERROR: File type %s not handled at present." % filetype
return None
    def loadFromCastep(self, filename):
        """ atoms = Atoms.loadFromCASTEP(filename)

        Internal, inits an Atoms object from a CASTEP .cell file. Note that
        we don't handle a lot of units here: specifically, we check for the
        presence of an explicit units line but only convert for angstrom or bohr.

        We try to handle as much of the input file flexibility as possible:
        statements such as positions_frac and positionsfrac and PositionsFRAC and
        so on are all recognized, and we remove comments and blanks before the
        parse so they don't get in the way. We also allow the assignment variants
        kpoint_mp_grid=1 1 1 or kpoint_mp_grid: 1 1 1 or kpoint_mp_grid 1 1 1.

        NOTE(review): a positions_frac block is converted at the very end using
        self.lattice[0], so this assumes the lattice block precedes (or at least
        exists alongside) the fractional positions block - verify against inputs.

        TODO: The lattice_abc block.
        """
        self.clean()

        f = open(filename, 'r')
        data = f.readlines()
        f.close()

        # Strip away all comments and end of line comments.
        data = remove_comments(data, "!")

        # CASTEP is case-independent so we should be too.
        data = [s.lower() for s in data]
        if DEBUG:
            for i, line in enumerate(data):
                print i, line
        # Line-by-line parse
        i = 0
        postype = "None"
        while i < len(data):
            line = data[i]
            if line.split()[0] == "%block":
                # Block titles are normalized by removing underscores, so
                # lattice_cart, latticecart, etc all match.
                btitle = line.split()[1]
                btitle = "".join(btitle.split("_"))
                if DEBUG:
                    print "Found block: ", btitle
                if btitle == "latticecart":
                    # Either three or four lines in this block.
                    if data[i+4].split()[0] == "%endblock":
                        # No units, assume angstrom.
                        vec1 = array([float(x) for x in data[i+1].split()])
                        vec2 = array([float(x) for x in data[i+2].split()])
                        vec3 = array([float(x) for x in data[i+3].split()])
                        self.lattice.append(ang2bohr([vec1, vec2, vec3]))
                        i = i + 4
                    elif data[i+5].split()[0] == "%endblock":
                        # Four content lines: the first is a units string.
                        units = data[i+1].split()[0].lower()
                        vec1 = array([float(x) for x in data[i+2].split()])
                        vec2 = array([float(x) for x in data[i+3].split()])
                        vec3 = array([float(x) for x in data[i+4].split()])
                        if units == "ang" or units == "angstrom":
                            self.lattice.append(ang2bohr([vec1, vec2, vec3]))
                        elif units == "bohr":
                            self.lattice.append([vec1, vec2, vec3])
                        i = i + 5
                elif btitle == "positionsabs":
                    # Loop to the end of this block
                    postype = "absolute"
                    pos = []
                    specs = []
                    unit = "ang"
                    for j in range(i+1,len(data)):
                        if data[j].split()[0] == "%endblock":
                            i = j
                            break
                        elif len(data[j].split()) == 1:
                            # A lone token is an optional units line.
                            unit = data[j].split()[0].lower()
                        else:
                            specs.append(getElementZ(data[j].split()[0]))
                            pos.append(array([float(s) for s in data[j].split()[1:4]]))
                    if unit == "ang" or unit == "angstrom":
                        self.positions.append(ang2bohr(pos))
                    elif unit == "bohr":
                        self.positions.append(pos)
                    self.species.append(specs)
                elif btitle == "positionsfrac":
                    # Loop to the end of this block.
                    # Fractional coordinates are unitless; conversion to
                    # cartesian happens after the parse loop, below.
                    postype = "fractional"
                    pos = []
                    specs = []
                    for j in range(i+1,len(data)):
                        if data[j].split()[0] == "%endblock":
                            i = j
                            break
                        else:
                            specs.append(getElementZ(data[j].split()[0]))
                            pos.append(array([float(s) for s in data[j].split()[1:4]]))
                    self.species.append(specs)
            else:
                # Line is outside a block
                # Look for "=" or ":"
                if ":" in line:
                    option = "".join(line.split(":")[0].split("_"))
                    value = line.split(":")[1].strip()
                elif "=" in line:
                    option = "".join(line.split("=")[0].split("_"))
                    value = line.split("=")[1].strip()
                else:
                    option = "".join(line.split()[0].split("_"))
                    value = " ".join(line.split()[1:]).strip()
                # Since we don't actually need the values, we just print them.
                if DEBUG:
                    print "Found option: ", option, " with value ", value
            i = i + 1
        if postype == "fractional":
            self.positions.append(reduced2cart(pos, self.lattice[0]))
def loadFromETSF(self, filename):
""" atoms = Atoms.loadFromETSF(filename)
Internal, inits an Atoms object from an ETSF NetCDF output. Can get this
from Abinit using pawprtwf, for example.
"""
self.clean()
f = netcdf.netcdf_file(filename, 'r')
pos = [array(x) for x in f.variables['reduced_atom_positions'].data]
avec = [array(x) for x in f.variables['primitive_vectors'].data]
self.positions.append(reduced2cart(pos,avec))
self.lattice.append(avec)
self.recip_lattice = [recip_lattice(avec)]
n1 = f.dimensions['number_of_grid_points_vector1']
n2 = f.dimensions['number_of_grid_points_vector2']
n3 = f.dimensions['number_of_grid_points_vector3']
self.ngrid = [n1, n2, n3]
# Generate G-vectors (we need them for WF calculations)
#self.g_vectors, self.g_coords,self.max_g = g_vectors(self.recip_lattice[0],self.ngrid)
znucl = f.variables['atomic_numbers'].data
self.species = [[int(znucl[x-1]) for x in f.variables['atom_species'].data]]
self.filehook = f
def loadFromNetCDF(self, filename):
""" atoms = Atoms.loadFromNetCDF(filename)
Internal, inits an Atoms object from an Abinit NetCDF output.
Note that we are *expecting* something like a _HIST file with limited
variables. At minimum, we need to have xcart, natom and rprimd in the
output. The _OUT.nc files do not have xcart at present (Abinit 6.12.1).
Also note that since the _HIST file doesn't contain typat/znucl variables,
we just set everything to carbon by default.
"""
self.clean()
f = netcdf.netcdf_file(filename, 'r')
self.nsteps = len(f.variables['mdtime'].data)
if DEBUG: print self.nsteps, f.variables['mdtime'].data
xcart = f.variables['xcart'].data
rprimd = f.variables['rprimd'].data
fcart = f.variables['fcart'].data
natom = f.dimensions['natom']
for i in range(self.nsteps):
self.lattice.append(rprimd[i])
self.positions.append([xc for xc in xcart[i]])
self.forces.append([fc for fc in fcart[i]])
self.species.append(natom * [6])
def loadFromElk(self, filename):
""" atoms= Atoms.loadFromElk(filename)
Internal, inits an Atoms object from an elk input file.
"""
self.clean()
f = open(filename, 'r')
# Construct our datastream
lines = f.readlines()
f.close()
data = remove_comments(lines, "#")
data = remove_comments(lines, "!")
data = remove_comments(lines, ":")
data = " ".join(data).split()
# Read scales and primitive vectors
s = elk_array(data, "scale", 1)
if s is None:
s = 1.0
s1 = elk_array(data, "scale1", 1)
if s1 is None:
s1 = 1.0
s2 = elk_array(data, "scale2", 1)
if s2 is None:
s2 = 1.0
s3 = elk_array(data, "scale3", 1)
if s3 is None:
s3 = 1.0
avec = elk_array(data, "avec", 9, newshape=(3,3))
if avec is None:
avec = array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]).reshape((3,3))
avec = s * avec
avec[0,:] = s1 * avec[0,:]
avec[1,:] = s2 * avec[1,:]
avec[2,:] = s3 * avec[2,:]
self.lattice = [[array(x) for x in avec.tolist()]]
# Parse the atoms block
start = data.index("atoms")
nspecies = int(data[start+1])
curpos = start+2
spec = []
pos = []
for i in range(nspecies):
spfname = data[curpos].strip("'").strip('"').split(".")[0]
ncurspec = int(data[curpos+1])
spec = spec + ncurspec * [getElementZ(spfname)]
for j in range(ncurspec):
pstart = curpos+2 + j*6
pos.append(array([float(x) for x in data[pstart:pstart+3]]))
curpos = pstart + 6
self.species.append(spec)
# Need to check if the molecule flag is set: if not, convert
# from reduced to cart coords.
if "molecule" in data:
if data[data.index("molecule") + 1].lower() == ".true.":
self.is_crystal = False
else:
self.is_crystal = True
else:
self.is_crystal = True
if self.is_crystal:
pos = reduced2cart(pos, avec)
self.positions.append(pos)
def loadFromAbinitWFK(self, wfk_file):
""" atoms = Atoms.loadFromAbinitWFK(dens_file)
Internal, inits an Atoms object from a _WFK file written
by abinit.
"""
self.clean()
# Libabitools does the work here (fortran library) because
# we're potentially reading in a gigantic file.
io.wavefunction(wfk_file)
self.lattice = [[array(x) for x in io.rprimd.tolist()]]
self.positions = [reduced2cart([array(x) for x in io.xred.T.tolist()], self.lattice[0])]
self.species = [[int(io.znucltypat[x-1]) for x in io.typat]]
def loadFromAbinitDensity(self, dens_file):
""" atoms= Atoms.loadFromAbinitDensity(dens_file)
Internal, inits an Atoms object from a _DEN file written
by abinit.
"""
self.clean()
# The libabi2py fortran library does the hard work
# here, we just need to run the density routine and
# read out the values into the appropriate python
# variables.
io.density(dens_file)
nx = io.ngfft[0]
ny = io.ngfft[1]
nz = io.ngfft[2]
dens = zeros((nx, ny, nz))
for k in range(nz):
for j in range(ny):
for i in range(nx):
dens[i,j,k] = io.rhor[i + nx * j + nx * ny * k]
dens2 = io.rhor.reshape((nx, ny, nz), order='F')
self.lattice = [[array(x) for x in io.rprimd.tolist()]]
self.positions = [reduced2cart([array(x) for x in io.xred.T.tolist()], self.lattice[0])]
self.species = [[int(io.znucltypat[x-1]) for x in io.typat]]
self.densities = [dens2]
def loadFromAbinit(self, abinit_input):
""" atoms = Atoms.loadFromAbinit(abinit_input)
Internal, inits an Atoms object from an abinit input file. We
only allow default values for rprim - input files must contain:
natom, ntypat, typat, znucl, acell, [xangst, xred, xcart]
"""
# Destroy current data
self.clean()
f = open(abinit_input, 'r')
lines = f.readlines()
f.close()
data = remove_comments(lines, "#")
data = remove_comments(lines, "!")
data = " ".join(data).split()
# Alrighty. Get the number and type of atoms, then generate our species
# list.
n = abinit_int(data, "natom", 1)
ntypat = abinit_int(data, "ntypat", 1)
znucl = abinit_int(data, "znucl", ntypat)
# znucl has to be a list
if type(znucl) == type(1):
znucl = [znucl]
typat = abinit_int(data, "typat", n)
if type(typat) == type(1):
typat = [typat]
# Double brackets because species, positions, etc are all timestep-friendly.
self.species = [[znucl[x-1] for x in typat]]
acell = abinit_value(data, "acell", 3)
if acell is None:
acell = array([1.0, 1.0, 1.0])
else:
acell_unit = abinit_unit(data, "acell")
print acell_unit
if acell_unit == "ang":
acell = ang2bohr(acell)
scalecart = abinit_value(data, "scalecart", 3)
if scalecart is None:
scalecart = array([1.0, 1.0, 1.0])
rprim = abinit_array(data, "rprim", 9)
if rprim is None:
rprim = array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]).reshape((3,3))
lat = zeros((3,3))
for i in [0, 1, 2]:
lat[i,:] = rprim[i,:] * acell[i]
print rprim[i,:], acell[i], lat[i,:]
for i in [0, 1, 2]:
lat[:,i] = lat[:,i] * scalecart[i]
self.lattice = [lat]
# Try to get positions in the order: xred, xcart, xangst
pos = abinit_array(data, "xred", n * 3)
if pos is None:
pos = abinit_array(data, "xcart", n * 3)
if pos is None:
pos = abinit_array(data, "xangst", n * 3)
if pos is None:
raise ESCError("loadFromAbinit", "ERROR: Must have at least one of xred, xcart or xangst specified in the abinit input file.")
else:
pos = ang2bohr(pos)
else:
# Use variable lat to convert xred to actual positions.
pos = array(mat(pos) * mat(lat))
self.positions = [[array(x) for x in pos.tolist()]]
def loadFromVASP(self, vasp_file, ions=None):
""" atoms = Atoms.loadFromVASP(vasp_file, ions=[])
Internal, inits an Atoms object from a VASP POSCAR or CONTCAR file. Very rudimentary
for the present but good enough for converting from CONTCAR to other formats.
Neither the POSCAR nor CONTCAR files actually specify what species are listed,
so you can give a list (strings like 'C' or numbers like 6) of the species to match
those in the file, using the ions input.
"""
# Destroy our current data
self.clean()
# We must be a crystal, and NO animations here.
self.is_crystal = True
self.nsteps = 1
f = open(vasp_file)
if not f:
raise ESCError("File %s could not be opened - exiting.")
lines = f.readlines()
f.close()
# The POSCAR/CONTCAR format is a line-position based format. First line is a
# comment, second line is the scale of the unit cell.
scale = float(lines[1].split()[0])
# Next three lines are the lattice vectors
a = ang2bohr(array([float(x) for x in lines[2].split()[0:3]])) * scale
b = ang2bohr(array([float(x) for x in lines[3].split()[0:3]])) * scale
c = ang2bohr(array([float(x) for x in lines[4].split()[0:3]])) * scale
# Now, depending on the version of VASP we start to get in a spot of bother,
# so we need to look ahead. If there are two lines of numbers, the first line
# gives the species, the second gives the species *counts*. If there is only
# one line of numbers, it is just the species counts. Then the Selective Dynamics
# line is OPTIONAL and then finally we must have either Direct or Cartesian. So
# look ahead until we get either direct or cartesian (only the first letter matters).
for i,l in enumerate(lines[5:]):
if l[0] in ['d', 'D', 'c', 'C']:
if DEBUG:
print "Found Direct/Cart at line %d." % i
# First mark whether it is Direct or Cartesian for later.
if l[0] in ['d', 'D']:
ptype = 'Direct'
else:
ptype = 'Cartesian'
if i+5 == 6:
# No Selective Dynamics, no species.
# So, lines[5] is the set of ion counts.
cline = 5
iline = None
pline = 7
elif i+5 == 7:
# Either Selective Dynamics or species are included (not both).
pline = 8
if lines[6][0] in ['S', 's']:
# Selective Dynamics, so species NOT included.
cline = 5
iline = None
else:
# Species at line 5, counts at line 6
iline = 5
cline = 6
elif i+5 == 8:
pline = 9
# Both selective Dynamics and species.
iline = 5
cline = 6
break
# The ions input overrides the iline if it is present.
ion_counts = [int(x) for x in lines[cline].split()]
if ions is not None:
species = []
for i, ic in enumerate(ion_counts):
species += ic * [getElementZ(ions[i])]
elif iline is not None:
ion_species = [int(x) for x in lines[iline].split()]
for i, ic in enumerate(ion_counts):
species += ic * [getElementZ(ion_species[i])]
else:
species = []
# If there are no ions specified, just start at H and work up the integers.
for i, ic in enumerate(ion_counts):
species += ic * [i+1]
positions = []
for i in range(sum(ion_counts)):
pos = array([float(x) for x in lines[pline+i].split()[0:3]])
if ptype == 'Direct':
pos = pos[0] * a + pos[1] * b + pos[2] * c
else:
pos = ang2bohr(pos)
positions.append(pos)
self.species.append(species)
self.positions.append(array(positions))
self.lattice.append([a, b, c])
    def loadFromXSF(self, xsf_file):
        """ atoms = Atoms.loadFromXSF(xsf_file)

        Internal, inits an Atoms object from an xsf file. Note that we can deal
        with animated (AXSF) files just fine.

        Scans the file for the keywords in xsf_keywords and handles each block:
        ANIMSTEPS (step count), CRYSTAL (flag), PRIMVEC (lattice vectors),
        PRIMCOORD (counted atom list) and ATOMS (uncounted atom list).
        """
        # Destroy our current data
        self.clean()
        f = open(xsf_file)
        # NOTE(review): open() raises IOError itself on failure, so this guard
        # is dead code, and the message is missing its % argument.
        if not f:
            raise ESCError("File %s could not be opened - exiting.")
        lines = f.readlines()
        f.close()
        data = remove_comments(lines, "#")
        keywords = []
        blocks = []
        # Locate all keywords
        for i, line in enumerate(data):
            bits = line.split()
            for kw in xsf_keywords:
                if kw in bits:
                    keywords.append(kw)
                    blocks.append(i)
        # Cycle through the keywords and deal with each block.
        for i, (s, kw) in enumerate(zip(blocks, keywords)):
            if kw == "ANIMSTEPS":
                # Number of animation steps in an AXSF file.
                self.nsteps = int(data[s].split()[1])
            if kw == "CRYSTAL":
                self.is_crystal = True
            if kw == "PRIMVEC":
                # Three lattice vectors, converted from Angstrom to Bohr.
                a = ang2bohr(array([float(x) for x in data[s+1].split()[0:3]]))
                b = ang2bohr(array([float(x) for x in data[s+2].split()[0:3]]))
                c = ang2bohr(array([float(x) for x in data[s+3].split()[0:3]]))
                self.lattice.append([a, b, c])
            if kw == "PRIMCOORD":
                # First line of the block carries the atom count.
                nat = int(data[s+1].split()[0])
                positions = []
                forces = []
                species = []
                for j in range(nat):
                    bits = data[s+2+j].split()
                    species.append(getElementZ(bits[0]))
                    positions.append(ang2bohr(array([float(x) for x in bits[1:4]])))
                    # Forces are optional columns 4-6.
                    # NOTE(review): if those columns are absent, bits[4:7] is an
                    # empty list, so no exception fires and an *empty* array is
                    # appended instead of the zero vector - confirm downstream use.
                    try:
                        forces.append(ang2bohr(array([float(x) for x in bits[4:7]])))
                    except (ValueError, IndexError):
                        forces.append(array([0.0, 0.0, 0.0]))
                self.positions.append(positions)
                self.forces.append(forces)
                self.species.append(species)
            if kw == "ATOMS":
                # THIS SECTION IS BUGGY!
                positions = []
                forces = []
                species = []
                # The ATOMS block carries no count: read until the next keyword
                # block, or to end of file if this is the last block.
                try:
                    s1 = blocks[i+1]
                except (IndexError):
                    s1 = len(data)
                for j in range(s+1, s1):
                    bits = data[j].split()
                    species.append(getElementZ(bits[0]))
                    positions.append(ang2bohr(array([float(x) for x in bits[1:4]])))
                    # Same optional-forces caveat as in PRIMCOORD above.
                    try:
                        forces.append(ang2bohr(array([float(x) for x in bits[4:7]])))
                    except (ValueError, IndexError):
                        forces.append(array([0.0, 0.0, 0.0]))
                self.positions.append(positions)
                self.forces.append(forces)
                self.species.append(species)
# def gradWF(self, spin, kpt, band, spinor):
# """ gx, gy, gz = Atoms.gradWF(spin,kpt,band,spinor)
#
# If this Atoms instance has a filehook to a ETSF WF file, return
# the gradient of a specific wavefunction using a FFT method.
#
# The returned grids are the components of the complex gradient
# with respect to the cartesian axes.
#
# """
#
# psi = self.getRealSpaceWF(spin,kpt,band,spinor)
# reduced_k = self.filehook.variables['reduced_coordinates_of_kpoints'][kpt]
# K = reduced2cart(reduced_k, self.recip_lattice[0])
#
# print "Got psi and K"
# psiG = fftn(psi)
# print "Got past fftn!"
#
# speed.grad_wf(K, self.g_vectors, psiG)
# grad = speed.grd[:]
#
# print "Got past grad!"
# gx = grad[:,:,:,0]
# gy = grad[:,:,:,1]
# gz = grad[:,:,:,2]
#
# return ifftn(gx), ifftn(gy), ifftn(gz)
def writeWFCube(self, filename, spin,kpt,band,spinor, option="density"):
""" succeeded = Atoms.writeWaveFunctionCube(filename, spin, kpt, band, spinor, option="density")
If this Atoms instance has a filehook (must point to an open and valid ETSF-formatted
NetCDF file object), read a wavefunction and write to a CUBE file.
Note we can't really deal with complex numbers in the CUBE format so have to
choose an option - density, mod, real or imag.
"""
wf = self.getRealSpaceWF(spin,kpt,band,spinor)
if option == "real":
d = real(wf)
elif option == "imag":
d = imag(wf)
elif option == "density":
d = real(wf) ** 2 + imag(wf) ** 2
elif option == "mod":
d = sqrt(real(wf) ** 2 + imag(wf) ** 2)
return write_cube(filename, self.positions, self.species, self.lattice, d)
def getRealSpaceWF(self, spin, kpt, band, spinor):
""" wf = Atoms.getRealSpaceWaveFunction(spin, kpt, band, spinor)
If this Atoms instance has a filehook (pointing to an open ETSF-formatted NetCDF file
object), use it to read the realspace wavefunction for a single spin, kpt etc.
"""
wf_real = self.filehook.variables['real_space_wavefunctions'][spin,kpt,band,spinor,:,:,:,0]
wf_imag = self.filehook.variables['real_space_wavefunctions'][spin,kpt,band,spinor,:,:,:,1]
return wf_real + 1j*wf_imag
def getBondLengths(self, cutoff=3.0, animstep=0, give_species=False):
""" bonds = Atoms.getBondLengths(cutoff=3.0, animstep=1, give_species=False)
Returns a list of bond specs [i, j, length] for all pairwise
distances less than the cutoff distance (default: 3.0 Bohr) for
the specified animation step (default: first step, ie 0).
If give_species=True, the bond spec includes the species abbreviation:
[i, Zi, j, Zj, length]
"""
return getBondLengths(self.positions[animstep], self.species[animstep], cutoff, give_species)
def orderAtoms(self, order=None):
""" new_order = Atoms.orderAtoms(order=None)
Takes the positions and species and reorders them (for example, if the
species alternate C, H, C, C, N, H, O, etc, can reorder to C C H H N O)
according to the passed list order. If order is None, the ordering is done
by calling uniqify(self.species[0]), the order of the elements in the
first timestep.
"""
if order is None:
my_order = uniqify(self.species[0])
else:
my_order = uniqify(order)
for t in range(len(self.species)):
newpos = []
newspec = []
pos = self.positions[t]
spec = self.species[t]
for o in my_order:
for i in range(len(spec)):
if getElementZ(spec[i]) == getElementZ(o):
newspec.append(spec[i])
newpos.append(pos[i])
self.positions[t] = newpos
self.species[t] = newspec
return my_order
def listSpecies(self,spec):
""" spec_idx = Atoms.printSpecies(spec)
Returns a list of indices of all atoms of type spec. Spec can be given
as a Z number or an abbreviation (H, Mg, Ca, etc). Note these are zero-
based indices that can be used within the Atoms object itself, and not
the 1-based atom indices used by Abinit, for example.
"""
spec_idx = []
for i in range(len(self.species[0])):
if self.species[0][i] == getElementZ(spec):
spec_idx.append(i)
return spec_idx
    def autoUnLars(self, animstep=0, convtol=0.01, maxsteps=50, cutoff=2.2):
        """ new_pos, pos_hist = Atoms.autoUnLars(animstep=0, convtol=0.01, maxsteps=50, cutoff=2.2)

        Attempts to fix positions generated by Lars automatically. This
        might not work very well. Converges positions to convtol.

        Iteratively nudges bonded atom pairs towards the tabulated bond length
        in bond_lengths: each bond contributes half of its relative length
        error to each of its two atoms, all shifts are applied simultaneously,
        and the process repeats until the summed shift magnitude drops below
        convtol or maxsteps iterations are reached. Returns the final
        positions together with the full history of position sets.

        Why? Because Dr Lars Thomsen likes the gunslinging approach to chemical
        construction. He laughs in the face of the 2nd Law of Thermodynamics.
        """
        cur_pos = self.positions[animstep]
        pos_hist = [cur_pos]
        species = self.species[animstep]
        # give_species=True, so bond specs are [i, Zi, j, Zj, length].
        bonds = self.getBondLengths(cutoff=cutoff, animstep=animstep, give_species=True)
        for itercount in range(0, maxsteps):
            # One accumulated displacement vector per atom.
            shifts = len(cur_pos) * [array([0.0, 0.0,0.0])]
            # NOTE(review): debug print left in - presumably for diagnostics.
            print shifts
            for bond in bonds:
                # Figure out what the bond length *should* be:
                s1 = species[bond[0]]
                s2 = species[bond[2]]
                # bond_lengths keys place the higher-Z element's symbol first.
                if s1 > s2:
                    btype = elements[s1] + elements[s2]
                else:
                    btype = elements[s2] + elements[s1]
                if btype not in bond_lengths.keys():
                    print "%s bond not accounted for." % btype
                else:
                    bproper = bond_lengths[btype]
                    bvector = cur_pos[bond[0]] - cur_pos[bond[2]]
                    bactual = bond[4]
                    shift_factor = bproper / bactual
                    # Each atom of the pair takes half the correction, in
                    # opposite directions along the bond vector.
                    shifts[bond[0]] = shifts[bond[0]] + 0.5 * (shift_factor - 1.0) * bvector
                    shifts[bond[2]] = shifts[bond[2]] + -0.5 * (shift_factor - 1.0) * bvector
                    print "%s bond. Length should be %g, actual is %g. Shift factor is %g." % (btype, bproper, bactual, shift_factor)
            # Move all atoms.
            cur_pos = [c+s for (c,s) in zip(cur_pos, shifts)]
            pos_hist.append(cur_pos)
            # Check convergence
            diff = 0
            for shift in shifts:
                diff += norm(shift)
            if diff < convtol:
                return cur_pos, pos_hist
            # Update our bond lengths (module-level helper: positions changed,
            # so the bond list must be recomputed).
            bonds = getBondLengths(cur_pos, species, cutoff=cutoff, give_species=True)
        return cur_pos, pos_hist
def generateConstraints(self, constrained_atoms):
""" text_constraints = Atoms.generateConstraints(constrained_atoms)
Generates the %BLOCK IONIC_CONSTRAINTS text to fix the listed atoms and
returns the text. That's all. CASTEP-style constraints. Note that the
timestep is irrelevant here so we always take from timestep 0.
The list of constrained atoms is 1-based, not 0-based, to match with
the style used in CASTEP.
"""
text_constraints = ["%block ionic_constraints"]
count = 0
for i in constrained_atoms:
spec = elements[self.species[0][i-1]] # i-1, not i, because 1-based.
# To find the ion number, we count from the first occurrence of the same
# species.
ion = i - self.species[0].index(self.species[0][i-1])
text_constraints.append(" %d %s %d 1.0 0.0 0.0" % (count+1, spec, ion))
text_constraints.append(" %d %s %d 0.0 1.0 0.0" % (count+2, spec, ion))
text_constraints.append(" %d %s %d 0.0 0.0 1.0" % (count+3, spec, ion))
count += 3
text_constraints.append("%endblock ionic_constraints")
return "\n".join(text_constraints)
def rotateAtoms(rotation_file, fileopt=0, timestep=0):
""" success = Atoms.rotateAtoms(rotation_file, fileopt=0)
Apply the rotations specified in rotation_file to an Atoms object positions.
We can deal with animation steps here (Default is 0). See the rotate_positions
documentation for details on fileopt.
"""
pos = self.positions[timestep]
self.positions[timestep] = rotate_positions(pos, rotation_file, fileopt)
return True
def writeXSF(self, filename):
""" success = Atoms.writeXSF(filename)
Member function wrapper for esc_lib.write_xsf.
"""
return write_xsf(filename, self.positions, self.species, self.lattice)
def writeAbinit(self, filename, xtype="ang", opt=None, timestep=0):
""" success = Atoms.writeAbinit(filename, xtype="ang", opt=None, timestep=0)
Member function wrapper for esc_lib.write_abinit.
"""
return write_abinit(filename, self.positions, self.species, xtype, opt, timestep)
def writeCastep(self, filename, xtype="ang", opt=None, timestep=0):
""" success = Atoms.writeCastep(filename, xtype="ang", opt=None, timestep=0)
Member function wrapper for esc_lib.write_castep.
"""
return write_castep(filename, self.positions, self.species, self.lattice, xtype, opt, timestep)
def writeAims(self, filename, xtype="ang", opt=None, timestep=0):
""" success = Atoms.writeAims(filename, xtype="ang", opt=None, timestep=0)
Member function wrapper for esc_lib.write_aims
"""
return write_aims(filename, self.positions, self.species, self.lattice, xtype, opt, timestep)
|
HSINWEI/physics
|
python/esc_lib.old.py
|
Python
|
gpl-3.0
| 109,108
|
[
"ABINIT",
"CASTEP",
"CRYSTAL",
"Elk",
"FHI-aims",
"Gaussian",
"NetCDF",
"VASP"
] |
fe8ab5f51354f34ba619fd1d8e6a0845e8c66c67227169fa8d580fb7fd39884b
|
import sys
sys.path.insert(1, "../../../")
import h2o, tests
import random
def random_attack():
def attack(train, valid, x, y):
kwargs = {}
# randomly select parameters and their corresponding values
if random.randint(0,1): kwargs['mtries'] = random.randint(1,len(x))
if random.randint(0,1): kwargs['sample_rate'] = random.random()
if random.randint(0,1): kwargs['build_tree_one_node'] = True
if random.randint(0,1): kwargs['ntrees'] = random.randint(1,10)
if random.randint(0,1): kwargs['max_depth'] = random.randint(1,5)
if random.randint(0,1): kwargs['min_rows'] = random.randint(1,10)
if random.randint(0,1): kwargs['nbins'] = random.randint(1,20)
if random.randint(0,1):
kwargs['balance_classes'] = True
if random.randint(0,1): kwargs['max_after_balance_size'] = random.uniform(0,10)
if random.randint(0,1): kwargs['seed'] = random.randint(1,10000)
do_validation = [True, False][random.randint(0,1)]
# display the parameters and their corresponding values
print "-----------------------"
print "x: {0}".format(x)
print "y: {0}".format(y)
print "validation: {0}".format(do_validation)
for k, v in zip(kwargs.keys(), kwargs.values()): print k + ": {0}".format(v)
if do_validation: h2o.random_forest(x=train[x], y=train[y], validation_x=valid[x], validation_y=valid[y], **kwargs)
else: h2o.random_forest(x=train[x], y=train[y], **kwargs)
print "-----------------------"
print "Import and data munging..."
pros = h2o.upload_file(tests.locate("smalldata/prostate/prostate.csv.zip"))
pros[1] = pros[1].asfactor()
pros[4] = pros[4].asfactor()
pros[5] = pros[5].asfactor()
pros[8] = pros[8].asfactor()
r = pros[0].runif() # a column of length pros.nrow with values between 0 and 1
# ~80/20 train/validation split
pros_train = pros[r > .2]
pros_valid = pros[r <= .2]
cars = h2o.upload_file(tests.locate("smalldata/junit/cars.csv"))
r = cars[0].runif()
cars_train = cars[r > .2]
cars_valid = cars[r <= .2]
print
print "======================================================================"
print "============================== Binomial =============================="
print "======================================================================"
for i in range(10):
attack(pros_train, pros_valid, random.sample([2,3,4,5,6,7,8],random.randint(1,7)), 1)
print
print "======================================================================"
print "============================== Gaussian =============================="
print "======================================================================"
for i in range(10):
attack(cars_train, cars_valid, random.sample([2,3,4,5,6,7],random.randint(1,6)), 1)
print
print "======================================================================"
print "============================= Multinomial ============================"
print "======================================================================"
cars_train[2] = cars_train[2].asfactor()
cars_valid[2] = cars_valid[2].asfactor()
for i in range(10):
attack(cars_train, cars_valid, random.sample([1,3,4,5,6,7],random.randint(1,6)), 2)
# Script entry point: hand the check function to the shared test harness.
# NOTE(review): tests.run_test presumably sets up the H2O cluster connection
# before invoking random_attack - confirm in the tests helper module.
if __name__ == "__main__":
    tests.run_test(sys.argv, random_attack)
|
kyoren/https-github.com-h2oai-h2o-3
|
h2o-py/tests/testdir_algos/rf/pyunit_NOPASS_random_attack_medium.py
|
Python
|
apache-2.0
| 3,461
|
[
"Gaussian"
] |
0db50eb36ee1585187da2682e1e40bb730c1055cbd514892c99a3826bdca3f71
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonicalizes continue statements by de-sugaring into a control boolean."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import templates
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
# Tags for local state kept on the transformer's per-scope local stack.
CONTROL_VAR_NAME = 'control_var_name'  # name of the generated control boolean
CONTINUE_USED = 'continue_used'  # a `continue` was seen in this scope
GUARD_CREATED = 'guard_created'  # the `if not <control>:` guard already exists
CREATE_GUARD_NEXT = 'create_guard_next'  # wrap the next visited statement
class ContinueCanonicalizationTransformer(transformer.Base):
  """Canonicalizes continue statements into additional conditionals.

  A `continue` inside a loop body is replaced by `control_var = True`, and
  every statement that follows it (at the same or enclosing non-loop level)
  is wrapped in `if not control_var:` so it is skipped exactly as the
  original `continue` would have skipped it.
  """
  def visit_Continue(self, node):
    """Replaces `continue` with an assignment setting the control variable."""
    # Record that this scope used a continue; the loop handler only emits the
    # control variable initializer when this flag is set.
    self.set_local(CONTINUE_USED, True)
    template = """
      var_name = True
    """
    return templates.replace(
        template, var_name=self.get_local(CONTROL_VAR_NAME))
  def _postprocess_statement(self, node):
    """Guards statements that lexically follow a continue (state machine)."""
    # Example of how the state machine below works:
    #
    #   1| stmt  # State: CONTINUE_USED = False
    #    |       # Action: none
    #   2| if cond:
    #   3|   continue  # State: CONTINUE_USED = True,
    #    |             #        GUARD_CREATED = False,
    #    |             #        CREATE_GUARD_NEXT = False
    #    |             # Action: set CREATE_GUARD_NEXT = True
    #   4| stmt  # State: CONTINUE_USED = True,
    #    |       #        GUARD_CREATED = False,
    #    |       #        CREATE_GUARD_NEXT = True
    #    |       # Action: create `if not continue_used`,
    #    |       #         set GUARD_CREATED = True
    #   5| stmt  # State: CONTINUE_USED = True, GUARD_CREATED = True
    #    |       # Action: none (will be wrapped under previously
    #    |       #         created if node)
    if self.get_local(CONTINUE_USED, False):
      if self.get_local(GUARD_CREATED, False):
        # Already inside an existing guard; leave the statement alone.
        return node, None
      elif not self.get_local(CREATE_GUARD_NEXT, False):
        # This is the statement containing the continue itself.
        self.set_local(CREATE_GUARD_NEXT, True)
        return node, None
      else:
        # First statement after the continue: create the guard around it.
        self.set_local(GUARD_CREATED, True)
        template = """
          if not var_name:
            original_node
        """
        cond, = templates.replace(
            template,
            var_name=self.get_local(CONTROL_VAR_NAME),
            original_node=node)
        # Returning cond.body lets the caller append later statements inside.
        return cond, cond.body
    return node, None
  def _visit_loop_body(self, node, nodes):
    """Transforms a loop body, creating the control variable if needed."""
    self.enter_local_scope()
    scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
    # Fresh symbol guaranteed not to collide with names used in the body.
    continue_var = self.context.namer.new_symbol('continue_', scope.referenced)
    self.set_local(CONTROL_VAR_NAME, continue_var)
    nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
    if self.get_local(CONTINUE_USED, False):
      # Initialize the control variable at the top of the loop body.
      template = """
        var_name = False
      """
      control_var_init = templates.replace(template, var_name=continue_var)
      nodes = control_var_init + nodes
    self.exit_local_scope()
    return nodes
  def _visit_non_loop_body(self, nodes):
    """Transforms a nested non-loop block, inheriting the control variable."""
    self.enter_local_scope(inherit=(CONTROL_VAR_NAME,))
    nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
    continue_used = self.get_local(CONTINUE_USED, False)
    # Propagate continue usage up to the enclosing scope.
    self.exit_local_scope(keep=(CONTINUE_USED,))
    return nodes, continue_used
  def visit_While(self, node):
    node.test = self.visit(node.test)
    node.body = self._visit_loop_body(node, node.body)
    # A continue in the else clause applies to the containing scope.
    node.orelse, _ = self._visit_non_loop_body(node.orelse)
    return node
  def visit_For(self, node):
    node.target = self.generic_visit(node.target)
    node.iter = self.generic_visit(node.iter)
    node.body = self._visit_loop_body(node, node.body)
    # A continue in the else clause applies to the containing scope.
    node.orelse, _ = self._visit_non_loop_body(node.orelse)
    return node
  def visit_If(self, node):
    node.test = self.generic_visit(node.test)
    node.body, continue_used_body = self._visit_non_loop_body(node.body)
    node.orelse, continue_used_orelse = self._visit_non_loop_body(node.orelse)
    # A continue in either branch counts as a continue at this statement.
    self.set_local(CONTINUE_USED, continue_used_body or continue_used_orelse)
    return node
  def visit_With(self, node):
    node.items = self.visit_block(node.items)
    node.body, _ = self._visit_non_loop_body(node.body)
    return node
def transform(node, namer):
  """Canonicalizes continue statements in `node`, using `namer` for symbols."""
  canonicalizer = ContinueCanonicalizationTransformer(namer)
  return canonicalizer.visit(node)
|
yanchen036/tensorflow
|
tensorflow/contrib/autograph/converters/continue_statements.py
|
Python
|
apache-2.0
| 5,271
|
[
"VisIt"
] |
7c19cdca3b7867f3a80efff77340380d3a70aa0d4f1c79a9a9fdf07d48c95062
|
"""
Ordered dictionary implementation.
"""
try:
from galaxy import eggs
eggs.require("six")
except ImportError:
# Allow code to operate outside Galaxy.
pass
from six.moves import UserDict
class odict(UserDict):
    """
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747

    This dictionary class extends UserDict to record the order in which items
    are added. Calling keys(), values(), items(), etc. will return results in
    this order.
    """
    def __init__(self, dict=None):
        # _keys must exist before UserDict.__init__, which may insert the
        # initial items through our __setitem__.
        self._keys = []
        UserDict.__init__(self, dict)
    def __delitem__(self, key):
        # Delete the mapping first so a KeyError leaves _keys untouched.
        UserDict.__delitem__(self, key)
        self._keys.remove(key)
    def __setitem__(self, key, item):
        UserDict.__setitem__(self, key, item)
        # Re-assigning an existing key keeps its original position.
        if key not in self._keys:
            self._keys.append(key)
    def clear(self):
        UserDict.clear(self)
        self._keys = []
    def copy(self):
        # Shallow copy; update() preserves insertion order.
        new = odict()
        new.update(self)
        return new
    def items(self):
        # (key, value) pairs in insertion order.
        return zip(self._keys, self.values())
    def keys(self):
        # Return a copy so callers cannot mutate the ordering list.
        return self._keys[:]
    def popitem(self):
        # Pops the most recently added item (LIFO).
        try:
            key = self._keys[-1]
        except IndexError:
            raise KeyError('dictionary is empty')
        val = self[key]
        del self[key]
        return (key, val)
    def setdefault(self, key, failobj=None):
        if key not in self._keys:
            self._keys.append(key)
        return UserDict.setdefault(self, key, failobj)
    def update(self, dict):
        # Insert in the iteration order of the source mapping.
        for (key, val) in dict.items():
            self.__setitem__(key, val)
    def values(self):
        # NOTE(review): under Python 3 map() returns an iterator, not a list;
        # callers that index the result would need list() - confirm which
        # Python versions this must support.
        return map(self.get, self._keys)
    def iterkeys(self):
        return iter(self._keys)
    def itervalues(self):
        for key in self._keys:
            yield self.get(key)
    def iteritems(self):
        for key in self._keys:
            yield key, self.get(key)
    def __iter__(self):
        for key in self._keys:
            yield key
    def reverse(self):
        # Reverses the iteration order in place.
        self._keys.reverse()
    def insert(self, index, key, item):
        # Place key at a specific position (order unchanged if key exists).
        if key not in self._keys:
            self._keys.insert(index, key)
        UserDict.__setitem__(self, key, item)
|
ssorgatem/pulsar
|
galaxy/util/odict.py
|
Python
|
apache-2.0
| 2,222
|
[
"Galaxy"
] |
24cf6d311fa4418562d5402e90c2f3d63d0ca073932c02e6be91c67e82cd017a
|
"""
Transition Guide Python Example: TRANS_read_netCDF.py

NOTE(review): despite the file name, this example reads a semicolon-separated
ASCII file (Test_6h.csv) with Ngl.asciiread, not a netCDF file - confirm the
intended title.

Expected contents of Test_6h.csv:

  2.00;3.50;5.10;8.20
  2.40;3.10;4.80;8.90
  2.60;3.70;5.30;10.10
  2.75;3.90;5.55;10.25
  3.00;4.10;6.05;10.50

2018-08-28 kmf
"""
# NOTE(review): np and Nio appear unused in this script.
import numpy as np
import Ngl,Nio
print("")
#-- data file name
# NOTE(review): hard-coded, user-specific path - adjust before running.
diri = "/Users/k204045/local/miniconda2/envs/pyn_env/lib/ncarg/data/nug/"
fili = "Test_6h.csv"
#-- number of lines and columns in input file
nrows = 5
ncols = 4
#-- read all data (semicolon-separated floats into an nrows x ncols array)
vals = Ngl.asciiread(diri+fili,(nrows,ncols),"float",sep=';')
#-- print information
print("vals: " + str(vals))
print("")
print("--> rank of vals: " + str(len(vals.shape)))
print("--> shape vals: " + str(vals.shape))
exit()
|
KMFleischer/PyEarthScience
|
Transition_examples_NCL_to_PyNGL/read_data/test.py
|
Python
|
mit
| 755
|
[
"NetCDF"
] |
aecf1c09ce9f44c45ee66a1cf8b97e5f185324e89e0b8065aa7f2a94945e48b8
|
"""Data Assimilation Effectiveness Quantification
Module of functions to evaluate the effectiveness of Data Assimilation
methods. Mainly the evaluation of effectiveness is done by computing the
KL-divergence between the analysis ensemble and the background ensemble along
with the data likelihood under the analysis ensemble. This can be done
explicitly for the Kalman Filter schemes which assume Gaussianity. For the
sequential Monte Carlo and Particle Filter methods we use Kernel Density
Approximation of the distributions.
"""
import numpy as np
import math
def ensemble_KLdiv(ensemble_observations, analysis_observations):
    """Kullback-Leibler divergence D_KL(analysis || ensemble), assuming both
    ensembles are samples from Gaussian distributions.

    The mean and covariance of each Gaussian are estimated from the passed
    observation ensembles, both of shape (measurement size) x (ensemble size).
    An alternative design would accept means and covariances directly.
    """
    n_members = ensemble_observations.shape[1]
    n_meas = ensemble_observations.shape[0]
    # Sample means of the two ensembles.
    mean_e = ensemble_observations.sum(1) / float(n_members)
    mean_a = analysis_observations.sum(1) / float(n_members)
    # Centred anomalies; broadcasting replaces explicit tiling of the mean.
    anom_e = ensemble_observations - mean_e.reshape(n_meas, 1)
    anom_a = analysis_observations - mean_a.reshape(n_meas, 1)
    # Unbiased (N-1) sample covariances.
    cov_e = np.dot(anom_e, anom_e.transpose()) / float(n_members - 1)
    cov_a = np.dot(anom_a, anom_a.transpose()) / float(n_members - 1)
    # Closed-form D_KL between two Gaussians, assembled term by term.
    trace_term = np.trace(np.linalg.solve(cov_e, cov_a))
    mean_diff = mean_a - mean_e
    quad_term = np.dot(mean_diff, np.linalg.solve(cov_e, mean_diff))
    det_ratio = np.linalg.det(cov_a) / np.linalg.det(cov_e)
    return 0.5 * (trace_term + quad_term - math.log(det_ratio) - n_meas)
def GuassLikelihood(data, analysis_observations):
    """Likelihood of a data point under a Gaussian fitted to the analysis
    observation ensemble.

    The ensemble (shape (measurement size) x (ensemble size)) supplies the
    estimated mean and covariance; a Gaussian density with those moments is
    evaluated at `data` and the density value returned.
    """
    n_members = analysis_observations.shape[1]
    n_meas = analysis_observations.shape[0]
    # Moment-matched mean and (N-1) sample covariance of the ensemble.
    mean_a = analysis_observations.sum(1) / float(n_members)
    anom = analysis_observations - mean_a.reshape(n_meas, 1)
    cov_a = np.dot(anom, anom.transpose()) / float(n_members - 1)
    # Gaussian normalisation constant and quadratic-form exponent.
    norm_const = math.sqrt(math.pow(2 * math.pi, n_meas) * np.linalg.det(cov_a))
    residual = data - mean_a
    exponent = 0.5 * np.dot(residual, np.linalg.solve(cov_a, residual))
    return math.exp(-exponent) / norm_const
def Mdist(data, Amean, ACov):
    """Mahalanobis distance between observations `data` and the mean `Amean`
    of an ensemble/analysis, scaled by the ensemble covariance `ACov`:

        d(m, mu) = sqrt( (m - mu)^T inv(C) (m - mu) )
    """
    residual = data - Amean
    return math.sqrt(np.dot(residual, np.linalg.solve(ACov, residual)))
|
hickmank/pyda
|
pyda/utilities/DA_quant.py
|
Python
|
apache-2.0
| 3,354
|
[
"Gaussian"
] |
cefd848a48a340e32e949b54945c1cf23ef8a3e2c2ad447adbd688a988992a81
|
# Copyright (C) 2012,2013,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def replicate(bonds, angles, x, y, z, Lx, Ly, Lz, xdim=1, ydim=1, zdim=1):
    """
    Replicates configuration in each dimension.

    This may be used to increase the size of an equilibrated melt by a factor
    of 8 or more. Presently this routine works only for semiflexible polymers.
    A general class should be written to deal with files containing
    coordinates and topology data.

    xdim = ydim = zdim = 1 returns the original system not replicated.
    xdim = ydim = zdim = 2 returns the original system replicated to 8x.
    xdim = ydim = zdim = 3 returns the original system replicated to 27x.
    xdim = ydim = 1, zdim = 2 returns the original system replicated in the
    z-direction.

    Returns (bonds, angles, x, y, z, Lx, Ly, Lz) for the enlarged system.

    Fix: uses range() instead of the Python-2-only xrange() - iteration
    behaviour is identical and the function now also runs under Python 3.
    """
    # Replicate the particle coordinates: start from a copy of the original
    # image, then append each shifted replica (skipping the original itself).
    x_replicated = x[:]
    y_replicated = y[:]
    z_replicated = z[:]
    for i in range(xdim):
        for j in range(ydim):
            for k in range(zdim):
                if(i + j + k != 0):
                    for x_, y_, z_ in zip(x, y, z):
                        x_replicated.append(x_ + i * Lx)
                        y_replicated.append(y_ + j * Ly)
                        z_replicated.append(z_ + k * Lz)
    # Replicate the bonds and angles: particle indices in each replica are
    # offset by one original-system size per replica, in the same traversal
    # order used for the coordinates above.
    ct = 0
    num_particles_original = len(x)
    bonds_replicated = bonds[:]
    angles_replicated = angles[:]
    for i in range(xdim):
        for j in range(ydim):
            for k in range(zdim):
                if(i + j + k != 0):
                    ct = ct + 1
                    for p1, p2 in bonds:
                        bonds_replicated.append((p1 + ct * num_particles_original, \
                                                 p2 + ct * num_particles_original))
                    for p1, p2, p3 in angles:
                        angles_replicated.append((p1 + ct * num_particles_original, \
                                                  p2 + ct * num_particles_original, \
                                                  p3 + ct * num_particles_original))
    # Grow the box to hold the replicas.
    Lx = xdim * Lx
    Ly = ydim * Ly
    Lz = zdim * Lz
    return bonds_replicated, angles_replicated, x_replicated, y_replicated, z_replicated, Lx, Ly, Lz
|
kkreis/espressopp
|
src/tools/replicate.py
|
Python
|
gpl-3.0
| 2,830
|
[
"ESPResSo"
] |
57d40aa52be85e2afae39783a824a44f6401b901ff7261dce812f32d6712c4c5
|
"""Minimal orthorhombic unitcells for bulk crystals"""
from ase import Atoms, view
from math import sqrt
# Compound stoichiometries as formula -> ((element, count), (element, count)).
# Fix: the original dict listed the 'BN' key twice with the same value; the
# duplicate has been removed (dict literals silently keep only the last one).
binary_compounds = {
    'LiF': (('Li', 1), ('F', 1)),
    'LiCl': (('Li', 1), ('Cl', 1)),
    'NaF': (('Na', 1), ('F', 1)),
    'NaCl': (('Na', 1), ('Cl', 1)),
    'TiC': (('Ti', 1), ('C', 1)),
    'VC': (('V', 1), ('C', 1)),
    'ZrC': (('Zr', 1), ('C', 1)),
    'NbC': (('Nb', 1), ('C', 1)),
    'HfC': (('Hf', 1), ('C', 1)),
    'ScN': (('Sc', 1), ('N', 1)),
    'TiN': (('Ti', 1), ('N', 1)),
    'VN': (('V', 1), ('N', 1)),
    'YN': (('Y', 1), ('N', 1)),
    'ZrN': (('Zr', 1), ('N', 1)),
    'NbN': (('Nb', 1), ('N', 1)),
    'LaN': (('La', 1), ('N', 1)),
    'HfN': (('Hf', 1), ('N', 1)),
    'MgO': (('Mg', 1), ('O', 1)),
    'CaO': (('Ca', 1), ('O', 1)),
    'MgS': (('Mg', 1), ('S', 1)),
    'MnO': (('Mn', 1), ('O', 1)),
    'FeO': (('Fe', 1), ('O', 1)),
    'CoO': (('Co', 1), ('O', 1)),
    'NiO': (('Ni', 1), ('O', 1)),
    'ZnO': (('Zn', 1), ('O', 1)),
    'FeAl': (('Fe', 1), ('Al', 1)),
    'CoAl': (('Co', 1), ('Al', 1)),
    'NiAl': (('Ni', 1), ('Al', 1)),
    'BN': (('B', 1), ('N', 1)),
    'BP': (('B', 1), ('P', 1)),
    'BAs': (('B', 1), ('As', 1)),
    'AlN': (('Al', 1), ('N', 1)),
    'AlP': (('Al', 1), ('P', 1)),
    'AlAs': (('Al', 1), ('As', 1)),
    'GaN': (('Ga', 1), ('N', 1)),
    'GaP': (('Ga', 1), ('P', 1)),
    'GaAs': (('Ga', 1), ('As', 1)),
    'InN': (('In', 1), ('N', 1)),
    'InP': (('In', 1), ('P', 1)),
    'InAs': (('In', 1), ('As', 1)),
    'SiC': (('Si', 1), ('C', 1)),
    'CeO2': (('Ce', 1), ('O', 2)),
    'MoSe2': (('Mo', 1), ('Se', 2))
}
def perovskite(symbol1, symbol2, symbol3, a):
    """Perovskite (CaTiO3-type) cubic 5-atom cell with lattice constant a."""
    # A-site at the corner, B-site at the body centre, three O-type sites.
    sites = [
        (0.0, 0.0, 0.0),
        (0.5, 0.5, 0.5),
        (0.5, 0.0, 0.5),
        (0.5, 0.5, 0.0),
        (0.0, 0.5, 0.5),
    ]
    formula = '%s%s%s3' % (symbol1, symbol2, symbol3)
    return Atoms(symbols=formula, pbc=True, cell=[a, a, a],
                 scaled_positions=sites)
def wurtzite(symbol1, symbol2, a, u=None, c=None):
    """Wurtzite (ZnO-type) structure in an orthorhombic 8-atom cell.

    u defaults to the ideal internal parameter 3/8 and c to the ideal
    c/a ratio sqrt(8/3) when not given.
    """
    if c is None:
        c = sqrt(8 / 3.) * a
    if u is None:
        u = 3 / 8.
    # First species occupies the hcp sites; the second is shifted by u
    # along the c axis.
    cation_sites = [(0.00, 0.00, 0.00),
                    (1 / 2., 1 / 2., 0.00),
                    (0.00, 1 / 3., 1 / 2.),
                    (1 / 2., 5 / 6., 1 / 2.)]
    anion_sites = [(x, y, z + u) for (x, y, z) in cation_sites]
    return Atoms(symbols='%s4%s4' % (symbol1, symbol2), pbc=True,
                 cell=[a, a * sqrt(3), c],
                 scaled_positions=cation_sites + anion_sites)
def c7(symbol1, symbol2, a):
    """C7 layered structure in an orthorhombic 12-atom cell.

    The internal parameter z and the c lattice constant are hard-coded
    literature values (presumably for MoSe2, which is how the commented
    example below uses it — TODO confirm).
    """
    z = 0.6210
    c = 12.927
    metal_sites = [(0., 4. / 6., 1. / 4.),
                   (0., 2. / 6., 3. / 4.),
                   (1. / 2., 1. / 6., 1. / 4.),
                   (1. / 2., 5. / 6., 3. / 4.)]
    chalcogen_sites = [(0., 2. / 6., z - 1. / 2.),
                       (0., 4. / 6., z),
                       (0., 4. / 6., -z + 3. / 2.),
                       (0., 2. / 6., -z + 1),
                       (1. / 2., 5. / 6., z - 1. / 2.),
                       (1. / 2., 1. / 6., z),
                       (1. / 2., 1. / 6., -z + 3. / 2.),
                       (1. / 2., 5. / 6., -z + 1.)]
    return Atoms(symbols='%s4%s8' % (symbol1, symbol2), pbc=True,
                 cell=[a, a * sqrt(3), c],
                 scaled_positions=metal_sites + chalcogen_sites)
def fluorite(symbol1, symbol2, a):
    """Fluorite (CaF2-type) cubic cell: 4 cations on an fcc lattice and
    8 anions filling the tetrahedral holes (12 atoms total, matching the
    '%s4%s8' symbols string)."""
    return Atoms(symbols='%s4%s8' % (symbol1, symbol2), pbc=True,
                 cell=[a, a, a],
                 # cation fcc sublattice
                 scaled_positions=[(.0, .0, .0),
                                   (.0, .5, .5),
                                   (.5, .0, .5),
                                   (.5, .5, .0),
                                   # anions at the eight (1/4,1/4,1/4)-type sites
                                   (.25, .25, .25),
                                   (.25, .75, .75),
                                   (.75, .25, .75),
                                   (.75, .75, .25),
                                   (.75, .75, .75),
                                   (.75, .25, .25),
                                   (.25, .75, .25),
                                   (.25, .25, .75),])
def zincblende(symbol1, symbol2, a):
    """Zinc blende (ZnS-type) structure in a minimal orthorhombic
    4-atom cell with in-plane lattice constant a/sqrt(2)."""
    first_species = [(0.0, 0.0, 0.0), (0.5, 0.5, 0.5)]
    second_species = [(0.0, 0.5, 0.75), (0.5, 0.0, 0.25)]
    return Atoms(symbols='%s2%s2' % (symbol1, symbol2), pbc=True,
                 cell=[a / sqrt(2), a / sqrt(2), a],
                 scaled_positions=first_species + second_species)
def cesiumchloride(symbol1, symbol2, a):
    """Cesium chloride (CsCl-type) cubic cell: one atom at the corner
    and the other species at the body centre."""
    formula = '%s%s' % (symbol1, symbol2)
    return Atoms(symbols=formula, pbc=True, cell=[a, a, a],
                 scaled_positions=[(0.0, 0.0, 0.0),
                                   (0.5, 0.5, 0.5)])
def rocksalt(symbol1, symbol2, a):
    """Rock salt (NaCl-type) structure in a minimal orthorhombic
    4-atom cell with in-plane lattice constant a/sqrt(2)."""
    first_species = [(0.0, 0.0, 0.0), (0.5, 0.5, 0.5)]
    second_species = [(0.5, 0.5, 0.0), (0.0, 0.0, 0.5)]
    return Atoms(symbols='%s2%s2' % (symbol1, symbol2), pbc=True,
                 cell=[a / sqrt(2), a / sqrt(2), a],
                 scaled_positions=first_species + second_species)
def hcp(symbol, a, c=None):
    """Hexagonal close-packed lattice in an orthorhombic 4-atom cell.

    c defaults to the ideal c/a ratio sqrt(8/3) when not given.
    """
    if c is None:
        c = sqrt(8 / 3.) * a
    sites = [(0.00, 0.00, 0.00),
             (1 / 2., 1 / 2., 0.00),
             (0.00, 1 / 3., 1 / 2.),
             (1 / 2., 5 / 6., 1 / 2.)]
    return Atoms(symbols='%s4' % symbol, pbc=True,
                 cell=[a, a * sqrt(3), c],
                 scaled_positions=sites)
def fcc(symbol, a):
    """Face-centred cubic lattice in a minimal orthorhombic 2-atom cell
    with in-plane lattice constant a/sqrt(2)."""
    side = a / sqrt(2)
    return Atoms(symbols='%s2' % symbol, pbc=True,
                 cell=[side, side, a],
                 scaled_positions=[(0.0, 0.0, 0.0),
                                   (0.5, 0.5, 0.5)])
def bcc(symbol, a):
    """Body-centred cubic lattice: conventional 2-atom cubic cell."""
    sites = [(0.0, 0.0, 0.0),
             (0.5, 0.5, 0.5)]
    return Atoms(symbols='%s2' % symbol, pbc=True, cell=[a, a, a],
                 scaled_positions=sites)
def sc(symbol, a):
    """Simple cubic lattice: one atom per cubic cell of edge length a.

    BUG FIX: the original built '%s2' (two atoms) without any positions,
    which left both atoms sitting on top of each other at the origin;
    a simple cubic cell contains exactly one atom.
    """
    return Atoms(symbols=symbol, pbc=True, cell=[a, a, a],
                 scaled_positions=[(0.0, 0.0, 0.0)])
def alloy(structure, symbol1, symbol2, a):
    """Build a binary structure by name.

    SECURITY FIX: the original used eval(structure), which executes
    arbitrary code if the structure name comes from untrusted input.
    Dispatch through an explicit whitelist of the binary builders in
    this module instead (all of them take (symbol1, symbol2, a)).

    Raises ValueError for an unknown structure name.
    """
    builders = {
        'zincblende': zincblende,
        'rocksalt': rocksalt,
        'cesiumchloride': cesiumchloride,
        'fluorite': fluorite,
        'wurtzite': wurtzite,
        'c7': c7,
    }
    try:
        builder = builders[structure]
    except KeyError:
        raise ValueError('unknown structure: %r' % structure)
    return builder(symbol1, symbol2, a)
## ZnS = zincblende('Zn', 'S', 5.41).repeat([2, 2, 2])
## view(ZnS)
## NaCl = rocksalt('Na', 'Cl', 5.64)
## view(NaCl)
## CaF2 = fluorite('Ca', 'F', 5.64).repeat([2, 2, 2])
## view(CaF2)
## MoSe2 = c7('Si', 'O', 3.289).repeat([4, 4, 3])
## view(MoSe2)
## CaTiO3 = perovskite('Ca', 'Ti', 'O', 3.84)
## view(CaTiO3)
## Be = hcp('Be', 2.29, c=1.567 * 2.29)
## view(Be)
## ZnO = wurtzite('Zn', 'O', 3.25, c=5.23).repeat([2, 2, 2])
## view(ZnO)
|
qsnake/gpaw
|
scripts/solids.py
|
Python
|
gpl-3.0
| 7,113
|
[
"ASE"
] |
8630239b82ace78915ae0eb3a3d09ef579097f545c6f5250ea7bf648847c096c
|
"""
Test courseware search
"""
import os
import json
from nose.plugins.attrib import attr
from flaky import flaky
from ..helpers import UniqueCourseTest
from ...pages.common.logout import LogoutPage
from ...pages.studio.utils import add_html_component, click_css, type_in_codemirror
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.container import ContainerPage
from ...pages.lms.courseware_search import CoursewareSearchPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
@attr('shard_5')
class CoursewareSearchTest(UniqueCourseTest):
    """
    Test courseware search.

    End-to-end flow: staff creates/publishes content in Studio, the
    course is (re)indexed, and a student searches for it in the LMS.
    """
    # Student credentials used for LMS-side searching.
    USERNAME = 'STUDENT_TESTER'
    EMAIL = 'student101@example.com'
    # Staff credentials used for Studio-side authoring.
    STAFF_USERNAME = "STAFF_TESTER"
    STAFF_EMAIL = "staff101@example.com"
    # HTML body added to the course; SEARCH_STRING must occur in it.
    HTML_CONTENT = """
Someday I'll wish upon a star
And wake up where the clouds are far
Behind me.
Where troubles melt like lemon drops
Away above the chimney tops
That's where you'll find me.
"""
    SEARCH_STRING = "chimney"
    EDITED_CHAPTER_NAME = "Section 2 - edited"
    EDITED_SEARCH_STRING = "edited"
    # File where the search index lives for the duration of this test.
    TEST_INDEX_FILENAME = "test_root/index_file.dat"

    def setUp(self):
        """
        Create search page and course content to search
        """
        # create test file in which index for this test will live
        with open(self.TEST_INDEX_FILENAME, "w+") as index_file:
            json.dump({}, index_file)
        self.addCleanup(os.remove, self.TEST_INDEX_FILENAME)
        super(CoursewareSearchTest, self).setUp()
        self.courseware_search_page = CoursewareSearchPage(self.browser, self.course_id)
        self.course_outline = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        # Two chapters with one subsection each; tests index 0 and 1 below
        # refer to these sections.
        course_fix = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Subsection 1')
            )
        ).add_children(
            XBlockFixtureDesc('chapter', 'Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Subsection 2')
            )
        ).install()

    def _auto_auth(self, username, email, staff):
        """
        Logout and login with given credentials.
        """
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, username=username, email=email,
                     course_id=self.course_id, staff=staff).visit()

    def _studio_publish_content(self, section_index):
        """
        Publish content on studio course page under specified section
        """
        self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
        self.course_outline.visit()
        subsection = self.course_outline.section_at(section_index).subsection_at(0)
        subsection.expand_subsection()
        unit = subsection.unit_at(0)
        unit.publish()

    def _studio_edit_chapter_name(self, section_index):
        """
        Edit chapter name on studio course page under specified section
        """
        self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
        self.course_outline.visit()
        section = self.course_outline.section_at(section_index)
        section.change_name(self.EDITED_CHAPTER_NAME)

    def _studio_add_content(self, section_index):
        """
        Add content on studio course page under specified section
        """
        self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
        # create a unit in course outline
        self.course_outline.visit()
        subsection = self.course_outline.section_at(section_index).subsection_at(0)
        subsection.expand_subsection()
        subsection.add_unit()
        # got to unit and create an HTML component and save (not publish)
        unit_page = ContainerPage(self.browser, None)
        unit_page.wait_for_page()
        add_html_component(unit_page, 0)
        unit_page.wait_for_element_presence('.edit-button', 'Edit button is visible')
        click_css(unit_page, '.edit-button', 0, require_notification=False)
        unit_page.wait_for_element_visibility('.modal-editor', 'Modal editor is visible')
        type_in_codemirror(unit_page, 0, self.HTML_CONTENT)
        click_css(unit_page, '.action-save', 0)

    def _studio_reindex(self):
        """
        Reindex course content on studio course page
        """
        self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
        self.course_outline.visit()
        self.course_outline.start_reindex()
        self.course_outline.wait_for_ajax()

    def _search_for_content(self, search_term):
        """
        Login and search for specific content

        Arguments:
            search_term - term to be searched for

        Returns:
            (bool) True if search term is found in resulting content; False if not found
        """
        self._auto_auth(self.USERNAME, self.EMAIL, False)
        self.courseware_search_page.visit()
        self.courseware_search_page.search_for_term(search_term)
        return search_term in self.courseware_search_page.search_results.html[0]

    def test_page_existence(self):
        """
        Make sure that the page is accessible.
        """
        self._auto_auth(self.USERNAME, self.EMAIL, False)
        self.courseware_search_page.visit()

    def test_search(self):
        """
        Make sure that you can search for something.
        """
        # Create content in studio without publishing.
        self._studio_add_content(0)
        # Do a search, there should be no results shown.
        self.assertFalse(self._search_for_content(self.SEARCH_STRING))
        # Publish in studio to trigger indexing.
        self._studio_publish_content(0)
        # Do the search again, this time we expect results.
        self.assertTrue(self._search_for_content(self.SEARCH_STRING))

    @flaky  # TODO fix SOL-835
    def test_reindex(self):
        """
        Make sure new content gets reindexed on button press.
        """
        # Create content in studio without publishing.
        self._studio_add_content(1)
        # Do a search, there should be no results shown.
        self.assertFalse(self._search_for_content(self.EDITED_SEARCH_STRING))
        # Publish in studio to trigger indexing, and edit chapter name afterwards.
        self._studio_publish_content(1)
        # Do a ReIndex from studio to ensure that our stuff is updated before the next stage of the test
        self._studio_reindex()
        # Search after publish, there should still be no results shown.
        self.assertFalse(self._search_for_content(self.EDITED_SEARCH_STRING))
        self._studio_edit_chapter_name(1)
        # Do a ReIndex from studio to ensure that our stuff is updated before the next stage of the test
        self._studio_reindex()
        # Do the search again, this time we expect results.
        self.assertTrue(self._search_for_content(self.EDITED_SEARCH_STRING))
|
antonve/s4-project-mooc
|
common/test/acceptance/tests/lms/test_lms_courseware_search.py
|
Python
|
agpl-3.0
| 7,347
|
[
"VisIt"
] |
decb0b9a99581910f0d842947828e460d26e80fc6a0adb666972d932f8c59d39
|
#!/usr/bin/python
import Scientific.IO.NetCDF as nc
import numpy as np
import pylab as pl
import sys
if len(sys.argv) != 2 or '-h' in sys.argv or '--help' in sys.argv:
print "\nUsage:\tcompare.py <file.nc>\n"
print "Author:\tHannu Parviainen"
print "\thannu.p.parviainen@helsinki.fi\n"
sys.exit(1)
filename = sys.argv[1]
f = nc.NetCDFFile(filename, 'r')
if f.result_type == 'hemisphere':
ds = np.array(f.variables['res__fld_hemisphere_simulated'].getValue(), np.float64)
da = np.array(f.variables['res__fld_hemisphere_analytical'].getValue(), np.float64)
elif f.result_type == 'tabulated':
ds = np.array(f.variables['res__fld_table_simulated'].getValue(), np.float64)
da = np.array(f.variables['res__fld_table_analytical'].getValue(), np.float64)
de = np.abs((da - ds) / da) * 100.0
print '\nSimulation type: %s' %f.simulation_type
print 'Result type: %s' %f.result_type
print '\nError:\tmean: \t%6.3f\n\tmax: \t%6.3f\n\tmin: \t%6.3f\n' %(de.mean(), de.max(), de.min())
pl.gray()
nLines = ds.shape[0]/2 - 1
elNames = f.mat__element_names.split()
pIdx = 1
for ln in range(1,nLines + 1):
sll = ds[0,:,:] / ds[ln*2,:,:]
all = da[0,:,:] / da[ln*2,:,:]
vmi = np.min(sll.min(), all.min())
vma = np.max(sll.max(), all.max())
pl.subplot(nLines,3,pIdx)
if(ln==1): pl.title('Simulation')
pl.imshow(sll, vmin=vmi, vmax=vma)
pl.text(0.5, 1.5, elNames[0] + '/' + elNames[ln])
pIdx +=1
pl.subplot(nLines,3,pIdx)
if(ln==1): pl.title('Analytical')
pl.imshow(all, vmin=vmi, vmax=vma)
pIdx +=1
pl.subplot(nLines,3,pIdx)
if(ln==1): pl.title('Abs(Sim - An)')
pl.imshow(np.abs(sll - all), vmin=0.0, vmax=0.1*vma)
pIdx +=1
pl.show()
f.close
|
dronir/EM
|
test/xrfip_em/compare.py
|
Python
|
gpl-3.0
| 1,776
|
[
"NetCDF"
] |
ee488edee1b2df115bcbaffbd624815a07d63d039431cc7f50241b7b475701a4
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
from django.utils.translation import gettext_lazy as _
####################
# CORE             #
####################

# NOTE(review): these are the framework-wide defaults (early-Django
# vintage); deployments override them via DJANGO_SETTINGS_MODULE.
DEBUG = False

# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False

# People who get code error notifications.
# In the format (('Full Name', 'email@domain.com'), ('Full Name', 'anotheremail@domain.com'))
ADMINS = ()

# Tuple of IP addresses, as strings, that:
#   * See debug comments, when DEBUG is true
#   * Receive x-headers
INTERNAL_IPS = ()

# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/current/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'

# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
    ('bn', _('Bengali')),
    ('cs', _('Czech')),
    ('cy', _('Welsh')),
    ('da', _('Danish')),
    ('de', _('German')),
    ('en', _('English')),
    ('es', _('Spanish')),
    ('fr', _('French')),
    ('gl', _('Galician')),
    ('is', _('Icelandic')),
    ('it', _('Italian')),
    ('no', _('Norwegian')),
    ('pt-br', _('Brazilian')),
    ('ro', _('Romanian')),
    ('ru', _('Russian')),
    ('sk', _('Slovak')),
    ('sr', _('Serbian')),
    ('sv', _('Swedish')),
    ('zh-cn', _('Simplified Chinese')),
)

# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various e-mails.
MANAGERS = ADMINS

# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'

# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'

# Whether to send broken-link e-mails.
SEND_BROKEN_LINK_EMAILS = False

# Database connection info.
DATABASE_ENGINE = 'postgresql' # 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = ''             # Or path to database file if using sqlite3.
DATABASE_USER = ''             # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.

# Host for sending e-mail.
EMAIL_HOST = 'localhost'

# List of strings representing installed apps.
INSTALLED_APPS = ()

# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()

# Extension on all templates.
TEMPLATE_FILE_EXTENSION = '.html'

# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
    'django.core.template.loaders.filesystem.load_template_source',
    'django.core.template.loaders.app_directories.load_template_source',
#     'django.core.template.loaders.eggs.load_template_source',
)

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Default e-mail address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'

# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '

# Whether to append trailing slashes to URLs.
APPEND_SLASH = True

# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False

# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
#     import re
#     DISALLOWED_USER_AGENTS = (
#         re.compile(r'^NaverBot.*'),
#         re.compile(r'^EmailSiphon.*'),
#         re.compile(r'^SiteSucker.*'),
#         re.compile(r'^sohu-search')
#     )
DISALLOWED_USER_AGENTS = ()

ABSOLUTE_URL_OVERRIDES = {}

# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()

# If this is a admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()

# 404s that may be ignored.
IGNORABLE_404_STARTS = ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
IGNORABLE_404_ENDS = ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', 'favicon.ico', '.php')

# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''

# Path to the "jing" executable -- needed to validate XMLFields
JING_PATH = "/usr/bin/jing"

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''

# Default formatting for date objects. See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
DATE_FORMAT = 'N j, Y'

# Default formatting for datetime objects. See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
DATETIME_FORMAT = 'N j, Y, P'

# Default formatting for time objects. See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
TIME_FORMAT = 'P'

##############
# MIDDLEWARE #
##############

# List of middleware classes to use.  Order is important; in the request phase,
# this middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
    "django.middleware.sessions.SessionMiddleware",
#     "django.middleware.http.ConditionalGetMiddleware",
#     "django.middleware.gzip.GZipMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.doc.XViewMiddleware",
)

############
# SESSIONS #
############

SESSION_COOKIE_NAME = 'hotclub'            # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2  # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None               # A string like ".lawrence.com", or None for standard domain cookie.

#########
# CACHE #
#########

# The cache backend to use.  See the docstring in django.core.cache for the
# possible values.
CACHE_BACKEND = 'simple://'
CACHE_MIDDLEWARE_KEY_PREFIX = ''

####################
# COMMENTS         #
####################

COMMENTS_ALLOW_PROFANITIES = False

# The group ID that designates which users are banned.
# Set to None if you're not using it.
COMMENTS_BANNED_USERS_GROUP = None

# The group ID that designates which users can moderate comments.
# Set to None if you're not using it.
COMMENTS_MODERATORS_GROUP = None

# The group ID that designates the users whose comments should be e-mailed to MANAGERS.
# Set to None if you're not using it.
COMMENTS_SKETCHY_USERS_GROUP = None

# The system will e-mail MANAGERS the first COMMENTS_FIRST_FEW comments by each
# user. Set this to 0 if you want to disable it.
COMMENTS_FIRST_FEW = 0

# A tuple of IP addresses that have been banned from participating in various
# Django-powered features.
BANNED_IPS = ()
|
tungvx/deploy
|
Django-0.90/django/conf/global_settings.py
|
Python
|
apache-2.0
| 7,854
|
[
"VisIt"
] |
269e749f55b04abaf81fecb738d23ca316f8544c083402f7581ff69642602bb3
|
#!/usr/bin/env python
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#einit
#Essential Pathways v1.0.2
#Author:realasking
#Email:realasking@gmail.com,tomwangsim@163.com
#Aug-23,2017,in USTB
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
import sys
import os
import re
import sqlite3
import shutil
from epw import cmds
class einit():
    """Initialise the Essential Pathways working environment.

    Creates the per-user configuration folder and the ~/BEP working
    folder, then locates a writable directory on the environment-modules
    search path (MODULEPATH) where the module file can be installed.
    Exits the process if environment-modules is not set up.
    """

    def __init__(self, cfolder, dbf, mname):
        # cfolder: hidden config folder name under $HOME (should be '.ep')
        # dbf:     sqlite database file name inside it (should be 'ep.db')
        # mname:   environment-modules file name to manage (should be 'epath')
        self.home = os.path.expanduser("~")
        self.conf_folder = cfolder
        self.dbfile = dbf
        self.module_name = mname
        self.folder = self.home + '/' + self.conf_folder
        self.df = self.folder + '/' + self.dbfile   # full path to the database file
        self.bf = self.home + '/BEP'                # working folder for BEP data
        self.info = cmds.warnings()                 # message helper from epw.cmds
        if not os.path.exists(self.folder):
            os.makedirs(self.folder, exist_ok=True)
        if not os.path.exists(self.bf):
            os.makedirs(self.bf, exist_ok=True)
        if not os.path.isfile(self.home + '/.modulespath'):
            # No environment-modules installation detected; Merror()
            # presumably prints an explanation — TODO confirm in epw.cmds.
            self.info.Merror()
            exit()
        else:
            # Find the first writable folder on the module search path and
            # remember where the module file should be written.
            setmf = 0
            for modules_folder in re.split(':', os.environ['MODULEPATH']):
                if os.access(modules_folder, os.W_OK):
                    setmf = 1
                    self.module_file = modules_folder + '/' + self.module_name
                    break
            if setmf == 0:
                # No writable module folder found.
                self.info.Mnerror()
                exit()
|
realasking/essential-pathway
|
epw/einit.py
|
Python
|
lgpl-3.0
| 1,502
|
[
"EPW"
] |
5638b08542164f48dd8d40edfa71b0538aa674a5073b988f15a38881c7bc2cd5
|
#
# libtcod 1.5.1 python wrapper
# Copyright (c) 2008,2009,2010 Jice & Mingos
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of Jice or Mingos may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY JICE AND MINGOS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL JICE OR MINGOS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import ctypes
import struct
from ctypes import *
# Compatibility shim: ctypes gained c_bool in Python 2.6.
if not hasattr(ctypes, "c_bool"): # for Python < 2.6
    c_bool = c_uint8

try: #import NumPy if available
    import numpy
    numpy_available = True
except ImportError:
    numpy_available = False

# Platform flags: exactly one ends up True after the detection below.
# BUG FIX: HAIKU was only assigned inside the haiku branch, so reading it
# on any other platform raised NameError; initialise it with the others.
LINUX = False
MAC = False
HAIKU = False
MINGW = False
MSVC = False

# Load the libtcod shared library for the current platform from the
# current working directory.
if sys.platform.find('linux') != -1:
    _lib = ctypes.cdll['./libtcod.so']
    LINUX = True
elif sys.platform.find('darwin') != -1:
    _lib = ctypes.cdll['./libtcod.dylib']
    MAC = True
elif sys.platform.find('haiku') != -1:
    _lib = ctypes.cdll['./libtcod.so']
    HAIKU = True
else:
    try:
        _lib = ctypes.cdll['./libtcod-mingw.dll']
        MINGW = True
    except WindowsError:
        _lib = ctypes.cdll['./libtcod-VS.dll']
        MSVC = True
    # On Windows, ctypes doesn't work well with functions returning structs,
    # so we have to use the _wrapper functions instead (exported only by the
    # Windows builds of the library).
    _lib.TCOD_color_multiply = _lib.TCOD_color_multiply_wrapper
    _lib.TCOD_color_add = _lib.TCOD_color_add_wrapper
    _lib.TCOD_color_multiply_scalar = _lib.TCOD_color_multiply_scalar_wrapper
    _lib.TCOD_color_subtract = _lib.TCOD_color_subtract_wrapper
    _lib.TCOD_color_lerp = _lib.TCOD_color_lerp_wrapper
    _lib.TCOD_console_get_default_background = _lib.TCOD_console_get_default_background_wrapper
    _lib.TCOD_console_get_default_foreground = _lib.TCOD_console_get_default_foreground_wrapper
    _lib.TCOD_console_get_char_background = _lib.TCOD_console_get_char_background_wrapper
    _lib.TCOD_console_get_char_foreground = _lib.TCOD_console_get_char_foreground_wrapper
    _lib.TCOD_console_get_fading_color = _lib.TCOD_console_get_fading_color_wrapper
    _lib.TCOD_image_get_pixel = _lib.TCOD_image_get_pixel_wrapper
    _lib.TCOD_image_get_mipmap_pixel = _lib.TCOD_image_get_mipmap_pixel_wrapper
    _lib.TCOD_parser_get_color_property = _lib.TCOD_parser_get_color_property_wrapper

# Library version identifiers (libtcod 1.5.1).
HEXVERSION = 0x010501
STRVERSION = "1.5.1"
TECHVERSION = 0x01050103

############################
# color module
############################
class Color(Structure):
    """RGB colour backed by three unsigned bytes (mirrors TCOD_color_t).

    Supports equality/arithmetic via the C library, plus pure-Python
    indexing by field name ('r'/'g'/'b') or position (0/1/2), and
    iteration over the three channels.
    """
    _fields_ = [('r', c_uint8),
                ('g', c_uint8),
                ('b', c_uint8),
                ]

    def __eq__(self, c):
        return _lib.TCOD_color_equals(self, c)

    def __mul__(self, c):
        # Colour * colour multiplies channel-wise; colour * number scales.
        if isinstance(c, Color):
            return _lib.TCOD_color_multiply(self, c)
        return _lib.TCOD_color_multiply_scalar(self, c_float(c))

    def __add__(self, c):
        return _lib.TCOD_color_add(self, c)

    def __sub__(self, c):
        return _lib.TCOD_color_subtract(self, c)

    def __repr__(self):
        return "Color(%d,%d,%d)" % (self.r, self.g, self.b)

    def __getitem__(self, i):
        # Accept a field name or an index/slice into the string "rgb".
        field = i if type(i) == str else "rgb"[i]
        return getattr(self, field)

    def __setitem__(self, i, c):
        field = i if type(i) == str else "rgb"[i]
        setattr(self, field, c)

    def __iter__(self):
        for field in "rgb":
            yield getattr(self, field)
# Should be valid on any platform, check it! Has to be done after Color is defined.
if MAC:
    # OS X builds need explicit ctypes prototypes for struct-returning calls.
    from cprotos import setup_protos
    setup_protos(_lib)

# Declare the C return types of the colour calls used by Color's operators
# above; without these, ctypes would assume an int return value.
_lib.TCOD_color_equals.restype = c_bool
_lib.TCOD_color_multiply.restype = Color
_lib.TCOD_color_multiply_scalar.restype = Color
_lib.TCOD_color_add.restype = Color
_lib.TCOD_color_subtract.restype = Color
# default colors
# Named colour palette mirroring libtcod's predefined TCOD colours:
# a hue grid (8 value levels x 21 hues) plus greys, sepias, metallics
# and a few miscellaneous colours.

# grey levels
black=Color(0,0,0)
darkest_grey=Color(31,31,31)
darker_grey=Color(63,63,63)
dark_grey=Color(95,95,95)
grey=Color(127,127,127)
light_grey=Color(159,159,159)
lighter_grey=Color(191,191,191)
lightest_grey=Color(223,223,223)
# US-spelling aliases for the grey levels above.
darkest_gray=Color(31,31,31)
darker_gray=Color(63,63,63)
dark_gray=Color(95,95,95)
gray=Color(127,127,127)
light_gray=Color(159,159,159)
lighter_gray=Color(191,191,191)
lightest_gray=Color(223,223,223)
white=Color(255,255,255)

# sepia
darkest_sepia=Color(31,24,15)
darker_sepia=Color(63,50,31)
dark_sepia=Color(94,75,47)
sepia=Color(127,101,63)
light_sepia=Color(158,134,100)
lighter_sepia=Color(191,171,143)
lightest_sepia=Color(222,211,195)

#standard colors
red=Color(255,0,0)
flame=Color(255,63,0)
orange=Color(255,127,0)
amber=Color(255,191,0)
yellow=Color(255,255,0)
lime=Color(191,255,0)
chartreuse=Color(127,255,0)
green=Color(0,255,0)
sea=Color(0,255,127)
turquoise=Color(0,255,191)
cyan=Color(0,255,255)
sky=Color(0,191,255)
azure=Color(0,127,255)
blue=Color(0,0,255)
han=Color(63,0,255)
violet=Color(127,0,255)
purple=Color(191,0,255)
fuchsia=Color(255,0,255)
magenta=Color(255,0,191)
pink=Color(255,0,127)
crimson=Color(255,0,63)

# dark colors
dark_red=Color(191,0,0)
dark_flame=Color(191,47,0)
dark_orange=Color(191,95,0)
dark_amber=Color(191,143,0)
dark_yellow=Color(191,191,0)
dark_lime=Color(143,191,0)
dark_chartreuse=Color(95,191,0)
dark_green=Color(0,191,0)
dark_sea=Color(0,191,95)
dark_turquoise=Color(0,191,143)
dark_cyan=Color(0,191,191)
dark_sky=Color(0,143,191)
dark_azure=Color(0,95,191)
dark_blue=Color(0,0,191)
dark_han=Color(47,0,191)
dark_violet=Color(95,0,191)
dark_purple=Color(143,0,191)
dark_fuchsia=Color(191,0,191)
dark_magenta=Color(191,0,143)
dark_pink=Color(191,0,95)
dark_crimson=Color(191,0,47)

# darker colors
darker_red=Color(127,0,0)
darker_flame=Color(127,31,0)
darker_orange=Color(127,63,0)
darker_amber=Color(127,95,0)
darker_yellow=Color(127,127,0)
darker_lime=Color(95,127,0)
darker_chartreuse=Color(63,127,0)
darker_green=Color(0,127,0)
darker_sea=Color(0,127,63)
darker_turquoise=Color(0,127,95)
darker_cyan=Color(0,127,127)
darker_sky=Color(0,95,127)
darker_azure=Color(0,63,127)
darker_blue=Color(0,0,127)
darker_han=Color(31,0,127)
darker_violet=Color(63,0,127)
darker_purple=Color(95,0,127)
darker_fuchsia=Color(127,0,127)
darker_magenta=Color(127,0,95)
darker_pink=Color(127,0,63)
darker_crimson=Color(127,0,31)

# darkest colors
darkest_red=Color(63,0,0)
darkest_flame=Color(63,15,0)
darkest_orange=Color(63,31,0)
darkest_amber=Color(63,47,0)
darkest_yellow=Color(63,63,0)
darkest_lime=Color(47,63,0)
darkest_chartreuse=Color(31,63,0)
darkest_green=Color(0,63,0)
darkest_sea=Color(0,63,31)
darkest_turquoise=Color(0,63,47)
darkest_cyan=Color(0,63,63)
darkest_sky=Color(0,47,63)
darkest_azure=Color(0,31,63)
darkest_blue=Color(0,0,63)
darkest_han=Color(15,0,63)
darkest_violet=Color(31,0,63)
darkest_purple=Color(47,0,63)
darkest_fuchsia=Color(63,0,63)
darkest_magenta=Color(63,0,47)
darkest_pink=Color(63,0,31)
darkest_crimson=Color(63,0,15)

# light colors
light_red=Color(255,114,114)
light_flame=Color(255,149,114)
light_orange=Color(255,184,114)
light_amber=Color(255,219,114)
light_yellow=Color(255,255,114)
light_lime=Color(219,255,114)
light_chartreuse=Color(184,255,114)
light_green=Color(114,255,114)
light_sea=Color(114,255,184)
light_turquoise=Color(114,255,219)
light_cyan=Color(114,255,255)
light_sky=Color(114,219,255)
light_azure=Color(114,184,255)
light_blue=Color(114,114,255)
light_han=Color(149,114,255)
light_violet=Color(184,114,255)
light_purple=Color(219,114,255)
light_fuchsia=Color(255,114,255)
light_magenta=Color(255,114,219)
light_pink=Color(255,114,184)
light_crimson=Color(255,114,149)

#lighter colors
lighter_red=Color(255,165,165)
lighter_flame=Color(255,188,165)
lighter_orange=Color(255,210,165)
lighter_amber=Color(255,232,165)
lighter_yellow=Color(255,255,165)
lighter_lime=Color(232,255,165)
lighter_chartreuse=Color(210,255,165)
lighter_green=Color(165,255,165)
lighter_sea=Color(165,255,210)
lighter_turquoise=Color(165,255,232)
lighter_cyan=Color(165,255,255)
lighter_sky=Color(165,232,255)
lighter_azure=Color(165,210,255)
lighter_blue=Color(165,165,255)
lighter_han=Color(188,165,255)
lighter_violet=Color(210,165,255)
lighter_purple=Color(232,165,255)
lighter_fuchsia=Color(255,165,255)
lighter_magenta=Color(255,165,232)
lighter_pink=Color(255,165,210)
lighter_crimson=Color(255,165,188)

# lightest colors
lightest_red=Color(255,191,191)
lightest_flame=Color(255,207,191)
lightest_orange=Color(255,223,191)
lightest_amber=Color(255,239,191)
lightest_yellow=Color(255,255,191)
lightest_lime=Color(239,255,191)
lightest_chartreuse=Color(223,255,191)
lightest_green=Color(191,255,191)
lightest_sea=Color(191,255,223)
lightest_turquoise=Color(191,255,239)
lightest_cyan=Color(191,255,255)
lightest_sky=Color(191,239,255)
lightest_azure=Color(191,223,255)
lightest_blue=Color(191,191,255)
lightest_han=Color(207,191,255)
lightest_violet=Color(223,191,255)
lightest_purple=Color(239,191,255)
lightest_fuchsia=Color(255,191,255)
lightest_magenta=Color(255,191,239)
lightest_pink=Color(255,191,223)
lightest_crimson=Color(255,191,207)

# desaturated colors
desaturated_red=Color(127,63,63)
desaturated_flame=Color(127,79,63)
desaturated_orange=Color(127,95,63)
desaturated_amber=Color(127,111,63)
desaturated_yellow=Color(127,127,63)
desaturated_lime=Color(111,127,63)
desaturated_chartreuse=Color(95,127,63)
desaturated_green=Color(63,127,63)
desaturated_sea=Color(63,127,95)
desaturated_turquoise=Color(63,127,111)
desaturated_cyan=Color(63,127,127)
desaturated_sky=Color(63,111,127)
desaturated_azure=Color(63,95,127)
desaturated_blue=Color(63,63,127)
desaturated_han=Color(79,63,127)
desaturated_violet=Color(95,63,127)
desaturated_purple=Color(111,63,127)
desaturated_fuchsia=Color(127,63,127)
desaturated_magenta=Color(127,63,111)
desaturated_pink=Color(127,63,95)
desaturated_crimson=Color(127,63,79)

# metallic
brass=Color(191,151,96)
copper=Color(197,136,124)
gold=Color(229,191,0)
silver=Color(203,203,203)

# miscellaneous
celadon=Color(172,255,175)
peach=Color(255,159,127)

# color functions
_lib.TCOD_color_lerp.restype = Color
def color_lerp(c1, c2, a):
    """Linearly interpolate between colors c1 and c2; a in [0.0, 1.0]."""
    return _lib.TCOD_color_lerp(c1, c2, c_float(a))
def color_set_hsv(c, h, s, v):
    """Set color c in place from hue/saturation/value components."""
    _lib.TCOD_color_set_HSV(byref(c), c_float(h), c_float(s), c_float(v))
def color_get_hsv(c):
    """Return the (hue, saturation, value) components of color c."""
    # Output parameters for the C call.
    hue, sat, val = c_float(), c_float(), c_float()
    _lib.TCOD_color_get_HSV(c, byref(hue), byref(sat), byref(val))
    return hue.value, sat.value, val.value
def color_scale_HSV(c, scoef, vcoef) :
    """Scale the saturation and value of color c in place."""
    _lib.TCOD_color_scale_HSV(byref(c), c_float(scoef), c_float(vcoef))
def color_gen_map(colors, indexes):
    """Build a gradient map interpolating through the given key colors.

    colors and indexes are parallel sequences; the result is a ctypes
    array of Color of length max(indexes) + 1, with smooth transitions
    between each pair of key indexes.
    """
    ccolors = (Color * len(colors))(*colors)
    cindexes = (c_int * len(indexes))(*indexes)
    cres = (Color * (max(indexes) + 1))()
    _lib.TCOD_color_gen_map(cres, len(colors), ccolors, cindexes)
    return cres
############################
# console module
############################
class Key(Structure):
    """Keyboard event as filled in by the C library (mirrors TCOD_key_t):
    the keycode, printable character, press state and modifier flags."""
    _fields_=[('vk', c_int),       # virtual keycode (KEY_* constant)
              ('c', c_uint8),      # printable ASCII character, if any
              ('pressed', c_bool), # True on press, False on release
              ('lalt', c_bool),
              ('lctrl', c_bool),
              ('ralt', c_bool),
              ('rctrl', c_bool),
              ('shift', c_bool),
              ]
class ConsoleBuffer:
    """Simple console cell buffer with direct (fast) access to cells.

    Holds parallel per-cell lists (background RGB, foreground RGB, character
    code) and flushes them to a libtcod console in one shot via the C "fill"
    functions, simplifying their use.
    """

    def __init__(self, width, height, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):
        # Initialize with the given width and height; fill values are
        # optional and default to black with no characters.
        # (An unused `n = width * height` local was removed; clear() computes it.)
        self.width = width
        self.height = height
        self.clear(back_r, back_g, back_b, fore_r, fore_g, fore_b, char)

    def clear(self, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):
        # Reset every cell to the given values (defaults: black, no character).
        n = self.width * self.height
        self.back_r = [back_r] * n
        self.back_g = [back_g] * n
        self.back_b = [back_b] * n
        self.fore_r = [fore_r] * n
        self.fore_g = [fore_g] * n
        self.fore_b = [fore_b] * n
        self.char = [ord(char)] * n

    def copy(self):
        # Return an independent copy of this ConsoleBuffer.
        other = ConsoleBuffer(0, 0)
        other.width = self.width
        other.height = self.height
        other.back_r = list(self.back_r)  # make explicit copies of all lists
        other.back_g = list(self.back_g)
        other.back_b = list(self.back_b)
        other.fore_r = list(self.fore_r)
        other.fore_g = list(self.fore_g)
        other.fore_b = list(self.fore_b)
        other.char = list(self.char)
        return other

    def set_fore(self, x, y, r, g, b, char):
        # Set the character and foreground color of one cell.
        i = self.width * y + x
        self.fore_r[i] = r
        self.fore_g[i] = g
        self.fore_b[i] = b
        self.char[i] = ord(char)

    def set_back(self, x, y, r, g, b):
        # Set the background color of one cell.
        i = self.width * y + x
        self.back_r[i] = r
        self.back_g[i] = g
        self.back_b[i] = b

    def set(self, x, y, back_r, back_g, back_b, fore_r, fore_g, fore_b, char):
        # Set the background color, foreground color and character of one cell.
        i = self.width * y + x
        self.back_r[i] = back_r
        self.back_g[i] = back_g
        self.back_b[i] = back_b
        self.fore_r[i] = fore_r
        self.fore_g[i] = fore_g
        self.fore_b[i] = fore_b
        self.char[i] = ord(char)

    def blit(self, dest, fill_fore=True, fill_back=True):
        # Use libtcod's "fill" functions to write the whole buffer to a
        # console of exactly the same size.
        if (console_get_width(dest) != self.width or
            console_get_height(dest) != self.height):
            raise ValueError('ConsoleBuffer.blit: Destination console has an incorrect size.')
        # (An unused `struct.Struct` local was removed; the data is passed
        # to C as ctypes int arrays below.)
        if fill_back:
            _lib.TCOD_console_fill_background(dest, (c_int * len(self.back_r))(*self.back_r), (c_int * len(self.back_g))(*self.back_g), (c_int * len(self.back_b))(*self.back_b))
        if fill_fore:
            _lib.TCOD_console_fill_foreground(dest, (c_int * len(self.fore_r))(*self.fore_r), (c_int * len(self.fore_g))(*self.fore_g), (c_int * len(self.fore_b))(*self.fore_b))
            _lib.TCOD_console_fill_char(dest, (c_int * len(self.char))(*self.char))
# return-type declarations for console functions (ctypes defaults to int,
# so bool- and Color-returning functions must be declared explicitly)
_lib.TCOD_console_credits_render.restype = c_bool
_lib.TCOD_console_is_fullscreen.restype = c_bool
_lib.TCOD_console_is_window_closed.restype = c_bool
_lib.TCOD_console_get_default_background.restype = Color
_lib.TCOD_console_get_default_foreground.restype = Color
_lib.TCOD_console_get_char_background.restype = Color
_lib.TCOD_console_get_char_foreground.restype = Color
_lib.TCOD_console_get_fading_color.restype = Color
_lib.TCOD_console_is_key_pressed.restype = c_bool
# background rendering modes
BKGND_NONE = 0
BKGND_SET = 1
BKGND_MULTIPLY = 2
BKGND_LIGHTEN = 3
BKGND_DARKEN = 4
BKGND_SCREEN = 5
BKGND_COLOR_DODGE = 6
BKGND_COLOR_BURN = 7
# background rendering modes (continued); the two ALPH(A) modes carry an
# alpha coefficient packed into the high byte of the flag value
BKGND_ADD = 8
BKGND_ADDA = 9
BKGND_BURN = 10
BKGND_OVERLAY = 11
BKGND_ALPH = 12
BKGND_DEFAULT=13

def BKGND_ALPHA(a):
    """Return an alpha-blend background flag for coefficient *a* in [0, 1]."""
    coef = int(a * 255)
    return BKGND_ALPH | (coef << 8)

def BKGND_ADDALPHA(a):
    """Return an additive-alpha background flag for coefficient *a* in [0, 1]."""
    coef = int(a * 255)
    return BKGND_ADDA | (coef << 8)
# non blocking key events types
KEY_PRESSED = 1
KEY_RELEASED = 2
# key codes (values of Key.vk)
KEY_NONE = 0
KEY_ESCAPE = 1
KEY_BACKSPACE = 2
KEY_TAB = 3
KEY_ENTER = 4
KEY_SHIFT = 5
KEY_CONTROL = 6
KEY_ALT = 7
KEY_PAUSE = 8
KEY_CAPSLOCK = 9
KEY_PAGEUP = 10
KEY_PAGEDOWN = 11
KEY_END = 12
KEY_HOME = 13
KEY_UP = 14
KEY_LEFT = 15
KEY_RIGHT = 16
KEY_DOWN = 17
KEY_PRINTSCREEN = 18
KEY_INSERT = 19
KEY_DELETE = 20
KEY_LWIN = 21
KEY_RWIN = 22
KEY_APPS = 23
KEY_0 = 24
KEY_1 = 25
KEY_2 = 26
KEY_3 = 27
KEY_4 = 28
KEY_5 = 29
KEY_6 = 30
KEY_7 = 31
KEY_8 = 32
KEY_9 = 33
# numeric keypad keys
KEY_KP0 = 34
KEY_KP1 = 35
KEY_KP2 = 36
KEY_KP3 = 37
KEY_KP4 = 38
KEY_KP5 = 39
KEY_KP6 = 40
KEY_KP7 = 41
KEY_KP8 = 42
KEY_KP9 = 43
KEY_KPADD = 44
KEY_KPSUB = 45
KEY_KPDIV = 46
KEY_KPMUL = 47
KEY_KPDEC = 48
KEY_KPENTER = 49
KEY_F1 = 50
KEY_F2 = 51
KEY_F3 = 52
KEY_F4 = 53
KEY_F5 = 54
KEY_F6 = 55
KEY_F7 = 56
KEY_F8 = 57
KEY_F9 = 58
KEY_F10 = 59
KEY_F11 = 60
KEY_F12 = 61
KEY_NUMLOCK = 62
KEY_SCROLLLOCK = 63
KEY_SPACE = 64
KEY_CHAR = 65
# special chars: character codes usable with the console drawing functions
# single walls
CHAR_HLINE = 196
CHAR_VLINE = 179
CHAR_NE = 191
CHAR_NW = 218
CHAR_SE = 217
CHAR_SW = 192
CHAR_TEEW = 180
CHAR_TEEE = 195
CHAR_TEEN = 193
CHAR_TEES = 194
CHAR_CROSS = 197
# double walls
CHAR_DHLINE = 205
CHAR_DVLINE = 186
CHAR_DNE = 187
CHAR_DNW = 201
CHAR_DSE = 188
CHAR_DSW = 200
CHAR_DTEEW = 185
CHAR_DTEEE = 204
CHAR_DTEEN = 202
CHAR_DTEES = 203
CHAR_DCROSS = 206
# blocks
CHAR_BLOCK1 = 176
CHAR_BLOCK2 = 177
CHAR_BLOCK3 = 178
# arrows
CHAR_ARROW_N = 24
CHAR_ARROW_S = 25
CHAR_ARROW_E = 26
CHAR_ARROW_W = 27
# arrows without tail
CHAR_ARROW2_N = 30
CHAR_ARROW2_S = 31
CHAR_ARROW2_E = 16
CHAR_ARROW2_W = 17
# double arrows
CHAR_DARROW_H = 29
CHAR_DARROW_V = 18
# GUI stuff
CHAR_CHECKBOX_UNSET = 224
CHAR_CHECKBOX_SET = 225
CHAR_RADIO_UNSET = 9
CHAR_RADIO_SET = 10
# sub-pixel resolution kit
CHAR_SUBP_NW = 226
CHAR_SUBP_NE = 227
CHAR_SUBP_N = 228
CHAR_SUBP_SE = 229
CHAR_SUBP_DIAG = 230
CHAR_SUBP_E = 231
CHAR_SUBP_SW = 232
# misc characters
CHAR_BULLET = 7
CHAR_BULLET_INV = 8
CHAR_BULLET_SQUARE = 254
CHAR_CENT = 189
CHAR_CLUB = 5
CHAR_COPYRIGHT = 184
CHAR_CURRENCY = 207
CHAR_DIAMOND = 4
CHAR_DIVISION = 246
CHAR_EXCLAM_DOUBLE = 19
CHAR_FEMALE = 12
CHAR_FUNCTION = 159
CHAR_GRADE = 248
CHAR_HALF = 171
CHAR_HEART = 3
CHAR_LIGHT = 15
CHAR_MALE = 11
CHAR_MULTIPLICATION = 158
CHAR_NOTE = 13
CHAR_NOTE_DOUBLE = 14
CHAR_ONE_QUARTER = 172
CHAR_PILCROW = 20
CHAR_POUND = 156
CHAR_POW1 = 251
CHAR_POW2 = 253
CHAR_POW3 = 252
CHAR_RESERVED = 169
CHAR_SECTION = 21
CHAR_SMILIE = 1
CHAR_SMILIE_INV = 2
CHAR_SPADE = 6
CHAR_THREE_QUARTERS = 243
CHAR_UMLAUT = 249
CHAR_YEN = 190
# font flags (bitmask for console_set_custom_font)
FONT_LAYOUT_ASCII_INCOL = 1
FONT_LAYOUT_ASCII_INROW = 2
FONT_TYPE_GREYSCALE = 4
FONT_TYPE_GRAYSCALE = 4
FONT_LAYOUT_TCOD = 8
# color control codes
COLCTRL_1=1
COLCTRL_2=2
COLCTRL_3=3
COLCTRL_4=4
COLCTRL_5=5
COLCTRL_NUMBER=5
COLCTRL_FORE_RGB=6
COLCTRL_BACK_RGB=7
COLCTRL_STOP=8
# renderers (for console_init_root / sys_set_renderer)
RENDERER_GLSL=0
RENDERER_OPENGL=1
RENDERER_SDL=2
NB_RENDERERS=3
# alignment (for console_set_alignment / console_print_ex)
LEFT=0
RIGHT=1
CENTER=2
# initializing the console
def console_init_root(w, h, title, fullscreen=False, renderer=RENDERER_SDL):
    # Create the root console (the game window), w x h cells.
    _lib.TCOD_console_init_root(w, h, c_char_p(title), fullscreen, renderer)
def console_get_width(con):
    # Return console width in cells.
    # NOTE(review): redefined identically later in this file; the later
    # definition wins at import time, with no behavioral difference.
    return _lib.TCOD_console_get_width(con)
def console_get_height(con):
    # Return console height in cells (also redefined identically later).
    return _lib.TCOD_console_get_height(con)
def console_set_custom_font(fontFile, flags=FONT_LAYOUT_ASCII_INCOL, nb_char_horiz=0, nb_char_vertic=0):
    # Select a custom font bitmap; `flags` combines the FONT_* constants.
    _lib.TCOD_console_set_custom_font(c_char_p(fontFile), flags, nb_char_horiz, nb_char_vertic)
def console_map_ascii_code_to_font(asciiCode, fontCharX, fontCharY):
    """Map one character (int code, or 1-char str/bytes) to a font glyph."""
    if type(asciiCode) in (str, bytes):
        asciiCode = ord(asciiCode)
    _lib.TCOD_console_map_ascii_code_to_font(asciiCode, fontCharX, fontCharY)
def console_map_ascii_codes_to_font(firstAsciiCode, nbCodes, fontCharX,
                                    fontCharY):
    """Map *nbCodes* consecutive characters, starting at *firstAsciiCode*
    (an int code or a 1-char str/bytes), to consecutive font glyphs.

    Bug fix: the original tested the undefined name `asciiCode` in its
    second condition, raising NameError whenever firstAsciiCode was not a
    str (e.g. an int or bytes); it must test `firstAsciiCode`.
    """
    if type(firstAsciiCode) == str or type(firstAsciiCode) == bytes:
        _lib.TCOD_console_map_ascii_codes_to_font(ord(firstAsciiCode), nbCodes,
                                                  fontCharX, fontCharY)
    else:
        _lib.TCOD_console_map_ascii_codes_to_font(firstAsciiCode, nbCodes,
                                                  fontCharX, fontCharY)
def console_map_string_to_font(s, fontCharX, fontCharY):
    # Map each character of *s* to consecutive font glyphs; bytes go through
    # the 8-bit C entry point, anything else through the UTF variant.
    if type(s) == bytes:
        _lib.TCOD_console_map_string_to_font(s, fontCharX, fontCharY)
    else:
        _lib.TCOD_console_map_string_to_font_utf(s, fontCharX, fontCharY)
def console_is_fullscreen():
    return _lib.TCOD_console_is_fullscreen()
def console_set_fullscreen(fullscreen):
    _lib.TCOD_console_set_fullscreen(c_int(fullscreen))
def console_is_window_closed():
    # True once the user has closed the game window.
    return _lib.TCOD_console_is_window_closed()
def console_set_window_title(title):
    _lib.TCOD_console_set_window_title(c_char_p(title))
def console_credits():
    _lib.TCOD_console_credits()
def console_credits_reset():
    _lib.TCOD_console_credits_reset()
def console_credits_render(x, y, alpha):
    return _lib.TCOD_console_credits_render(x, y, c_int(alpha))
def console_flush():
    # Present the root console to the screen.
    _lib.TCOD_console_flush()
# drawing on a console.  Most of these are thin wrappers over the C API;
# print-style functions dispatch to the *_utf variant for non-bytes text.
def console_set_default_background(con, col):
    _lib.TCOD_console_set_default_background(con, col)
def console_set_default_foreground(con, col):
    _lib.TCOD_console_set_default_foreground(con, col)
def console_clear(con):
    return _lib.TCOD_console_clear(con)
def console_put_char(con, x, y, c, flag=BKGND_DEFAULT):
    # Accepts either an int character code or a 1-char str/bytes.
    if type(c) == str or type(c) == bytes:
        _lib.TCOD_console_put_char(con, x, y, ord(c), flag)
    else:
        _lib.TCOD_console_put_char(con, x, y, c, flag)
def console_put_char_ex(con, x, y, c, fore, back):
    if type(c) == str or type(c) == bytes:
        _lib.TCOD_console_put_char_ex(con, x, y, ord(c), fore, back)
    else:
        _lib.TCOD_console_put_char_ex(con, x, y, c, fore, back)
def console_set_char_background(con, x, y, col, flag=BKGND_SET):
    _lib.TCOD_console_set_char_background(con, x, y, col, flag)
def console_set_char_foreground(con, x, y, col):
    _lib.TCOD_console_set_char_foreground(con, x, y, col)
def console_set_char(con, x, y, c):
    if type(c) == str or type(c) == bytes:
        _lib.TCOD_console_set_char(con, x, y, ord(c))
    else:
        _lib.TCOD_console_set_char(con, x, y, c)
def console_set_background_flag(con, flag):
    _lib.TCOD_console_set_background_flag(con, c_int(flag))
def console_get_background_flag(con):
    return _lib.TCOD_console_get_background_flag(con)
def console_set_alignment(con, alignment):
    _lib.TCOD_console_set_alignment(con, c_int(alignment))
def console_get_alignment(con):
    return _lib.TCOD_console_get_alignment(con)
def console_print(con, x, y, fmt):
    if type(fmt) == bytes:
        _lib.TCOD_console_print(c_void_p(con), x, y, c_char_p(fmt))
    else:
        _lib.TCOD_console_print_utf(c_void_p(con), x, y, fmt)
def console_print_ex(con, x, y, flag, alignment, fmt):
    if type(fmt) == bytes:
        _lib.TCOD_console_print_ex(c_void_p(con), x, y, flag, alignment, c_char_p(fmt))
    else:
        _lib.TCOD_console_print_ex_utf(c_void_p(con), x, y, flag, alignment, fmt)
def console_print_rect(con, x, y, w, h, fmt):
    # Print inside a w x h rectangle; returns the C call's result.
    if type(fmt) == bytes:
        return _lib.TCOD_console_print_rect(c_void_p(con), x, y, w, h, c_char_p(fmt))
    else:
        return _lib.TCOD_console_print_rect_utf(c_void_p(con), x, y, w, h, fmt)
def console_print_rect_ex(con, x, y, w, h, flag, alignment, fmt):
    if type(fmt) == bytes:
        return _lib.TCOD_console_print_rect_ex(c_void_p(con), x, y, w, h, flag, alignment, c_char_p(fmt))
    else:
        return _lib.TCOD_console_print_rect_ex_utf(c_void_p(con), x, y, w, h, flag, alignment, fmt)
def console_get_height_rect(con, x, y, w, h, fmt):
    if type(fmt) == bytes:
        return _lib.TCOD_console_get_height_rect(c_void_p(con), x, y, w, h, c_char_p(fmt))
    else:
        return _lib.TCOD_console_get_height_rect_utf(c_void_p(con), x, y, w, h, fmt)
def console_rect(con, x, y, w, h, clr, flag=BKGND_DEFAULT):
    _lib.TCOD_console_rect(con, x, y, w, h, c_int(clr), flag)
def console_hline(con, x, y, l, flag=BKGND_DEFAULT):
    _lib.TCOD_console_hline( con, x, y, l, flag)
def console_vline(con, x, y, l, flag=BKGND_DEFAULT):
    _lib.TCOD_console_vline( con, x, y, l, flag)
def console_print_frame(con, x, y, w, h, clear=True, flag=BKGND_DEFAULT, fmt=0):
    _lib.TCOD_console_print_frame(c_void_p(con), x, y, w, h, c_int(clear), flag, c_char_p(fmt))
def console_set_color_control(con,fore,back) :
    _lib.TCOD_console_set_color_control(con,fore,back)
def console_get_default_background(con):
    return _lib.TCOD_console_get_default_background(con)
def console_get_default_foreground(con):
    return _lib.TCOD_console_get_default_foreground(con)
def console_get_char_background(con, x, y):
    return _lib.TCOD_console_get_char_background(con, x, y)
def console_get_char_foreground(con, x, y):
    return _lib.TCOD_console_get_char_foreground(con, x, y)
def console_get_char(con, x, y):
    return _lib.TCOD_console_get_char(con, x, y)
def console_set_fade(fade, fadingColor):
    _lib.TCOD_console_set_fade(fade, fadingColor)
##_lib.TCOD_console_set_fade_wrapper(fade, fadingColor)
def console_get_fade():
    """Return the current console fade amount as an int.

    Bug fix: the C function's ctypes result is already a plain Python int
    (no non-default restype is declared for it), so the original
    `.value` attribute access raised AttributeError on every call.
    """
    return _lib.TCOD_console_get_fade()
def console_get_fading_color():
    return _lib.TCOD_console_get_fading_color()
# handling keyboard input
def console_wait_for_keypress(flush):
    # Block until a key event arrives; returns a filled-in Key structure.
    k=Key()
    _lib.TCOD_console_wait_for_keypress_wrapper(byref(k),c_bool(flush))
    return k
def console_check_for_keypress(flags=KEY_RELEASED):
    # Non-blocking variant; the returned Key has vk == KEY_NONE if no event.
    k=Key()
    _lib.TCOD_console_check_for_keypress_wrapper(byref(k),c_int(flags))
    return k
def console_is_key_pressed(key):
    return _lib.TCOD_console_is_key_pressed(key)
def console_set_keyboard_repeat(initial_delay, interval):
    _lib.TCOD_console_set_keyboard_repeat(initial_delay, interval)
def console_disable_keyboard_repeat():
    _lib.TCOD_console_disable_keyboard_repeat()
# using offscreen consoles
def console_new(w, h):
    # Allocate a new offscreen console; free it with console_delete().
    return _lib.TCOD_console_new(w, h)
def console_from_file(filename):
    return _lib.TCOD_console_from_file(filename)
def console_get_width(con):
    # NOTE(review): duplicate of the earlier identical definition.
    return _lib.TCOD_console_get_width(con)
def console_get_height(con):
    # NOTE(review): duplicate of the earlier identical definition.
    return _lib.TCOD_console_get_height(con)
def console_blit(src, x, y, w, h, dst, xdst, ydst, ffade=1.0,bfade=1.0):
    # Copy a w x h region of src onto dst with fore/back fade coefficients.
    _lib.TCOD_console_blit(src, x, y, w, h, dst, xdst, ydst, c_float(ffade), c_float(bfade))
def console_set_key_color(con, col):
    _lib.TCOD_console_set_key_color(con, col)
def console_delete(con):
    _lib.TCOD_console_delete(con)
# fast color filling
def _fill_channels_to_ctypes(r, g, b):
    """Validate three equal-length channel sequences and convert them for C.

    Returns (cr, cg, cb, refs): three c_int pointers/arrays plus a tuple of
    objects that own the underlying buffers — the caller must keep `refs`
    alive until the C call has returned.
    """
    if len(r) != len(g) or len(r) != len(b):
        raise TypeError('R, G and B must all have the same size.')
    if (numpy_available and isinstance(r, numpy.ndarray) and
            isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
        # numpy arrays: pass their buffers directly instead of copying
        r = numpy.ascontiguousarray(r, dtype=numpy.int_)
        g = numpy.ascontiguousarray(g, dtype=numpy.int_)
        b = numpy.ascontiguousarray(b, dtype=numpy.int_)
        return (r.ctypes.data_as(POINTER(c_int)),
                g.ctypes.data_as(POINTER(c_int)),
                b.ctypes.data_as(POINTER(c_int)),
                (r, g, b))
    # otherwise copy into ctypes arrays
    cr = (c_int * len(r))(*r)
    cg = (c_int * len(g))(*g)
    cb = (c_int * len(b))(*b)
    return cr, cg, cb, (cr, cg, cb)

def console_fill_foreground(con,r,g,b) :
    """Fill the whole console's foreground colors from three channel arrays."""
    cr, cg, cb, _refs = _fill_channels_to_ctypes(r, g, b)
    _lib.TCOD_console_fill_foreground(con, cr, cg, cb)

def console_fill_background(con,r,g,b) :
    """Fill the whole console's background colors from three channel arrays."""
    cr, cg, cb, _refs = _fill_channels_to_ctypes(r, g, b)
    _lib.TCOD_console_fill_background(con, cr, cg, cb)
def console_fill_char(con,arr) :
    # Fill the whole console's characters from a flat array of int codes.
    if (numpy_available and isinstance(arr, numpy.ndarray) ):
        #numpy arrays, use numpy's ctypes functions
        arr = numpy.ascontiguousarray(arr, dtype=numpy.int_)
        carr = arr.ctypes.data_as(POINTER(c_int))
    else:
        #otherwise convert using the struct module
        carr = struct.pack('%di' % len(arr), *arr)
    _lib.TCOD_console_fill_char(con, carr)
# load/save console contents in libtcod's .asc / .apf file formats
def console_load_asc(con, filename) :
    _lib.TCOD_console_load_asc(con,filename)
def console_save_asc(con, filename) :
    _lib.TCOD_console_save_asc(con,filename)
def console_load_apf(con, filename) :
    _lib.TCOD_console_load_apf(con,filename)
def console_save_apf(con, filename) :
    _lib.TCOD_console_save_apf(con,filename)
############################
# sys module
############################
_lib.TCOD_sys_get_last_frame_length.restype = c_float
_lib.TCOD_sys_elapsed_seconds.restype = c_float
# high precision time functions
def sys_set_fps(fps):
    # Cap the renderer's frames per second (0 = uncapped).
    _lib.TCOD_sys_set_fps(fps)
def sys_get_fps():
    return _lib.TCOD_sys_get_fps()
def sys_get_last_frame_length():
    # Duration of the last rendered frame, in seconds (float).
    return _lib.TCOD_sys_get_last_frame_length()
def sys_sleep_milli(val):
    _lib.TCOD_sys_sleep_milli(c_uint(val))
def sys_elapsed_milli():
    return _lib.TCOD_sys_elapsed_milli()
def sys_elapsed_seconds():
    return _lib.TCOD_sys_elapsed_seconds()
def sys_set_renderer(renderer):
    # One of the RENDERER_* constants.
    _lib.TCOD_sys_set_renderer(renderer)
def sys_get_renderer():
    return _lib.TCOD_sys_get_renderer()
# easy screenshots
def sys_save_screenshot(name=0):
    # name=0 passes a NULL filename to the C side.
    _lib.TCOD_sys_save_screenshot(c_char_p(name))
# custom fullscreen resolution
def sys_force_fullscreen_resolution(width, height):
    _lib.TCOD_sys_force_fullscreen_resolution(width, height)
def sys_get_current_resolution():
    # Returns (width, height) in pixels.
    w = c_int()
    h = c_int()
    _lib.TCOD_sys_get_current_resolution(byref(w), byref(h))
    return w.value, h.value
def sys_get_char_size():
    # Returns the (width, height) of one font character, in pixels.
    w = c_int()
    h = c_int()
    _lib.TCOD_sys_get_char_size(byref(w), byref(h))
    return w.value, h.value
# update font bitmap
def sys_update_char(asciiCode, fontx, fonty, img, x, y) :
    # Replace one font glyph with pixels taken from image *img* at (x, y).
    _lib.TCOD_sys_update_char(c_int(asciiCode),c_int(fontx),c_int(fonty),img,c_int(x),c_int(y))
# custom SDL post renderer
SDL_RENDERER_FUNC = CFUNCTYPE(None, c_void_p)
def sys_register_SDL_renderer(callback):
    # Keep the ctypes callback in a module global so it is not
    # garbage-collected while the C side still holds the pointer.
    global sdl_renderer_func
    sdl_renderer_func = SDL_RENDERER_FUNC(callback)
    _lib.TCOD_sys_register_SDL_renderer(sdl_renderer_func)
# event bitmask constants for sys_check_for_event / sys_wait_for_event
EVENT_KEY_PRESS=1
EVENT_KEY_RELEASE=2
EVENT_KEY=EVENT_KEY_PRESS|EVENT_KEY_RELEASE
EVENT_MOUSE_MOVE=4
EVENT_MOUSE_PRESS=8
EVENT_MOUSE_RELEASE=16
EVENT_MOUSE=EVENT_MOUSE_MOVE|EVENT_MOUSE_PRESS|EVENT_MOUSE_RELEASE
EVENT_ANY=EVENT_KEY|EVENT_MOUSE
def sys_check_for_event(mask,k,m) :
    # Non-blocking poll; fills the caller-supplied Key *k* and Mouse *m*.
    return _lib.TCOD_sys_check_for_event(c_int(mask),byref(k),byref(m))
def sys_wait_for_event(mask,k,m,flush) :
    # Blocking wait; fills the caller-supplied Key *k* and Mouse *m*.
    return _lib.TCOD_sys_wait_for_event(c_int(mask),byref(k),byref(m),c_bool(flush))
############################
# line module (Bresenham lines)
############################
_lib.TCOD_line_step.restype = c_bool
_lib.TCOD_line.restype=c_bool
_lib.TCOD_line_step_mt.restype = c_bool
def line_init(xo, yo, xd, yd):
    # Start a line walk in the C library's single global line state;
    # advance it with line_step().  Not reentrant — see line_iter for
    # the thread-safe (_mt) variant.
    _lib.TCOD_line_init(xo, yo, xd, yd)
def line_step():
    # Return the next (x, y) on the line started by line_init, or
    # (None, None) once the C side reports the line is finished.
    x = c_int()
    y = c_int()
    ret = _lib.TCOD_line_step(byref(x), byref(y))
    if not ret:
        return x.value, y.value
    return None,None
def line(xo,yo,xd,yd,py_callback) :
    # Walk the whole line, invoking py_callback(x, y) per cell; the
    # callback returns False to stop early (mirrors the C API's bool).
    LINE_CBK_FUNC=CFUNCTYPE(c_bool,c_int,c_int)
    c_callback=LINE_CBK_FUNC(py_callback)
    return _lib.TCOD_line(xo,yo,xd,yd,c_callback)
def line_iter(xo, yo, xd, yd):
    # Generator over all (x, y) cells of the line, using the reentrant
    # multi-thread variant with caller-owned state.
    data = (c_int * 9)() # struct TCOD_bresenham_data_t
    _lib.TCOD_line_init_mt(xo, yo, xd, yd, data)
    x = c_int(xo)
    y = c_int(yo)
    done = False
    while not done:
        yield x.value, y.value
        done = _lib.TCOD_line_step_mt(byref(x), byref(y), data)
############################
# image module
############################
_lib.TCOD_image_is_pixel_transparent.restype = c_bool
_lib.TCOD_image_get_pixel.restype = Color
_lib.TCOD_image_get_mipmap_pixel.restype = Color
def image_new(width, height):
    # Allocate a new image; free it with image_delete().
    return _lib.TCOD_image_new(width, height)
def image_clear(image,col) :
    _lib.TCOD_image_clear(image,col)
def image_invert(image) :
    _lib.TCOD_image_invert(image)
def image_hflip(image) :
    _lib.TCOD_image_hflip(image)
def image_rotate90(image, num=1) :
    # Rotate by num * 90 degrees.
    _lib.TCOD_image_rotate90(image,num)
def image_vflip(image) :
    _lib.TCOD_image_vflip(image)
def image_scale(image, neww, newh) :
    _lib.TCOD_image_scale(image,c_int(neww),c_int(newh))
def image_set_key_color(image,col) :
    # Pixels of this color are treated as transparent when blitting.
    _lib.TCOD_image_set_key_color(image,col)
def image_get_alpha(image,x,y) :
    return _lib.TCOD_image_get_alpha(image,c_int(x),c_int(y))
def image_is_pixel_transparent(image,x,y) :
    return _lib.TCOD_image_is_pixel_transparent(image,c_int(x),c_int(y))
def image_load(filename):
    return _lib.TCOD_image_load(c_char_p(filename))
def image_from_console(console):
    # Snapshot a console's contents into a new image.
    return _lib.TCOD_image_from_console(console)
def image_refresh_console(image, console):
    _lib.TCOD_image_refresh_console(image, console)
def image_get_size(image):
    # Returns (width, height) in pixels.
    w=c_int()
    h=c_int()
    _lib.TCOD_image_get_size(image, byref(w), byref(h))
    return w.value, h.value
def image_get_pixel(image, x, y):
    return _lib.TCOD_image_get_pixel(image, x, y)
def image_get_mipmap_pixel(image, x0, y0, x1, y1):
    # Average color over the (x0,y0)-(x1,y1) sub-rectangle.
    return _lib.TCOD_image_get_mipmap_pixel(image, c_float(x0), c_float(y0),
                                            c_float(x1), c_float(y1))
def image_put_pixel(image, x, y, col):
    _lib.TCOD_image_put_pixel(image, x, y, col)
##_lib.TCOD_image_put_pixel_wrapper(image, x, y, col)
def image_blit(image, console, x, y, bkgnd_flag, scalex, scaley, angle):
    # Blit with scaling and rotation, centered on (x, y).
    _lib.TCOD_image_blit(image, console, c_float(x), c_float(y), bkgnd_flag,
                         c_float(scalex), c_float(scaley), c_float(angle))
def image_blit_rect(image, console, x, y, w, h, bkgnd_flag):
    _lib.TCOD_image_blit_rect(image, console, x, y, w, h, bkgnd_flag)
def image_blit_2x(image, console, dx, dy, sx=0, sy=0, w=-1, h=-1):
    # Blit at double (sub-cell) resolution; w/h of -1 mean "whole image".
    _lib.TCOD_image_blit_2x(image, console, dx,dy,sx,sy,w,h)
def image_save(image, filename):
    _lib.TCOD_image_save(image, c_char_p(filename))
def image_delete(image):
    _lib.TCOD_image_delete(image)
############################
# mouse module
############################
class Mouse(Structure):
    # ctypes mirror of the C mouse-state struct; field order and types
    # must match the C side exactly.
    _fields_=[('x', c_int),               # pixel coordinates
              ('y', c_int),
              ('dx', c_int),              # movement since last event, pixels
              ('dy', c_int),
              ('cx', c_int),              # cell (console) coordinates
              ('cy', c_int),
              ('dcx', c_int),             # movement since last event, cells
              ('dcy', c_int),
              ('lbutton', c_bool),        # button held flags
              ('rbutton', c_bool),
              ('mbutton', c_bool),
              ('lbutton_pressed', c_bool),# button release events
              ('rbutton_pressed', c_bool),
              ('mbutton_pressed', c_bool),
              ('wheel_up', c_bool),
              ('wheel_down', c_bool),
              ]
_lib.TCOD_mouse_is_cursor_visible.restype = c_bool
def mouse_show_cursor(visible):
    _lib.TCOD_mouse_show_cursor(c_int(visible))
def mouse_is_cursor_visible():
    return _lib.TCOD_mouse_is_cursor_visible()
def mouse_move(x, y):
    _lib.TCOD_mouse_move(x, y)
def mouse_get_status():
    # Return a freshly filled Mouse structure.
    mouse=Mouse()
    _lib.TCOD_mouse_get_status_wrapper(byref(mouse))
    return mouse
############################
# parser module
############################
# return-type declarations (ctypes defaults to int otherwise)
_lib.TCOD_struct_get_name.restype = c_char_p
_lib.TCOD_struct_is_mandatory.restype = c_bool
_lib.TCOD_parser_get_bool_property.restype = c_bool
_lib.TCOD_parser_get_float_property.restype = c_float
_lib.TCOD_parser_get_string_property.restype = c_char_p
_lib.TCOD_parser_get_color_property.restype = Color
class Dice(Structure):
    """ctypes mirror of the C dice struct (field order must match C)."""
    _fields_=[('nb_dices', c_int),
              ('nb_faces', c_int),
              ('multiplier', c_float),
              ('addsub', c_float),
              ]

    def __repr__(self):
        """Debug representation listing the fields in declaration order."""
        return "Dice({0:d}, {1:d}, {2}, {3})".format(
            self.nb_dices, self.nb_faces, self.multiplier, self.addsub)
class _CValue(Union):
    # Union of all value shapes the C parser can hand to a listener.
    _fields_=[('c',c_uint8),
              ('i',c_int),
              ('f',c_float),
              ('s',c_char_p),
              # JBR03192012 See http://bugs.python.org/issue14354 for why these are not defined as their actual types
              ('col',c_uint8 * 3),   # raw bytes of a Color
              ('dice',c_int * 4),    # raw words of a Dice
              ('custom',c_void_p),
              ]
# C callback signatures for the parser listener
_CFUNC_NEW_STRUCT = CFUNCTYPE(c_uint, c_void_p, c_char_p)
_CFUNC_NEW_FLAG = CFUNCTYPE(c_uint, c_char_p)
_CFUNC_NEW_PROPERTY = CFUNCTYPE(c_uint, c_char_p, c_int, _CValue)
class _CParserListener(Structure):
    # ctypes mirror of the C listener struct of callback pointers.
    _fields_=[('new_struct', _CFUNC_NEW_STRUCT),
              ('new_flag',_CFUNC_NEW_FLAG),
              ('new_property',_CFUNC_NEW_PROPERTY),
              ('end_struct',_CFUNC_NEW_STRUCT),
              ('error',_CFUNC_NEW_FLAG),
              ]
# property types
TYPE_NONE = 0
TYPE_BOOL = 1
TYPE_CHAR = 2
TYPE_INT = 3
TYPE_FLOAT = 4
TYPE_STRING = 5
TYPE_COLOR = 6
TYPE_DICE = 7
TYPE_VALUELIST00 = 8
TYPE_VALUELIST01 = 9
TYPE_VALUELIST02 = 10
TYPE_VALUELIST03 = 11
TYPE_VALUELIST04 = 12
TYPE_VALUELIST05 = 13
TYPE_VALUELIST06 = 14
TYPE_VALUELIST07 = 15
TYPE_VALUELIST08 = 16
TYPE_VALUELIST09 = 17
TYPE_VALUELIST10 = 18
TYPE_VALUELIST11 = 19
TYPE_VALUELIST12 = 20
TYPE_VALUELIST13 = 21
TYPE_VALUELIST14 = 22
TYPE_VALUELIST15 = 23
TYPE_LIST = 1024
def _convert_TCODList(clist, typ):
    # Convert a C TCOD_list_t of elements of type *typ* (a TYPE_* constant)
    # into a Python list, reinterpreting each element pointer accordingly.
    res = list()
    for i in range(_lib.TCOD_list_size(clist)):
        elt = _lib.TCOD_list_get(clist, i)
        elt = cast(elt, c_void_p)
        if typ == TYPE_BOOL:
            elt = c_bool.from_buffer(elt).value
        elif typ == TYPE_CHAR:
            elt = c_char.from_buffer(elt).value
        elif typ == TYPE_INT:
            elt = c_int.from_buffer(elt).value
        elif typ == TYPE_FLOAT:
            elt = c_float.from_buffer(elt).value
        elif typ == TYPE_STRING or TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:
            # value-list entries are stored as strings
            elt = cast(elt, c_char_p).value
        elif typ == TYPE_COLOR:
            elt = Color.from_buffer_copy(elt)
        elif typ == TYPE_DICE:
            # doesn't work
            elt = Dice.from_buffer_copy(elt)
        res.append(elt)
    return res
def parser_new():
    # Allocate a config-file parser; free it with parser_delete().
    return _lib.TCOD_parser_new()
def parser_new_struct(parser, name):
    # Declare a new structure type recognized by the parser.
    return _lib.TCOD_parser_new_struct(parser, name)
def struct_add_flag(struct, name):
    _lib.TCOD_struct_add_flag(struct, name)
def struct_add_property(struct, name, typ, mandatory):
    # typ is one of the TYPE_* constants.
    _lib.TCOD_struct_add_property(struct, name, typ, c_bool(mandatory))
def struct_add_value_list(struct, name, value_list, mandatory):
    # Build a NULL-terminated C array of strings for the value list.
    CARRAY = c_char_p * (len(value_list) + 1)
    cvalue_list = CARRAY()
    for i in range(len(value_list)):
        cvalue_list[i] = cast(value_list[i], c_char_p)
    cvalue_list[len(value_list)] = 0
    _lib.TCOD_struct_add_value_list(struct, name, cvalue_list, c_bool(mandatory))
def struct_add_list_property(struct, name, typ, mandatory):
    _lib.TCOD_struct_add_list_property(struct, name, typ, c_bool(mandatory))
def struct_add_structure(struct, sub_struct):
    # Allow sub_struct to appear nested inside struct.
    _lib.TCOD_struct_add_structure(struct, sub_struct)
def struct_get_name(struct):
    return _lib.TCOD_struct_get_name(struct)
def struct_is_mandatory(struct, name):
    return _lib.TCOD_struct_is_mandatory(struct, name)
def struct_get_type(struct, name):
    return _lib.TCOD_struct_get_type(struct, name)
def parser_run(parser, filename, listener=0):
    # Parse *filename*.  With a listener object (anything exposing
    # new_struct/new_flag/new_property/end_struct/error), wrap its methods
    # in ctypes callbacks and forward events; property values are converted
    # from the C union to Python types first.
    if listener != 0:
        clistener=_CParserListener()
        def value_converter(name, typ, value):
            # Translate the _CValue union member selected by *typ* into a
            # Python value before invoking the user's new_property.
            if typ == TYPE_BOOL:
                return listener.new_property(name, typ, value.c == 1)
            elif typ == TYPE_CHAR:
                return listener.new_property(name, typ, '%c' % (value.c & 0xFF))
            elif typ == TYPE_INT:
                return listener.new_property(name, typ, value.i)
            elif typ == TYPE_FLOAT:
                return listener.new_property(name, typ, value.f)
            elif typ == TYPE_STRING or \
                 TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:
                 return listener.new_property(name, typ, value.s)
            elif typ == TYPE_COLOR:
                # reinterpret the raw bytes as a Color (see _CValue note)
                col = cast(value.col, POINTER(Color)).contents
                return listener.new_property(name, typ, col)
            elif typ == TYPE_DICE:
                dice = cast(value.dice, POINTER(Dice)).contents
                return listener.new_property(name, typ, dice)
            elif typ & TYPE_LIST:
                # lists carry their element type in the low bits
                return listener.new_property(name, typ,
                                        _convert_TCODList(value.custom, typ & 0xFF))
            return True
        # keep the ctypes callbacks alive in the struct for the call's duration
        clistener.new_struct = _CFUNC_NEW_STRUCT(listener.new_struct)
        clistener.new_flag = _CFUNC_NEW_FLAG(listener.new_flag)
        clistener.new_property = _CFUNC_NEW_PROPERTY(value_converter)
        clistener.end_struct = _CFUNC_NEW_STRUCT(listener.end_struct)
        clistener.error = _CFUNC_NEW_FLAG(listener.error)
        _lib.TCOD_parser_run(parser, c_char_p(filename), byref(clistener))
    else:
        _lib.TCOD_parser_run(parser, c_char_p(filename), 0)
def parser_delete(parser):
    _lib.TCOD_parser_delete(parser)
# property accessors for the default (listener-less) parse mode
def parser_get_bool_property(parser, name):
    return _lib.TCOD_parser_get_bool_property(parser, c_char_p(name))
def parser_get_int_property(parser, name):
    return _lib.TCOD_parser_get_int_property(parser, c_char_p(name))
def parser_get_char_property(parser, name):
    # Convert the C int result to a 1-character string.
    return '%c' % _lib.TCOD_parser_get_char_property(parser, c_char_p(name))
def parser_get_float_property(parser, name):
    return _lib.TCOD_parser_get_float_property(parser, c_char_p(name))
def parser_get_string_property(parser, name):
    return _lib.TCOD_parser_get_string_property(parser, c_char_p(name))
def parser_get_color_property(parser, name):
    return _lib.TCOD_parser_get_color_property(parser, c_char_p(name))
def parser_get_dice_property(parser, name):
    # Filled in by the C side through an out-parameter.
    d = Dice()
    _lib.TCOD_parser_get_dice_property_py(c_void_p(parser), c_char_p(name), byref(d))
    return d
def parser_get_list_property(parser, name, typ):
    clist = _lib.TCOD_parser_get_list_property(parser, c_char_p(name), c_int(typ))
    return _convert_TCODList(clist, typ)
############################
# random module
############################
_lib.TCOD_random_get_float.restype = c_float
_lib.TCOD_random_get_double.restype = c_double
# RNG algorithms
RNG_MT = 0
RNG_CMWC = 1
# distributions for random_set_distribution
DISTRIBUTION_LINEAR = 0
DISTRIBUTION_GAUSSIAN = 1
DISTRIBUTION_GAUSSIAN_RANGE = 2
DISTRIBUTION_GAUSSIAN_INVERSE = 3
DISTRIBUTION_GAUSSIAN_RANGE_INVERSE = 4
def random_get_instance():
    # The library's shared default RNG.
    return _lib.TCOD_random_get_instance()
def random_new(algo=RNG_CMWC):
    return _lib.TCOD_random_new(algo)
def random_new_from_seed(seed, algo=RNG_CMWC):
    return _lib.TCOD_random_new_from_seed(algo,c_uint(seed))
def random_set_distribution(rnd, dist) :
    _lib.TCOD_random_set_distribution(rnd, dist)
def random_get_int(rnd, mi, ma):
    return _lib.TCOD_random_get_int(rnd, mi, ma)
def random_get_float(rnd, mi, ma):
    return _lib.TCOD_random_get_float(rnd, c_float(mi), c_float(ma))
def random_get_double(rnd, mi, ma):
    return _lib.TCOD_random_get_double(rnd, c_double(mi), c_double(ma))
def random_get_int_mean(rnd, mi, ma, mean):
    return _lib.TCOD_random_get_int_mean(rnd, mi, ma, mean)
def random_get_float_mean(rnd, mi, ma, mean):
    return _lib.TCOD_random_get_float_mean(rnd, c_float(mi), c_float(ma), c_float(mean))
def random_get_double_mean(rnd, mi, ma, mean):
    return _lib.TCOD_random_get_double_mean(rnd, c_double(mi), c_double(ma), c_double(mean))
def random_save(rnd):
    # Snapshot the RNG state; restore it later with random_restore.
    return _lib.TCOD_random_save(rnd)
def random_restore(rnd, backup):
    _lib.TCOD_random_restore(rnd, backup)
def random_delete(rnd):
    _lib.TCOD_random_delete(rnd)
############################
# noise module
############################
_lib.TCOD_noise_get.restype = c_float
_lib.TCOD_noise_get_ex.restype = c_float
_lib.TCOD_noise_get_fbm.restype = c_float
_lib.TCOD_noise_get_fbm_ex.restype = c_float
_lib.TCOD_noise_get_turbulence.restype = c_float
_lib.TCOD_noise_get_turbulence_ex.restype = c_float
NOISE_DEFAULT_HURST = 0.5
NOISE_DEFAULT_LACUNARITY = 2.0
# noise algorithms
NOISE_DEFAULT = 0
NOISE_PERLIN = 1
NOISE_SIMPLEX = 2
NOISE_WAVELET = 4
# indexed by dimension count: packs a Python coordinate sequence into the
# matching c_float array type (index 0 unused)
_NOISE_PACKER_FUNC = (None,
                      (c_float * 1),
                      (c_float * 2),
                      (c_float * 3),
                      (c_float * 4),
                      )
def noise_new(dim, h=NOISE_DEFAULT_HURST, l=NOISE_DEFAULT_LACUNARITY, random=0):
    # Create a noise generator of dimension 1-4; free with noise_delete().
    return _lib.TCOD_noise_new(dim, c_float(h), c_float(l), random)
def noise_set_type(n, typ) :
    _lib.TCOD_noise_set_type(n,typ)
def noise_get(n, f, typ=NOISE_DEFAULT):
    # f is a sequence of 1-4 float coordinates.
    return _lib.TCOD_noise_get_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), typ)
def noise_get_fbm(n, f, oc, typ=NOISE_DEFAULT):
    # fractional Brownian motion with oc octaves
    return _lib.TCOD_noise_get_fbm_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), c_float(oc), typ)
def noise_get_turbulence(n, f, oc, typ=NOISE_DEFAULT):
    return _lib.TCOD_noise_get_turbulence_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), c_float(oc), typ)
def noise_delete(n):
    _lib.TCOD_noise_delete(n)
############################
# fov module (field of view)
############################
_lib.TCOD_map_is_in_fov.restype = c_bool
_lib.TCOD_map_is_transparent.restype = c_bool
_lib.TCOD_map_is_walkable.restype = c_bool
# FOV algorithms for map_compute_fov
FOV_BASIC = 0
FOV_DIAMOND = 1
FOV_SHADOW = 2
FOV_PERMISSIVE_0 = 3
FOV_PERMISSIVE_1 = 4
FOV_PERMISSIVE_2 = 5
FOV_PERMISSIVE_3 = 6
FOV_PERMISSIVE_4 = 7
FOV_PERMISSIVE_5 = 8
FOV_PERMISSIVE_6 = 9
FOV_PERMISSIVE_7 = 10
FOV_PERMISSIVE_8 = 11
FOV_RESTRICTIVE = 12
NB_FOV_ALGORITHMS = 13
def FOV_PERMISSIVE(p) :
    # Permissive algorithm with permissiveness level p (0-8).
    return FOV_PERMISSIVE_0+p
def map_new(w, h):
    # Allocate a w x h map; free it with map_delete().
    return _lib.TCOD_map_new(w, h)
def map_copy(source, dest):
    return _lib.TCOD_map_copy(source, dest)
def map_set_properties(m, x, y, isTrans, isWalk):
    _lib.TCOD_map_set_properties(m, x, y, c_int(isTrans), c_int(isWalk))
def map_clear(m,walkable=False,transparent=False):
    _lib.TCOD_map_clear(m,c_int(walkable),c_int(transparent))
def map_compute_fov(m, x, y, radius=0, light_walls=True, algo=FOV_RESTRICTIVE ):
    # radius=0 means unlimited sight range.
    _lib.TCOD_map_compute_fov(m, x, y, c_int(radius), c_bool(light_walls), c_int(algo))
def map_is_in_fov(m, x, y):
    return _lib.TCOD_map_is_in_fov(m, x, y)
def map_is_transparent(m, x, y):
    return _lib.TCOD_map_is_transparent(m, x, y)
def map_is_walkable(m, x, y):
    return _lib.TCOD_map_is_walkable(m, x, y)
def map_delete(m):
    return _lib.TCOD_map_delete(m)
def map_get_width(map):
    # NOTE(review): parameter shadows the builtin `map`; kept for API
    # compatibility with existing keyword callers.
    return _lib.TCOD_map_get_width(map)
def map_get_height(map):
    return _lib.TCOD_map_get_height(map)
############################
# pathfinding module
############################
_lib.TCOD_path_compute.restype = c_bool
_lib.TCOD_path_is_empty.restype = c_bool
_lib.TCOD_path_walk.restype = c_bool
PATH_CBK_FUNC = CFUNCTYPE(c_float, c_int, c_int, c_int, c_int, py_object)
def path_new_using_map(m, dcost=1.41):
    # Create an A* path over an existing map. Returns a (c_path, callback)
    # tuple; the callback slot is None here but kept for interface symmetry
    # with path_new_using_function().
    return (_lib.TCOD_path_new_using_map(c_void_p(m), c_float(dcost)), None)
def path_new_using_function(w, h, func, userdata=0, dcost=1.41):
    # Keep a reference to the ctypes callback in the returned tuple so that
    # it is not garbage-collected while the C path object is still alive.
    cbk_func = PATH_CBK_FUNC(func)
    return (_lib.TCOD_path_new_using_function(w, h, cbk_func,
            py_object(userdata), c_float(dcost)), cbk_func)
def path_compute(p, ox, oy, dx, dy):
    # Compute a path from (ox, oy) to (dx, dy); returns a bool.
    return _lib.TCOD_path_compute(p[0], ox, oy, dx, dy)
def path_get_origin(p):
    # Return the path origin as an (x, y) tuple of Python ints.
    x = c_int()
    y = c_int()
    _lib.TCOD_path_get_origin(p[0], byref(x), byref(y))
    return x.value, y.value
def path_get_destination(p):
    x = c_int()
    y = c_int()
    _lib.TCOD_path_get_destination(p[0], byref(x), byref(y))
    return x.value, y.value
def path_size(p):
    # Number of steps in the computed path.
    return _lib.TCOD_path_size(p[0])
def path_reverse(p):
    _lib.TCOD_path_reverse(p[0])
def path_get(p, idx):
    # Coordinates of step ``idx`` of the path.
    x = c_int()
    y = c_int()
    _lib.TCOD_path_get(p[0], idx, byref(x), byref(y))
    return x.value, y.value
def path_is_empty(p):
    return _lib.TCOD_path_is_empty(p[0])
def path_walk(p, recompute):
    # Advance one step along the path; returns the next (x, y) cell, or
    # (None, None) when the walk fails or the path is exhausted.
    x = c_int()
    y = c_int()
    if _lib.TCOD_path_walk(p[0], byref(x), byref(y), c_int(recompute)):
        return x.value, y.value
    return None,None
def path_delete(p):
    # Free the C path object (the callback reference dies with the tuple).
    _lib.TCOD_path_delete(p[0])
# Declare C return types for the Dijkstra toolkit.
_lib.TCOD_dijkstra_path_set.restype = c_bool
_lib.TCOD_dijkstra_is_empty.restype = c_bool
_lib.TCOD_dijkstra_path_walk.restype = c_bool
_lib.TCOD_dijkstra_get_distance.restype = c_float
def dijkstra_new(m, dcost=1.41):
    # Create a Dijkstra pathfinder over an existing map. Returns a
    # (c_dijkstra, callback) tuple; the callback slot is None here (see
    # dijkstra_new_using_function).
    return (_lib.TCOD_dijkstra_new(c_void_p(m), c_float(dcost)), None)
def dijkstra_new_using_function(w, h, func, userdata=0, dcost=1.41):
    """Create a Dijkstra pathfinder over a w*h grid driven by a cost callback.

    ``func`` must match PATH_CBK_FUNC. Returns a (c_dijkstra, callback)
    tuple; the callback reference is kept in the tuple so it is not
    garbage-collected while the C object is alive.
    """
    cbk_func = PATH_CBK_FUNC(func)
    # Bug fix: the C entry point is TCOD_dijkstra_new_using_function.
    # The previously used symbol TCOD_path_dijkstra_using_function does not
    # exist in libtcod, so this wrapper raised AttributeError when called.
    return (_lib.TCOD_dijkstra_new_using_function(w, h, cbk_func,
            py_object(userdata), c_float(dcost)), cbk_func)
def dijkstra_compute(p, ox, oy):
    # Compute the distance map from root cell (ox, oy).
    _lib.TCOD_dijkstra_compute(p[0], c_int(ox), c_int(oy))
def dijkstra_path_set(p, x, y):
    # Select the path leading to (x, y); returns a bool.
    return _lib.TCOD_dijkstra_path_set(p[0], c_int(x), c_int(y))
def dijkstra_get_distance(p, x, y):
    # Distance from the root to cell (x, y) as a float.
    return _lib.TCOD_dijkstra_get_distance(p[0], c_int(x), c_int(y))
def dijkstra_size(p):
    # Number of steps in the currently selected path.
    return _lib.TCOD_dijkstra_size(p[0])
def dijkstra_reverse(p):
    _lib.TCOD_dijkstra_reverse(p[0])
def dijkstra_get(p, idx):
    # Coordinates of step ``idx`` of the selected path.
    x = c_int()
    y = c_int()
    _lib.TCOD_dijkstra_get(p[0], c_int(idx), byref(x), byref(y))
    return x.value, y.value
def dijkstra_is_empty(p):
    return _lib.TCOD_dijkstra_is_empty(p[0])
def dijkstra_path_walk(p):
    # Advance one step along the selected path; (None, None) when done.
    x = c_int()
    y = c_int()
    if _lib.TCOD_dijkstra_path_walk(p[0], byref(x), byref(y)):
        return x.value, y.value
    return None,None
def dijkstra_delete(p):
    # Free the C Dijkstra object.
    _lib.TCOD_dijkstra_delete(p[0])
############################
# bsp module
############################
class _CBsp(Structure):
    # ctypes mirror of the C TCOD_bsp_t struct; field order and types must
    # match the C declaration exactly.
    _fields_ = [('next', c_void_p),
                ('father', c_void_p),
                ('son', c_void_p),
                ('x', c_int),
                ('y', c_int),
                ('w', c_int),
                ('h', c_int),
                ('position', c_int),
                ('level', c_uint8),
                ('horizontal', c_bool),
                ]
# Declare C return types: node-returning functions yield _CBsp pointers,
# predicates yield Python bools.
_lib.TCOD_bsp_new_with_size.restype = POINTER(_CBsp)
_lib.TCOD_bsp_left.restype = POINTER(_CBsp)
_lib.TCOD_bsp_right.restype = POINTER(_CBsp)
_lib.TCOD_bsp_father.restype = POINTER(_CBsp)
_lib.TCOD_bsp_is_leaf.restype = c_bool
_lib.TCOD_bsp_contains.restype = c_bool
_lib.TCOD_bsp_find_node.restype = POINTER(_CBsp)

# ctypes prototype of the traversal callback: (node pointer, user data) -> int.
BSP_CBK_FUNC = CFUNCTYPE(c_int, c_void_p, c_void_p)
# Python class encapsulating the _CBsp pointer.
class Bsp(object):
    """Thin wrapper around a C BSP node.

    Holds a POINTER(_CBsp) in ``self.p`` and exposes the struct fields as
    read/write properties, so Python code can manipulate the node without
    touching ctypes directly.
    """
    def __init__(self, cnode):
        # ``cnode`` may be any pointer-compatible value; cast it to the
        # typed pointer so attribute access works.
        pcbsp = cast(cnode, POINTER(_CBsp))
        self.p = pcbsp

    def getx(self):
        return self.p.contents.x
    def setx(self, value):
        self.p.contents.x = value
    x = property(getx, setx)

    def gety(self):
        return self.p.contents.y
    def sety(self, value):
        self.p.contents.y = value
    y = property(gety, sety)

    def getw(self):
        return self.p.contents.w
    def setw(self, value):
        self.p.contents.w = value
    w = property(getw, setw)

    def geth(self):
        return self.p.contents.h
    def seth(self, value):
        self.p.contents.h = value
    h = property(geth, seth)

    def getpos(self):
        return self.p.contents.position
    def setpos(self, value):
        self.p.contents.position = value
    position = property(getpos, setpos)

    def gethor(self):
        return self.p.contents.horizontal
    def sethor(self,value):
        self.p.contents.horizontal = value
    horizontal = property(gethor, sethor)

    def getlev(self):
        return self.p.contents.level
    def setlev(self,value):
        self.p.contents.level = value
    level = property(getlev, setlev)
def bsp_new_with_size(x, y, w, h):
    # Create the root node of a BSP tree covering the given rectangle.
    return Bsp(_lib.TCOD_bsp_new_with_size(x, y, w, h))
def bsp_split_once(node, horizontal, position):
    # Split ``node`` once, horizontally or vertically, at ``position``.
    _lib.TCOD_bsp_split_once(node.p, c_int(horizontal), position)
def bsp_split_recursive(node, randomizer, nb, minHSize, minVSize, maxHRatio,
                        maxVRatio):
    # Recursively split ``node`` ``nb`` times, respecting minimum sizes and
    # maximum aspect ratios; ``randomizer`` is a TCOD random generator.
    _lib.TCOD_bsp_split_recursive(node.p, randomizer, nb, minHSize, minVSize,
                                  c_float(maxHRatio), c_float(maxVRatio))
def bsp_resize(node, x, y, w, h):
    _lib.TCOD_bsp_resize(node.p, x, y, w, h)
def bsp_left(node):
    return Bsp(_lib.TCOD_bsp_left(node.p))
def bsp_right(node):
    return Bsp(_lib.TCOD_bsp_right(node.p))
def bsp_father(node):
    return Bsp(_lib.TCOD_bsp_father(node.p))
def bsp_is_leaf(node):
    return _lib.TCOD_bsp_is_leaf(node.p)
def bsp_contains(node, cx, cy):
    # True if cell (cx, cy) lies inside the node's rectangle.
    return _lib.TCOD_bsp_contains(node.p, cx, cy)
def bsp_find_node(node, cx, cy):
    # Smallest descendant node containing cell (cx, cy).
    return Bsp(_lib.TCOD_bsp_find_node(node.p, cx, cy))
def _bsp_traverse(node, callback, userData, func):
    # Wrap the Python callback so each raw C node is converted into a Bsp
    # wrapper before the user callback sees it.
    def node_converter(cnode, data):
        node = Bsp(cnode)
        return callback(node, data)
    cbk_func = BSP_CBK_FUNC(node_converter)
    func(node.p, cbk_func, userData)
def bsp_traverse_pre_order(node, callback, userData=0):
    _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_pre_order)
def bsp_traverse_in_order(node, callback, userData=0):
    _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_in_order)
def bsp_traverse_post_order(node, callback, userData=0):
    _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_post_order)
def bsp_traverse_level_order(node, callback, userData=0):
    _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_level_order)
def bsp_traverse_inverted_level_order(node, callback, userData=0):
    _bsp_traverse(node, callback, userData,
                  _lib.TCOD_bsp_traverse_inverted_level_order)
def bsp_remove_sons(node):
    # Delete the node's children (the node itself is kept).
    _lib.TCOD_bsp_remove_sons(node.p)
def bsp_delete(node):
    # Delete the node and its whole subtree.
    _lib.TCOD_bsp_delete(node.p)
############################
# heightmap module
############################
class _CHeightMap(Structure):
    # ctypes mirror of the C TCOD_heightmap_t struct.
    _fields_=[('w', c_int),
              ('h', c_int),
              ('values', POINTER(c_float)),
              ]

# Declare C return types for the heightmap toolkit.
_lib.TCOD_heightmap_new.restype = POINTER(_CHeightMap)
_lib.TCOD_heightmap_get_value.restype = c_float
_lib.TCOD_heightmap_has_land_on_border.restype = c_bool
class HeightMap(object):
    """Thin wrapper holding a POINTER(_CHeightMap) in ``self.p``; exposes
    the width and height fields as read/write properties."""
    def __init__(self, chm):
        pchm = cast(chm, POINTER(_CHeightMap))
        self.p = pchm

    def getw(self):
        return self.p.contents.w
    def setw(self, value):
        self.p.contents.w = value
    w = property(getw, setw)

    def geth(self):
        return self.p.contents.h
    def seth(self, value):
        self.p.contents.h = value
    h = property(geth, seth)
def heightmap_new(w, h):
    # Allocate a new w*h heightmap and wrap the C pointer.
    phm = _lib.TCOD_heightmap_new(w, h)
    return HeightMap(phm)
def heightmap_set_value(hm, x, y, value):
    # Set the height of cell (x, y).
    _lib.TCOD_heightmap_set_value(hm.p, x, y, c_float(value))
def heightmap_add(hm, value):
    # Add ``value`` to every cell.
    _lib.TCOD_heightmap_add(hm.p, c_float(value))
def heightmap_scale(hm, value):
    # Multiply every cell by ``value``.
    _lib.TCOD_heightmap_scale(hm.p, c_float(value))
def heightmap_clear(hm):
    _lib.TCOD_heightmap_clear(hm.p)
def heightmap_clamp(hm, mi, ma):
    # Clamp every cell into [mi, ma].
    _lib.TCOD_heightmap_clamp(hm.p, c_float(mi),c_float(ma))
def heightmap_copy(hm1, hm2):
    # Copy hm1 into hm2.
    _lib.TCOD_heightmap_copy(hm1.p, hm2.p)
def heightmap_normalize(hm, mi=0.0, ma=1.0):
    # Linearly rescale values into [mi, ma].
    _lib.TCOD_heightmap_normalize(hm.p, c_float(mi), c_float(ma))
def heightmap_lerp_hm(hm1, hm2, hm3, coef):
    # hm3 = lerp(hm1, hm2, coef), cell by cell.
    _lib.TCOD_heightmap_lerp_hm(hm1.p, hm2.p, hm3.p, c_float(coef))
def heightmap_add_hm(hm1, hm2, hm3):
    # hm3 = hm1 + hm2, cell by cell.
    _lib.TCOD_heightmap_add_hm(hm1.p, hm2.p, hm3.p)
def heightmap_multiply_hm(hm1, hm2, hm3):
    # hm3 = hm1 * hm2, cell by cell.
    _lib.TCOD_heightmap_multiply_hm(hm1.p, hm2.p, hm3.p)
def heightmap_add_hill(hm, x, y, radius, height):
    # Raise a hill centered on (x, y).
    _lib.TCOD_heightmap_add_hill(hm.p, c_float( x), c_float( y),
                                 c_float( radius), c_float( height))
def heightmap_dig_hill(hm, x, y, radius, height):
    # Carve a depression centered on (x, y).
    _lib.TCOD_heightmap_dig_hill(hm.p, c_float( x), c_float( y),
                                 c_float( radius), c_float( height))
def heightmap_rain_erosion(hm, nbDrops, erosionCoef, sedimentationCoef, rnd=0):
    # Simulate rain erosion with ``nbDrops`` drops; rnd=0 uses the default
    # TCOD random generator.
    _lib.TCOD_heightmap_rain_erosion(hm.p, nbDrops, c_float( erosionCoef),
                                     c_float( sedimentationCoef), rnd)
def heightmap_kernel_transform(hm, kernelsize, dx, dy, weight, minLevel,
                               maxLevel):
    # Apply a generic convolution-like kernel: dx/dy are the cell offsets,
    # weight the per-offset weights; only cells whose value lies within
    # [minLevel, maxLevel] are affected (per the C API).
    FARRAY = c_float * kernelsize
    IARRAY = c_int * kernelsize
    cdx = IARRAY(*dx)
    cdy = IARRAY(*dy)
    cweight = FARRAY(*weight)
    _lib.TCOD_heightmap_kernel_transform(hm.p, kernelsize, cdx, cdy, cweight,
                                         c_float(minLevel), c_float(maxLevel))
def heightmap_add_voronoi(hm, nbPoints, nbCoef, coef, rnd=0):
    # Add a Voronoi diagram built from ``nbPoints`` random points with
    # ``nbCoef`` distance coefficients.
    FARRAY = c_float * nbCoef
    ccoef = FARRAY(*coef)
    _lib.TCOD_heightmap_add_voronoi(hm.p, nbPoints, nbCoef, ccoef, rnd)
def heightmap_add_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta, scale):
    # Add scaled fBm noise sampled at (x*mulx+addx, y*muly+addy).
    _lib.TCOD_heightmap_add_fbm(hm.p, noise, c_float(mulx), c_float(muly),
                                c_float(addx), c_float(addy),
                                c_float(octaves), c_float(delta),
                                c_float(scale))
def heightmap_scale_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta,
                        scale):
    # Multiply the map by scaled fBm noise (same sampling as add_fbm).
    _lib.TCOD_heightmap_scale_fbm(hm.p, noise, c_float(mulx), c_float(muly),
                                  c_float(addx), c_float(addy),
                                  c_float(octaves), c_float(delta),
                                  c_float(scale))
def heightmap_dig_bezier(hm, px, py, startRadius, startDepth, endRadius,
                         endDepth):
    # Carve a path along a 4-control-point Bezier curve, interpolating the
    # radius and depth between the start and end values.
    IARRAY = c_int * 4
    cpx = IARRAY(*px)
    cpy = IARRAY(*py)
    _lib.TCOD_heightmap_dig_bezier(hm.p, cpx, cpy, c_float(startRadius),
                                   c_float(startDepth), c_float(endRadius),
                                   c_float(endDepth))
def heightmap_get_value(hm, x, y):
    # Height of cell (x, y) as a float.
    return _lib.TCOD_heightmap_get_value(hm.p, x, y)
def heightmap_get_interpolated_value(hm, x, y):
    # Bilinearly interpolated height at non-integer coordinates (per the C API).
    return _lib.TCOD_heightmap_get_interpolated_value(hm.p, c_float(x),
                                                      c_float(y))
def heightmap_get_slope(hm, x, y):
    return _lib.TCOD_heightmap_get_slope(hm.p, x, y)
def heightmap_get_normal(hm, x, y, waterLevel):
    # Surface normal at (x, y) as an (nx, ny, nz) tuple.
    FARRAY = c_float * 3
    cn = FARRAY()
    _lib.TCOD_heightmap_get_normal(hm.p, c_float(x), c_float(y), cn,
                                   c_float(waterLevel))
    return cn[0], cn[1], cn[2]
def heightmap_count_cells(hm, mi, ma):
    # Number of cells whose value lies within [mi, ma].
    return _lib.TCOD_heightmap_count_cells(hm.p, c_float(mi), c_float(ma))
def heightmap_has_land_on_border(hm, waterlevel):
    return _lib.TCOD_heightmap_has_land_on_border(hm.p, c_float(waterlevel))
def heightmap_get_minmax(hm):
    # Return (min, max) of all cell values as Python floats.
    mi = c_float()
    ma = c_float()
    _lib.TCOD_heightmap_get_minmax(hm.p, byref(mi), byref(ma))
    return mi.value, ma.value
def heightmap_delete(hm):
    # Free the C heightmap object.
    _lib.TCOD_heightmap_delete(hm.p)
############################
# name generator module
############################
# The generators return C strings; convert them to Python strings.
_lib.TCOD_namegen_generate.restype = c_char_p
_lib.TCOD_namegen_generate_custom.restype = c_char_p
def namegen_parse(filename,random=0) :
    # Load name-generation rule sets from ``filename``; ``random`` is an
    # optional TCOD random generator handle (0 = default).
    _lib.TCOD_namegen_parse(filename,random)
def namegen_generate(name) :
    # Generate a name from the rule set called ``name``. The trailing 0 is
    # the C 'allocate' flag.
    return _lib.TCOD_namegen_generate(name, 0)
def namegen_generate_custom(name, rule):
    """Generate a name from rule set ``name`` using the custom ``rule`` string.

    Bug fix: this previously called TCOD_namegen_generate(name, rule, 0),
    which is the wrong C entry point and silently ignores ``rule``. The
    correct function for custom rules is TCOD_namegen_generate_custom
    (whose restype is already declared above). The trailing 0 is the C
    'allocate' flag, matching namegen_generate().
    """
    return _lib.TCOD_namegen_generate_custom(name, rule, 0)
def namegen_get_sets():
    # Return the names of all loaded rule sets as a Python list.
    nb=_lib.TCOD_namegen_get_nb_sets_wrapper()
    SARRAY = c_char_p * nb;
    setsa = SARRAY()
    _lib.TCOD_namegen_get_sets_wrapper(setsa)
    return list(setsa)
def namegen_destroy() :
    # Free all loaded name generator data.
    _lib.TCOD_namegen_destroy()
|
xxxStanxxx/calaboucos-e-cachorros
|
win/libtcodpy.py
|
Python
|
mit
| 60,714
|
[
"Amber"
] |
9000860b65ec9850b0a20a3025a181719ea5104c4a7bb91cbbbb0385e9a921f4
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Analytical nuclear gradients can be calculated with the background charges.
The nuclear gradients driver (nuc_grad_method) can be called the same way as
the regular calculations.
Note:
1. the mcscf nuclear gradients have to be calculated with the (recommended)
first initialization method. See also example 02-mcscf.py
2. X2C gradients with QM/MM charges are not supported.
'''
import numpy
from pyscf import gto, scf, ci, mcscf, tddft, qmmm
mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
basis='3-21g')
numpy.random.seed(1)
coords = numpy.random.random((5,3)) * 10
charges = (numpy.arange(5) + 1.) * -.1
mf = qmmm.mm_charge(scf.RHF(mol), coords, charges).run()
mf.nuc_grad_method().run()
ci.CISD(mf).run().nuc_grad_method().run()
mc = mcscf.CASCI(mf, 6, 6).run()
mc.nuc_grad_method().run()
tddft.TDA(mf).run().nuc_grad_method().run(state=2)
|
gkc1000/pyscf
|
examples/qmmm/20-grad.py
|
Python
|
apache-2.0
| 1,213
|
[
"PySCF"
] |
1507e71bef0b8d33b483f84aba6a50e01835fff640503a9947d389476dcef061
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
from math import ceil
import sys
import numpy as np
import tensorflow as tf
# Per-channel means (B, G, R order) subtracted from the input image during
# preprocessing; these are the standard VGG/ImageNet values.
VGG_MEAN = [103.939, 116.779, 123.68]


class FCN8VGG:
    """FCN-8s semantic segmentation network built on pretrained VGG16 weights.

    Weights are loaded from a ``vgg16.npy`` file; ``build`` assembles the
    VGG16 encoder, 1x1 score layers and the upsampling/skip-fusion decoder.
    """

    def __init__(self, vgg16_npy_path=None):
        # When no path is given, look for vgg16.npy next to this module.
        if vgg16_npy_path is None:
            path = sys.modules[self.__class__.__module__].__file__
            # print path
            path = os.path.abspath(os.path.join(path, os.pardir))
            # print path
            path = os.path.join(path, "vgg16.npy")
            vgg16_npy_path = path
            logging.info("Load npy file from '%s'.", vgg16_npy_path)
        if not os.path.isfile(vgg16_npy_path):
            logging.error(("File '%s' not found. Download it from "
                           "https://dl.dropboxusercontent.com/u/"
                           "50333326/vgg16.npy"), vgg16_npy_path)
            sys.exit(1)
        # dict mapping layer name -> [weights, biases] numpy arrays.
        self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
        # L2 weight-decay factor used by all layers below.
        self.wd = 5e-4
        print("npy file loaded")

    def build(self, rgb, train=False, num_classes=20, random_init_fc8=False,
              debug=False):
        """
        Build the VGG model using loaded weights

        Parameters
        ----------
        rgb: image batch tensor
            Image in rgb shape. Scaled to interval [0, 255]
        train: bool
            Whether to build train or inference graph
        num_classes: int
            How many classes should be predicted (by fc8)
        random_init_fc8 : bool
            Whether to initialize fc8 layer randomly.
            Finetuning is required in this case.
        debug: bool
            Whether to print additional Debug Information.
        """
        # Convert RGB to BGR and subtract the channel means.
        with tf.name_scope('Processing'):
            red, green, blue = tf.split(rgb, 3, axis=3)
            bgr = tf.concat([
                blue - VGG_MEAN[0],
                green - VGG_MEAN[1],
                red - VGG_MEAN[2],
            ], 3)

            if debug:
                bgr = tf.Print(bgr, [tf.shape(bgr)],
                               message='Shape of input image: ',
                               summarize=4, first_n=1)

        # VGG16 encoder: five convolution blocks, each followed by 2x2 pooling.
        self.conv1_1 = self._conv_layer(bgr, "conv1_1")
        self.conv1_2 = self._conv_layer(self.conv1_1, "conv1_2")
        self.pool1 = self._max_pool(self.conv1_2, 'pool1', debug)

        self.conv2_1 = self._conv_layer(self.pool1, "conv2_1")
        self.conv2_2 = self._conv_layer(self.conv2_1, "conv2_2")
        self.pool2 = self._max_pool(self.conv2_2, 'pool2', debug)

        self.conv3_1 = self._conv_layer(self.pool2, "conv3_1")
        self.conv3_2 = self._conv_layer(self.conv3_1, "conv3_2")
        self.conv3_3 = self._conv_layer(self.conv3_2, "conv3_3")
        self.pool3 = self._max_pool(self.conv3_3, 'pool3', debug)

        self.conv4_1 = self._conv_layer(self.pool3, "conv4_1")
        self.conv4_2 = self._conv_layer(self.conv4_1, "conv4_2")
        self.conv4_3 = self._conv_layer(self.conv4_2, "conv4_3")
        self.pool4 = self._max_pool(self.conv4_3, 'pool4', debug)

        self.conv5_1 = self._conv_layer(self.pool4, "conv5_1")
        self.conv5_2 = self._conv_layer(self.conv5_1, "conv5_2")
        self.conv5_3 = self._conv_layer(self.conv5_2, "conv5_3")
        self.pool5 = self._max_pool(self.conv5_3, 'pool5', debug)

        # Fully-connected layers of VGG, expressed as convolutions.
        self.fc6 = self._fc_layer(self.pool5, "fc6")

        if train:
            self.fc6 = tf.nn.dropout(self.fc6, 0.5)

        self.fc7 = self._fc_layer(self.fc6, "fc7")
        if train:
            self.fc7 = tf.nn.dropout(self.fc7, 0.5)

        # Class scores: either random-initialized or reshaped from VGG fc8.
        if random_init_fc8:
            self.score_fr = self._score_layer(self.fc7, "score_fr",
                                              num_classes)
        else:
            self.score_fr = self._fc_layer(self.fc7, "score_fr",
                                           num_classes=num_classes,
                                           relu=False)

        self.pred = tf.argmax(self.score_fr, dimension=3)

        # FCN-8s decoder: upsample scores and fuse with pool4/pool3 skips.
        self.upscore2 = self._upscore_layer(self.score_fr,
                                            shape=tf.shape(self.pool4),
                                            num_classes=num_classes,
                                            debug=debug, name='upscore2',
                                            ksize=4, stride=2)
        self.score_pool4 = self._score_layer(self.pool4, "score_pool4",
                                             num_classes=num_classes)
        self.fuse_pool4 = tf.add(self.upscore2, self.score_pool4)

        self.upscore4 = self._upscore_layer(self.fuse_pool4,
                                            shape=tf.shape(self.pool3),
                                            num_classes=num_classes,
                                            debug=debug, name='upscore4',
                                            ksize=4, stride=2)
        self.score_pool3 = self._score_layer(self.pool3, "score_pool3",
                                             num_classes=num_classes)
        self.fuse_pool3 = tf.add(self.upscore4, self.score_pool3)

        # Final 8x upsampling back to the input resolution.
        self.upscore32 = self._upscore_layer(self.fuse_pool3,
                                             shape=tf.shape(bgr),
                                             num_classes=num_classes,
                                             debug=debug, name='upscore32',
                                             ksize=16, stride=8)

        self.pred_up = tf.argmax(self.upscore32, dimension=3)  # Max value's index

    def _max_pool(self, bottom, name, debug):
        # 2x2 max pooling with stride 2 (halves the spatial resolution).
        pool = tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                              padding='SAME', name=name)

        if debug:
            pool = tf.Print(pool, [tf.shape(pool)],
                            message='Shape of %s' % name,
                            summarize=4, first_n=1)
        return pool

    def _conv_layer(self, bottom, name):
        # 3x3 convolution + bias + ReLU, with weights taken from the
        # pretrained VGG data_dict under ``name``.
        with tf.variable_scope(name) as scope:
            filt = self.get_conv_filter(name)
            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')

            conv_biases = self.get_bias(name)
            bias = tf.nn.bias_add(conv, conv_biases)

            relu = tf.nn.relu(bias)
            # Add summary to Tensorboard
            _activation_summary(relu)
            return relu

    def _fc_layer(self, bottom, name, num_classes=None,
                  relu=True, debug=False):
        # VGG fully-connected layer reshaped into a convolution. For
        # 'score_fr' the VGG fc8 weights are averaged down to num_classes
        # output channels (see _summary_reshape).
        with tf.variable_scope(name) as scope:
            shape = bottom.get_shape().as_list()

            if name == 'fc6':
                filt = self.get_fc_weight_reshape(name, [7, 7, 512, 4096])
            elif name == 'score_fr':
                name = 'fc8'  # Name of score_fr layer in VGG Model
                filt = self.get_fc_weight_reshape(name, [1, 1, 4096, 1000],
                                                  num_classes=num_classes)
            else:
                filt = self.get_fc_weight_reshape(name, [1, 1, 4096, 4096])

            self._add_wd_and_summary(filt, self.wd, "fc_wlosses")

            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            conv_biases = self.get_bias(name, num_classes=num_classes)
            bias = tf.nn.bias_add(conv, conv_biases)

            if relu:
                bias = tf.nn.relu(bias)
            _activation_summary(bias)

            if debug:
                bias = tf.Print(bias, [tf.shape(bias)],
                                message='Shape of %s' % name,
                                summarize=4, first_n=1)
            return bias

    def _score_layer(self, bottom, name, num_classes):
        # Randomly-initialized 1x1 convolution producing per-class scores.
        with tf.variable_scope(name) as scope:
            # get number of input channels
            in_features = bottom.get_shape()[3].value
            shape = [1, 1, in_features, num_classes]
            # He initialization scheme for score_fr; small fixed stddevs for
            # the skip-connection score layers.
            if name == "score_fr":
                num_input = in_features
                stddev = (2 / num_input) ** 0.5
            elif name == "score_pool4":
                stddev = 0.001
            elif name == "score_pool3":
                stddev = 0.0001
            # Apply convolution
            w_decay = self.wd
            weights = self._variable_with_weight_decay(shape, stddev, w_decay,
                                                       decoder=True)
            conv = tf.nn.conv2d(bottom, weights, [1, 1, 1, 1], padding='SAME')
            # Apply bias
            conv_biases = self._bias_variable([num_classes], constant=0.0)
            bias = tf.nn.bias_add(conv, conv_biases)

            _activation_summary(bias)

            return bias

    def _upscore_layer(self, bottom, shape,
                       num_classes, name, debug,
                       ksize=4, stride=2):
        # Transposed convolution (deconvolution) initialized with bilinear
        # upsampling weights; ``shape`` fixes the spatial output size.
        strides = [1, stride, stride, 1]
        with tf.variable_scope(name):
            in_features = bottom.get_shape()[3].value

            if shape is None:
                # Compute shape out of Bottom
                in_shape = tf.shape(bottom)

                h = ((in_shape[1] - 1) * stride) + 1
                w = ((in_shape[2] - 1) * stride) + 1
                new_shape = [in_shape[0], h, w, num_classes]
            else:
                new_shape = [shape[0], shape[1], shape[2], num_classes]
            output_shape = tf.stack(new_shape)

            logging.debug("Layer: %s, Fan-in: %d" % (name, in_features))
            f_shape = [ksize, ksize, num_classes, in_features]

            # NOTE(review): stddev is computed but unused -- the filter is
            # initialized by get_deconv_filter instead; kept for reference.
            num_input = ksize * ksize * in_features / stride
            stddev = (2 / num_input) ** 0.5

            weights = self.get_deconv_filter(f_shape)
            self._add_wd_and_summary(weights, self.wd, "fc_wlosses")
            deconv = tf.nn.conv2d_transpose(bottom, weights, output_shape,
                                            strides=strides, padding='SAME')

            if debug:
                deconv = tf.Print(deconv, [tf.shape(deconv)],
                                  message='Shape of %s' % name,
                                  summarize=4, first_n=1)

        _activation_summary(deconv)
        return deconv

    def get_deconv_filter(self, f_shape):
        # Build a bilinear-interpolation kernel as the (fixed-form) initial
        # value of the transposed-convolution filter.
        # NOTE(review): assumes a square kernel (uses f_shape[0] for both
        # dimensions) and f_shape[2] <= f_shape[3] for the diagonal
        # assignment below -- confirm for non-square/odd configurations.
        width = f_shape[0]
        heigh = f_shape[0]
        f = ceil(width / 2.0)
        c = (2 * f - 1 - f % 2) / (2.0 * f)
        bilinear = np.zeros([f_shape[0], f_shape[1]])
        for x in range(width):
            for y in range(heigh):
                value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
                bilinear[x, y] = value
        weights = np.zeros(f_shape)
        # Each input channel upsamples only itself (diagonal mapping).
        for i in range(f_shape[2]):
            weights[:, :, i, i] = bilinear

        init = tf.constant_initializer(value=weights,
                                       dtype=tf.float32)
        var = tf.get_variable(name="up_filter", initializer=init,
                              shape=weights.shape)
        return var

    def get_conv_filter(self, name):
        # Convolution filter loaded from the pretrained VGG data_dict, with
        # L2 weight decay registered in the 'losses' collection.
        init = tf.constant_initializer(value=self.data_dict[name][0],
                                       dtype=tf.float32)
        shape = self.data_dict[name][0].shape
        print('Layer name: %s' % name)
        print('Layer shape: %s' % str(shape))
        var = tf.get_variable(name="filter", initializer=init, shape=shape)
        if not tf.get_variable_scope().reuse:
            weight_decay = tf.multiply(tf.nn.l2_loss(var), self.wd, name='weight_loss')
            tf.add_to_collection('losses', weight_decay)

        _variable_summaries(var)
        return var

    def get_bias(self, name, num_classes=None):
        # Bias vector loaded from the VGG data_dict; for fc8 the 1000-class
        # biases are averaged down to num_classes (see _bias_reshape).
        bias_wights = self.data_dict[name][1]
        shape = self.data_dict[name][1].shape
        if name == 'fc8':
            bias_wights = self._bias_reshape(bias_wights, shape[0],
                                             num_classes)
            shape = [num_classes]
        init = tf.constant_initializer(value=bias_wights,
                                       dtype=tf.float32)
        var = tf.get_variable(name="biases", initializer=init, shape=shape)
        _variable_summaries(var)
        return var

    def get_fc_weight(self, name):
        # Fully-connected weight matrix loaded unchanged from the data_dict.
        init = tf.constant_initializer(value=self.data_dict[name][0],
                                       dtype=tf.float32)
        shape = self.data_dict[name][0].shape
        var = tf.get_variable(name="weights", initializer=init, shape=shape)
        if not tf.get_variable_scope().reuse:
            weight_decay = tf.multiply(tf.nn.l2_loss(var), self.wd, name='weight_loss')
            tf.add_to_collection('losses', weight_decay)

        _variable_summaries(var)
        return var

    def _bias_reshape(self, bweight, num_orig, num_new):
        """ Build bias weights for filter produces with `_summary_reshape`

        Averages each consecutive group of num_orig // num_new original
        biases into one new bias.
        """
        n_averaged_elements = num_orig // num_new
        avg_bweight = np.zeros(num_new)
        for i in range(0, num_orig, n_averaged_elements):
            start_idx = i
            end_idx = start_idx + n_averaged_elements
            avg_idx = start_idx // n_averaged_elements
            if avg_idx == num_new:
                break
            avg_bweight[avg_idx] = np.mean(bweight[start_idx:end_idx])
        return avg_bweight

    def _summary_reshape(self, fweight, shape, num_new):
        """ Produce weights for a reduced fully-connected layer.

        FC8 of VGG produces 1000 classes. Most semantic segmentation
        tasks require much fewer classes. This reshapes the original weights
        to be used in a fully-convolutional layer which produces num_new
        classes. To achieve this the average (mean) of n adjacent classes is
        taken.

        Consider reordering fweight, to preserve semantic meaning of the
        weights.

        Args:
          fweight: original weights
          shape: shape of the desired fully-convolutional layer
          num_new: number of new classes


        Returns:
          Filter weights for `num_new` classes.
        """
        num_orig = shape[3]
        shape[3] = num_new
        assert (num_new < num_orig)
        n_averaged_elements = num_orig // num_new
        avg_fweight = np.zeros(shape)
        for i in range(0, num_orig, n_averaged_elements):
            start_idx = i
            end_idx = start_idx + n_averaged_elements
            avg_idx = start_idx // n_averaged_elements
            if avg_idx == num_new:
                break
            avg_fweight[:, :, :, avg_idx] = np.mean(
                fweight[:, :, :, start_idx:end_idx], axis=3)
        return avg_fweight

    def _variable_with_weight_decay(self, shape, stddev, wd, decoder=False):
        """Helper to create an initialized Variable with weight decay.

        Note that the Variable is initialized with a truncated normal
        distribution.
        A weight decay is added only if one is specified.

        Args:
          shape: list of ints
          stddev: standard deviation of a truncated Gaussian
          wd: add L2Loss weight decay multiplied by this float. If None, weight
              decay is not added for this Variable.
          decoder: if True the decay term goes to 'dec_losses' instead of
              'losses', so decoder regularization can be handled separately.

        Returns:
          Variable Tensor
        """
        initializer = tf.truncated_normal_initializer(stddev=stddev)
        var = tf.get_variable('weights', shape=shape,
                              initializer=initializer)

        if wd and (not tf.get_variable_scope().reuse):
            weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
            if not decoder:
                tf.add_to_collection('losses', weight_decay)
            else:
                tf.add_to_collection('dec_losses', weight_decay)
        _variable_summaries(var)
        return var

    def _add_wd_and_summary(self, var, wd, collection_name="losses"):
        # Register the L2 decay term of ``var`` in ``collection_name`` and
        # attach Tensorboard summaries.
        if wd and (not tf.get_variable_scope().reuse):
            weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
            tf.add_to_collection(collection_name, weight_decay)
        _variable_summaries(var)
        return var

    def _bias_variable(self, shape, constant=0.0):
        # Bias variable initialized to a constant value.
        initializer = tf.constant_initializer(constant)
        var = tf.get_variable(name='biases', shape=shape,
                              initializer=initializer)
        _variable_summaries(var)
        return var

    def get_fc_weight_reshape(self, name, shape, num_classes=None):
        # Load a VGG fully-connected weight matrix and reshape it into the
        # given convolution-filter shape; optionally reduce the number of
        # output classes via _summary_reshape.
        print('Layer name: %s' % name)
        print('Layer shape: %s' % shape)
        weights = self.data_dict[name][0]
        weights = weights.reshape(shape)
        if num_classes is not None:
            weights = self._summary_reshape(weights, shape,
                                            num_new=num_classes)
        init = tf.constant_initializer(value=weights,
                                       dtype=tf.float32)
        var = tf.get_variable(name="weights", initializer=init, shape=shape)
        return var
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = x.op.name
    # tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.summary.histogram(tensor_name + '/activations', x)
    tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_summaries(var):
    """Attach a lot of summaries to a Tensor (mean, stddev, max, min,
    histogram). Skipped when the variable scope is in reuse mode, so each
    variable is summarized only once."""
    if not tf.get_variable_scope().reuse:
        name = var.op.name
        logging.info("Creating Summary for: %s" % name)
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.summary.scalar(name + '/mean', mean)
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
            # NOTE(review): '/sttdev' looks like a typo for '/stddev', but it
            # is a runtime summary tag -- kept as-is since dashboards may
            # already reference it.
            tf.summary.scalar(name + '/sttdev', stddev)
            tf.summary.scalar(name + '/max', tf.reduce_max(var))
            tf.summary.scalar(name + '/min', tf.reduce_min(var))
            tf.summary.histogram(name, var)
|
mengli/PcmAudioRecorder
|
self_driving/road_seg/fcn8_vgg.py
|
Python
|
apache-2.0
| 18,314
|
[
"Gaussian"
] |
892ee61d3c1c0432aab807077744baadbd1377822f99a3456df37e2b3c657769
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 29 15:09:25 2013
@author: ran110
"""
import fnmatch, os, subprocess
'''
- demg: elevation grid
- tmig: total magnetic intensity grid
- doseg: Radiometrics Dose Rate grid
- pctkg: Radiometrics Potassium Percentage grid
- ppmthg: Radiometrics Thorium parts per million grid
- ppmug: Radiometrics Uranium parts per million grid
'''
changes = {'ppmug':{'long_name':'Radiometrics Uranium parts per million grid'},
'ppmthg':{'long_name':'Radiometrics Thorium parts per million grid'},
'pctkg':{'long_name':'Radiometrics Potassium Percentage grid'},
'doseg':{'long_name':'Radiometrics Dose Rate grid'},
'tmig':{'long_name':'Total magnetic intensity grid'},
'demg':{'long_name':'Digital elevation grid'},
}
import glob
for filetype in changes.keys():
for currfile in glob.glob('/projects/r17/test/outputs/netCDF/*%s.nc4' % filetype):
print filetype, currfile
filename, extension = os.path.splitext(os.path.basename(currfile))
try:
command = ['ncrename', '-h', '-v', 'Band1,%s' % filename, currfile]
stdout, stderr = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
print(command)
if stdout.strip() is not None:
print stdout
if stderr.strip() is not None:
print stderr
except Exception, e:
print e
pass
try:
command = ['ncatted', '-a',
'long_name,%s,o,c,%s' % (filename, changes[filetype]['long_name']),
currfile ]
stdout, stderr = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
print(command)
if stdout.strip() is not None:
print stdout
if stderr.strip() is not None:
print stderr
except Exception, e:
print e
pass
|
AuScope/VEGL-Portal
|
scripts/data-conversion/netcdf-rename.py
|
Python
|
gpl-3.0
| 2,044
|
[
"NetCDF"
] |
165ef19e102ca728471c78da27fcc995b5683cd8fc829f448f1dec410fdc49ca
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RImpute(RPackage):
    """impute: Imputation for microarray data.

    Imputation for microarray data (currently KNN only)"""

    # Bioconductor package; versions pin the exact git commit of each release.
    homepage = "https://bioconductor.org/packages/impute"
    git      = "https://git.bioconductor.org/packages/impute.git"

    version('1.58.0', commit='dc17173df08d965a0d0aac9fa4ad519bd99d127e')
    version('1.56.0', commit='6c037ed4dffabafceae684265f86f2a18751b559')
    version('1.54.0', commit='efc61f5197e8c4baf4ae881fb556f0312beaabd8')
    version('1.52.0', commit='7fa1b917a5dd60f2aaf52d9aae1fcd2c93511d63')
    version('1.50.1', commit='31d1cc141797afdc83743e1d95aab8a90ee19b71')

    # Needs R at both build and run time.
    depends_on('r@2.10:', type=('build', 'run'))
|
rspavel/spack
|
var/spack/repos/builtin/packages/r-impute/package.py
|
Python
|
lgpl-2.1
| 896
|
[
"Bioconductor"
] |
4677db1ad37864cc862d8cea79ca77f0aef690a8ff0f5180db324bd00c6e843e
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains the core classes and functionality that makes Horizon what it is.
This module is considered internal, and should not be relied on directly.
Public APIs are made available through the :mod:`horizon` module and
the classes contained therein.
"""
import collections
import copy
from importlib import import_module
import inspect
import logging
import os
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from django.urls import reverse
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import empty
from django.utils.functional import SimpleLazyObject
from django.utils.module_loading import module_has_submodule
from django.utils.translation import ugettext_lazy as _
import six
from horizon import conf
from horizon.decorators import _current_component
from horizon.decorators import require_auth
from horizon.decorators import require_component_access
from horizon.decorators import require_perms
from horizon import loaders
from horizon.utils import settings as utils_settings
# Name of the panel group for panels to be displayed without a group.
DEFAULT_PANEL_GROUP = 'default'
LOG = logging.getLogger(__name__)
def _decorate_urlconf(urlpatterns, decorator, *args, **kwargs):
for pattern in urlpatterns:
if getattr(pattern, 'callback', None):
decorated = decorator(pattern.callback, *args, **kwargs)
pattern.callback = decorated
if getattr(pattern, 'url_patterns', []):
_decorate_urlconf(pattern.url_patterns, decorator, *args, **kwargs)
# FIXME(lhcheng): We need to find a better way to cache the result.
# Rather than storing it in the session, we could leverage the Django
# session. Currently, this has been causing issue with cookie backend,
# adding 1600+ in the cookie size.
def access_cached(func):
    """Decorator memoizing a can_access-style check in the request session.

    The cache lives under ``session['allowed']`` and is invalidated whenever
    the session token changes. Entries are keyed by the fully qualified class
    name of the component being checked.
    """
    def inner(self, context):
        session = context['request'].session
        token = session.get('token')
        try:
            # Any missing key or a stale token invalidates the cache.
            if session['allowed']['valid_for'] != token:
                raise KeyError()
        except KeyError:
            session['allowed'] = {"valid_for": token}

        cache = session['allowed']
        key = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
        if key not in cache:
            cache[key] = func(self, context)
            session.modified = True

        return cache[key]
    return inner
def _wrapped_include(arg):
    """Adapt a legacy 3-tuple ``include()`` argument to the Django 2.0 form.

    ``arg`` must be a 3-tuple ``(pattern_list, app_namespace,
    instance_namespace)``. Before Django 2.0, ``django.urls.conf.include()``
    accepted such a 3-tuple directly, but that support was dropped; the
    instance namespace must now be passed as the ``namespace`` keyword.
    For details see
    https://docs.djangoproject.com/en/2.0/releases/1.9/#passing-a-3-tuple-or-an-app-name-to-include
    """
    pattern_list, app_ns, instance_ns = arg
    return include((pattern_list, app_ns), namespace=instance_ns)
class NotRegistered(Exception):
    """Raised when looking up a dashboard or panel that is not registered."""
    pass
@python_2_unicode_compatible
class HorizonComponent(object):
    """Shared base for Horizon's registerable components.

    Provides slug validation, URLconf discovery and role/policy based
    access checks common to panels, dashboards and the site itself.
    """
    # Policy rules evaluated by the configured POLICY_CHECK_FUNCTION.
    policy_rules = tuple()

    def __init__(self):
        super(HorizonComponent, self).__init__()
        if not self.slug:
            raise ImproperlyConfigured('Every %s must have a slug.'
                                       % self.__class__)

    def __str__(self):
        return getattr(self, 'name', u"Unnamed %s" % self.__class__.__name__)

    def _get_default_urlpatterns(self):
        """Return the urlpatterns for this component.

        An explicit ``urls`` attribute wins (tried as a relative import
        first, then absolute); otherwise a ``urls`` submodule of the
        component's own package is used, and failing that an empty list.
        """
        package = '.'.join(self.__module__.split('.')[:-1])
        custom_urls = getattr(self, 'urls', None)
        if custom_urls:
            try:
                urls_mod = import_module('.%s' % custom_urls, package)
            except ImportError:
                urls_mod = import_module(custom_urls)
            return urls_mod.urlpatterns
        # Try importing a urls.py from the component's package.
        if module_has_submodule(import_module(package), 'urls'):
            return import_module('.urls', package).urlpatterns
        return []

    # FIXME(lhcheng): Removed the access_cached decorator for now until
    # a better implementation has been figured out. This has been causing
    # issue with cookie backend, adding 1600+ in the cookie size.
    # @access_cached
    def can_access(self, context):
        """Return whether the user has role based access to this component.

        This method is not intended to be overridden.
        The result of the method is stored in per-session cache.
        """
        return self.allowed(context)

    def allowed(self, context):
        """Check if the user is allowed to access this component.

        Override this to add extra checks; the default simply delegates to
        :meth:`_can_access` with the current request.
        """
        return self._can_access(context['request'])

    def _can_access(self, request):
        policy_check = utils_settings.import_setting("POLICY_CHECK_FUNCTION")
        if not (policy_check and self.policy_rules):
            # No policy engine configured, or no rules declared: allow.
            return True
        # Unlike the policy engine's default AND semantics, the rules here
        # are evaluated individually and access is granted if ANY passes.
        for rule in self.policy_rules:
            rule_param = rule
            if not any(isinstance(r, (list, tuple)) for r in rule):
                rule_param = (rule,)
            if policy_check(rule_param, request):
                return True
        return False
class Registry(object):
    """Mixin maintaining a registry mapping component classes to instances.

    Subclasses must define ``_registerable_class`` -- the base class (e.g.
    ``Panel`` or ``Dashboard``) whose subclasses may be registered.
    """
    def __init__(self):
        # Maps registered class -> its singleton instance.
        self._registry = {}
        if not getattr(self, '_registerable_class', None):
            raise ImproperlyConfigured('Subclasses of Registry must set a '
                                       '"_registerable_class" property.')
    def _register(self, cls):
        """Registers the given class.

        If the specified class is already registered then it is ignored.
        """
        if not inspect.isclass(cls):
            raise ValueError('Only classes may be registered.')
        elif not issubclass(cls, self._registerable_class):
            raise ValueError('Only %s classes or subclasses may be registered.'
                             % self._registerable_class.__name__)
        if cls not in self._registry:
            # Record ownership on the class, then instantiate exactly once.
            cls._registered_with = self
            self._registry[cls] = cls()
        return self._registry[cls]
    def _unregister(self, cls):
        """Unregisters the given class.

        If the specified class isn't registered, ``NotRegistered`` will
        be raised.
        """
        if not issubclass(cls, self._registerable_class):
            raise ValueError('Only %s classes or subclasses may be '
                             'unregistered.' % self._registerable_class)
        if cls not in self._registry:
            raise NotRegistered('%s is not registered' % cls)
        del self._registry[cls]
        return True
    def _registered(self, cls):
        # Resolve ``cls`` (a registered class OR a slug string) to its
        # instance; otherwise fall through to raise a descriptive error.
        if inspect.isclass(cls) and issubclass(cls, self._registerable_class):
            found = self._registry.get(cls, None)
            if found:
                return found
        else:
            # Allow for fetching by slugs as well.
            for registered in self._registry.values():
                if registered.slug == cls:
                    return registered
        class_name = self._registerable_class.__name__
        if hasattr(self, "_registered_with"):
            # This registry is itself registered with a parent (e.g. a
            # Dashboard inside Horizon); mention it in the error.
            parent = self._registered_with._registerable_class.__name__
            raise NotRegistered('%(type)s with slug "%(slug)s" is not '
                                'registered with %(parent)s "%(name)s".'
                                % {"type": class_name,
                                   "slug": cls,
                                   "parent": parent,
                                   "name": self.slug})
        else:
            slug = getattr(cls, "slug", cls)
            raise NotRegistered('%(type)s with slug "%(slug)s" is not '
                                'registered.' % {"type": class_name,
                                                 "slug": slug})
class Panel(HorizonComponent):
    """A base class for defining Horizon dashboard panels.

    All Horizon dashboard panels should extend from this class. It provides
    the appropriate hooks for automatically constructing URLconfs, and
    providing permission-based access control.

    .. attribute:: name

        The name of the panel. This will be displayed in the
        auto-generated navigation and various other places.
        Default: ``''``.

    .. attribute:: slug

        A unique "short name" for the panel. The slug is used as
        a component of the URL path for the panel. Default: ``''``.

    .. attribute:: permissions

        A list of permission names, all of which a user must possess in order
        to access any view associated with this panel. This attribute
        is combined cumulatively with any permissions required on the
        ``Dashboard`` class with which it is registered.

    .. attribute:: urls

        Path to a URLconf of views for this panel using dotted Python
        notation. If no value is specified, a file called ``urls.py``
        living in the same package as the ``panel.py`` file is used.
        Default: ``None``.

    .. attribute:: nav

    .. method:: nav(context)

        The ``nav`` attribute can be either a boolean value or a callable
        which accepts a ``RequestContext`` object as a single argument
        to control whether or not this panel should appear in
        automatically-generated navigation. Default: ``True``.

    .. attribute:: index_url_name

        The ``name`` argument for the URL pattern which corresponds to
        the index view for this ``Panel``. This is the view that
        :meth:`.Panel.get_absolute_url` will attempt to reverse.

    .. staticmethod:: can_register

        This optional static method can be used to specify conditions that
        need to be satisfied to load this panel. Unlike ``permissions`` and
        ``allowed`` this method is intended to handle settings based
        conditions rather than user based permission and policy checks.
        The return value is boolean. If the method returns ``True``, then the
        panel will be registered and available to user (if ``permissions`` and
        ``allowed`` runtime checks are also satisfied). If the method returns
        ``False``, then the panel will not be registered and will not be
        available via normal navigation or direct URL access.
    """
    name = ''
    slug = ''
    urls = None
    nav = True
    index_url_name = "index"
    def __repr__(self):
        return "<Panel: %s>" % self.slug
    def get_absolute_url(self):
        """Returns the default URL for this panel.

        The default URL is defined as the URL pattern with ``name="index"`` in
        the URLconf for this panel.
        """
        try:
            return reverse('horizon:%s:%s:%s' % (self._registered_with.slug,
                                                 self.slug,
                                                 self.index_url_name))
        except Exception as exc:
            # Logging here since this will often be called in a template
            # where the exception would be hidden.
            LOG.info("Error reversing absolute URL for %(self)s: %(exc)s",
                     {'self': self, 'exc': exc})
            raise
    @property
    def _decorated_urls(self):
        # Wrap every view in this panel's URLconf with permission,
        # component-access and current-component decorators before
        # handing the patterns to include().
        urlpatterns = self._get_default_urlpatterns()
        # Apply access controls to all views in the patterns
        permissions = getattr(self, 'permissions', [])
        _decorate_urlconf(urlpatterns, require_perms, permissions)
        _decorate_urlconf(
            urlpatterns, require_component_access, component=self)
        _decorate_urlconf(urlpatterns, _current_component, panel=self)
        # Return the three arguments to django.conf.urls.include
        return urlpatterns, self.slug, self.slug
@six.python_2_unicode_compatible
class PanelGroup(object):
    """A container for a set of :class:`~horizon.Panel` classes.

    Iterating a group yields each registered ``Panel`` instance it names.

    .. attribute:: slug

        A unique string to identify this panel group. Required.

    .. attribute:: name

        A user-friendly name which will be used as the group heading in
        places such as the navigation. Default: ``None``.

    .. attribute:: panels

        A list of panel module names which should be contained within this
        grouping.
    """
    def __init__(self, dashboard, slug=None, name=None, panels=None):
        self.dashboard = dashboard
        self.slug = slug if slug else getattr(self, "slug",
                                              DEFAULT_PANEL_GROUP)
        self.name = name if name else getattr(self, "name", None)
        # Keep panels as a mutable list so plugins can extend it later.
        self.panels = list(panels if panels else getattr(self, "panels", []))

    def __repr__(self):
        return "<%s: %s>" % (type(self).__name__, self.slug)

    def __str__(self):
        return self.name

    def __iter__(self):
        instances = []
        for panel_name in self.panels:
            try:
                instances.append(self.dashboard.get_panel(panel_name))
            except NotRegistered as exc:
                # A listed panel may legitimately be absent (e.g. an
                # unregistered plugin); log at debug level and skip it.
                LOG.debug(exc)
        return iter(instances)
class Dashboard(Registry, HorizonComponent):
    """A base class for defining Horizon dashboards.

    All Horizon dashboards should extend from this base class. It provides the
    appropriate hooks for automatic discovery of :class:`~horizon.Panel`
    modules, automatically constructing URLconfs, and providing
    permission-based access control.

    .. attribute:: name

        The name of the dashboard. This will be displayed in the
        auto-generated navigation and various other places.
        Default: ``''``.

    .. attribute:: slug

        A unique "short name" for the dashboard. The slug is used as
        a component of the URL path for the dashboard. Default: ``''``.

    .. attribute:: panels

        The ``panels`` attribute can be either a flat list containing the name
        of each panel **module** which should be loaded as part of this
        dashboard, or a list of :class:`~horizon.PanelGroup` classes which
        define groups of panels as in the following example::

            class SystemPanels(horizon.PanelGroup):
                slug = "syspanel"
                name = _("System")
                panels = ('overview', 'instances', ...)

            class Syspanel(horizon.Dashboard):
                panels = (SystemPanels,)

        Automatically generated navigation will use the order of the
        modules in this attribute.
        Default: ``[]``.

        .. warning::

            The values for this attribute should not correspond to the
            :attr:`~.Panel.name` attributes of the ``Panel`` classes.
            They should be the names of the Python modules in which the
            ``panel.py`` files live. This is used for the automatic
            loading and registration of ``Panel`` classes much like
            Django's ``ModelAdmin`` machinery.

            Panel modules must be listed in ``panels`` in order to be
            discovered by the automatic registration mechanism.

    .. attribute:: default_panel

        The name of the panel which should be treated as the default
        panel for the dashboard, i.e. when you visit the root URL
        for this dashboard, that's the panel that is displayed.
        Default: ``None``.

    .. attribute:: permissions

        A list of permission names, all of which a user must possess in order
        to access any panel registered with this dashboard. This attribute
        is combined cumulatively with any permissions required on individual
        :class:`~horizon.Panel` classes.

    .. attribute:: urls

        Optional path to a URLconf of additional views for this dashboard
        which are not connected to specific panels. Default: ``None``.

    .. attribute:: nav

    .. method:: nav(context)

        The ``nav`` attribute can be either a boolean value or a callable
        which accepts a ``RequestContext`` object as a single argument
        to control whether or not this dashboard should appear in
        automatically-generated navigation. Default: ``True``.

    .. attribute:: public

        Boolean value to determine whether this dashboard can be viewed
        without being logged in. Defaults to ``False``.
    """
    _registerable_class = Panel
    name = ''
    slug = ''
    urls = None
    # Panel module names (or PanelGroup classes/tuples) to load; see above.
    panels = []
    default_panel = None
    nav = True
    public = False
    def __repr__(self):
        return "<Dashboard: %s>" % self.slug
    def __init__(self, *args, **kwargs):
        super(Dashboard, self).__init__(*args, **kwargs)
        # Populated by _autodiscover(); maps group slug -> PanelGroup.
        self._panel_groups = None
    def get_panel(self, panel):
        """Returns the Panel instance registered with this dashboard."""
        return self._registered(panel)
    def get_panels(self):
        """Returns the Panel instances registered with this dashboard in order.

        Panel grouping information is not included.
        """
        all_panels = []
        panel_groups = self.get_panel_groups()
        for panel_group in panel_groups.values():
            all_panels.extend(panel_group)
        return all_panels
    def get_panel_group(self, slug):
        """Returns the specified :class:`~horizon.PanelGroup`.

        Returns None if not registered.
        """
        return self._panel_groups.get(slug)
    def get_panel_groups(self):
        # Work on a shallow copy so popping does not mutate the registry.
        registered = copy.copy(self._registry)
        panel_groups = []
        # Gather our known panels
        if self._panel_groups is not None:
            for panel_group in self._panel_groups.values():
                for panel in panel_group:
                    registered.pop(panel.__class__)
                panel_groups.append((panel_group.slug, panel_group))
        # Deal with leftovers (such as add-on registrations)
        if registered:
            slugs = [panel.slug for panel in registered.values()]
            new_group = PanelGroup(self,
                                   slug="other",
                                   name=_("Other"),
                                   panels=slugs)
            panel_groups.append((new_group.slug, new_group))
        return collections.OrderedDict(panel_groups)
    def get_absolute_url(self):
        """Returns the default URL for this dashboard.

        The default URL is defined as the URL pattern with ``name="index"``
        in the URLconf for the :class:`~horizon.Panel` specified by
        :attr:`~horizon.Dashboard.default_panel`.
        """
        try:
            return self._registered(self.default_panel).get_absolute_url()
        except Exception:
            # Logging here since this will often be called in a template
            # where the exception would be hidden.
            LOG.exception("Error reversing absolute URL for %s.", self)
            raise
    @property
    def _decorated_urls(self):
        # Build the dashboard URLconf: every non-default panel mounted
        # under its slug, the default panel mounted at the root.
        urlpatterns = self._get_default_urlpatterns()
        default_panel = None
        # Add in each panel's views except for the default view.
        for panel in self._registry.values():
            if panel.slug == self.default_panel:
                default_panel = panel
                continue
            url_slug = panel.slug.replace('.', '/')
            urlpatterns.append(url(r'^%s/' % url_slug,
                                   _wrapped_include(panel._decorated_urls)))
        # Now the default view, which should come last
        if not default_panel:
            raise NotRegistered('The default panel "%s" is not registered.'
                                % self.default_panel)
        urlpatterns.append(
            url(r'', _wrapped_include(default_panel._decorated_urls)))
        # Require login if not public.
        if not self.public:
            _decorate_urlconf(urlpatterns, require_auth)
        # Apply access controls to all views in the patterns
        permissions = getattr(self, 'permissions', [])
        _decorate_urlconf(urlpatterns, require_perms, permissions)
        _decorate_urlconf(urlpatterns, _current_component, dashboard=self)
        # Return the three arguments to django.conf.urls.include
        return urlpatterns, self.slug, self.slug
    def _autodiscover(self):
        """Discovers panels to register from the current dashboard module."""
        if getattr(self, "_autodiscover_complete", False):
            return
        panels_to_discover = []
        panel_groups = []
        # If we have a flat iterable of panel names, wrap it again so
        # we have a consistent structure for the next step.
        if all([isinstance(i, six.string_types) for i in self.panels]):
            self.panels = [self.panels]
        # Now iterate our panel sets.
        default_created = False
        for panel_set in self.panels:
            # Instantiate PanelGroup classes.
            # NOTE(review): collections.Iterable is removed in Python 3.10+
            # (moved to collections.abc) -- confirm the supported Python
            # versions for this module.
            if not isinstance(panel_set, collections.Iterable) and \
                    issubclass(panel_set, PanelGroup):
                panel_group = panel_set(self)
            # Check for nested tuples, and convert them to PanelGroups
            elif not isinstance(panel_set, PanelGroup):
                panel_group = PanelGroup(self, panels=panel_set)
            # NOTE(review): if panel_set is already a PanelGroup *instance*,
            # neither branch above assigns panel_group, so the previous
            # iteration's value is reused (or NameError on the first
            # iteration) -- confirm whether that case can occur.
            # Put our results into their appropriate places
            panels_to_discover.extend(panel_group.panels)
            panel_groups.append((panel_group.slug, panel_group))
            if panel_group.slug == DEFAULT_PANEL_GROUP:
                default_created = True
        # Plugin panels can be added to a default panel group. Make sure such a
        # default group exists.
        if not default_created:
            default_group = PanelGroup(self)
            panel_groups.insert(0, (default_group.slug, default_group))
        self._panel_groups = collections.OrderedDict(panel_groups)
        # Do the actual discovery
        package = '.'.join(self.__module__.split('.')[:-1])
        mod = import_module(package)
        for panel in panels_to_discover:
            try:
                before_import_registry = copy.copy(self._registry)
                import_module('.%s.panel' % panel, package)
            except Exception:
                # Roll back partial registrations from the failed import;
                # re-raise only if the panel module genuinely exists.
                self._registry = before_import_registry
                if module_has_submodule(mod, panel):
                    raise
        self._autodiscover_complete = True
    @classmethod
    def register(cls, panel):
        """Registers a :class:`~horizon.Panel` with this dashboard."""
        panel_class = Horizon.register_panel(cls, panel)
        # Support template loading from panel template directories.
        panel_mod = import_module(panel.__module__)
        panel_dir = os.path.dirname(panel_mod.__file__)
        template_dir = os.path.join(panel_dir, "templates")
        if os.path.exists(template_dir):
            key = os.path.join(cls.slug, panel.slug)
            loaders.panel_template_dirs[key] = template_dir
        return panel_class
    @classmethod
    def unregister(cls, panel):
        """Unregisters a :class:`~horizon.Panel` from this dashboard."""
        success = Horizon.unregister_panel(cls, panel)
        if success:
            # Remove the panel's template directory.
            key = os.path.join(cls.slug, panel.slug)
            if key in loaders.panel_template_dirs:
                del loaders.panel_template_dirs[key]
        return success
    def allowed(self, context):
        """Checks for role based access for this dashboard.

        Checks for access to any panels in the dashboard and of the
        dashboard itself.

        This method should be overridden to return the result of
        any policy checks required for the user to access this dashboard
        when more complex checks are required.
        """
        # if the dashboard has policy rules, honor those above individual
        # panels
        if not self._can_access(context['request']):
            return False
        # check if access is allowed to a single panel,
        # the default for each panel is True
        for panel in self.get_panels():
            if panel.can_access(context):
                return True
        return False
class Workflow(object):
    # NOTE(review): empty placeholder class; its intended role is not
    # visible from this module -- confirm before removing.
    pass
class LazyURLPattern(SimpleLazyObject):
    """A ``SimpleLazyObject`` that also proxies the sequence protocol.

    Forces evaluation of the wrapped pattern list on first iteration,
    reversal, length check or indexing.
    """
    def _ensure_setup(self):
        # Evaluate the wrapped object lazily, exactly once.
        if self._wrapped is empty:
            self._setup()

    def __iter__(self):
        self._ensure_setup()
        return iter(self._wrapped)

    def __reversed__(self):
        self._ensure_setup()
        return reversed(self._wrapped)

    def __len__(self):
        self._ensure_setup()
        return len(self._wrapped)

    def __getitem__(self, idx):
        self._ensure_setup()
        return self._wrapped[idx]
class Site(Registry, HorizonComponent):
    """The overarching class which encompasses all dashboards and panels."""
    # Required for registry
    _registerable_class = Dashboard
    name = "Horizon"
    namespace = 'horizon'
    slug = 'horizon'
    urls = 'horizon.site_urls'

    def __repr__(self):
        return u"<Site: %s>" % self.slug

    @property
    def _conf(self):
        # Shortcut to the global HORIZON_CONFIG dict.
        return conf.HORIZON_CONFIG

    @property
    def dashboards(self):
        # Ordered dashboard slugs from HORIZON_CONFIG (may be empty).
        return self._conf['dashboards']

    @property
    def default_dashboard(self):
        return self._conf['default_dashboard']

    def register(self, dashboard):
        """Registers a :class:`~horizon.Dashboard` with Horizon."""
        return self._register(dashboard)

    def unregister(self, dashboard):
        """Unregisters a :class:`~horizon.Dashboard` from Horizon."""
        return self._unregister(dashboard)

    def registered(self, dashboard):
        # Resolve a Dashboard class or slug to its registered instance.
        return self._registered(dashboard)

    def register_panel(self, dashboard, panel):
        """Registers ``panel`` with the given (registered) dashboard."""
        dash_instance = self.registered(dashboard)
        return dash_instance._register(panel)

    def unregister_panel(self, dashboard, panel):
        """Unregisters ``panel`` from the given (registered) dashboard."""
        dash_instance = self.registered(dashboard)
        if not dash_instance:
            raise NotRegistered("The dashboard %s is not registered."
                                % dashboard)
        return dash_instance._unregister(panel)

    def get_dashboard(self, dashboard):
        """Returns the specified :class:`~horizon.Dashboard` instance."""
        return self._registered(dashboard)

    def get_dashboards(self):
        """Returns an ordered tuple of :class:`~horizon.Dashboard` modules.

        Orders dashboards according to the ``"dashboards"`` key in
        ``HORIZON_CONFIG`` or else returns all registered dashboards
        in alphabetical order.

        Any remaining :class:`~horizon.Dashboard` classes registered with
        Horizon but not listed in ``HORIZON_CONFIG['dashboards']``
        will be appended to the end of the list alphabetically.
        """
        if self.dashboards:
            registered = copy.copy(self._registry)
            dashboards = []
            for item in self.dashboards:
                dashboard = self._registered(item)
                dashboards.append(dashboard)
                registered.pop(dashboard.__class__)
            if registered:
                # Anything not named in the config goes last, sorted.
                extra = sorted(registered.values())
                dashboards.extend(extra)
            return dashboards
        else:
            return sorted(self._registry.values())

    def get_default_dashboard(self):
        """Returns the default :class:`~horizon.Dashboard` instance.

        If ``"default_dashboard"`` is specified in ``HORIZON_CONFIG``
        then that dashboard will be returned. If not, the first dashboard
        returned by :func:`~horizon.get_dashboards` will be returned.
        """
        if self.default_dashboard:
            return self._registered(self.default_dashboard)
        elif self._registry:
            return self.get_dashboards()[0]
        else:
            raise NotRegistered("No dashboard modules have been registered.")

    def get_user_home(self, user):
        """Returns the default URL for a particular user.

        This method can be used to customize where a user is sent when
        they log in, etc. By default it returns the value of
        :meth:`get_absolute_url`.

        An alternative function can be supplied to customize this behavior
        by specifying a either a URL or a function which returns a URL via
        the ``"user_home"`` key in ``HORIZON_CONFIG``. Each of these
        would be valid::

            {"user_home": "/home",}  # A URL
            {"user_home": "my_module.get_user_home",}  # Path to a function
            {"user_home": lambda user: "/" + user.name,}  # A function
            {"user_home": None,}  # Will always return the default dashboard

        This can be useful if the default dashboard may not be accessible
        to all users. When user_home is missing from HORIZON_CONFIG,
        it will default to the settings.LOGIN_REDIRECT_URL value.
        """
        user_home = self._conf['user_home']
        if user_home:
            if callable(user_home):
                return user_home(user)
            elif isinstance(user_home, six.string_types):
                # Assume we've got a URL if there's a slash in it
                if '/' in user_home:
                    return user_home
                else:
                    mod, func = user_home.rsplit(".", 1)
                    return getattr(import_module(mod), func)(user)
            # If it's not callable and not a string, it's wrong.
            raise ValueError('The user_home setting must be either a string '
                             'or a callable object (e.g. a function).')
        else:
            return self.get_absolute_url()

    def get_absolute_url(self):
        """Returns the default URL for Horizon's URLconf.

        The default URL is determined by calling
        :meth:`~horizon.Dashboard.get_absolute_url`
        on the :class:`~horizon.Dashboard` instance returned by
        :meth:`~horizon.get_default_dashboard`.
        """
        return self.get_default_dashboard().get_absolute_url()

    @property
    def _lazy_urls(self):
        """Lazy loading for URL patterns.

        This method avoids problems associated with attempting to evaluate
        the URLconf before the settings module has been loaded.
        """
        def url_patterns():
            return self._urls()[0]
        return LazyURLPattern(url_patterns), self.namespace, self.slug

    def _urls(self):
        """Constructs the URLconf for Horizon from registered Dashboards."""
        urlpatterns = self._get_default_urlpatterns()
        self._autodiscover()
        # Discover each dashboard's panels.
        for dash in self._registry.values():
            dash._autodiscover()
        # Load the plugin-based panel configuration
        self._load_panel_customization()
        # Allow for override modules
        if self._conf.get("customization_module", None):
            customization_module = self._conf["customization_module"]
            bits = customization_module.split('.')
            mod_name = bits.pop()
            package = '.'.join(bits)
            mod = import_module(package)
            try:
                # Roll back partial registrations if the import blows up.
                before_import_registry = copy.copy(self._registry)
                import_module('%s.%s' % (package, mod_name))
            except Exception:
                self._registry = before_import_registry
                if module_has_submodule(mod, mod_name):
                    raise
        # Compile the dynamic urlconf.
        for dash in self._registry.values():
            urlpatterns.append(url(r'^%s/' % dash.slug,
                                   _wrapped_include(dash._decorated_urls)))
        # Return the three arguments to django.conf.urls.include
        return urlpatterns, self.namespace, self.slug

    def _autodiscover(self):
        """Discovers modules to register from ``settings.INSTALLED_APPS``.

        This makes sure that the appropriate modules get imported to register
        themselves with Horizon.
        """
        if not getattr(self, '_registerable_class', None):
            raise ImproperlyConfigured('You must set a '
                                       '"_registerable_class" property '
                                       'in order to use autodiscovery.')
        # Discover both dashboards and panels, in that order
        for mod_name in ('dashboard', 'panel'):
            for app in settings.INSTALLED_APPS:
                mod = import_module(app)
                try:
                    before_import_registry = copy.copy(self._registry)
                    import_module('%s.%s' % (app, mod_name))
                except Exception:
                    # Roll back; re-raise only if the submodule exists
                    # (its absence is expected for most apps).
                    self._registry = before_import_registry
                    if module_has_submodule(mod, mod_name):
                        raise

    def _load_panel_customization(self):
        """Applies the plugin-based panel configurations.

        This method parses the panel customization from the ``HORIZON_CONFIG``
        and make changes to the dashboard accordingly.

        It supports adding, removing and setting default panels on the
        dashboard. It also support registering a panel group.
        """
        panel_customization = self._conf.get("panel_customization", [])
        # Process all the panel groups first so that they exist before panels
        # are added to them and Dashboard._autodiscover() doesn't wipe out any
        # panels previously added when its panel groups are instantiated.
        panel_configs = []
        for config in panel_customization:
            if config.get('PANEL'):
                panel_configs.append(config)
            elif config.get('PANEL_GROUP'):
                self._process_panel_group_configuration(config)
            else:
                # Fix: config is a dict; the old code logged
                # ``config.__name__`` which raised AttributeError here.
                LOG.warning("Skipping %s because it doesn't have PANEL or "
                            "PANEL_GROUP defined.", config)
        # Now process the panels.
        for config in panel_configs:
            self._process_panel_configuration(config)

    def _process_panel_configuration(self, config):
        """Add, remove and set default panels on the dashboard."""
        try:
            dashboard = config.get('PANEL_DASHBOARD')
            if not dashboard:
                # Fix: log the dict itself, not a nonexistent __name__.
                LOG.warning("Skipping %s because it doesn't have "
                            "PANEL_DASHBOARD defined.", config)
                return
            panel_slug = config.get('PANEL')
            dashboard_cls = self.get_dashboard(dashboard)
            panel_group = config.get('PANEL_GROUP')
            default_panel = config.get('DEFAULT_PANEL')
            # Set the default panel
            if default_panel:
                dashboard_cls.default_panel = default_panel
            # Remove the panel
            if config.get('REMOVE_PANEL', False):
                for panel in dashboard_cls.get_panels():
                    if panel_slug == panel.slug:
                        dashboard_cls.unregister(panel.__class__)
            elif config.get('ADD_PANEL', None):
                # Add the panel to the dashboard
                panel_path = config['ADD_PANEL']
                mod_path, panel_cls = panel_path.rsplit(".", 1)
                try:
                    mod = import_module(mod_path)
                except ImportError as e:
                    LOG.warning("Could not import panel module %(module)s: "
                                "%(exc)s", {'module': mod_path, 'exc': e})
                    return
                panel = getattr(mod, panel_cls)
                # Test if the can_register method is present and call it if
                # it is, to determine whether the panel should be loaded.
                if hasattr(panel, 'can_register') and \
                        callable(getattr(panel, 'can_register')):
                    if not panel.can_register():
                        LOG.debug("Load condition failed for panel: %(panel)s",
                                  {'panel': panel_slug})
                        return
                dashboard_cls.register(panel)
                if panel_group:
                    dashboard_cls.get_panel_group(panel_group).\
                        panels.append(panel.slug)
                else:
                    panels = list(dashboard_cls.panels)
                    panels.append(panel)
                    dashboard_cls.panels = tuple(panels)
        except Exception as e:
            LOG.warning('Could not process panel %(panel)s: %(exc)s',
                        {'panel': panel_slug, 'exc': e})

    def _process_panel_group_configuration(self, config):
        """Adds a panel group to the dashboard."""
        panel_group_slug = config.get('PANEL_GROUP')
        try:
            dashboard = config.get('PANEL_GROUP_DASHBOARD')
            if not dashboard:
                # Fix: log the dict itself, not a nonexistent __name__.
                LOG.warning("Skipping %s because it doesn't have "
                            "PANEL_GROUP_DASHBOARD defined.", config)
                return
            dashboard_cls = self.get_dashboard(dashboard)
            panel_group_name = config.get('PANEL_GROUP_NAME')
            if not panel_group_name:
                # Fix: log the dict itself, not a nonexistent __name__.
                LOG.warning("Skipping %s because it doesn't have "
                            "PANEL_GROUP_NAME defined.", config)
                return
            # Create the panel group class
            panel_group = type(panel_group_slug,
                               (PanelGroup, ),
                               {'slug': panel_group_slug,
                                'name': panel_group_name,
                                'panels': []},)
            # Add the panel group to dashboard
            panels = list(dashboard_cls.panels)
            panels.append(panel_group)
            dashboard_cls.panels = tuple(panels)
            # Trigger the autodiscovery to completely load the new panel group
            dashboard_cls._autodiscover_complete = False
            dashboard_cls._autodiscover()
        except Exception as e:
            LOG.warning('Could not process panel group %(panel_group)s: '
                        '%(exc)s',
                        {'panel_group': panel_group_slug, 'exc': e})
class HorizonSite(Site):
    """A singleton implementation of Site.

    All dealings with horizon get the same instance no matter what.
    There can be only one.
    """
    _instance = None

    def __new__(cls, *args, **kwargs):
        # Lazily create the single shared instance on first construction.
        if not cls._instance:
            instance = super(HorizonSite, cls).__new__(cls, *args, **kwargs)
            cls._instance = instance
        return cls._instance
# The one true Horizon
# (module-level singleton; dashboard/panel registration goes through it)
Horizon = HorizonSite()
|
NeCTAR-RC/horizon
|
horizon/base.py
|
Python
|
apache-2.0
| 40,078
|
[
"VisIt"
] |
901131a86a16145fe8854264ff10ab3f074b0afeb23869581061ca40f4bb47b2
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Extra form fields and widgets.
import logging
import json
import urllib
from django.forms import Widget, Field
from django import forms
from django.forms.util import ErrorList, ValidationError, flatatt
from django.forms.fields import MultiValueField, CharField, ChoiceField, BooleanField
from django.forms.widgets import MultiWidget, Select, TextInput, Textarea, HiddenInput, Input
from django.utils import formats
from django.utils.safestring import mark_safe
from django.utils.encoding import StrAndUnicode, force_unicode
import desktop.lib.i18n
from desktop.lib.i18n import smart_str
LOG = logging.getLogger(__name__)
class SplitDateTimeWidget(forms.MultiWidget):
  """
  A Widget that splits datetime input into two <input type="text"> boxes.

  The date_class and time_class attributes specify class names to be given
  specifically to the corresponding DateInput and TimeInput widgets.
  """
  date_format = formats.get_format('DATE_INPUT_FORMATS')[0]
  time_format = formats.get_format('TIME_INPUT_FORMATS')[0]

  def __init__(self, attrs=None, date_format=None, time_format=None, date_class='date', time_class='time'):
    # Fix: the previous implementation crashed with TypeError when attrs
    # was None (the documented default) and with KeyError when attrs had
    # no 'class' key; it also mutated the caller's attrs dict.
    attrs = dict(attrs) if attrs else {}
    date_attrs = dict(attrs)
    time_attrs = dict(attrs)
    self._append_css_class(date_attrs, date_class)
    self._append_css_class(time_attrs, time_class)
    widgets = (forms.DateInput(attrs=date_attrs, format=date_format),
               forms.TimeInput(attrs=time_attrs, format=time_format))
    # The per-widget class should not leak onto the shared wrapper attrs.
    attrs.pop('class', None)
    super(SplitDateTimeWidget, self).__init__(widgets, attrs)

  @staticmethod
  def _append_css_class(widget_attrs, css_class):
    """Append css_class to widget_attrs['class'], avoiding duplicates."""
    classes = [clss for clss in widget_attrs.get('class', '').split()
               if clss != css_class]
    classes.append(css_class)
    widget_attrs['class'] = ' '.join(classes)

  def decompress(self, value):
    # Split a datetime into [date, time]; microseconds are dropped for
    # display purposes.
    if value:
      return [value.date(), value.time().replace(microsecond=0)]
    return [None, None]
class MultipleInputWidget(Widget):
  """
  Together with MultipleInputField, represents repeating a form element many times,
  and representing a list of values for that element.

  This could be made generic to work with any widget, but currently
  renders itself as a regular old <input>.
  """
  def __init__(self, attrs=None):
    super(MultipleInputWidget, self).__init__(attrs)

  def render(self, name, value, attrs=None):
    """Render max(len(value)+1, attrs['count'] or 5) text inputs."""
    # Local import keeps this module's import block unchanged.
    from django.utils.html import escape
    if value is None:
      value = ()
    if attrs is None or "count" not in attrs:
      count = 5
    else:
      count = attrs["count"]
    # Always offer at least one empty input beyond the existing values.
    count = max(len(value) + 1, count)
    safe_name = escape(name)
    rendered = []
    for i in range(count):
      if i < len(value):
        # Fix: escape user-supplied values before embedding them in
        # markup to avoid breaking the HTML / XSS.
        rendered.append('<input name="%s" value="%s"></input>'
                        % (safe_name, escape(value[i])))
      else:
        rendered.append('<input name="%s"></input>' % safe_name)
    return ''.join(rendered)

  def value_from_datadict(self, data, files, name):
    # Sometimes this is a QueryDict, and sometimes a regular dict, so we
    # adapt (fix: the old code called getlist() unconditionally, which a
    # plain dict does not provide).
    if hasattr(data, 'getlist'):
      values = data.getlist(name)
    else:
      values = data.get(name, [])
      if not isinstance(values, (list, tuple)):
        values = [values]
    non_empty = lambda x: len(x) != 0
    return filter(non_empty, values)
class MultipleInputField(Field):
    """Form field counterpart of MultipleInputWidget; passes the list of values through unchanged."""
    widget = MultipleInputWidget

    def __init__(self, *args, **kwargs):
        super(MultipleInputField, self).__init__(*args, **kwargs)

    def clean(self, value):
        # No per-value validation; the widget already filtered out empties.
        return value
# Sentinel value and display label used by ChoiceOrOtherWidget/Field for the
# free-text "Other..." option appended to the drop-down choices.
OTHER_VAL, OTHER_PRES = "__other__", "Other..."
class ChoiceOrOtherWidget(MultiWidget):
    """
    Together with ChoiceOrOtherField represents a drop-down and an "other"
    text-box.

    This may not map well onto an AJAX model, since in that world
    the JS presentation will handle sending only one value.
    """
    def __init__(self, attrs=None, choices=()):
        self.choices = choices
        # Choices are (value, presentation) pairs (see ChoiceOrOtherField,
        # which appends (OTHER_VAL, OTHER_PRES)).  Collect the *values*
        # minus the "other" sentinel so decompress() can tell whether a
        # value belongs to the drop-down.  The original unpacked the pairs
        # backwards and collected the presentation strings, so initial
        # values always fell through to the "other" text box.
        self.values = [ val for val, pres in choices if val != OTHER_VAL ]
        widgets = (
            Select(attrs=attrs, choices=choices),
            TextInput(attrs=attrs)
        )
        super(ChoiceOrOtherWidget, self).__init__(widgets, attrs)

    def decompress(self, value):
        """Split a single value into [drop-down value, other text]."""
        if value in self.values:
            return [value, ""]
        else:
            return [OTHER_VAL, value]
class ChoiceOrOtherField(MultiValueField):
    """
    Pairs a drop-down of `choices` with a free-text "Other..." entry.

    compress() yields the drop-down value, or the free text when the
    "Other..." option is selected.
    """
    def __init__(self, choices, initial=None, *args, **kwargs):
        assert not kwargs.get('required', False), "required=True is not supported"
        allchoices = list(choices)
        allchoices.append((OTHER_VAL, OTHER_PRES))
        self.widget = ChoiceOrOtherWidget(choices=allchoices)
        choice_initial, other_initial = None, None
        if initial is not None:
            # Match initial against one of the known values; otherwise it
            # becomes the free-text "other" initial.
            known_values = [value for value, _presentation in choices]
            if initial in known_values:
                choice_initial = initial
            else:
                choice_initial, other_initial = OTHER_VAL, initial
        fields = [
            ChoiceField(required=False, choices=allchoices),
            CharField(required=False)
        ]
        # Be careful not to make the initial value a tuple;
        # it's checked explicitly to be a list in MultiWidget's render.
        super(ChoiceOrOtherField, self).__init__(fields, initial=[choice_initial, other_initial], *args, **kwargs)

    def compress(self, data_list):
        """Reduce the [choice, other] pair to a single cleaned value."""
        if not data_list:
            return None
        selected, other = data_list[0], data_list[1]
        if selected == OTHER_VAL:
            return other
        if other:
            raise ValidationError("Either select from the drop-down or select %s" % OTHER_PRES)
        return selected
class KeyValueWidget(Textarea):
    """Textarea widget whose value may be given as a dict of key/value pairs."""
    def render(self, name, value, attrs=None):
        # If we have a dictionary, render back into a "k1=v1 k2=v2" string.
        # Use %s formatting rather than "=".join so non-string values
        # (e.g. ints) don't raise TypeError.
        if isinstance(value, dict):
            value = " ".join("%s=%s" % (k, v) for k, v in value.iteritems())
        return super(KeyValueWidget, self).render(name, value, attrs)
class KeyValueField(CharField):
    """
    Represents an input area for key/value pairs in the following format:
      "<key1>=<val1> <key2>=<value2>...."
    clean() returns a dictionary of parsed key/value pairs.
    """
    widget = KeyValueWidget

    def __init__(self, *args, **kwargs):
        super(KeyValueField, self).__init__(*args, **kwargs)

    def clean(self, value):
        """Converts the raw key=val text to a dictionary of key/val pairs"""
        super(KeyValueField, self).clean(value)
        try:
            # maxsplit=1 so values may themselves contain '=' characters.
            # The previous maxsplit of 2 produced 3-element lists for input
            # like "k=v=x", making dict() fail and rejecting valid input.
            return dict(kvpair.split('=', 1) for kvpair in value.split())
        except Exception:
            raise ValidationError("Not in key=value format.")
class UnicodeEncodingField(ChoiceOrOtherField):
    """
    The cleaned value of the field is the actual encoding, not a tuple
    """
    # (codec name, display label) pairs; codec names are Python codec aliases.
    CHOICES = [
        ('utf-8', 'Unicode UTF8'),
        ('utf-16', 'Unicode UTF16'),
        ('latin_1', 'Western ISO-8859-1'),
        ('latin_9', 'Western ISO-8859-15'),
        ('cyrillic', 'Cyrillic'),
        ('arabic', 'Arabic'),
        ('greek', 'Greek'),
        ('hebrew', 'Hebrew'),
        ('shift_jis', 'Japanese (Shift-JIS)'),
        ('euc-jp', 'Japanese (EUC-JP)'),
        ('iso2022_jp', 'Japanese (ISO-2022-JP)'),
        ('euc-kr', 'Korean (EUC-KR)'),
        ('iso2022-kr', 'Korean (ISO-2022-KR)'),
        ('gbk', 'Chinese Simplified (GBK)'),
        ('big5hkscs', 'Chinese Traditional (Big5-HKSCS)'),
        ('ascii', 'ASCII'),
    ]

    def __init__(self, initial=None, *args, **kwargs):
        ChoiceOrOtherField.__init__(self, UnicodeEncodingField.CHOICES, initial, *args, **kwargs)

    def clean(self, value):
        """Return the selected encoding string, validating its availability."""
        # `value` is the [choice, other] pair from the MultiWidget.  The old
        # "a and b or c" idiom fell back to value[0] when the free-text entry
        # was blank, so the OTHER_VAL sentinel itself leaked through and
        # produced a confusing "'__other__' encoding is not available" error.
        if value[0] == OTHER_VAL:
            encoding = value[1]
        else:
            encoding = value[0]
        if encoding and not desktop.lib.i18n.validate_encoding(encoding):
            raise forms.ValidationError("'%s' encoding is not available" % (encoding,))
        return encoding
class MultiForm(object):
    """
    Initialize this with the necessary sub-forms, and then call bind(request).

    Manages a dictionary of named sub-forms (Forms, Formsets, or nested
    MultiForms), namespacing their fields with a common prefix.

    TODO(philip): Should users use this by extending
    it?  Or is this really a forms.Field subclass.
    """
    def __init__(self, prefix='', **kwargs):
        """
        prefix is prepended to the prefix of the member forms

        Keyword arguments are:
          key=form_class, key2=form_class2, ...
        The form_class can be a Form, a Formset, or a MultiForm.
        It is currently not possible to specify ctor arguments to the form_class.
        """
        self._form_types = kwargs
        self._is_bound = False
        self._prefix = prefix

    def __str__(self):
        # Fixed typo: previously printed "MultForm".
        return 'MultiForm at %s' % (self._prefix)

    def add_prefix(self, name):
        """Returns the subform name with a prefix prepended, if the prefix is set"""
        return self._prefix and ('%s.%s' % (self._prefix, name)) or name

    def get_subforms(self):
        """get_subforms() -> An iterator over (name, subform)"""
        assert self._is_bound
        return self._forms.iteritems()

    def has_subform_data(self, subform_name, data):
        """Test if data contains any information bound for the subform"""
        prefix = self.add_prefix(subform_name)
        # Bug fix: the original computed len([k.startswith(prefix) ...]),
        # a list of *booleans* over every key, which was non-empty (and the
        # method True) whenever `data` had any keys at all.
        return any(k.startswith(prefix) for k in data.keys())

    def add_subform(self, name, form_cls, data=None):
        """Dynamically extend this MultiForm to include a new subform"""
        self._form_types[name] = form_cls
        self._bind_one(name, form_cls, data)

    def remove_subform(self, name):
        """Dynamically remove a subform.  Raises KeyError."""
        del self._form_types[name]
        if name in self._forms:
            del self._forms[name]

    def bind(self, data=None, instances=None):
        """Instantiate all registered sub-forms, optionally bound to data/instances."""
        self._is_bound = True
        self._forms = {}
        for key, form_cls in self._form_types.iteritems():
            instance = instances is not None and instances.get(key) or None
            self._bind_one(key, form_cls, data, instance=instance)

    def _bind_one(self, key, form_cls, data=None, instance=None):
        # Nested MultiForms take only a prefix and bind recursively;
        # regular forms are constructed directly.
        prefix = self.add_prefix(key)
        if issubclass(form_cls, MultiForm):
            member = form_cls(prefix=prefix)
            member.bind(data=data)
        elif instance is not None:
            member = form_cls(data=data, prefix=prefix, instance=instance)
        else:
            member = form_cls(data=data, prefix=prefix)
        self._forms[key] = member

    def __getattr__(self, key):
        # Convenience accessor: multi_form.subname -> bound subform (or None).
        # Only invoked for attributes not found through normal lookup.
        assert self._is_bound
        return self._forms.get(key)

    def is_valid(self):
        assert self._is_bound
        r = True
        # Explicitly iterate through all of them; we don't want
        # to abort early, since we want each form's is_valid to be run.
        for f in self._forms.values():
            if not f.is_valid():
                LOG.error(smart_str(f.errors))
                r = False
        return r
class SubmitButton(Input):
    """
    A widget that presents itself as a submit button.
    """
    input_type = "submit"

    def render(self, name, value, attrs=None):
        """Render as a <button>; the text comes from self.label, defaulting to "Submit"."""
        if value is None:
            value = 'True'
        final_attrs = self.build_attrs(attrs, type=self.input_type, name=name, value=value)
        if value != '':
            # Only add the 'value' attribute if a value is non-empty.
            final_attrs['value'] = force_unicode(value)
        label = getattr(self, "label", "Submit")
        return mark_safe(u'<button%s>%s</button>' % (flatatt(final_attrs), label))
class ManagementForm(forms.Form):
    """
    Bookkeeping form used by BaseSimpleFormSet: renders the "add" button and
    carries a hidden counter holding the next available member-form id.
    """
    add = BooleanField(widget=SubmitButton,required=False)
    next_form_id = forms.IntegerField(widget=forms.HiddenInput, initial=0)

    def __init__(self, add_label='+', *args, **kwargs):
        super(ManagementForm, self).__init__(*args, **kwargs)
        # The label is shown both on the field and on its SubmitButton widget.
        self.fields["add"].label = add_label
        self.fields["add"].widget.label = add_label

    def new_form_id(self):
        """
        new_form_id() -> The id for the next member of the formset.  Increment hidden value.

        The ManagementForm needs to keep track of a monotonically increasing id, so that
        new member forms don't reuse ids of deleted forms.
        """
        # Hack.  self.data is supposed to be immutable.
        res = self.form_counts()
        data2 = self.data.copy()
        data2[self.add_prefix('next_form_id')] = str(res + 1)
        self.data = data2
        return res

    def form_counts(self):
        """form_counts() -> The max number of forms, some could be non-existent (deleted)."""
        try:
            return int(self.data[ self.add_prefix('next_form_id') ])
        except KeyError:
            # Unbound form: fall back to the field's declared initial (0).
            return self.fields['next_form_id'].initial
class BaseSimpleFormSet(StrAndUnicode):
    """
    Manages multiple instances of the same form, and easily modifies how many of said
    form there are.

    This is similar to django.forms.formsets.BaseFormSet,
    but is hopefully simpler.

    We take a base form (that's passed in via the simple_formset_factory
    machinery), and initialize it with prefix="prefix/N/", for integer
    values of N.  "prefix/add" specifies generating an extra empty one,
    and "prefix/N/_delete" specifies deleting them.
    """
    def __init__(self, data=None, prefix=None, initial=None):
        # Bound iff POST data was supplied.
        self.is_bound = data is not None
        assert prefix, "Prefix is required."
        self.prefix = prefix
        # The initial is sometimes set before the ctor, especially when used in a MultiForm,
        # which doesn't allow passing custom ctor arguments.
        self.initial = initial or getattr(self, 'initial', initial)
        self.data = data
        self._non_form_errors = None
        self._errors = None
        self._construct_forms()

    def make_prefix(self, i):
        # Prefix for member form i: "<formset prefix>-<i>".
        return "%s-%s" % (self.prefix, i)

    def _construct_mgmt_form(self):
        """Build self.management_form, which tracks member ids and the add button."""
        if self.data:
            form = ManagementForm(data=self.data, prefix=self.prefix, add_label=self.add_label)
            if not form.is_valid():
                raise forms.ValidationError('Management form missing for %s' % (self.prefix))
        else:
            # A new unbound formset
            n_initial = self.initial and len(self.initial) or 0
            form = ManagementForm(prefix=self.prefix,
                                  add_label=self.add_label,
                                  initial={ 'next_form_id': n_initial })
        self.management_form = form

    def empty_form(self):
        """Template member form (prefix "TEMPLATE"), e.g. for client-side cloning."""
        f = self.form(prefix=self.make_prefix("TEMPLATE"))
        f.fields["_exists"] = BooleanField(initial=True, widget=HiddenInput)
        f.fields["_deleted"] = BooleanField(initial=True, required=False, widget=SubmitButton)
        return f

    def _construct_forms(self):
        """Instantiate the member forms from POST data or from self.initial."""
        self._construct_mgmt_form()
        self.forms = []
        if not self.is_bound:
            if self.initial is not None:
                for i, data in enumerate(self.initial):
                    self.forms.append(self.form(initial=data, prefix=self.make_prefix(i)))
            else:
                self.forms = []
        else:
            for i in range(0, self.management_form.form_counts()):
                # Since the form might be "not valid", you can't use
                # cleaned_data to get at these fields.
                if self.make_prefix(i) + "-_exists" in self.data:
                    if self.data.get(self.make_prefix(i) + "-_deleted") != "True":
                        f = self.form(data=self.data, prefix=self.make_prefix(i))
                        self.forms.append(f)
            if self.management_form.is_valid() and self.management_form.cleaned_data["add"]:
                self.add_form()
        for f in self.forms:
            f.fields["_exists"] = BooleanField(initial=True, widget=HiddenInput)
            # Though _deleted is marked as initial=True, the value is only transmitted
            # if this is the button that's clicked, so the real default is False.
            f.fields["_deleted"] = BooleanField(initial=True, required=False, widget=SubmitButton)
            f.fields["_deleted"].widget.label = "(x)"

    def add_form(self):
        """Programmatically add a form"""
        prefix = self.make_prefix(self.management_form.new_form_id())
        member = self.form(prefix=prefix)
        self.forms.append(member)

    def clean(self):
        """Hook for custom cleaning."""
        pass

    def full_clean(self):
        """Similar to formsets.py:full_clean"""
        self._errors = []
        if not self.is_bound:
            # Unbound formsets have nothing to validate.
            return
        for f in self.forms:
            self._errors.append(f.errors)
        try:
            self.clean()
        except ValidationError, e:
            self._non_form_errors = e.messages

    @property
    def errors(self):
        # Lazily runs full_clean() the first time errors are requested.
        if self._errors is None:
            self.full_clean()
        return self._errors

    def non_form_errors(self):
        """Errors raised by clean() that aren't tied to a particular member form."""
        if self._non_form_errors is not None:
            return self._non_form_errors
        return ErrorList()

    def is_valid(self):
        if not self.is_bound:
            return False
        valid = True
        # Iterate through all, to find all errors, not just first ones.
        for i, f in enumerate(self.forms):
            if bool(self.errors[i]) or not f.is_valid():
                valid = False
        return valid and not bool(self.non_form_errors())
def simple_formset_factory(form, add_label="+", formset=BaseSimpleFormSet, initial=None):
    """Return a FormSet class for the given form class."""
    cls_name = form.__name__ + 'SimpleFormSet'
    cls_attrs = dict(form=form, add_label=add_label, initial=initial)
    return type(cls_name, (formset,), cls_attrs)
class DependencyAwareForm(forms.Form):
    """
    Inherit from this class and add
    (condition name, condition value, child name) tuples
    to self.dependencies to describe dependencies between
    certain form fields.

    The semantic meaning is that the field named "child name"
    is required if and only if the field "condition name"
    has value "condition value".

    For an example, visit the jframegallery ("fields with dependencies").
    """
    def clean(self):
        ret = super(DependencyAwareForm, self).clean()
        if self.errors:
            return
        for cond, required_value, child in self.dependencies:
            if self.cleaned_data.get(cond, None) == required_value:
                child_val = self.cleaned_data.get(child)
                # Treat None and the empty string as "not provided".
                if child_val in [None, '']:
                    self._errors.setdefault(child, []).append("%s is required if %s is %s" % (child, cond, str(required_value)))
        return ret

    def _calculate_data(self):
        """
        Returns a "dict" with mappings between ids, desired values, and ids.
        """
        def data(cond, required_value, child):
            """Calculates data for single item."""
            return self.add_prefix(cond), str(required_value), self.add_prefix(child)
        return [ data(*x) for x in self.dependencies ]

    def render_dep_metadata(self):
        # JSON-encode and URL-escape the dependency triples so client-side
        # JS can enforce the same rules.
        return urllib.quote_plus(json.dumps(self._calculate_data(), separators=(',', ':')))
|
vmanoria/bluemix-hue-filebrowser
|
hue-3.8.1-bluemix/desktop/core/src/desktop/lib/django_forms.py
|
Python
|
gpl-2.0
| 18,454
|
[
"VisIt"
] |
b7969f15db1b2ddd173b22f073b6b53a45e9a984e73bd509b9c9545d21228a82
|
import numpy as np
import monti_carlo as mc
import matplotlib.pylab as plt
from scipy.stats import gaussian_kde
import scipy.integrate as itge
def basic_gaussian_monti_carlo():
    """Draw 2-D Gaussian Monte Carlo samples and scatter-plot them,
    colouring each point by a Gaussian kernel density estimate."""
    n_samples = 10000
    q_args = (np.array([0.0, 0.0]), 3.0)
    samples = mc.monti_carlo_samples(mc.gaussian_random_qfunc, n_samples, q_args, ndim=2)
    xs = samples[:, 0]
    ys = samples[:, 1]
    stacked = np.vstack([xs, ys])
    density = gaussian_kde(stacked)(stacked)
    plt.scatter(xs, ys, c=density, s=100, edgecolor='')
    plt.colorbar()
    plt.show()
    plt.clf()
# Script entry point: render the demo scatter plot when run directly.
if __name__ == '__main__':
    basic_gaussian_monti_carlo()
|
juhaj/topics-python-in-research
|
sample-answers/monti_carlo_gaussian.py
|
Python
|
gpl-3.0
| 550
|
[
"Gaussian"
] |
0c68ad06ed961170c977aed46217fa7c6a727e18e2c011a185fb91fbba104b06
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provider related utilities
"""
from libcloud.utils.misc import get_driver as _get_provider_driver
from libcloud.utils.misc import set_driver as _set_provider_driver
from libcloud.compute.types import Provider, DEPRECATED_RACKSPACE_PROVIDERS
from libcloud.compute.types import OLD_CONSTANT_TO_NEW_MAPPING
# Public API of this module.  `set_driver` is defined below and intended for
# third-party driver registration; it was previously missing from __all__.
__all__ = [
    "Provider",
    "DRIVERS",
    "get_driver",
    "set_driver"]
# Registry mapping Provider constants to (module path, driver class name)
# pairs.  Driver classes are imported lazily by get_driver() via
# libcloud.utils.misc.get_driver, so listing a driver here does not import it.
DRIVERS = {
    Provider.AZURE:
    ('libcloud.compute.drivers.azure', 'AzureNodeDriver'),
    Provider.DUMMY:
    ('libcloud.compute.drivers.dummy', 'DummyNodeDriver'),
    Provider.EC2_US_EAST:
    ('libcloud.compute.drivers.ec2', 'EC2NodeDriver'),
    Provider.EC2_EU_WEST:
    ('libcloud.compute.drivers.ec2', 'EC2EUNodeDriver'),
    Provider.EC2_US_WEST:
    ('libcloud.compute.drivers.ec2', 'EC2USWestNodeDriver'),
    Provider.EC2_US_WEST_OREGON:
    ('libcloud.compute.drivers.ec2', 'EC2USWestOregonNodeDriver'),
    Provider.EC2_AP_SOUTHEAST:
    ('libcloud.compute.drivers.ec2', 'EC2APSENodeDriver'),
    Provider.EC2_AP_NORTHEAST:
    ('libcloud.compute.drivers.ec2', 'EC2APNENodeDriver'),
    Provider.EC2_SA_EAST:
    ('libcloud.compute.drivers.ec2', 'EC2SAEastNodeDriver'),
    Provider.EC2_AP_SOUTHEAST2:
    ('libcloud.compute.drivers.ec2', 'EC2APSESydneyNodeDriver'),
    Provider.ECP:
    ('libcloud.compute.drivers.ecp', 'ECPNodeDriver'),
    Provider.ELASTICHOSTS:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsNodeDriver'),
    Provider.ELASTICHOSTS_UK1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK1NodeDriver'),
    Provider.ELASTICHOSTS_UK2:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK2NodeDriver'),
    Provider.ELASTICHOSTS_US1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS1NodeDriver'),
    Provider.ELASTICHOSTS_US2:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS2NodeDriver'),
    Provider.ELASTICHOSTS_US3:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS3NodeDriver'),
    Provider.ELASTICHOSTS_CA1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsCA1NodeDriver'),
    Provider.ELASTICHOSTS_AU1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsAU1NodeDriver'),
    Provider.ELASTICHOSTS_CN1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsCN1NodeDriver'),
    Provider.SKALICLOUD:
    ('libcloud.compute.drivers.skalicloud', 'SkaliCloudNodeDriver'),
    Provider.SERVERLOVE:
    ('libcloud.compute.drivers.serverlove', 'ServerLoveNodeDriver'),
    Provider.CLOUDSIGMA:
    ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaNodeDriver'),
    Provider.GCE:
    ('libcloud.compute.drivers.gce', 'GCENodeDriver'),
    Provider.GOGRID:
    ('libcloud.compute.drivers.gogrid', 'GoGridNodeDriver'),
    Provider.RACKSPACE:
    ('libcloud.compute.drivers.rackspace', 'RackspaceNodeDriver'),
    Provider.RACKSPACE_FIRST_GEN:
    ('libcloud.compute.drivers.rackspace', 'RackspaceFirstGenNodeDriver'),
    Provider.HPCLOUD:
    ('libcloud.compute.drivers.hpcloud', 'HPCloudNodeDriver'),
    Provider.KILI:
    ('libcloud.compute.drivers.kili', 'KiliCloudNodeDriver'),
    Provider.VPSNET:
    ('libcloud.compute.drivers.vpsnet', 'VPSNetNodeDriver'),
    Provider.LINODE:
    ('libcloud.compute.drivers.linode', 'LinodeNodeDriver'),
    Provider.RIMUHOSTING:
    ('libcloud.compute.drivers.rimuhosting', 'RimuHostingNodeDriver'),
    Provider.VOXEL:
    ('libcloud.compute.drivers.voxel', 'VoxelNodeDriver'),
    Provider.SOFTLAYER:
    ('libcloud.compute.drivers.softlayer', 'SoftLayerNodeDriver'),
    Provider.EUCALYPTUS:
    ('libcloud.compute.drivers.ec2', 'EucNodeDriver'),
    Provider.IBM:
    ('libcloud.compute.drivers.ibm_sce', 'IBMNodeDriver'),
    Provider.OPENNEBULA:
    ('libcloud.compute.drivers.opennebula', 'OpenNebulaNodeDriver'),
    Provider.BRIGHTBOX:
    ('libcloud.compute.drivers.brightbox', 'BrightboxNodeDriver'),
    Provider.NIMBUS:
    ('libcloud.compute.drivers.ec2', 'NimbusNodeDriver'),
    Provider.BLUEBOX:
    ('libcloud.compute.drivers.bluebox', 'BlueboxNodeDriver'),
    Provider.GANDI:
    ('libcloud.compute.drivers.gandi', 'GandiNodeDriver'),
    Provider.OPSOURCE:
    ('libcloud.compute.drivers.opsource', 'OpsourceNodeDriver'),
    Provider.DIMENSIONDATA:
    ('libcloud.compute.drivers.dimensiondata', 'DimensionDataNodeDriver'),
    Provider.OPENSTACK:
    ('libcloud.compute.drivers.openstack', 'OpenStackNodeDriver'),
    Provider.NINEFOLD:
    ('libcloud.compute.drivers.ninefold', 'NinefoldNodeDriver'),
    Provider.VCLOUD:
    ('libcloud.compute.drivers.vcloud', 'VCloudNodeDriver'),
    Provider.TERREMARK:
    ('libcloud.compute.drivers.vcloud', 'TerremarkDriver'),
    Provider.CLOUDSTACK:
    ('libcloud.compute.drivers.cloudstack', 'CloudStackNodeDriver'),
    Provider.LIBVIRT:
    ('libcloud.compute.drivers.libvirt_driver', 'LibvirtNodeDriver'),
    Provider.JOYENT:
    ('libcloud.compute.drivers.joyent', 'JoyentNodeDriver'),
    Provider.VCL:
    ('libcloud.compute.drivers.vcl', 'VCLNodeDriver'),
    Provider.KTUCLOUD:
    ('libcloud.compute.drivers.ktucloud', 'KTUCloudNodeDriver'),
    Provider.HOSTVIRTUAL:
    ('libcloud.compute.drivers.hostvirtual', 'HostVirtualNodeDriver'),
    Provider.ABIQUO:
    ('libcloud.compute.drivers.abiquo', 'AbiquoNodeDriver'),
    Provider.DIGITAL_OCEAN:
    ('libcloud.compute.drivers.digitalocean', 'DigitalOceanNodeDriver'),
    Provider.NEPHOSCALE:
    ('libcloud.compute.drivers.nephoscale', 'NephoscaleNodeDriver'),
    Provider.CLOUDFRAMES:
    ('libcloud.compute.drivers.cloudframes', 'CloudFramesNodeDriver'),
    Provider.EXOSCALE:
    ('libcloud.compute.drivers.exoscale', 'ExoscaleNodeDriver'),
    Provider.IKOULA:
    ('libcloud.compute.drivers.ikoula', 'IkoulaNodeDriver'),
    Provider.OUTSCALE_SAS:
    ('libcloud.compute.drivers.ec2', 'OutscaleSASNodeDriver'),
    Provider.OUTSCALE_INC:
    ('libcloud.compute.drivers.ec2', 'OutscaleINCNodeDriver'),
    Provider.VSPHERE:
    ('libcloud.compute.drivers.vsphere', 'VSphereNodeDriver'),
    Provider.PROFIT_BRICKS:
    ('libcloud.compute.drivers.profitbricks', 'ProfitBricksNodeDriver'),
    Provider.VULTR:
    ('libcloud.compute.drivers.vultr', 'VultrNodeDriver'),
    Provider.AURORACOMPUTE:
    ('libcloud.compute.drivers.auroracompute', 'AuroraComputeNodeDriver'),
    Provider.CLOUDWATT:
    ('libcloud.compute.drivers.cloudwatt', 'CloudwattNodeDriver'),
    Provider.PACKET:
    ('libcloud.compute.drivers.packet', 'PacketNodeDriver'),
    Provider.ONAPP:
    ('libcloud.compute.drivers.onapp', 'OnAppNodeDriver'),
    Provider.RUNABOVE:
    ('libcloud.compute.drivers.runabove', 'RunAboveNodeDriver'),
    # Deprecated
    Provider.CLOUDSIGMA_US:
    ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaLvsNodeDriver'),
}
def get_driver(provider):
    """Return the NodeDriver class registered for `provider`.

    Deprecated first-generation Rackspace constants are rejected with an
    exception pointing at the replacement constant.
    """
    if provider not in DEPRECATED_RACKSPACE_PROVIDERS:
        return _get_provider_driver(DRIVERS, provider)
    # Build a reverse map so we can name both the removed constant and its
    # replacement in the error message.
    name_by_id = {}
    for attr_name, attr_value in Provider.__dict__.items():
        name_by_id[attr_value] = attr_name
    old_name = name_by_id[provider]
    new_name = name_by_id[OLD_CONSTANT_TO_NEW_MAPPING[provider]]
    url = 'http://s.apache.org/lc0140un'
    msg = ('Provider constant %s has been removed. New constant '
           'is now called %s.\n'
           'For more information on this change and how to modify your '
           'code to work with it, please visit: %s' %
           (old_name, new_name, url))
    raise Exception(msg)
def set_driver(provider, module, klass):
    """Register `klass` (importable from `module`) as the driver for `provider` in DRIVERS."""
    return _set_provider_driver(DRIVERS, provider, module, klass)
|
jimbobhickville/libcloud
|
libcloud/compute/providers.py
|
Python
|
apache-2.0
| 8,294
|
[
"VisIt"
] |
201415e1b2adf99d6a268b619b7ddf2041c12ff014d27dfd8c655815a3524339
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2013 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
import requests
import sys
import logging
logger = logging.getLogger('remindor_common')
from remindor_common.services.post import Post
class Boxcar():
    """Sends push notifications through the Boxcar HTTP API."""
    api_url = 'https://new.boxcar.io/api/notifications'

    def __init__(self, token):
        # Boxcar user-credentials token identifying the recipient.
        self.token = token

    def notify(self, title, message, callback = None, err_callback = None):
        """Fire-and-forget a notification via a background Post thread.

        NOTE(review): callback/err_callback are accepted but never invoked —
        presumably kept for interface parity with other services; confirm.
        """
        payload = {
            'user_credentials': self.token,
            'notification[title]': title,
            'notification[long_message]': message,
            'notification[source_name]': 'Remindor',
        }
        poster = Post(self.api_url, payload)
        poster.start()
|
bhdouglass/remindor-common
|
remindor_common/services/boxcar.py
|
Python
|
gpl-3.0
| 1,338
|
[
"Brian"
] |
e0d49494b852921b0304c7f2628fb9744c4f028a4e5a5007fa2e6faab414abce
|
"""
Recurrent layers.
TODO: write more documentation
"""
__docformat__ = 'restructedtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import copy
import theano
import theano.tensor as TT
# Nicer interface of scan
from theano.sandbox.scan import scan
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog import utils
from groundhog.utils import sample_weights, \
sample_weights_classic,\
sample_weights_orth, \
init_bias, \
constant_shape, \
sample_zeros
from basic import Layer
class RecurrentMultiLayer(Layer):
"""
Constructs a recurrent layer whose transition from h_tm1 to h_t is given
by an MLP or logistic regression. In our ICLR submission this is a
DT-RNN model.
"""
    def __init__(self,
                 rng,
                 n_hids=[500,500],
                 activation = [TT.tanh, TT.tanh],
                 scale=.01,
                 sparsity = -1,
                 activ_noise=0.,
                 weight_noise=False,
                 dropout = 1.,
                 init_fn='sample_weights',
                 bias_fn='init_bias',
                 bias_scale = 0.,
                 grad_scale = 1.,
                 profile = 0,
                 name=None):
        """
        :type rng: numpy random generator
        :param rng: numpy random generator

        :type n_hids: list of ints
        :param n_hids: Number of hidden units on each layer of the MLP

        :type activation: string/function or list of
        :param activation: Activation function for the embedding layers. If
            a list it needs to have a value for each layer. If not, the same
            activation will be applied to all layers

        :type scale: float or list of
        :param scale: depending on the initialization function, it can be
            the standard deviation of the Gaussian from which the weights
            are sampled or the largest singular value. If a single value it
            will be used for each layer, otherwise it has to have one value
            for each layer

        :type sparsity: int or list of
        :param sparsity: if a single value, it will be used for each layer,
            otherwise it has to be a list with as many values as layers. If
            negative, it means the weight matrix is dense. Otherwise it
            means this many randomly selected input units are connected to
            an output unit

        :type weight_noise: bool
        :param weight_noise: If true, the model is used with weight noise
            (and the right shared variable are constructed, to keep track of the
            noise)

        :type dropout: float
        :param dropout: the probability with which hidden units are dropped
            from the hidden layer. If set to 1, dropout is not used

        :type init_fn: string or function
        :param init_fn: function used to initialize the weights of the
            layer. We recommend using either `sample_weights_classic` or
            `sample_weights` defined in the utils

        :type bias_fn: string or function
        :param bias_fn: function used to initialize the biases. We recommend
            using `init_bias` defined in the utils

        :type bias_scale: float
        :param bias_scale: argument passed to `bias_fn`, depicting the scale
            of the initial bias

        :type grad_scale: float or theano scalar
        :param grad_scale: factor with which the gradients with respect to
            the parameters of this layer are scaled. It is used for
            differentiating between the different parameters of a model.

        :type name: string
        :param name: name of the layer (used to name parameters). NB: in
            this library names are very important because certain parts of the
            code relies on name to disambiguate between variables, therefore
            each layer should have a unique name.
        """
        self.grad_scale = grad_scale
        # Broadcast scalar hyper-parameters to one entry per layer.  Biases
        # exist only for layers 1..n-1, hence the (n_layers-1) lists.
        if type(n_hids) not in (list, tuple):
            n_hids = [n_hids]
        n_layers = len(n_hids)
        if type(scale) not in (list, tuple):
            scale = [scale] * n_layers
        if type(sparsity) not in (list, tuple):
            sparsity = [sparsity] * n_layers
        # Negative sparsity means "dense": connect all inputs to each unit.
        for idx, sp in enumerate(sparsity):
            if sp < 0: sparsity[idx] = n_hids[idx]
        if type(activation) not in (list, tuple):
            activation = [activation] * n_layers
        if type(bias_scale) not in (list, tuple):
            bias_scale = [bias_scale] * (n_layers-1)
        if type(bias_fn) not in (list, tuple):
            bias_fn = [bias_fn] * (n_layers-1)
        if type(init_fn) not in (list, tuple):
            init_fn = [init_fn] * n_layers
        # Resolve string specifications (e.g. "sample_weights", "TT.tanh")
        # to callables.  NOTE(review): this uses eval() against module
        # globals — only trusted configuration should reach this code.
        for dx in xrange(n_layers):
            if dx < n_layers-1:
                if type(bias_fn[dx]) is str or type(bias_fn[dx]) is unicode:
                    bias_fn[dx] = eval(bias_fn[dx])
            if type(init_fn[dx]) is str or type(init_fn[dx]) is unicode:
                init_fn[dx] = eval(init_fn[dx])
            if type(activation[dx]) is str or type(activation[dx]) is unicode:
                activation[dx] = eval(activation[dx])
        self.scale = scale
        self.n_layers = n_layers
        self.sparsity = sparsity
        self.activation = activation
        self.n_hids = n_hids
        self.bias_scale = bias_scale
        self.bias_fn = bias_fn
        self.init_fn = init_fn
        self.weight_noise = weight_noise
        self.activ_noise = activ_noise
        self.profile = profile
        self.dropout = dropout
        assert rng is not None, "random number generator should not be empty!"
        # The layer's nominal input size is n_hids[0], its output n_hids[-1].
        super(RecurrentMultiLayer, self).__init__(n_hids[0],
                                                  n_hids[-1],
                                                  rng,
                                                  name)
        self.trng = RandomStreams(self.rng.randint(int(1e6)))
        self.params = []
        self._init_params()
    def _init_params(self):
        """
        Allocate the per-layer recurrent weight matrices W_hhs, the biases
        b_hhs of layers 1..n-1, and (when weight_noise is enabled) matching
        zero-initialized noise shared variables.
        """
        self.W_hhs = []
        self.b_hhs = []
        for dx in xrange(self.n_layers):
            # (dx-1) % n_layers makes layer 0's fan-in the size of the *last*
            # layer, i.e. the recurrent connection h_tm1 -> h_t.
            W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
                                    self.n_hids[dx],
                                    self.sparsity[dx],
                                    self.scale[dx],
                                    rng=self.rng)
            self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
                                            (dx,self.name)))
            if dx > 0:
                # Layer 0 has no bias here; step_fprop adds state_below instead.
                self.b_hhs.append(theano.shared(
                    self.bias_fn[dx-1](self.n_hids[dx],
                                       self.bias_scale[dx-1],
                                       self.rng),
                    name='b%d_%s' %(dx, self.name)))
        self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]
        self.params_grad_scale = [self.grad_scale for x in self.params]
        if self.weight_noise:
            # Zero-valued shadows added onto the weights when use_noise=True.
            self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
            self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
            self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
                                          for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
init_state=None,
use_noise=True,
no_noise_bias=False):
"""
Constructs the computational graph of a single step of the recurrent
layer.
:type state_below: theano variable
:param state_below: the input to the layer
:type mask: None or theano variable
:param mask: mask describing the length of each sequence in a
minibatch
:type state_before: theano variable
:param state_before: the previous value of the hidden state of the
layer
:type use_noise: bool
:param use_noise: flag saying if weight noise should be used in
computing the output of this layer
:type no_noise_bias: bool
:param no_noise_bias: flag saying if weight noise should be added to
the bias as well
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
if not no_noise_bias:
b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hss)]
else:
b_hhs = self.b_hhs
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
preactiv = TT.dot(state_before, W_hhs[0]) +state_below
h = self.activation[0](preactiv)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval +=[h]
for dx in xrange(1, self.n_layers):
preactiv = TT.dot(h, W_hhs[dx]) + b_hhs[dx-1]
h = self.activation[dx](preactiv)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
def fprop(self,
state_below,
mask=None,
init_state=None,
n_steps=None,
batch_size=None,
use_noise=True,
truncate_gradient=-1,
no_noise_bias = False):
"""
Evaluates the forward through a recurrent layer
:type state_below: theano variable
:param state_below: the input of the recurrent layer
:type mask: None or theano variable
:param mask: mask describing the length of each sequence in a
minibatch
:type init_state: theano variable or None
:param init_state: initial state for the hidden layer
:type n_steps: None or int or theano scalar
:param n_steps: Number of steps the recurrent netowrk does
:type batch_size: int
:param batch_size: the size of the minibatch over which scan runs
:type use_noise: bool
:param use_noise: flag saying if weight noise should be used in
computing the output of this layer
:type truncate_gradient: int
:param truncate_gradient: If negative, no truncation is used,
otherwise truncated BPTT is used, where you go backwards only this
amount of steps
:type no_noise_bias: bool
:param no_noise_bias: flag saying if weight noise should be added to
the bias as well
"""
if theano.config.floatX=='float32':
floatX = numpy.float32
else:
floatX = numpy.float64
if n_steps is None:
n_steps = state_below.shape[0]
if batch_size and batch_size != 1:
n_steps = n_steps / batch_size
if batch_size is None and state_below.ndim == 3:
batch_size = state_below.shape[1]
if state_below.ndim == 2 and \
(not isinstance(batch_size,int) or batch_size > 1):
state_below = state_below.reshape((n_steps, batch_size, self.n_in))
if not init_state:
if not isinstance(batch_size, int) or batch_size != 1:
init_state = TT.alloc(floatX(0), batch_size, self.n_hids[0])
else:
init_state = TT.alloc(floatX(0), self.n_hids[0])
if mask:
inps = [state_below, mask]
fn = lambda x,y,z : self.step_fprop(x,y,None, z, use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
inps = [state_below]
fn = lambda tx, ty: self.step_fprop(tx, None, None, ty,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
if self.dropout < 1. and use_noise:
# build dropout mask outside scan
allhid = numpy.sum(self.n_hids)
shape = state_below.shape
if state_below.ndim == 3:
alldpmask = self.trng.binomial(
(n_steps, batch_size, allhid),
n = 1, p = self.dropout, dtype=state_below.dtype)
else:
alldpmask = self.trng.binomial(
(n_steps, allhid),
n = 1, p = self.dropout, dtype=state_below.dtype)
inps.append(alldpmask)
if mask:
fn = lambda x,y,z,u : self.step_fprop(x,y,z,u,use_noise=use_noise)
else:
fn = lambda tx, ty, tu: self.step_fprop(tx,None,ty,tu,
use_noise=use_noise)
rval, updates = theano.scan(fn,
sequences = inps,
outputs_info = [None]*(self.n_layers-1) +
[init_state],
name='layer_%s'%self.name,
profile=self.profile,
truncate_gradient = truncate_gradient,
n_steps = n_steps)
if not isinstance(rval,(list, tuple)):
rval = [rval]
new_h = rval[-1]
self.out = rval[-1]
self.rval = rval
self.updates =updates
return self.out
class RecurrentMultiLayerInp(RecurrentMultiLayer):
    """
    Similar to the RecurrentMultiLayer, with the exception that the input is
    fed into the top layer of the MLP (rather than being an input to the
    MLP).
    """
    def _init_params(self):
        """
        Allocate the shared parameters of the layer: one weight matrix per
        MLP layer (`W_hhs`) and one bias per layer except the last
        (`b_hhs`) -- the top layer receives the external input instead of a
        bias.
        """
        self.W_hhs = []
        self.b_hhs = []
        for dx in xrange(self.n_layers):
            # Layer dx is fed from layer dx-1; for dx == 0 this wraps
            # around to the top layer of the previous time step.
            W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
                                    self.n_hids[dx],
                                    self.sparsity[dx],
                                    self.scale[dx],
                                    rng=self.rng)
            self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
                                            (dx,self.name)))
            if dx < self.n_layers-1:
                self.b_hhs.append(theano.shared(
                    self.bias_fn[dx](self.n_hids[dx],
                                     self.bias_scale[dx],
                                     self.rng),
                    name='b%d_%s' %(dx, self.name)))
        self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]
        self.params_grad_scale = [self.grad_scale for x in self.params]
        self.restricted_params = [x for x in self.params]
        if self.weight_noise:
            # Zero-initialized shadow variables holding the weight noise.
            self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
            self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
            self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
                                          for x in self.noise_params]

    def step_fprop(self,
                   state_below,
                   mask=None,
                   dpmask=None,
                   state_before=None,
                   no_noise_bias=False,
                   use_noise=True):
        """
        See parent class
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            # BUGFIX: was `self.nW_hss`, an attribute that does not exist;
            # the noise weights are stored as `self.nW_hhs` (see
            # _init_params above).
            W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
            if not no_noise_bias:
                b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hhs)]
            else:
                b_hhs = self.b_hhs
        else:
            W_hhs = self.W_hhs
            b_hhs = self.b_hhs
        # Bottom layer: only the previous state feeds it (the external
        # input enters at the *top* layer in this variant).
        h = self.activation[0](TT.dot(state_before,
                                      W_hhs[0])+b_hhs[0])
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                # `dpidx` walks over the per-layer slices of `dpmask`.
                if h.ndim == 2:
                    h = h * dpmask[:, :h.shape[1]]
                    dpidx = h.shape[1]
                else:
                    h = h * dpmask[:h.shape[0]]
                    dpidx = h.shape[0]
            else:
                h = h * self.dropout
        rval += [h]
        for dx in xrange(1, self.n_layers-1):
            h = self.activation[dx](TT.dot(h,
                                           W_hhs[dx])+b_hhs[dx])
            if self.activ_noise and use_noise:
                h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
            if self.dropout < 1.:
                if use_noise:
                    if h.ndim == 2:
                        h = h * dpmask[:, dpidx:dpidx+h.shape[1]]
                        dpidx = dpidx + h.shape[1]
                    else:
                        h = h * dpmask[dpidx:dpidx+h.shape[0]]
                        dpidx = dpidx + h.shape[0]
                else:
                    h = h * self.dropout
            rval += [h]
        # Top layer: the external input replaces the bias here.
        h = self.activation[-1](TT.dot(h, W_hhs[-1]) + state_below)
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                if h.ndim == 2:
                    h = h * dpmask[:, dpidx:dpidx+h.shape[1]]
                    dpidx = dpidx + h.shape[1]
                else:
                    h = h * dpmask[dpidx:dpidx+h.shape[0]]
                    dpidx = dpidx + h.shape[0]
            else:
                h = h * self.dropout
        rval += [h]
        if mask is not None:
            if h.ndim == 2 and mask.ndim == 1:
                mask = mask.dimshuffle(0, 'x')
            h = mask * h + (1-mask) * state_before
        rval[-1] = h
        return rval
class RecurrentMultiLayerShortPath(RecurrentMultiLayer):
    """
    A similar layer to RecurrentMultiLayer (the DT-RNN), with the difference
    that we have shortcut connections in the MLP representing the transition
    from previous hidden state to the next
    """
    def _init_params(self):
        # W_hhs[dx]     : weight from layer dx-1 into layer dx of the
        #                 transition MLP (dx == 0 wraps around to the top
        #                 layer of the previous time step)
        # W_shortp[dx-1]: shortcut weight from the previous hidden state
        #                 straight into layer dx (only for dx > 0)
        # b_hhs[dx-1]   : bias of layer dx (layer 0 has no bias; it gets
        #                 the external input instead)
        self.W_hhs = []
        self.b_hhs = []
        self.W_shortp = []
        for dx in xrange(self.n_layers):
            W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
                                self.n_hids[dx],
                                self.sparsity[dx],
                                self.scale[dx],
                                rng=self.rng)
            self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
                                       (dx,self.name)))
            if dx > 0:
                W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
                                     self.n_hids[dx],
                                     self.sparsity[dx],
                                     self.scale[dx],
                                     rng=self.rng)
                self.W_shortp.append(theano.shared(value=W_shp,
                                               name='W_s%d_%s'%(dx,self.name)))
                self.b_hhs.append(theano.shared(
                    self.bias_fn[dx-1](self.n_hids[dx],
                                       self.bias_scale[dx-1],
                                       self.rng),
                    name='b%d_%s' %(dx, self.name)))
        self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\
                [x for x in self.W_shortp]
        self.params_grad_scale = [self.grad_scale for x in self.params]
        self.restricted_params = [x for x in self.params]
        if self.weight_noise:
            # Zero-initialized shadow variables holding the weight noise.
            self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
            self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
            self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
            self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs] + [x for x in self.nW_shortp]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
    def step_fprop(self,
                   state_below,
                   mask=None,
                   dpmask=None,
                   state_before=None,
                   no_noise_bias=False,
                   use_noise=True):
        """
        See parent class

        Computes one step of the deep transition with shortcut connections:
        every layer dx > 0 additionally receives the previous hidden state
        through W_shp[dx-1].
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
            if not no_noise_bias:
                b_hhs = [(x+y) for x, y in zip(self.b_hhs,self.nb_hhs)]
            else:
                b_hhs = self.b_hhs
            W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]
        else:
            W_hhs = self.W_hhs
            b_hhs = self.b_hhs
            W_shp = self.W_shortp
        # Bottom layer: the external input plays the role of the bias.
        h = self.activation[0](TT.dot(state_before,
                                      W_hhs[0])+state_below)
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                # `dpidx` walks over the per-layer slices of `dpmask`.
                if h.ndim == 2:
                    h = h * dpmask[:,:h.shape[1]]
                    dpidx = h.shape[1]
                else:
                    h = h * dpmask[:h.shape[0]]
                    dpidx = h.shape[0]
            else:
                # At evaluation time scale activations instead of masking.
                h = h * self.dropout
        rval += [h]
        for dx in xrange(1, self.n_layers):
            # Shortcut: previous hidden state also feeds this layer.
            h = self.activation[dx](TT.dot(h,
                                           W_hhs[dx])+
                                    TT.dot(state_before,
                                           W_shp[dx-1])+b_hhs[dx-1])
            if self.activ_noise and use_noise:
                h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
            if self.dropout < 1.:
                if use_noise:
                    if h.ndim == 2:
                        h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
                        dpidx = dpidx + h.shape[1]
                    else:
                        h = h * dpmask[dpidx:dpidx+h.shape[0]]
                        dpidx = dpidx + h.shape[0]
                else:
                    h = h * self.dropout
            rval += [h]
        if mask is not None:
            if h.ndim ==2 and mask.ndim==1:
                mask = mask.dimshuffle(0,'x')
            # For already-finished sequences keep the previous state.
            h = mask * h + (1-mask) * state_before
        rval[-1] = h
        return rval
class RecurrentMultiLayerShortPathInp(RecurrentMultiLayer):
    """
    Similar to the RecurrentMultiLayerShortPath class, just that the input
    is fed into the last layer of the MLP (similar to
    RecurrentMultiLayerInp).
    """
    def _init_params(self):
        """
        Allocate the shared parameters: per-layer transition weights
        (`W_hhs`), shortcut weights from the previous hidden state into
        each layer dx > 0 (`W_shortp`), and biases for all layers but the
        last (`b_hhs`) -- the top layer receives the external input
        instead of a bias.
        """
        self.W_hhs = []
        self.b_hhs = []
        self.W_shortp = []
        for dx in xrange(self.n_layers):
            W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
                                    self.n_hids[dx],
                                    self.sparsity[dx],
                                    self.scale[dx],
                                    rng=self.rng)
            self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
                                            (dx,self.name)))
            if dx > 0:
                W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
                                         self.n_hids[dx],
                                         self.sparsity[dx],
                                         self.scale[dx],
                                         rng=self.rng)
                self.W_shortp.append(theano.shared(value=W_shp,
                                                   name='W_s%d_%s'%(dx,self.name)))
            if dx < self.n_layers-1:
                self.b_hhs.append(theano.shared(
                    self.bias_fn[dx](self.n_hids[dx],
                                     self.bias_scale[dx],
                                     self.rng),
                    name='b%d_%s' %(dx, self.name)))
        self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\
                [x for x in self.W_shortp]
        self.restricted_params = [x for x in self.params]
        self.params_grad_scale = [self.grad_scale for x in self.params]
        if self.weight_noise:
            # Zero-initialized shadow variables holding the weight noise.
            self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
            self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
            self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
            self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs] + [x for x in self.nW_shortp]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]

    def step_fprop(self,
                   state_below,
                   mask=None,
                   dpmask=None,
                   state_before=None,
                   no_noise_bias=False,
                   use_noise=True):
        """
        See parent class
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
            if not no_noise_bias:
                b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hhs)]
            else:
                b_hhs = self.b_hhs
            W_shp = [(x+y) for x, y in zip(self.W_shortp, self.nW_shortp)]
        else:
            W_hhs = self.W_hhs
            b_hhs = self.b_hhs
            W_shp = self.W_shortp
        # Bottom layer: fed only by the previous hidden state (the external
        # input enters at the top layer in this variant).
        h = self.activation[0](TT.dot(state_before,
                                      W_hhs[0])+b_hhs[0])
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                # `dpidx` walks over the per-layer slices of `dpmask`.
                if h.ndim == 2:
                    h = h * dpmask[:, :h.shape[1]]
                    dpidx = h.shape[1]
                else:
                    h = h * dpmask[:h.shape[0]]
                    dpidx = h.shape[0]
            else:
                h = h * self.dropout
        rval += [h]
        for dx in xrange(1, self.n_layers-1):
            h = self.activation[dx](TT.dot(h,
                                           W_hhs[dx])+
                                    TT.dot(state_before,
                                           W_shp[dx-1])+b_hhs[dx])
            if self.activ_noise and use_noise:
                h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
            if self.dropout < 1.:
                if use_noise:
                    if h.ndim == 2:
                        h = h * dpmask[:, dpidx:dpidx+h.shape[1]]
                        dpidx = dpidx + h.shape[1]
                    else:
                        h = h * dpmask[dpidx:dpidx+h.shape[0]]
                        dpidx = dpidx + h.shape[0]
                else:
                    h = h * self.dropout
            rval += [h]
        # Top layer: shortcut from the previous state plus the external
        # input (which replaces the bias).
        h = self.activation[-1](TT.dot(h, W_hhs[-1]) +
                                TT.dot(state_before, W_shp[-1])+state_below)
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                # BUGFIX: this branch used to re-apply the *first* slice of
                # dpmask (`dpmask[:,:h.shape[1]]`), reusing layer 0's mask
                # for the top layer; continue at `dpidx` like the sibling
                # classes do so each layer consumes its own slice.
                if h.ndim == 2:
                    h = h * dpmask[:, dpidx:dpidx+h.shape[1]]
                    dpidx = dpidx + h.shape[1]
                else:
                    h = h * dpmask[dpidx:dpidx+h.shape[0]]
                    dpidx = dpidx + h.shape[0]
            else:
                h = h * self.dropout
        rval += [h]
        if mask is not None:
            if h.ndim == 2 and mask.ndim == 1:
                mask = mask.dimshuffle(0, 'x')
            h = mask * h + (1-mask) * state_before
        # BUGFIX: was `rval += [h]`, which returned n_layers+1 outputs and
        # broke the contract with scan's `outputs_info` in the parent
        # `fprop` (exactly one output per layer); overwrite the last entry
        # instead, as every sibling class does.
        rval[-1] = h
        return rval
class RecurrentMultiLayerShortPathInpAll(RecurrentMultiLayer):
    """
    Similar to RecurrentMultiLayerShortPathInp class, just that the input is
    fed to all layers of the MLP depicting the deep transition between h_tm1
    to h_t.
    """
    def _init_params(self):
        # No biases in this variant: every layer of the transition MLP
        # receives its own slice of the (pre-projected) input instead.
        self.W_hhs = []
        self.W_shortp = []
        for dx in xrange(self.n_layers):
            W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
                                self.n_hids[dx],
                                self.sparsity[dx],
                                self.scale[dx],
                                rng=self.rng)
            self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
                                       (dx,self.name)))
            if dx > 0:
                # Shortcut weight from the previous hidden state into
                # layer dx.
                W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
                                     self.n_hids[dx],
                                     self.sparsity[dx],
                                     self.scale[dx],
                                     rng=self.rng)
                self.W_shortp.append(theano.shared(value=W_shp,
                                               name='W_s%d_%s'%(dx,self.name)))
        self.params = [x for x in self.W_hhs] +\
                [x for x in self.W_shortp]
        self.params_grad_scale = [self.grad_scale for x in self.params]
        self.restricted_params = [x for x in self.params]
        if self.weight_noise:
            # Zero-initialized shadow variables holding the weight noise.
            self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
            self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
            self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nW_shortp]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
    def step_fprop(self,
                   state_below,
                   mask=None,
                   dpmask=None,
                   state_before=None,
                   no_noise_bias=False,
                   use_noise=True):
        """
        See parent class

        `state_below` is expected to concatenate one slice of width
        self.n_hids[k] per layer k; `slice_state_below` extracts the slice
        that feeds a given layer.
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
            W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]
        else:
            W_hhs = self.W_hhs
            W_shp = self.W_shortp
        def slice_state_below(dx, sb = state_below):
            # Return the slice of `sb` belonging to layer dx: columns
            # [sum(n_hids[:dx]), sum(n_hids[:dx]) + n_hids[dx]).
            st = 0
            for p in xrange(dx):
                st += self.n_hids[p]
            ed = st + self.n_hids[dx]
            if sb.ndim == 1:
                return sb[st:ed]
            else:
                return sb[:,st:ed]
        h = self.activation[0](TT.dot(state_before, W_hhs[0]) + slice_state_below(0))
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                # `dpidx` walks over the per-layer slices of `dpmask`.
                if h.ndim == 2:
                    h = h * dpmask[:,:h.shape[1]]
                    dpidx = h.shape[1]
                else:
                    h = h * dpmask[:h.shape[0]]
                    dpidx = h.shape[0]
            else:
                # At evaluation time scale activations instead of masking.
                h = h * self.dropout
        rval += [h]
        for dx in xrange(1, self.n_layers):
            # Each layer gets: previous layer, shortcut from the previous
            # hidden state, and its own slice of the input.
            h = self.activation[dx](TT.dot(h, W_hhs[dx]) +
                                    TT.dot(state_before, W_shp[dx-1]) +
                                    slice_state_below(dx))
            if self.activ_noise and use_noise:
                h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
            if self.dropout < 1.:
                if use_noise:
                    if h.ndim == 2:
                        h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
                        dpidx = dpidx + h.shape[1]
                    else:
                        h = h * dpmask[dpidx:dpidx+h.shape[0]]
                        dpidx = dpidx + h.shape[0]
                else:
                    h = h * self.dropout
            rval += [h]
        if mask is not None:
            if h.ndim ==2 and mask.ndim==1:
                mask = mask.dimshuffle(0,'x')
            # For already-finished sequences keep the previous state.
            h = mask * h + (1-mask) * state_before
        rval[-1] = h
        return rval
class RecurrentLayer(Layer):
    """
    Standard recurrent layer with gates.
    See arXiv verion of our paper.
    """
    def __init__(self, rng,
                 n_hids=500,
                 scale=.01,
                 sparsity = -1,
                 activation = TT.tanh,
                 activ_noise=0.,
                 weight_noise=False,
                 bias_fn='init_bias',
                 bias_scale = 0.,
                 dropout = 1.,
                 init_fn='sample_weights',
                 kind_reg = None,
                 grad_scale = 1.,
                 profile = 0,
                 gating = False,
                 reseting = False,
                 gater_activation = TT.nnet.sigmoid,
                 reseter_activation = TT.nnet.sigmoid,
                 name=None):
        """
        :type rng: numpy random generator
        :param rng: numpy random generator

        :type n_in: int
        :param n_in: number of inputs units

        :type n_hids: int
        :param n_hids: Number of hidden units on each layer of the MLP

        :type activation: string/function or list of
        :param activation: Activation function for the embedding layers. If
            a list it needs to have a value for each layer. If not, the same
            activation will be applied to all layers

        :type scale: float or list of
        :param scale: depending on the initialization function, it can be
            the standard deviation of the Gaussian from which the weights
            are sampled or the largest singular value. If a single value it
            will be used for each layer, otherwise it has to have one value
            for each layer

        :type sparsity: int or list of
        :param sparsity: if a single value, it will be used for each layer,
            otherwise it has to be a list with as many values as layers. If
            negative, it means the weight matrix is dense. Otherwise it
            means this many randomly selected input units are connected to
            an output unit

        :type weight_noise: bool
        :param weight_noise: If true, the model is used with weight noise
            (and the right shared variable are constructed, to keep track of the
            noise)

        :type dropout: float
        :param dropout: the probability with which hidden units are dropped
            from the hidden layer. If set to 1, dropout is not used

        :type init_fn: string or function
        :param init_fn: function used to initialize the weights of the
            layer. We recommend using either `sample_weights_classic` or
            `sample_weights` defined in the utils

        :type bias_fn: string or function
        :param bias_fn: function used to initialize the biases. We recommend
            using `init_bias` defined in the utils

        :type bias_scale: float
        :param bias_scale: argument passed to `bias_fn`, depicting the scale
            of the initial bias

        :type grad_scale: float or theano scalar
        :param grad_scale: factor with which the gradients with respect to
            the parameters of this layer are scaled. It is used for
            differentiating between the different parameters of a model.

        :type gating: bool
        :param gating: If true, an update gate is used

        :type reseting: bool
        :param reseting: If true, a reset gate is used

        :type gater_activation: string or function
        :param gater_activation: The activation function of the update gate

        :type reseter_activation: string or function
        :param reseter_activation: The activation function of the reset gate

        :type name: string
        :param name: name of the layer (used to name parameters). NB: in
            this library names are very important because certain parts of the
            code relies on name to disambiguate between variables, therefore
            each layer should have a unique name.
        """
        self.grad_scale = grad_scale
        # String arguments are resolved to the callables of the same name
        # in the current namespace (see `init_fn`/`bias_fn` docs above).
        if type(init_fn) is str or type(init_fn) is unicode:
            init_fn = eval(init_fn)
        if type(bias_fn) is str or type(bias_fn) is unicode:
            bias_fn = eval(bias_fn)
        if type(activation) is str or type(activation) is unicode:
            activation = eval(activation)
        if type(gater_activation) is str or type(gater_activation) is unicode:
            gater_activation = eval(gater_activation)
        if type(reseter_activation) is str or type(reseter_activation) is unicode:
            reseter_activation = eval(reseter_activation)
        self.scale = scale
        self.sparsity = sparsity
        self.activation = activation
        self.n_hids = n_hids
        self.bias_scale = bias_scale
        self.bias_fn = bias_fn
        self.init_fn = init_fn
        self.weight_noise = weight_noise
        self.activ_noise = activ_noise
        self.profile = profile
        self.dropout = dropout
        self.gating = gating
        self.reseting = reseting
        self.gater_activation = gater_activation
        self.reseter_activation = reseter_activation
        assert rng is not None, "random number generator should not be empty!"
        super(RecurrentLayer, self).__init__(self.n_hids,
                self.n_hids, rng, name)
        self.trng = RandomStreams(self.rng.randint(int(1e6)))
        self.params = []
        self._init_params()

    def _init_params(self):
        # W_hh: hidden-to-hidden transition; G_hh/R_hh: update/reset gate
        # weights, allocated only when gating/reseting is enabled.
        self.W_hh = theano.shared(
                self.init_fn(self.n_hids,
                self.n_hids,
                self.sparsity,
                self.scale,
                rng=self.rng),
                name="W_%s"%self.name)
        self.params = [self.W_hh]
        if self.gating:
            self.G_hh = theano.shared(
                    self.init_fn(self.n_hids,
                    self.n_hids,
                    self.sparsity,
                    self.scale,
                    rng=self.rng),
                    name="G_%s"%self.name)
            self.params.append(self.G_hh)
        if self.reseting:
            self.R_hh = theano.shared(
                    self.init_fn(self.n_hids,
                    self.n_hids,
                    self.sparsity,
                    self.scale,
                    rng=self.rng),
                    name="R_%s"%self.name)
            self.params.append(self.R_hh)
        self.params_grad_scale = [self.grad_scale for x in self.params]
        self.restricted_params = [x for x in self.params]
        if self.weight_noise:
            # Zero-initialized shadow variables holding the weight noise.
            self.nW_hh = theano.shared(self.W_hh.get_value()*0, name='noise_'+self.W_hh.name)
            self.noise_params = [self.nW_hh]
            if self.gating:
                self.nG_hh = theano.shared(self.G_hh.get_value()*0, name='noise_'+self.G_hh.name)
                self.noise_params += [self.nG_hh]
            if self.reseting:
                self.nR_hh = theano.shared(self.R_hh.get_value()*0, name='noise_'+self.R_hh.name)
                self.noise_params += [self.nR_hh]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
                    for x in self.noise_params]

    def step_fprop(self,
                   state_below,
                   mask = None,
                   state_before = None,
                   gater_below = None,
                   reseter_below = None,
                   use_noise=True,
                   no_noise_bias = False):
        """
        Constructs the computational graph of this layer.

        :type state_below: theano variable
        :param state_below: the input to the layer

        :type mask: None or theano variable
        :param mask: mask describing the length of each sequence in a
            minibatch

        :type state_before: theano variable
        :param state_before: the previous value of the hidden state of the
            layer

        :type gater_below: theano variable
        :param gater_below: the input to the update gate

        :type reseter_below: theano variable
        :param reseter_below: the input to the reset gate

        :type use_noise: bool
        :param use_noise: flag saying if weight noise should be used in
            computing the output of this layer

        :type no_noise_bias: bool
        :param no_noise_bias: flag saying if weight noise should be added to
            the bias as well
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            W_hh = self.W_hh + self.nW_hh
            if self.gating:
                G_hh = self.G_hh + self.nG_hh
            if self.reseting:
                R_hh = self.R_hh + self.nR_hh
        else:
            W_hh = self.W_hh
            if self.gating:
                G_hh = self.G_hh
            if self.reseting:
                R_hh = self.R_hh

        # Reset gate:
        # optionally reset the hidden state.
        if self.reseting and reseter_below:
            reseter = self.reseter_activation(TT.dot(state_before, R_hh) +
                    reseter_below)
            reseted_state_before = reseter * state_before
        else:
            reseted_state_before = state_before

        # Feed the input to obtain potential new state.
        preactiv = TT.dot(reseted_state_before, W_hh) + state_below
        h = self.activation(preactiv)

        # Update gate:
        # optionally reject the potential new state and use the new one.
        if self.gating and gater_below:
            gater = self.gater_activation(TT.dot(state_before, G_hh) +
                    gater_below)
            h = gater * h + (1-gater) * state_before

        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if mask is not None:
            if h.ndim ==2 and mask.ndim==1:
                mask = mask.dimshuffle(0,'x')
            # For already-finished sequences keep the previous state.
            h = mask * h + (1-mask) * state_before
        return h

    def fprop(self,
              state_below,
              mask=None,
              init_state=None,
              gater_below=None,
              reseter_below=None,
              nsteps=None,
              batch_size=None,
              use_noise=True,
              truncate_gradient=-1,
              no_noise_bias = False
             ):
        # Unrolls step_fprop over time with theano.scan; see step_fprop for
        # the meaning of the arguments.
        if theano.config.floatX=='float32':
            floatX = numpy.float32
        else:
            floatX = numpy.float64
        if nsteps is None:
            nsteps = state_below.shape[0]
            if batch_size and batch_size != 1:
                nsteps = nsteps / batch_size
        if batch_size is None and state_below.ndim == 3:
            batch_size = state_below.shape[1]
        if state_below.ndim == 2 and \
           (not isinstance(batch_size,int) or batch_size > 1):
            # Flattened (time*batch, dim) inputs: restore the time axis.
            state_below = state_below.reshape((nsteps, batch_size, self.n_in))
            if gater_below:
                gater_below = gater_below.reshape((nsteps, batch_size, self.n_in))
            if reseter_below:
                reseter_below = reseter_below.reshape((nsteps, batch_size, self.n_in))

        if not init_state:
            if not isinstance(batch_size, int) or batch_size != 1:
                init_state = TT.alloc(floatX(0), batch_size, self.n_hids)
            else:
                init_state = TT.alloc(floatX(0), self.n_hids)

        # FIXME: Find a way to clean this up
        # The four-way branch below only selects which optional sequences
        # (mask / gater / reseter) are threaded through scan.
        if self.reseting and reseter_below:
            if self.gating and gater_below:
                if mask:
                    inps = [state_below, mask, gater_below, reseter_below]
                    fn = lambda x,y,g,r,z : self.step_fprop(x,y,z, gater_below=g, reseter_below=r, use_noise=use_noise,
                                                       no_noise_bias=no_noise_bias)
                else:
                    inps = [state_below, gater_below, reseter_below]
                    fn = lambda tx, tg,tr, ty: self.step_fprop(tx, None, ty, gater_below=tg,
                                                        reseter_below=tr,
                                                        use_noise=use_noise,
                                                        no_noise_bias=no_noise_bias)
            else:
                if mask:
                    inps = [state_below, mask, reseter_below]
                    fn = lambda x,y,r,z : self.step_fprop(x,y,z, use_noise=use_noise,
                                                        reseter_below=r,
                                                       no_noise_bias=no_noise_bias)
                else:
                    inps = [state_below, reseter_below]
                    fn = lambda tx,tr,ty: self.step_fprop(tx, None, ty,
                                                        reseter_below=tr,
                                                        use_noise=use_noise,
                                                        no_noise_bias=no_noise_bias)
        else:
            if self.gating and gater_below:
                if mask:
                    inps = [state_below, mask, gater_below]
                    fn = lambda x,y,g,z : self.step_fprop(x,y,z, gater_below=g, use_noise=use_noise,
                                                       no_noise_bias=no_noise_bias)
                else:
                    inps = [state_below, gater_below]
                    fn = lambda tx, tg, ty: self.step_fprop(tx, None, ty, gater_below=tg,
                                                        use_noise=use_noise,
                                                        no_noise_bias=no_noise_bias)
            else:
                if mask:
                    inps = [state_below, mask]
                    fn = lambda x,y,z : self.step_fprop(x,y,z, use_noise=use_noise,
                                                       no_noise_bias=no_noise_bias)
                else:
                    inps = [state_below]
                    fn = lambda tx, ty: self.step_fprop(tx, None, ty,
                                                        use_noise=use_noise,
                                                        no_noise_bias=no_noise_bias)

        rval, updates = theano.scan(fn,
                        sequences = inps,
                        outputs_info = [init_state],
                        name='layer_%s'%self.name,
                        profile=self.profile,
                        truncate_gradient = truncate_gradient,
                        n_steps = nsteps)
        new_h = rval
        self.out = rval
        self.rval = rval
        self.updates =updates

        return self.out
class LSTMLayer(Layer):
"""
Standard LSTM Layer
"""
    def __init__(self, rng,
                 n_hids=500,
                 scale=.01,
                 sparsity = -1,
                 activation = TT.tanh,
                 activ_noise=0.,
                 weight_noise=False,
                 bias_fn='init_bias',
                 bias_scale = 0.,
                 dropout = 1.,
                 init_fn='sample_weights',
                 kind_reg = None,
                 grad_scale = 1.,
                 profile = 0,
                 name=None,
                 **kwargs):
        """
        :type rng: numpy random generator
        :param rng: numpy random generator

        :type n_in: int
        :param n_in: number of inputs units

        :type n_hids: int
        :param n_hids: Number of hidden units on each layer of the MLP

        :type activation: string/function or list of
        :param activation: Activation function for the embedding layers. If
            a list it needs to have a value for each layer. If not, the same
            activation will be applied to all layers

        :type scale: float or list of
        :param scale: depending on the initialization function, it can be
            the standard deviation of the Gaussian from which the weights
            are sampled or the largest singular value. If a single value it
            will be used for each layer, otherwise it has to have one value
            for each layer

        :type sparsity: int or list of
        :param sparsity: if a single value, it will be used for each layer,
            otherwise it has to be a list with as many values as layers. If
            negative, it means the weight matrix is dense. Otherwise it
            means this many randomly selected input units are connected to
            an output unit

        :type weight_noise: bool
        :param weight_noise: If true, the model is used with weight noise
            (and the right shared variable are constructed, to keep track of the
            noise)

        :type dropout: float
        :param dropout: the probability with which hidden units are dropped
            from the hidden layer. If set to 1, dropout is not used

        :type init_fn: string or function
        :param init_fn: function used to initialize the weights of the
            layer. We recommend using either `sample_weights_classic` or
            `sample_weights` defined in the utils

        :type bias_fn: string or function
        :param bias_fn: function used to initialize the biases. We recommend
            using `init_bias` defined in the utils

        :type bias_scale: float
        :param bias_scale: argument passed to `bias_fn`, depicting the scale
            of the initial bias

        :type grad_scale: float or theano scalar
        :param grad_scale: factor with which the gradients with respect to
            the parameters of this layer are scaled. It is used for
            differentiating between the different parameters of a model.

        :type name: string
        :param name: name of the layer (used to name parameters). NB: in
            this library names are very important because certain parts of the
            code relies on name to disambiguate between variables, therefore
            each layer should have a unique name.

        Additional keyword arguments (``**kwargs``) are accepted for
        interface compatibility with the other layers and ignored.
        """
        self.grad_scale = grad_scale
        # String arguments are resolved to the callables of the same name
        # in the current namespace.
        if type(init_fn) is str or type(init_fn) is unicode:
            init_fn = eval(init_fn)
        if type(bias_fn) is str or type(bias_fn) is unicode:
            bias_fn = eval(bias_fn)
        if type(activation) is str or type(activation) is unicode:
            activation = eval(activation)
        self.scale = scale
        self.sparsity = sparsity
        self.activation = activation
        self.n_hids = n_hids
        self.bias_scale = bias_scale
        self.bias_fn = bias_fn
        self.init_fn = init_fn
        self.weight_noise = weight_noise
        self.activ_noise = activ_noise
        self.profile = profile
        self.dropout = dropout
        assert rng is not None, "random number generator should not be empty!"
        super(LSTMLayer, self).__init__(self.n_hids,
                self.n_hids, rng, name)
        self.trng = RandomStreams(self.rng.randint(int(1e6)))
        self.params = []
        self._init_params()
def _init_params(self):
self.W_hi = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Whi_%s"%self.name)
self.params = [self.W_hi]
self.W_ci = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wci_%s"%self.name)
self.params += [self.W_ci]
self.W_hf = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Whf_%s"%self.name)
self.params += [self.W_hf]
self.W_cf = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wcf_%s"%self.name)
self.params += [self.W_cf]
self.W_hc = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wcf_%s"%self.name)
self.params += [self.W_hc]
self.W_ho = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wcf_%s"%self.name)
self.params += [self.W_ho]
self.W_co = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wcf_%s"%self.name)
self.params += [self.W_co]
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.noise_params = [theano.shared(p.get_value()*0, name='noise_'+p.name) for p in self.params]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
def _get_slice_below(self, state_below, to='cell'):
if to == 'cell':
offset = 0
elif to == 'input':
offset = 1 * self.n_hids
elif to == 'output':
offset = 2 * self.n_hids
elif to == 'forget':
offset = 3 * self.n_hids
else:
raise Warning('Unknown gate/cell types')
if state_below.ndim == 3:
return state_below[:,:,offset:offset+self.n_hids]
if state_below.ndim == 2:
return state_below[:,offset:offset+self.n_hids]
return state_below[offset:offset+self.n_hids]
def _get_slice_before(self, state_before, fr='cell'):
if fr == 'cell':
offset = self.n_hids
elif fr == 'hidden':
offset = 0
else:
raise Warning('Unknown cell/gate types')
if state_before.ndim == 2:
return state_before[:,offset:offset+self.n_hids]
return state_before[offset:offset+self.n_hids]
    def step_fprop(self,
                   state_below,
                   mask = None,
                   state_before = None,
                   use_noise=True,
                   no_noise_bias = False,
                   **kwargs):
        """
        Constructs the computational graph of this layer.

        One LSTM step.  ``state_below`` carries the pre-projected input for
        all four units (cell/input/output/forget), ``state_before`` carries
        the previous hidden and cell state concatenated as [hidden, cell],
        and the return value has the same [hidden, cell] layout.

        :type state_below: theano variable
        :param state_below: the input to the layer
        :type mask: None or theano variable
        :param mask: mask describing the length of each sequence in a
            minibatch
        :type state_before: theano variable
        :param state_before: the previous value of the hidden state of the
            layer
        :type use_noise: bool
        :param use_noise: flag saying if weight noise should be used in
            computing the output of this layer
        :type no_noise_bias: bool
        :param no_noise_bias: flag saying if weight noise should be added to
            the bias as well
        """
        # NOTE(review): rval is assigned but never used below.
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            # Perturb every recurrent matrix with its noise counterpart.
            # NOTE(review): the nW_* attributes are presumably aliases of
            # self.noise_params set up elsewhere in the class -- confirm.
            W_hi = self.W_hi + self.nW_hi
            W_ci = self.W_ci + self.nW_ci
            W_hf = self.W_hf + self.nW_hf
            W_cf = self.W_cf + self.nW_cf
            W_hc = self.W_hc + self.nW_hc
            W_ho = self.W_ho + self.nW_ho
            W_co = self.W_co + self.nW_co
        else:
            # Noise-free path: use the shared weights directly.
            W_hi = self.W_hi
            W_ci = self.W_ci
            W_hf = self.W_hf
            W_cf = self.W_cf
            W_hc = self.W_hc
            W_ho = self.W_ho
            W_co = self.W_co

        # input gate: sigmoid of projected input + recurrent + peephole terms
        ig = TT.nnet.sigmoid(self._get_slice_below(state_below,'input') +
                             TT.dot(self._get_slice_before(state_before,'hidden'), W_hi) +
                             TT.dot(self._get_slice_before(state_before,'cell'), W_ci))
        # forget gate
        fg = TT.nnet.sigmoid(self._get_slice_below(state_below,'forget') +
                             TT.dot(self._get_slice_before(state_before,'hidden'), W_hf) +
                             TT.dot(self._get_slice_before(state_before,'cell'), W_cf))
        # cell: keep a forget-gated fraction of the old cell, add the
        # input-gated candidate value
        cc = fg * self._get_slice_before(state_before,'cell') + \
             ig * self.activation(self._get_slice_below(state_below,'cell') +
                                  TT.dot(self._get_slice_before(state_before,'hidden'), W_hc))
        # output gate: note it peeks at the *new* cell value cc
        og = TT.nnet.sigmoid(self._get_slice_below(state_below,'output') +
                             TT.dot(self._get_slice_before(state_before,'hidden'), W_ho) +
                             TT.dot(cc, W_co))
        # hidden state
        hh = og * self.activation(cc)
        # Re-pack [hidden, cell] so the next step can slice them apart again.
        if hh.ndim == 2:
            h = TT.concatenate([hh, cc], axis=1)
        else:
            h = TT.concatenate([hh, cc], axis=0)
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if mask is not None:
            # For finished sequences (mask == 0) carry the old state forward.
            if h.ndim ==2 and mask.ndim==1:
                mask = mask.dimshuffle(0,'x')
            h = mask * h + (1-mask) * state_before
        return h
    def fprop(self,
              state_below,
              mask=None,
              init_state=None,
              nsteps=None,
              batch_size=None,
              use_noise=True,
              truncate_gradient=-1,
              no_noise_bias = False,
              **kwargs
             ):
        """
        Unroll :meth:`step_fprop` over a sequence with ``theano.scan``.

        :param state_below: projected layer input; either 3D
            (nsteps, batch, dim), or 2D and reshaped to 3D here when a
            batch size is known
        :param mask: optional per-step mask forwarded to ``step_fprop``
        :param init_state: initial [hidden, cell] state; zeros are
            allocated when not given
        :param nsteps: number of scan steps; inferred from ``state_below``
            when None
        :param truncate_gradient: passed through to ``theano.scan``
        :returns: the scan output (sequence of [hidden, cell] states)
        """
        # Match the numpy float width to theano's configured precision.
        if theano.config.floatX=='float32':
            floatX = numpy.float32
        else:
            floatX = numpy.float64
        if nsteps is None:
            nsteps = state_below.shape[0]
            if batch_size and batch_size != 1:
                # NOTE(review): Python-2 integer division is relied on here;
                # under Python 3 semantics this would need //.
                nsteps = nsteps / batch_size
        if batch_size is None and state_below.ndim == 3:
            batch_size = state_below.shape[1]
        # A flat 2D input with a (possibly symbolic) batch is reshaped to
        # the (nsteps, batch, dim) layout scan expects.
        if state_below.ndim == 2 and \
           (not isinstance(batch_size,int) or batch_size > 1):
            state_below = state_below.reshape((nsteps, batch_size, state_below.shape[-1]))

        if not init_state:
            # Zero initial state; width is 2 * n_hids because hidden and
            # cell states travel concatenated.
            if not isinstance(batch_size, int) or batch_size != 1:
                init_state = TT.alloc(floatX(0), batch_size, self.n_hids * 2)
            else:
                init_state = TT.alloc(floatX(0), self.n_hids * 2)

        if mask:
            inps = [state_below, mask]
            fn = lambda x,y,z : self.step_fprop(x,y,z, use_noise=use_noise,
                                                no_noise_bias=no_noise_bias)
        else:
            inps = [state_below]
            fn = lambda tx, ty: self.step_fprop(tx, None, ty,
                                                use_noise=use_noise,
                                                no_noise_bias=no_noise_bias)

        rval, updates = theano.scan(fn,
                                    sequences = inps,
                                    outputs_info = [init_state],
                                    name='layer_%s'%self.name,
                                    profile=self.profile,
                                    truncate_gradient = truncate_gradient,
                                    n_steps = nsteps)
        # NOTE(review): new_h is dead -- rval is stored directly below.
        new_h = rval
        self.out = rval
        self.rval = rval
        self.updates = updates

        return self.out
|
hezhenghao/GroundHog
|
groundhog/layers/rec_layers.py
|
Python
|
bsd-3-clause
| 61,607
|
[
"Gaussian"
] |
a61c461fc875cbaedf968a551903143c7b3c1f3d7e237a6213b4efdd9df10efd
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import ldap
import re
import urllib
from nose.plugins.attrib import attr
from nose.tools import assert_true, assert_equal, assert_false
from desktop.lib.django_test_util import make_logged_in_client
from django.contrib.auth.models import User, Group
from django.utils.encoding import smart_unicode
from django.core.urlresolvers import reverse
from useradmin.models import HuePermission, GroupPermission, UserProfile
from useradmin.models import get_profile, get_default_user_group
import useradmin.conf
from hadoop import pseudo_hdfs4
def reset_all_users():
    """Return the user table to a clean state by deleting every user."""
    for existing_user in User.objects.all():
        existing_user.delete()
def reset_all_groups():
    """Return the group table to a clean state by deleting every group."""
    # Also clear the configured default group so no test inherits it.
    useradmin.conf.DEFAULT_USER_GROUP.set_for_testing(None)
    for existing_group in Group.objects.all():
        existing_group.delete()
class LdapTestConnection(object):
  """
  Test class which mimics the behaviour of LdapConnection (from ldap_access.py).
  It also includes functionality to fake modifications to an LDAP server.  It is designed
  as a singleton, to allow for changes to persist across discrete connections.

  This class assumes uid is the user_name_attr.
  """
  def __init__(self):
    self._instance = LdapTestConnection.Data()

  def add_user_group_for_test(self, user, group):
    self._instance.groups[group]['members'].append(user)

  def remove_user_group_for_test(self, user, group):
    self._instance.groups[group]['members'].remove(user)

  def add_posix_user_group_for_test(self, user, group):
    self._instance.groups[group]['posix_members'].append(user)

  def remove_posix_user_group_for_test(self, user, group):
    self._instance.groups[group]['posix_members'].remove(user)

  def find_users(self, username_pattern, search_attr=None, user_name_attr=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE):
    """ Returns info for a particular user via a case insensitive search """
    if find_by_dn:
      data = filter(lambda attrs: attrs['dn'] == username_pattern, self._instance.users.values())
    else:
      username_pattern = "^%s$" % username_pattern.replace('.','\\.').replace('*', '.*')
      username_fsm = re.compile(username_pattern, flags=re.I)
      usernames = filter(lambda username: username_fsm.match(username), self._instance.users.keys())
      data = [self._instance.users.get(username) for username in usernames]
    return data

  def find_groups(self, groupname_pattern, search_attr=None, group_name_attr=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE):
    """ Return all groups in the system with parents and children """
    if find_by_dn:
      data = filter(lambda attrs: attrs['dn'] == groupname_pattern, self._instance.groups.values())
      # SCOPE_SUBTREE means we return all sub-entries of the desired entry along with the desired entry.
      if data and scope == ldap.SCOPE_SUBTREE:
        sub_data = filter(lambda attrs: attrs['dn'].endswith(data[0]['dn']), self._instance.groups.values())
        data.extend(sub_data)
    else:
      groupname_pattern = "^%s$" % groupname_pattern.replace('.','\\.').replace('*', '.*')
      groupnames = filter(lambda username: re.match(groupname_pattern, username), self._instance.groups.keys())
      data = [self._instance.groups.get(groupname) for groupname in groupnames]
    return data

  def find_members_of_group(self, dn, search_attr, ldap_filter, scope=ldap.SCOPE_SUBTREE):
    """ Return the user and group entries that are direct members of the group at ``dn``. """
    members = []
    # Bug fix: iterate the entry dicts (.values()), not the dict keys.
    # Iterating the dict directly yielded name strings, so group_info['dn']
    # raised TypeError.  The sibling find_users_of_group/find_groups_of_group
    # already use .values().
    for group_info in self._instance.groups.values():
      if group_info['dn'] == dn:
        members.extend(group_info['members'])

    members = set(members)

    users = []
    for user_info in self._instance.users.values():
      if user_info['dn'] in members:
        users.append(user_info)

    groups = []
    for group_info in self._instance.groups.values():
      if group_info['dn'] in members:
        groups.append(group_info)

    return users + groups

  def find_users_of_group(self, dn):
    """ Return the user entries whose dn appears in the member list of the group at ``dn``. """
    members = []
    for group_info in self._instance.groups.values():
      if group_info['dn'] == dn:
        members.extend(group_info['members'])

    members = set(members)

    users = []
    for user_info in self._instance.users.values():
      if user_info['dn'] in members:
        users.append(user_info)

    return users

  def find_groups_of_group(self, dn):
    """ Return the group entries whose dn appears in the member list of the group at ``dn``. """
    members = []
    for group_info in self._instance.groups.values():
      if group_info['dn'] == dn:
        members.extend(group_info['members'])

    groups = []
    for group_info in self._instance.groups.values():
      if group_info['dn'] in members:
        groups.append(group_info)

    return groups

  class Data:
    # In-memory stand-in for the LDAP directory: users and groups keyed by
    # their short name, each value shaped like a parsed LDAP entry.
    def __init__(self):
      self.users = {'moe': {'dn': 'uid=moe,ou=People,dc=example,dc=com', 'username':'moe', 'first':'Moe', 'email':'moe@stooges.com', 'groups': ['cn=TestUsers,ou=Groups,dc=example,dc=com']},
                    'lårry': {'dn': 'uid=lårry,ou=People,dc=example,dc=com', 'username':'lårry', 'first':'Larry', 'last':'Stooge', 'email':'larry@stooges.com', 'groups': ['cn=TestUsers,ou=Groups,dc=example,dc=com', 'cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com']},
                    'curly': {'dn': 'uid=curly,ou=People,dc=example,dc=com', 'username':'curly', 'first':'Curly', 'last':'Stooge', 'email':'curly@stooges.com', 'groups': ['cn=TestUsers,ou=Groups,dc=example,dc=com', 'cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com']},
                    'Rock': {'dn': 'uid=Rock,ou=People,dc=example,dc=com', 'username':'Rock', 'first':'rock', 'last':'man', 'email':'rockman@stooges.com', 'groups': ['cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com']},
                    'nestedguy': {'dn': 'uid=nestedguy,ou=People,dc=example,dc=com', 'username':'nestedguy', 'first':'nested', 'last':'guy', 'email':'nestedguy@stooges.com', 'groups': ['cn=NestedGroup,ou=Groups,dc=example,dc=com']},
                    'otherguy': {'dn': 'uid=otherguy,ou=People,dc=example,dc=com', 'username':'otherguy', 'first':'Other', 'last':'Guy', 'email':'other@guy.com'},
                    'posix_person': {'dn': 'uid=posix_person,ou=People,dc=example,dc=com', 'username': 'posix_person', 'first': 'pos', 'last': 'ix', 'email': 'pos@ix.com'},
                    'posix_person2': {'dn': 'uid=posix_person2,ou=People,dc=example,dc=com', 'username': 'posix_person2', 'first': 'pos', 'last': 'ix', 'email': 'pos@ix.com'}}

      self.groups = {'TestUsers': {
                       'dn': 'cn=TestUsers,ou=Groups,dc=example,dc=com',
                       'name':'TestUsers',
                       'members':['uid=moe,ou=People,dc=example,dc=com','uid=lårry,ou=People,dc=example,dc=com','uid=curly,ou=People,dc=example,dc=com'],
                       'posix_members':[]},
                     'Test Administrators': {
                       'dn': 'cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com',
                       'name':'Test Administrators',
                       'members':['uid=Rock,ou=People,dc=example,dc=com','uid=lårry,ou=People,dc=example,dc=com','uid=curly,ou=People,dc=example,dc=com'],
                       'posix_members':[]},
                     'OtherGroup': {
                       'dn': 'cn=OtherGroup,cn=TestUsers,ou=Groups,dc=example,dc=com',
                       'name':'OtherGroup',
                       'members':[],
                       'posix_members':[]},
                     'NestedGroups': {
                       'dn': 'cn=NestedGroups,ou=Groups,dc=example,dc=com',
                       'name':'NestedGroups',
                       'members':['cn=NestedGroup,ou=Groups,dc=example,dc=com'],
                       'posix_members':[]
                     },
                     'NestedGroup': {
                       'dn': 'cn=NestedGroup,ou=Groups,dc=example,dc=com',
                       'name':'NestedGroup',
                       'members':['uid=nestedguy,ou=People,dc=example,dc=com'],
                       'posix_members':[]
                     },
                     'NestedPosixGroups': {
                       'dn': 'cn=NestedPosixGroups,ou=Groups,dc=example,dc=com',
                       'name':'NestedPosixGroups',
                       'members':['cn=PosixGroup,ou=Groups,dc=example,dc=com'],
                       'posix_members':[]
                     },
                     'PosixGroup': {
                       'dn': 'cn=PosixGroup,ou=Groups,dc=example,dc=com',
                       'name':'PosixGroup',
                       'members':[],
                       'posix_members':['posix_person','lårry']},
                     'PosixGroup1': {
                       'dn': 'cn=PosixGroup1,cn=PosixGroup,ou=Groups,dc=example,dc=com',
                       'name':'PosixGroup1',
                       'members':[],
                       'posix_members':['posix_person2']},
                    }
def test_invalid_username():
  """Usernames with forbidden characters must be rejected by the new-user form."""
  bad_names = ('-foo', 'foo:o', 'foo o', ' foo')

  c = make_logged_in_client(username="test", is_superuser=True)

  for bad_name in bad_names:
    # The form page must load, and each bad POST must come back with a
    # validation error on the username field.
    assert_true(c.get('/useradmin/users/new'))
    response = c.post('/useradmin/users/new', dict(username=bad_name, password1="test", password2="test"))
    assert_true('not allowed' in response.context["form"].errors['username'][0])
def test_group_permissions():
  """Grant and revoke useradmin access via group permissions, end to end."""
  reset_all_users()
  reset_all_groups()

  # Get ourselves set up with a user and a group
  c = make_logged_in_client(username="test", is_superuser=True)
  Group.objects.create(name="test-group")
  test_user = User.objects.get(username="test")
  test_user.groups.add(Group.objects.get(name="test-group"))
  test_user.save()

  # Make sure that a superuser can always access applications
  response = c.get('/useradmin/users')
  assert_true('Hue Users' in response.content)

  assert_true(len(GroupPermission.objects.all()) == 0)
  # Grant the useradmin 'access' permission to test-group via the edit form.
  c.post('/useradmin/groups/edit/test-group',
         dict(name="test-group",
              members=[User.objects.get(username="test").pk],
              permissions=[HuePermission.objects.get(app='useradmin',action='access').pk],
              save="Save"), follow=True)
  assert_true(len(GroupPermission.objects.all()) == 1)

  # Now test that we have limited access
  c1 = make_logged_in_client(username="nonadmin", is_superuser=False)
  response = c1.get('/useradmin/users')
  assert_true('You do not have permission to access the Useradmin application.' in response.content)

  # Add the non-admin to a group that should grant permissions to the app
  test_user = User.objects.get(username="nonadmin")
  test_user.groups.add(Group.objects.get(name='test-group'))
  test_user.save()

  # Check that we have access now
  response = c1.get('/useradmin/users')
  assert_true(get_profile(test_user).has_hue_permission('access','useradmin'))
  assert_true('Hue Users' in response.content)

  # Make sure we can't modify permissions
  response = c1.get('/useradmin/permissions/edit/useradmin/access')
  assert_true('must be a superuser to change permissions' in response.content)

  # And revoke access from the group
  c.post('/useradmin/permissions/edit/useradmin/access',
         dict(app='useradmin',
              priv='access',
              groups=[],
              save="Save"), follow=True)
  assert_true(len(GroupPermission.objects.all()) == 0)
  assert_false(get_profile(test_user).has_hue_permission('access','useradmin'))

  # We should no longer have access to the app
  response = c1.get('/useradmin/users')
  assert_true('You do not have permission to access the Useradmin application.' in response.content)
def test_default_group():
  """The configured default group is auto-created and cannot be deleted while active."""
  reset_all_users()
  reset_all_groups()

  useradmin.conf.DEFAULT_USER_GROUP.set_for_testing('test_default')
  get_default_user_group()
  c = make_logged_in_client(username='test', is_superuser=True)

  # get_default_user_group() creates the configured group on demand.
  assert_true(Group.objects.filter(name='test_default').exists())

  # Deleting the active default group must be refused.
  assert_true(Group.objects.filter(name='test_default').exists())
  response = c.post('/useradmin/groups/delete/test_default')
  assert_true('default user group may not be deleted' in response.content)
  assert_true(Group.objects.filter(name='test_default').exists())

  # Once the default points at another name, the old group becomes deletable.
  useradmin.conf.DEFAULT_USER_GROUP.set_for_testing('new_default')
  response = c.post('/useradmin/groups/delete/test_default')
  assert_false(Group.objects.filter(name='test_default').exists())
  assert_true(Group.objects.filter(name='new_default').exists())
def test_get_profile():
  """get_profile() lazily creates a UserProfile on first access."""
  reset_all_users()
  reset_all_groups()

  make_logged_in_client(username='test', password='test', is_superuser=True)

  # No profile exists until get_profile is called for the user.
  assert_equal(0, UserProfile.objects.count())
  get_profile(User.objects.get(username='test'))
  assert_equal(1, UserProfile.objects.count())
def test_group_admin():
  """Exercise group listing, creation, editing, deletion, and the superuser guard."""
  reset_all_users()
  reset_all_groups()

  c = make_logged_in_client(username="test", is_superuser=True)
  response = c.get('/useradmin/groups')
  # No groups just yet
  assert_true(len(response.context["groups"]) == 0)
  assert_true("Hue Groups" in response.content)

  # Create a group
  response = c.get('/useradmin/groups/new')
  assert_equal('/useradmin/groups/new', response.context['action'])
  c.post('/useradmin/groups/new', dict(name="testgroup"))

  # We should have an empty group in the DB now
  assert_true(len(Group.objects.all()) == 1)
  assert_true(Group.objects.filter(name="testgroup").exists())
  assert_true(len(Group.objects.get(name="testgroup").user_set.all()) == 0)

  # And now, just for kicks, let's try adding a user
  response = c.post('/useradmin/groups/edit/testgroup',
                    dict(name="testgroup",
                         members=[User.objects.get(username="test").pk],
                         save="Save"), follow=True)
  assert_true(len(Group.objects.get(name="testgroup").user_set.all()) == 1)
  assert_true(Group.objects.get(name="testgroup").user_set.filter(username="test").exists())

  # Test some permissions
  c2 = make_logged_in_client(username="nonadmin", is_superuser=False)

  # Need to give access to the user for the rest of the test
  group = Group.objects.create(name="access-group")
  perm = HuePermission.objects.get(app='useradmin', action='access')
  GroupPermission.objects.create(group=group, hue_permission=perm)
  test_user = User.objects.get(username="nonadmin")
  test_user.groups.add(Group.objects.get(name="access-group"))
  test_user.save()

  # Make sure non-superusers can't do bad things
  response = c2.get('/useradmin/groups/new')
  assert_true("You must be a superuser" in response.content)
  response = c2.get('/useradmin/groups/edit/testgroup')
  assert_true("You must be a superuser" in response.content)

  response = c2.post('/useradmin/groups/new', dict(name="nonsuperuser"))
  assert_true("You must be a superuser" in response.content)
  response = c2.post('/useradmin/groups/edit/testgroup',
                     dict(name="nonsuperuser",
                          members=[User.objects.get(username="test").pk],
                          save="Save"), follow=True)
  assert_true("You must be a superuser" in response.content)

  # Should be one group left, because we created the other group
  # (access-group survives the deletion of testgroup).
  response = c.post('/useradmin/groups/delete/testgroup')
  assert_true(len(Group.objects.all()) == 1)

  # Group names containing spaces are accepted.
  group_count = len(Group.objects.all())
  response = c.post('/useradmin/groups/new', dict(name="with space"))
  assert_equal(len(Group.objects.all()), group_count + 1)
def test_user_admin():
  """Broad user-admin workflow: edit, rename guard, password change, creation,
  group membership, self-service limits, deactivation, and deletion."""
  FUNNY_NAME = '~`!@#$%^&*()_-+={}[]|\;"<>?/,.'
  FUNNY_NAME_QUOTED = urllib.quote(FUNNY_NAME)

  reset_all_users()
  reset_all_groups()
  useradmin.conf.DEFAULT_USER_GROUP.set_for_testing('test_default')

  c = make_logged_in_client('test', is_superuser=True)
  user = User.objects.get(username='test')

  # Test basic output.
  response = c.get('/useradmin/')
  assert_true(len(response.context["users"]) > 0)
  assert_true("Hue Users" in response.content)

  # Test editing a superuser
  # Just check that this comes back
  response = c.get('/useradmin/users/edit/test')
  # Edit it, to add a first and last name
  response = c.post('/useradmin/users/edit/test',
                    dict(username="test",
                         first_name=u"Inglés",
                         last_name=u"Español",
                         is_superuser="True",
                         is_active="True"),
                    follow=True)
  assert_true("User information updated" in response.content,
              "Notification should be displayed in: %s" % response.content)
  # Edit it, can't change username
  response = c.post('/useradmin/users/edit/test',
                    dict(username="test2",
                         first_name=u"Inglés",
                         last_name=u"Español",
                         is_superuser="True",
                         is_active="True"),
                    follow=True)
  assert_true("You cannot change a username" in response.content)
  # Now make sure that those were materialized
  response = c.get('/useradmin/users/edit/test')
  assert_equal(smart_unicode("Inglés"), response.context["form"].instance.first_name)
  assert_true("Español" in response.content)
  # Shouldn't be able to demote to non-superuser
  response = c.post('/useradmin/users/edit/test', dict(username="test",
                    first_name=u"Inglés", last_name=u"Español",
                    is_superuser=False, is_active=True))
  assert_true("You cannot remove" in response.content,
              "Shouldn't be able to remove the last superuser")
  # Shouldn't be able to delete oneself
  response = c.post('/useradmin/users/delete', {u'user_ids': [user.id]})
  assert_true("You cannot remove yourself" in response.content,
              "Shouldn't be able to delete the last superuser")
  # Let's try changing the password
  response = c.post('/useradmin/users/edit/test', dict(username="test", first_name="Tom", last_name="Tester", is_superuser=True, password1="foo", password2="foobar"))
  assert_equal(["Passwords do not match."], response.context["form"]["password2"].errors, "Should have complained about mismatched password")
  response = c.post('/useradmin/users/edit/test', dict(username="test", first_name="Tom", last_name="Tester", password1="foo", password2="foo", is_active=True, is_superuser=True))
  assert_true(User.objects.get(username="test").is_superuser)
  assert_true(User.objects.get(username="test").check_password("foo"))
  # Change it back!
  response = c.post('/useradmin/users/edit/test', dict(username="test", first_name="Tom", last_name="Tester", password1="test", password2="test", is_active="True", is_superuser="True"))
  assert_true(User.objects.get(username="test").check_password("test"))
  assert_true(make_logged_in_client(username = "test", password = "test"),
              "Check that we can still login.")

  # Check new user form for default group
  group = get_default_user_group()
  response = c.get('/useradmin/users/new')
  assert_true(response)
  # NOTE(review): the hard-coded pk "1" assumes the default group is the
  # first group created in this test DB -- confirm if this becomes flaky.
  assert_true(('<option value="1" selected="selected">%s</option>' % group) in str(response))

  # Create a new regular user (duplicate name)
  response = c.post('/useradmin/users/new', dict(username="test", password1="test", password2="test"))
  assert_equal({ 'username': ["User with this Username already exists."]}, response.context["form"].errors)
  # Create a new regular user (for real)
  response = c.post('/useradmin/users/new', dict(username=FUNNY_NAME,
                                                 password1="test",
                                                 password2="test",
                                                 is_active="True"))
  response = c.get('/useradmin/')
  assert_true(FUNNY_NAME_QUOTED in response.content)
  assert_true(len(response.context["users"]) > 1)
  assert_true("Hue Users" in response.content)
  # Validate profile is created.
  assert_true(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())

  # Need to give access to the user for the rest of the test
  group = Group.objects.create(name="test-group")
  perm = HuePermission.objects.get(app='useradmin', action='access')
  GroupPermission.objects.create(group=group, hue_permission=perm)

  # Verify that we can modify user groups through the user admin pages
  response = c.post('/useradmin/users/new', dict(username="group_member", password1="test", password2="test", groups=[group.pk]))
  User.objects.get(username='group_member')
  assert_true(User.objects.get(username='group_member').groups.filter(name='test-group').exists())
  response = c.post('/useradmin/users/edit/group_member', dict(username="group_member", password1="test", password2="test", groups=[]))
  assert_false(User.objects.get(username='group_member').groups.filter(name='test-group').exists())

  # Check permissions by logging in as the new user
  c_reg = make_logged_in_client(username=FUNNY_NAME, password="test")
  test_user = User.objects.get(username=FUNNY_NAME)
  test_user.groups.add(Group.objects.get(name="test-group"))
  test_user.save()

  # Regular user should be able to modify oneself
  response = c_reg.post('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,),
                        dict(username = FUNNY_NAME,
                             first_name = "Hello",
                             is_active = True))
  response = c_reg.get('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,))
  assert_equal("Hello", response.context["form"].instance.first_name)

  funny_user = User.objects.get(username=FUNNY_NAME)
  # Can't edit other people.
  response = c_reg.post("/useradmin/users/delete", {u'user_ids': [funny_user.id]})
  assert_true("You must be a superuser" in response.content,
              "Regular user can't edit other people")

  # Revert to regular "test" user, that has superuser powers.
  c_su = make_logged_in_client()
  # Inactivate FUNNY_NAME
  c_su.post('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,),
            dict(username = FUNNY_NAME,
                 first_name = "Hello",
                 is_active = False))

  # Now make sure FUNNY_NAME can't log back in
  response = c_reg.get('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,))
  assert_true(response.status_code == 302 and "login" in response["location"],
              "Inactivated user gets redirected to login page")

  # Delete that regular user
  funny_profile = get_profile(test_user)
  response = c_su.post('/useradmin/users/delete', {u'user_ids': [funny_user.id]})
  assert_equal(302, response.status_code)
  assert_false(User.objects.filter(username=FUNNY_NAME).exists())
  assert_false(UserProfile.objects.filter(id=funny_profile.id).exists())

  # Bulk delete users
  u1 = User.objects.create(username='u1', password="u1")
  u2 = User.objects.create(username='u2', password="u2")
  assert_equal(User.objects.filter(username__in=['u1', 'u2']).count(), 2)
  response = c_su.post('/useradmin/users/delete', {u'user_ids': [u1.id, u2.id]})
  assert_equal(User.objects.filter(username__in=['u1', 'u2']).count(), 0)

  # Make sure that user deletion works if the user has never performed a request.
  funny_user = User.objects.create(username=FUNNY_NAME, password='test')
  assert_true(User.objects.filter(username=FUNNY_NAME).exists())
  assert_false(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())
  response = c_su.post('/useradmin/users/delete', {u'user_ids': [funny_user.id]})
  assert_equal(302, response.status_code)
  assert_false(User.objects.filter(username=FUNNY_NAME).exists())
  assert_false(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())

  # You shouldn't be able to create a user without a password
  response = c_su.post('/useradmin/users/new', dict(username="test"))
  assert_true("You must specify a password when creating a new user." in response.content)
@attr('requires_hadoop')
def test_ensure_home_directory():
  """HDFS home directories are created when 'ensure_home_directory' is checked,
  both at user-creation time and later via the edit form.  Requires a live
  pseudo-distributed HDFS cluster (hence the requires_hadoop attribute)."""
  reset_all_users()
  reset_all_groups()

  # Cluster and client for home directory creation
  cluster = pseudo_hdfs4.shared_cluster()
  c = make_logged_in_client(cluster.superuser, is_superuser=True, groupname='test1')
  cluster.fs.setuser(cluster.superuser)

  # Create a user with a home directory
  assert_false(cluster.fs.exists('/user/test1'))
  response = c.post('/useradmin/users/new', dict(username="test1", password1='test', password2='test', ensure_home_directory=True))
  assert_true(cluster.fs.exists('/user/test1'))
  dir_stat = cluster.fs.stats('/user/test1')
  assert_equal('test1', dir_stat.user)
  assert_equal('test1', dir_stat.group)
  # 40755 = directory bit + rwxr-xr-x permissions.
  assert_equal('40755', '%o' % dir_stat.mode)

  # Create a user, then add their home directory
  assert_false(cluster.fs.exists('/user/test2'))
  response = c.post('/useradmin/users/new', dict(username="test2", password1='test', password2='test'))
  assert_false(cluster.fs.exists('/user/test2'))
  response = c.post('/useradmin/users/edit/%s' % "test2", dict(username="test2", password1='test', password2='test', ensure_home_directory=True))
  assert_true(cluster.fs.exists('/user/test2'))
  dir_stat = cluster.fs.stats('/user/test2')
  assert_equal('test2', dir_stat.user)
  assert_equal('test2', dir_stat.group)
  assert_equal('40755', '%o' % dir_stat.mode)
def test_list_for_autocomplete():
  """The autocomplete endpoint only lists users sharing a group with the caller,
  while all groups are always visible."""
  c1 = make_logged_in_client('test_list_for_autocomplete', is_superuser=False, groupname='test_list_for_autocomplete')
  c2_same_group = make_logged_in_client('test_list_for_autocomplete2', is_superuser=False, groupname='test_list_for_autocomplete')
  c3_other_group = make_logged_in_client('test_list_for_autocomplete3', is_superuser=False, groupname='test_list_for_autocomplete_other_group')

  def fetch(client):
    # Hit the endpoint as an AJAX request and split the payload into
    # (usernames, group names).
    response = client.get(reverse('useradmin.views.list_for_autocomplete'), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    content = json.loads(response.content)
    return ([entry['username'] for entry in content['users']],
            [entry['name'] for entry in content['groups']])

  # c1 is in the same group as c2
  users, groups = fetch(c1)
  assert_equal(['test_list_for_autocomplete2'], users)
  assert_true('test_list_for_autocomplete' in groups, groups)
  assert_true('test_list_for_autocomplete_other_group' in groups, groups)

  # c2 is in the same group as c1
  users, groups = fetch(c2_same_group)
  assert_equal(['test_list_for_autocomplete'], users)
  assert_true('test_list_for_autocomplete' in groups, groups)
  assert_true('test_list_for_autocomplete_other_group' in groups, groups)

  # c3 is alone except for groups
  users, groups = fetch(c3_other_group)
  assert_equal([], users)
  assert_true('test_list_for_autocomplete' in groups, groups)
  assert_true('test_list_for_autocomplete_other_group' in groups, groups)
|
2013Commons/hue
|
apps/useradmin/src/useradmin/tests.py
|
Python
|
apache-2.0
| 28,036
|
[
"MOE"
] |
cde91bcbecd5776fa8de346d6fd06a3a2f5883436081c7e991e5dcbf28947747
|
../../../../../../../share/pyshared/orca/scripts/apps/gnome-mud/__init__.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/gnome-mud/__init__.py
|
Python
|
gpl-3.0
| 75
|
[
"ORCA"
] |
79eed7b7767288ee77856c706cf6a543324bd36f63b5cd74a8b79e89da671ea4
|
import torch
#
# def test_fi():
# input = torch.LongTensor([[0, 0], [0, 1], [1, 0], [1, 1]])
# expected = torch.LongTensor([0, 1, 2, 3])
#
# actual = gaussian.fi(input, (2,2))
#
# print(actual)
#
#
# def test_fi_mat():
# input = torch.LongTensor([[[0, 0], [0, 1], [1, 0], [1, 1]]])
# expected = torch.LongTensor([0, 1, 2, 3])
#
# actual = gaussian.fi_matrix(input, torch.LongTensor((2, 2)))
#
# print(actual)
#
# def test_sort():
# indices = torch.LongTensor([[[6, 3], [1, 2]], [[5, 8], [1, 3]]])
# vals = torch.FloatTensor([[0.1, 0.2], [0.3, 0.4]])
#
# hyper.sort(indices, vals)
#
# print(indices)
# print(vals)
#
#
#
# if __name__ == '__main__':
# # unittest.main()
#
# test_fi()
# test_fi_mat()
|
MaestroGraph/sparse-hyper
|
tests/tests.py
|
Python
|
mit
| 761
|
[
"Gaussian"
] |
f93187942d9a190d90e09f49e547a6970e82936fcf9258bc391ba5e55ff4b1d3
|
"""
Unit tests for reads objects. This is used for all tests
that can be performed in isolation from input data.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import ga4gh.server.datamodel.reads as reads
import ga4gh.schemas.protocol as protocol
class TestParseMalformedBamHeader(unittest.TestCase):
    """
    Tests for parsing of malformed bam headers.

    reads.parseMalformedBamHeader should not modify correct parsed
    headers and should parse out additional fields separated by spaces
    (instead of tabs as defined in the SAM spec).
    """
    def testGoodHeaderUnmodified(self):
        # A well-formed header passes through untouched.
        wellFormed = {'SO': 'coordinate', 'VN': '1.0'}
        self.assertEqual(wellFormed, reads.parseMalformedBamHeader(wellFormed))

    def testOriginalTypesUnmodified(self):
        # note real field tags, just checking that types are preserved
        typed = {'int': 2845856850,
                 'float': 206.6,
                 'bool': True,
                 'string': '123'}
        self.assertEqual(typed, reads.parseMalformedBamHeader(typed))

    def testCommandsWithSpacesNotParsed(self):
        # Spaces inside a command line must not be mistaken for field breaks.
        withCommand = {'CL': 'bwa aln -q 15 -f $sai_file ' +
                             '$reference_fasta $fastq_file\tPP:bwa_index',
                       'ID': 'bwa_aln_fastq',
                       'PN': 'bwa',
                       'VN': '0.5.9-r16'}
        self.assertEqual(withCommand, reads.parseMalformedBamHeader(withCommand))

    def testSpaceSeparatedUnparsedFieldsParsed(self):
        # Space-separated TAG:value pairs trailing the UR field are split out
        # into their own entries.
        malformed = {'LN': 249250621,
                     'M5': '1b22b98cdeb4a9304cb5d48026a85128',
                     'SN': '1',
                     'UR': 'ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/' +
                           'technical/reference/phase2_reference_assembly' +
                           '_sequence/hs37d5.fa.gz AS:NCBI37' +
                           ' SP:Human'}
        expected = {'LN': 249250621,
                    'M5': '1b22b98cdeb4a9304cb5d48026a85128',
                    'SN': '1',
                    'UR': 'ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/' +
                          'technical/reference/phase2_reference_assembly' +
                          '_sequence/hs37d5.fa.gz',
                    'AS': 'NCBI37',
                    'SP': 'Human'}
        self.assertEqual(expected, reads.parseMalformedBamHeader(malformed))
class TestSamCigar(unittest.TestCase):
    """
    Test Sam Cigar class handles Cigar mappings correctly

    The integer codes are defined in the SAM spec. Thus, the ordering of
    SamCigar.cigarStrings implicitly implements this spec.
    """
    def _checkRoundTrip(self, code, operation):
        # Both directions of the mapping must agree for the pair.
        self.assertEqual(code, reads.SamCigar.ga2int(operation))
        self.assertEqual(operation, reads.SamCigar.int2ga(code))

    def testAlignmentMatch(self):
        self._checkRoundTrip(0, protocol.CigarUnit.ALIGNMENT_MATCH)

    def testInsertion(self):
        self._checkRoundTrip(1, protocol.CigarUnit.INSERT)

    def testDeletion(self):
        self._checkRoundTrip(2, protocol.CigarUnit.DELETE)

    def testSkipped(self):
        self._checkRoundTrip(3, protocol.CigarUnit.SKIP)

    def testSoftClipping(self):
        self._checkRoundTrip(4, protocol.CigarUnit.CLIP_SOFT)

    def testHardClipping(self):
        self._checkRoundTrip(5, protocol.CigarUnit.CLIP_HARD)

    def testPadding(self):
        self._checkRoundTrip(6, protocol.CigarUnit.PAD)

    def testSequenceMatch(self):
        self._checkRoundTrip(7, protocol.CigarUnit.SEQUENCE_MATCH)

    def testSequenceMismatch(self):
        self._checkRoundTrip(8, protocol.CigarUnit.SEQUENCE_MISMATCH)
class TestSamFlags(unittest.TestCase):
    """
    Tests SamFlags utilities for checking the status of and
    setting flags.

    Flags are defined by the SAM spec.
    """

    def setUp(self):
        # Every test starts from an empty flag word.
        self.flag = 0x0

    def _setAndCheck(self, flagConstant, expectedValue):
        # Set flagConstant on the accumulated flag word, then verify both
        # the resulting numeric value and that the bit reads back as set.
        self.flag = reads.SamFlags.setFlag(self.flag, flagConstant)
        self.assertEqual(expectedValue, self.flag)
        self.assertTrue(
            reads.SamFlags.isFlagSet(self.flag, flagConstant))

    def testPairedReadFlag(self):
        self._setAndCheck(reads.SamFlags.READ_PAIRED, 0x1)

    def testProperPairReadFlag(self):
        self._setAndCheck(reads.SamFlags.READ_PROPER_PAIR, 0x2)

    def testUnmappedReadFlag(self):
        self._setAndCheck(reads.SamFlags.READ_UNMAPPED, 0x4)

    def testUnmappedMateFlag(self):
        self._setAndCheck(reads.SamFlags.MATE_UNMAPPED, 0x8)

    def testReverseStrandReadFlag(self):
        self._setAndCheck(reads.SamFlags.READ_REVERSE_STRAND, 0x10)

    def testReverseStrandMateFlag(self):
        self._setAndCheck(reads.SamFlags.MATE_REVERSE_STRAND, 0x20)

    def testFirstPairFlag(self):
        self._setAndCheck(reads.SamFlags.FIRST_IN_PAIR, 0x40)

    def testSecondPairFlag(self):
        self._setAndCheck(reads.SamFlags.SECOND_IN_PAIR, 0x80)

    def testSecondaryAlignmentFlag(self):
        self._setAndCheck(reads.SamFlags.SECONDARY_ALIGNMENT, 0x100)

    def testFailedQualityCheckFlag(self):
        self._setAndCheck(reads.SamFlags.FAILED_QUALITY_CHECK, 0x200)

    def testDuplicateReadFlag(self):
        self._setAndCheck(reads.SamFlags.DUPLICATE_READ, 0x400)

    def testSupplementaryAlignmentFlag(self):
        self._setAndCheck(reads.SamFlags.SUPPLEMENTARY_ALIGNMENT, 0x800)

    def testFlagNotSet(self):
        # Setting one bit must not make an unrelated bit read as set.
        self.flag = reads.SamFlags.setFlag(
            self.flag, reads.SamFlags.READ_PAIRED)
        self.assertFalse(reads.SamFlags.isFlagSet(
            self.flag, reads.SamFlags.READ_REVERSE_STRAND))

    def testComboFlag(self):
        # Multiple bits accumulate and each remains individually
        # queryable.
        for flagConstant in (reads.SamFlags.READ_PAIRED,
                             reads.SamFlags.FIRST_IN_PAIR,
                             reads.SamFlags.FAILED_QUALITY_CHECK):
            self.flag = reads.SamFlags.setFlag(self.flag, flagConstant)
        self.assertEqual(0x241, self.flag)
        for flagConstant in (reads.SamFlags.READ_PAIRED,
                             reads.SamFlags.FIRST_IN_PAIR,
                             reads.SamFlags.FAILED_QUALITY_CHECK):
            self.assertTrue(
                reads.SamFlags.isFlagSet(self.flag, flagConstant))
|
ga4gh/server
|
tests/unit/test_reads.py
|
Python
|
apache-2.0
| 9,352
|
[
"BWA"
] |
ebfdb9ac5aead84097c093e805c23bde830158be7d9716931d168b8905802e96
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import mmap
import os
import re
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
parser.add_argument("-e", "--skip-exceptions", help="ignore hack/verify-flags/exceptions.txt and print all output", action="store_true")
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
    """Return True if the file at ``pathname`` looks binary.

    A file is treated as binary if any chunk of it contains a null byte,
    or if it cannot be opened/read at all (unreadable files are reported
    as binary so callers simply skip them).

    @attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
    @author: Trent Mick <TrentM@ActiveState.com>
    @author: Jorge Orpinel <jorge@orpinel.com>"""
    CHUNKSIZE = 1024
    try:
        # Open in binary mode: the null-byte test is a byte-level check,
        # and text mode could raise decode errors on non-UTF-8 files.
        with open(pathname, 'rb') as f:
            while True:
                chunk = f.read(CHUNKSIZE)
                if b'\0' in chunk:  # found null byte
                    return True
                if len(chunk) < CHUNKSIZE:
                    break  # done
    except EnvironmentError:
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; only I/O failures belong here.
        return True
    return False
def get_all_files(rootdir):
    """Walk rootdir and return every checkable (text, non-vendored) file."""
    skipped_dirs = ('Godeps', 'third_party', '.git')
    skipped_files = ('exceptions.txt', 'known-flags.txt')
    skipped_suffixes = ('.svg', '.gliffy')
    collected = []
    for root, dirs, files in os.walk(rootdir):
        # Prune directories we never want to descend into.
        for skip in skipped_dirs:
            if skip in dirs:
                dirs.remove(skip)
        # Drop the flag-list bookkeeping files themselves.
        for skip in skipped_files:
            if skip in files:
                files.remove(skip)
        for name in files:
            if name.endswith(skipped_suffixes):
                continue
            pathname = os.path.join(root, name)
            # Binary files cannot contain flag usages worth checking.
            if not is_binary(pathname):
                collected.append(pathname)
    return collected
def normalize_files(rootdir, files):
    """Filter out files we never check and absolutize relative paths."""
    excluded_parts = ('Godeps', 'third_party', 'exceptions.txt',
                      'known-flags.txt')
    excluded_suffixes = ('.svg', '.gliffy')
    kept = [
        f for f in files
        if not any(part in f for part in excluded_parts)
        and not f.endswith(excluded_suffixes)
    ]
    # Relative paths are interpreted relative to the repository root.
    return [f if os.path.isabs(f) else os.path.join(rootdir, f)
            for f in kept]
def line_has_bad_flag(line, flagre):
    """Return True if `line` uses an underscore spelling of a known flag.

    Only the first regex match on the line is examined; several known
    false-positive patterns (jinja2/salt/juju templates and yaml keys)
    are explicitly allowed.
    """
    for match in flagre.findall(line):
        if "_" not in match:
            # Dash spelling is the legal one.
            return False
        # jinja2 templates legitimately use kube flags as variables,
        # except they use _ for the variable name
        if "{% set" + match + "= \"" in line:
            return False
        if "pillar[" + match + "]" in line:
            return False
        if "grains" + match in line:
            return False
        # These are usually yaml definitions
        if match.endswith(":"):
            return False
        # something common in juju variables...
        if "template_data[" + match + "]" in line:
            return False
        return True
    return False
# The list of files might not be the whole repo. If someone only changed a
# couple of files we don't want to run all of the golang files looking for
# flags. Instead load the list of flags from hack/verify-flags/known-flags.txt
# If running the golang files finds a new flag not in that file, return an
# error and tell the user to add the flag to the flag list.
def get_flags(rootdir, files):
    """Return the list of known flags, aborting on undeclared flags.

    Loads hack/verify-flags/known-flags.txt (canonical flag names) and
    hack/verify-flags/excluded-flags.txt (underscore names deliberately
    allowed), then scans every .go file in `files` for flag declarations.
    Prints an explanation and exits with status 1 when a declaration is
    missing from the appropriate list.
    """
    def read_set(relpath):
        # One flag per line in each bookkeeping file.
        with open(os.path.join(rootdir, relpath), 'r') as fp:
            return set(fp.read().splitlines())

    flags = read_set("hack/verify-flags/known-flags.txt")
    excluded_flags = read_set("hack/verify-flags/excluded-flags.txt")

    # Patterns matching the flag declaration helpers in go source.
    declaration_regexs = [
        re.compile('Var[P]?\([^,]*, "([^"]*)"'),
        re.compile('.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
        re.compile('.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
        re.compile('.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
        re.compile('.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
        re.compile('.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)'),
    ]

    new_flags = set()
    new_excluded_flags = set()
    # Walk the go files looking for any flags being declared.
    for pathname in files:
        if not pathname.endswith(".go"):
            continue
        with open(pathname, 'r') as fp:
            data = fp.read()
        declared = []
        for regex in declaration_regexs:
            declared.extend(regex.findall(data))
        for flag in declared:
            if any(x in flag for x in excluded_flags):
                continue
            if "_" in flag:
                new_excluded_flags.add(flag)
            if "-" not in flag:
                continue
            if flag not in flags:
                new_flags.add(flag)

    if len(new_excluded_flags) != 0:
        print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
        print("Are you certain this flag should not have been declared with an - instead?")
        print("%s" % "\n".join(sorted(new_excluded_flags)))
        sys.exit(1)
    if len(new_flags) != 0:
        print("Found flags in golang files not in the list of known flags. Please add these to hack/verify-flags/known-flags.txt")
        print("%s" % "\n".join(sorted(new_flags)))
        sys.exit(1)
    return list(flags)
def flags_to_re(flags):
    """turn the list of all flags we found into a regex find both - and _ versions"""
    separator = re.compile('[-_]')
    patterns = []
    for flag in flags:
        # Accept either separator wherever the flag uses - or _.
        either_sep = separator.sub('[-_]', flag)
        # Require a non-word (and non-"${") boundary on each side so we
        # don't match inside identifiers or shell variable expansions.
        patterns.append("[^\w${]" + either_sep + "[^\w]")
    # One big alternation is cheaper than matching each flag separately.
    return re.compile("|".join(patterns))
def load_exceptions(rootdir):
    """Read hack/verify-flags/exceptions.txt into a set of (filename, line).

    Returns an empty set when --skip-exceptions was given, so that every
    hit is reported. Each exceptions line has the form "filename:line";
    malformed lines are reported and skipped.
    """
    exceptions = set()
    if args.skip_exceptions:
        return exceptions
    exception_filename = os.path.join(rootdir, "hack/verify-flags/exceptions.txt")
    with open(exception_filename, 'r') as exception_file:
        for exception in exception_file.read().splitlines():
            out = exception.split(":", 1)
            if len(out) != 2:
                # Bug fix: this previously called the undefined name
                # `printf`, raising NameError on any malformed line.
                print("Invalid line in exceptions file: %s" % exception)
                continue
            filename = out[0]
            line = out[1]
            exceptions.add((filename, line))
    return exceptions
def main():
    """Entry point: report every use of a known flag spelled with '_'."""
    rootdir = os.path.abspath(os.path.dirname(__file__) + "/../")

    exceptions = load_exceptions(rootdir)

    # Either the files given on the command line, or the whole repo.
    if args.filenames:
        files = args.filenames
    else:
        files = get_all_files(rootdir)
    files = normalize_files(rootdir, files)

    flagRE = flags_to_re(get_flags(rootdir, files))

    bad_lines = []
    # Scan every file for flags that were declared with '-' but are
    # being used with '_'.
    for pathname in files:
        relname = os.path.relpath(pathname, rootdir)
        with open(pathname, 'r') as f:
            for line in f.read().splitlines():
                if line_has_bad_flag(line, flagRE):
                    if (relname, line) not in exceptions:
                        bad_lines.append((relname, line))

    if bad_lines:
        if not args.skip_exceptions:
            print("Found illegal 'flag' usage. If these are false positives you should running `hack/verify-flags-underscore.py -e > hack/verify-flags/exceptions.txt` to update the list.")
        for relname, line in sorted(bad_lines):
            print("%s:%s" % (relname, line))

if __name__ == "__main__":
    sys.exit(main())
|
beyondblog/kubernetes
|
hack/verify-flags-underscore.py
|
Python
|
apache-2.0
| 8,858
|
[
"VisIt"
] |
5113f21e31e0fe60088b55ef38fecf25d9e50576b3051fe6ead7b3377395e1cd
|
# -*- coding: utf-8 -*-
import unittest
import os
from os.path import (
join,
abspath,
dirname
)
import json
from tempfile import NamedTemporaryFile
import filecmp
import conftest
from libnano.fileio import gb_reader, gb_writer
LOCAL_DIR: str = abspath(dirname(__file__))
class TestGBWriter(unittest.TestCase):
    """Round-trip tests: parse a GenBank file and write it back out.

    Writing a parsed file should reproduce the input byte-for-byte for
    well-formed files, and is expected to differ for the known-bad ones.
    """

    def setUp(self):
        # Files that should survive a parse/write round trip unchanged.
        self.good_files = [
            'sample.gb',
            'mds42_full.gb',
            'mds42_recode.gb',
            'sample_complex.gb',
        ]
        # Files known NOT to round-trip:
        #   failed.gb                 -- ApE writes a double COMMENT key
        #   mds42_recode_biopython.gb -- biopython wraps lines at 80
        #                                characters while we wrap at 79
        self.bad_files = [
            'failed.gb',
            'mds42_recode_biopython.gb',
        ]
        self.maxDiff = None  # show the whole diff on assertion failure

    def checkFile(self, fn, should_be_true):
        """Parse test_data/<fn>, rewrite it, and compare with the original."""
        source_path = join(LOCAL_DIR, 'test_data', fn)
        parsed = gb_reader.parse(source_path, is_ordered=True)
        temp_file = NamedTemporaryFile(mode='w', delete=False,
                                       encoding='utf-8')
        gb_writer.write(temp_file, parsed)
        temp_file.close()
        identical = filecmp.cmp(source_path, temp_file.name)
        if should_be_true:
            self.assertTrue(identical)
        else:
            self.assertFalse(identical)
        # Clean up the temporary copy and confirm it is gone.
        os.unlink(temp_file.name)
        self.assertFalse(os.path.exists(temp_file.name))

    def test_goodFiles(self):
        for fn in self.good_files:
            self.checkFile(fn, True)

    def test_badFiles(self):
        for fn in self.bad_files:
            self.checkFile(fn, False)


if __name__ == '__main__':
    unittest.main(verbosity=2)
|
libnano/libnano
|
tests/gb_writer_test.py
|
Python
|
gpl-2.0
| 1,651
|
[
"Biopython"
] |
194138a65a1ac4afb7a381593a93a2a2a47361fff712921628d09216b841ceec
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Adds the many-to-many field ``involvement_types`` to the ``Visit``
    model, pointing at ``visit.InvolvementType``.
    """

    dependencies = [
        # Must be applied after the previous migration of the visit app.
        ('visit', '0055_auto_20150815_0213'),
    ]

    operations = [
        migrations.AddField(
            model_name='visit',
            name='involvement_types',
            # NOTE(review): related_name='vists' looks like a typo for
            # 'visits'; renaming it now would require a new migration.
            field=models.ManyToManyField(related_name='vists', to='visit.InvolvementType', blank=True),
        ),
    ]
|
koebbe/homeworks
|
visit/migrations/0056_visit_involvement_types.py
|
Python
|
mit
| 461
|
[
"VisIt"
] |
1085de930dcd83835c643654146764309739d93c0b739f9be097c5db454370e6
|
#!/usr/bin/env python
#
# Copyright (c) 2000 Autonomous Zone Industries
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.
#
import re
import string
import types
import binascii
import std
# Integer boolean aliases predating Python's True/False builtins.
true = 1
false = 0

import mojosixbit


class Error(StandardError):
    """Raised for any value that is not a valid mojosixbit encoding."""
    pass


# re for matching mojosixbit encoded sha1's
# (20 bytes always encode to exactly 27 sixbit characters)
_asciihash_re_str = '[-_0-9A-Za-z]{26}[AEIMQUYcgkosw048]'
_asciihash_re = re.compile('^'+_asciihash_re_str+'$')

# re for matching any valid canonical mojosixbit encoded string of bytes
_mojosixbit_re_str = '[-_0-9A-Za-z]*(|[-_0-9A-Za-z]{2}[AEIMQUYcgkosw048]|[-_0-9A-Za-z]{3}[AQgw])'
_mojosixbit_re = re.compile('^'+_mojosixbit_re_str+'$')

# internally used translation tables for filename safe sixbit strings
__to_mojosixbit = string.maketrans('+/', '-_')
__from_mojosixbit = string.maketrans('-_=+/', '+/!!!') # =+/ are converted to ! so that they are rejected by the underlying RFC style base64 decoder
def _str_to_mojosixbit(str) :
    """
    Convert a string into our sixbit representation -without- any trailing '=' sign or newline padding.

    You probably want `b2a()', which works on arguments of any length.
    """
    # Standard RFC base64 first...
    result = binascii.b2a_base64(str)
    # remove any newlines and '=' sign padding and translate
    # '+' -> '-' and '/' -> '_' for a filename safe representation
    result = string.translate(result, __to_mojosixbit, '\r\n=')
    return result
def _mojosixbit_to_str(sixbit) :
    """
    Convert a string back to binary from a sixbit representation.

    You probably want `a2b()'.
    """
    if hasattr(types, 'UnicodeType') and type(sixbit) is types.UnicodeType:
        # Coerce unicode to a byte string; the codec below needs str.
        sixbit = str(sixbit)
    # translate '-' -> '+' and '_' -> '/' for decoding
    sixbit = string.translate(sixbit, __from_mojosixbit)
    # add the appropriate '=' sign padding (len must be a multiple of 4);
    # a remainder of 1 cannot occur for valid input and will make the
    # base64 decoder raise binascii.Error.
    topad = len(sixbit) % 4
    if topad == 3 :
        padding = '='
    elif topad == 2 :
        padding = '=='
    elif topad == 1 :
        padding = '==='
    else :
        padding = ''
    return binascii.a2b_base64(sixbit + padding)
def b2a(data):
    """
    Convert a string into our sixbit representation _without_ any trailing '=' sign or newline
    padding.
    """
    # 57 input bytes encode to one full 76-char base64 line, so encoding
    # block-by-block keeps each _str_to_mojosixbit() call on one line.
    LEN_OF_BLOCK = 57
    # NOTE this is O((len(data)/57)^2) for large strings but we will never convert large strings to ascii so it doesn't matter
    i = 0
    asciiStr = ""
    while (i < len(data)):
        asciiStr = asciiStr + _str_to_mojosixbit(buffer(data[i:i+LEN_OF_BLOCK]))
        i = i + LEN_OF_BLOCK
    return asciiStr
# Determine which built-in string types exist; Python builds without
# unicode support only have StringType.
try:
    unicode
    _strtypes = (types.StringType, types.UnicodeType)
except:
    _strtypes = (types.StringType,)


def a2b(astr, _strtypes=_strtypes):
    """
    Decode a mojosixbit string back to the original bytes.

    @precondition `astr' is a string.: type(astr) in _strtypes: "astr: %s" % `astr`
    @throws mojosixbit.Error if `astr' is `None', is an empty string "", does not fit the Mojo
        sixbit format, or has trailing garbage, or if b2a(a2b(astr)) != astr
    """
    assert type(astr) in _strtypes, "`astr' is a string." + " -- " + "astr: %s" % `astr`
    if len(astr) == 0:
        raise Error, "empty string is not a valid ascii-encoded value"
    # Reject anything that is not in canonical mojosixbit form.
    if not _mojosixbit_re.match(astr):
        raise Error, ("string is not a valid ascii-encoded value", astr,)
    try:
        return _mojosixbit_to_str(astr)
    except binascii.Error, le:
        # Wrap low-level base64 errors in this module's Error type.
        raise Error, (astr, le,)
def b2a_long_string_idempotent(thing):
    """
    If `thing' is not a valid mojosixbit-encoding, then return `b2a(thing)' on it, else return `thing'.

    Beware that strings that are too short (say, shorter than 20 bytes), might accidentally look like
    a valid mojosixbit-encoding when they aren't.

    @precondition `thing' must be long enough that there is no significant chance of an accident.: len(thing) >= 20: "len(thing): %s, thing: %s" % (std.hr(len(thing)), std.hr(thing),)
    """
    assert len(thing) >= 20, "precondition: `thing' must be long enough that there is no significant chance of an accident." + " -- " + "len(thing): %s, thing: %s" % (std.hr(len(thing)), std.hr(thing),)
    # Already looks like an encoded sha1 -- leave it untouched.
    if _asciihash_re.match(thing):
        return thing
    else:
        return b2a(thing)
def a2b_long_string_idempotent(thing):
    """
    If `thing' is a valid mojosixbit-encoding, then return `a2b(thing)' on it, else return `thing'.

    Beware that strings that are too short (say, shorter than 20 bytes), might accidentally look like
    a valid mojosixbit-encoding when they aren't.

    @precondition `thing' must be long enough that there is no significant chance of an accident.: len(thing) >= 20: "len(thing): %s, thing: %s" % (std.hr(len(thing)), std.hr(thing),)
    """
    assert len(thing) >= 20, "precondition: `thing' must be long enough that there is no significant chance of an accident." + " -- " + "len(thing): %s, thing: %s" % (std.hr(len(thing)), std.hr(thing),)
    # Keep decoding until the value no longer parses as mojosixbit.
    while 1:
        try:
            thing = a2b(thing)
        except Error:
            break
    return thing
def test_mojosixbit():
    # Fixed vectors pin the decoder against standard base64 of "abcd..."
    assert _mojosixbit_to_str('abcd') == 'i\267\035'
    assert _mojosixbit_to_str('abcdef') == 'i\267\035y'
    assert _mojosixbit_to_str('abcdefg') == 'i\267\035y\370'
    assert _mojosixbit_to_str('abcdefgh') =='i\267\035y\370!'
    from struct import pack
    # Every 20-byte value must encode to a 27-char ascii hash.
    for x in range(0, 255) :
        sha_str = '\0'*19 + pack('B', x)
        sixbit_str = _str_to_mojosixbit(sha_str)
        assert len(sixbit_str) == 27
        assert _asciihash_re.match(sixbit_str)

def test_b2a():
    # Round-trip 900 random bytes through b2a/a2b.
    import random
    from array import array
    b = array('B')
    for i in range(900):
        b.append(random.randint(0, 255))
    astr = b2a(b)
    c = a2b(astr)
    assert b.tostring() == c

def test_a2b_rejectsNonEncoded():
    # '*' and '&' are outside the sixbit alphabet.
    try:
        a2b("*&")
    except Error:
        return
    assert false

def test_a2b_rejectsNonMojoSixBit():
    # Contains '/', which canonical mojosixbit replaces with '_'.
    try:
        a2b("hvfkN/q")
    except Error:
        return
    assert false

def test_a2b_rejectsTrailingGarbage():
    try:
        a2b("c3BhbQ@@@")
    except Error:
        return
    assert false

def test_a2b_rejectsTrailingEqualSigns():
    # Canonical encoding never includes base64 '=' padding.
    try:
        a2b("c3BhbQ==")
    except Error:
        return
    assert false

def test_a2b_rejectsTrailingNewlines():
    try:
        a2b("c3BhbQ\n")
    except Error:
        return
    assert false

def test_mojosixbit_re():
    # The canonical-form regex must accept every encoding b2a produces.
    for num in xrange(17):
        assert _mojosixbit_re.match(b2a(chr(num))), ('failed 2:', num, b2a(chr(num)))
        assert _mojosixbit_re.match(b2a(' '+chr(num))), ('failed 3:', num, b2a(' '+chr(num)))
        assert _mojosixbit_re.match(b2a('* '+chr(num))), ('failed 4:', num, b2a('* '+chr(num)))
        assert _mojosixbit_re.match(b2a(':-)'+chr(num))), ('failed 5:', num, b2a(':-)'+chr(num)))
        assert _mojosixbit_re.match(b2a('#8-}'+chr(num))), ('failed 6:', num, b2a('#8-}'+chr(num)))
        assert _mojosixbit_re.match(b2a(' ;-} '+chr(num))), ('failed 7:', num, b2a(' ;-} '+chr(num)))

# flag read by the project's test harness to enable these tests
mojo_test_flag = 1
#### generic stuff
def run():
    # Run this module's test_* functions via the project test harness.
    import RunTests
    RunTests.runTests(["mojosixbit"])

#### this runs if you import this module by itself
if __name__ == '__main__':
    run()
|
zooko/egtp
|
common/mojosixbit.py
|
Python
|
agpl-3.0
| 7,266
|
[
"VisIt"
] |
a2eb5483be85ebe53fb120799aff6541e4d14aad9e92e59d0c871e3b02377b92
|
# This file is part of the py-boinc-plotter,
# which provides parsing and plotting of boinc statistics and
# badge information.
# Copyright (C) 2013 obtitus@gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# END LICENCE
# Standard
import re
import datetime
import logging
logger = logging.getLogger('boinc.browser')
import xml.etree.ElementTree
import json
# Non-standard python
from bs4 import BeautifulSoup
# This project
import task
import plot.badge as badge
import statistics
import project
class HTMLParser(object):
    """Base parser for BOINC project result pages.

    Parses task tables from HTML into ``self.project`` using
    BeautifulSoup; project-specific layouts are handled by subclasses,
    selected through the :meth:`getParser` factory.
    """
    def __init__(self, browser, p=None):
        # Task class used to build task objects from table rows;
        # subclasses override this with their project-specific variant.
        self.Task = task.Task_web
        self.wantedLength = 10 # wanted length of task data
        self.browser = browser
        if p != None:
            self.project = p
        else:
            self.project = project.Project(url=self.browser.name)
        self.name = browser.name
        self.logger = logging.getLogger('boinc.browser.{}'.format(self.__class__.__name__))

    @staticmethod
    def getParser(section, **kwargs):
        """Factory for returning the correct subclass based on section name"""
        if section == 'worldcommunitygrid.org':
            logger.debug('getting worldcommunitygrid.org parser')
            parser = HTMLParser_worldcommunitygrid(**kwargs)
        elif section == 'www.rechenkraft.net/yoyo':
            logger.debug('getting yoyo parser')
            parser = HTMLParser_yoyo(**kwargs)
        elif section == 'wuprop.boinc-af.org':
            logger.debug('getting wuprop parser')
            parser = HTMLParser_wuprop(**kwargs)
        elif section == 'www.primegrid.com':
            logger.debug('getting primegrid parser')
            parser = HTMLParser_primegrid(**kwargs)
        elif section == 'numberfields.asu.edu/NumberFields':
            logger.debug('getting NumberFields parser')
            parser = HTMLParser_numberfields(**kwargs)
        # elif section == 'escatter11.fullerton.edu/nfs/':
        #     logger.debug('getting nfs parser')
        #     parser = HTMLParser_nfs(**kwargs)
        elif section == 'www.cpdn.org/cpdnboinc':
            logger.debug('getting climateprediction parser')
            parser = HTMLParser_climateprediction(**kwargs)
        elif section == 'einstein.phys.uwm.edu/':
            logger.debug('getting einstein parser')
            parser = HTMLParser_einstein(**kwargs)
        elif section == 'boinc.bakerlab.org/rosetta/':
            logger.debug('getting rosetta parser')
            parser = HTMLParser_rosetta(**kwargs)
        else: # Lets try the generic
            logger.debug('getting generic parser, name = %s', section)
            parser = HTMLParser(**kwargs)
        return parser

    def parse(self, content):
        """Fills up the self.project with applications and tasks
        Assumes the application name is the last column"""
        for row in self.getRows(content):
            try:
                t = self.Task.createFromHTML(row[:-1])
            except Exception as e:
                self.logger.exception('Unable to parse %s as task: "%s"', row, e)
                continue
            # Stop at the first task whose deadline passed more than 90
            # days ago (assumes rows are sorted newest-first -- see the
            # climateprediction override for a site where that fails).
            if (t.deadline - datetime.datetime.utcnow()) < -datetime.timedelta(days=90):
                logger.info('Stopping parsing at task "%s" due to old deadline' % t)
                return ;
            application = self.project.appendApplication(row[-1])
            application.tasks.append(t)

    def parseTable(self, soup):
        """Yield the cell texts of each <tr> that has exactly
        self.wantedLength cells and a non-empty first cell."""
        for tr in soup.find_all('tr'):
            ret = [td.text for td in tr.find_all('td')]
            if len(ret) != 0:
                self.logger.debug('in parseTable, got %s, len = %s, expected %s', ret, len(ret), self.wantedLength)
            if len(ret) == self.wantedLength and ret[0].strip() != '':
                yield ret

    def getRows(self, html):
        """Generator for each row in result table"""
        soup = BeautifulSoup(html, 'lxml')
        for row in self.parseTable(soup):
            self.logger.debug('yielding %s', row)
            yield row
        # Follow "offset=" pagination links to further result pages.
        for additionalPage in self.findNextPage(soup):
            html = self.browser.visit(additionalPage)
            if html != '':
                for row in self.getRows(html): # Recursion!
                    yield row

    def findNextPage(self, soup):
        """Finds links to additional pages of tasks"""
        reg_compiled = re.compile('offset=(\d+)')
        for offset in soup.find_all('a', href=reg_compiled):
            offset_str = re.search(reg_compiled, offset['href']).group(1)
            offset_int = int(offset_str)
            # offset 0 is the page we are already on.
            if offset_int != 0:
                yield offset_int

    def parseWorkunit(self, html):
        """Parses the workunit page, currently returns the application name"""
        soup = BeautifulSoup(html, 'lxml')
        for first_td in soup.find_all('td', class_='fieldname'):
            if first_td.text == 'application':
                app_name = first_td.find_next_sibling('td', class_='fieldvalue')
                return app_name.text

    def fieldvalue(self, soup, fieldname):
        """Return the sibling <td> of the 'fieldname' cell whose text
        matches fieldname, or None if no such row exists."""
        for row in soup.find_all('tr'):
            if row.td is None:
                continue
            first_td = row.td
            _class = first_td.get('class', [''])
            if _class == ['fieldname'] and first_td.text.strip() == fieldname:
                return first_td.find_next_sibling('td')
class HTMLParser_worldcommunitygrid(HTMLParser):
    """Parser for worldcommunitygrid.org.

    Task results arrive as paginated JSON; statistics and badges arrive
    as an XML document fetched via the browser.
    """
    def __init__(self, *args, **kwargs):
        super(HTMLParser_worldcommunitygrid, self).__init__(*args, **kwargs)
        self.Task = task.Task_web_worldcommunitygrid

    def parse(self, content):
        """Fill self.project with one task per JSON result row."""
        for result in self.getRows(content):
            t = self.Task.createFromJSON(result)
            app = self.project.appendApplicationShort(result['AppName'])
            app.tasks.append(t)

    def getRows(self, content, pagenr=1):
        """Yield each result dict, recursing through additional pages."""
        logger.debug('Called getRows, pagenr=%s', pagenr)
        try:
            data = json.loads(content)
        except ValueError:
            # Bug fix: previously a bare `except:` logged the failure and
            # then fell through with `data` undefined, producing a
            # confusing NameError on the next line. Log and re-raise the
            # original decoding error instead.
            logger.exception('JSON error for "%s"', content)
            raise
        data = data[u'ResultsStatus']
        try:
            for result in data[u'Results']:
                yield result
            logger.debug("ResultsAvailable > ResultsReturned + Offset = %s > %s + %s",
                         data['ResultsAvailable'], data['ResultsReturned'], data['Offset'])
            # Fetch the next page while the server reports more results.
            if int(data['ResultsAvailable']) > int(data['ResultsReturned']) + int(data['Offset']):
                content = self.browser.visit(pagenr+1)
                if content != '':
                    for res in self.getRows(content, pagenr=pagenr+1): # recursion
                        yield res
        except KeyError:
            # Best effort: an unexpected JSON shape ends pagination here.
            logger.exception('Parse exception, KeyError with keys %s', data.keys())

    def getBadges(self):
        """Fetch the statistics page and parse badges from it."""
        page = self.browser.visitStatistics()
        self.parseStatistics(page)

    def parseStatistics(self, page):
        """Gets the xml statistics for worldcommunitygrid"""
        tree = xml.etree.ElementTree.fromstring(page)
        e = tree.find('Error')
        if e:
            print(e.text)
            return None, None
        try:
            # NOTE(review): .next() is the Python 2 iterator protocol;
            # under Python 3 this would need next(...).
            member = tree.iter('MemberStat').next()
        except StopIteration:
            print('Something is wrong with xml statisics, correct username and code?')
            return None, None

        lastResult = member.find('LastResult').text
        lastResult = lastResult.replace('T', ' ')

        # Overall member statistics, in the order the constructor expects.
        stat = list()
        for s in ['RunTime', 'RunTimeRank', 'RunTimePerDay',
                  'Points', 'PointsRank', 'PointsPerDay',
                  'Results', 'ResultsRank', 'ResultsPerDay']:
            i = member.iter(s).next()
            stat.append(i.text)
        stat = statistics.ProjectStatistics_worldcommunitygrid(lastResult, *stat)
        self.project.appendStatistics(stat)

        # Per-application statistics.
        for application in tree.iter('Project'):
            short = application.find('ProjectShortName').text
            name = application.find('ProjectName').text
            runtime = application.find('RunTime').text
            points = application.find('Points').text
            results = application.find('Results').text
            app = self.project.appendApplication(name)
            app.name_short = short
            Stat = statistics.ApplicationStatistics_worldcommunitygrid
            app.appendStatistics(Stat(runtime, points, results))

        # Badges earned per application.
        for b in tree.iter('Badge'):
            name = b.find('ProjectName').text
            url = b.iter('Url').next().text
            t = b.iter('Description').next().text
            Badge = badge.Badge_worldcommunitygrid
            self.project.appendBadge(name, Badge(name=t, url=url))
class HTMLParser_yoyo(HTMLParser):
    """Parser for www.rechenkraft.net/yoyo task tables and badges."""
    def __init__(self, *args, **kwargs):
        super(HTMLParser_yoyo, self).__init__(*args, **kwargs)
        self.Task = task.Task_web_yoyo

    def parseTable(self, soup):
        # yoyo nests its task table: pull the task name from each link's
        # title attribute and the workunit url from its href.
        for table in soup.find_all('table'):
            for tr in table.find_all('tr'):
                row = list()
                for td in tr.find_all('td'):
                    if td.find('a') != None:
                        name = td.a.get('title')
                        if name is not None:
                            row.append(name.replace('Name: ', ''))
                        url = td.a.get('href', '')
                        if 'workunit' in url:
                            row.append('/'+url)
                    else:
                        row.append(td.text)
                if len(row) == 10:
                    # Swap the first two columns into the expected order.
                    row[0], row[1] = row[1], row[0]
                    yield row

    # def findNextPage(self, soup):
    #     # This is ugly, but we need to bypass the superclass
    #     return HTMLParser.findNextPage(self, soup)
    # def parseWorkunit(self, html):
    #     # This is ugly, but we need to bypass the superclass
    #     return HTMLParser.parseWorkunit(self, html)
    def getBadges(self):
        """Fills out project badges"""
        html = self.browser.visitPage('home.php')
        soup = BeautifulSoup(html, 'lxml')
        self.badgeTabel(soup)

    def badgeTabel(self, soup):
        """ Extracts projects table from www.rechenkraft.net/yoyo/home.php"""
        for t in soup.find_all('table'):
            badgeTable = t.table # The table within a table
            if badgeTable != None:
                for row in badgeTable.find_all('tr'):
                    data = row.find_all('td')
                    if len(data) == 4:
                        name = data[0].text
                        totalCredits = data[1].text.replace(',', '') # thousand seperator
                        workunits = data[2].text
                        if re.match('\d+ \w\w\w \d\d\d\d', data[3].text):
                            # Hack to avoid the "Projects in which you are participating" table.
                            continue
                        app = self.project.appendApplication(name)
                        app.appendStatistics(statistics.ApplicationStatistics(totalCredits,
                                                                              workunits))
                        # Fourth column holds the badge image, if earned.
                        if data[3].a:
                            b = data[3].a.img['alt']
                            url = data[3].a.img['src']
                            self.project.appendBadge(name, badge.Badge_yoyo(b, url))
class HTMLParser_climateprediction(HTMLParser):
    """Parser for climateprediction.net.

    (The original note here said "Same as web but length is 11", though
    wantedLength is set to 10 below -- left as found.)
    """
    def __init__(self, *args, **kwargs):
        super(HTMLParser_climateprediction, self).__init__(*args, **kwargs)
        self.Task = task.Task_web_climateprediction
        self.wantedLength = 10
        self.name = 'climateprediction.net'
        self.project.setName(self.name)
        self.project.setUrl('http://www.climateprediction.net')
        # host details filled in by parseHostDetail, keyed by field name
        self.host = dict()

    def parseHostDetail(self, html):
        """Parse the host-detail page into self.host (fieldname -> value)."""
        soup = BeautifulSoup(html, 'lxml')
        table = soup.find_all('table')[-1]
        for row in table.find_all('tr'):
            items = list()
            for item in row.find_all('td'):
                items.append(item.text)
            if len(items) == 2:
                #print('fieldname', items[0])
                #print('fieldvalue', items[1])
                self.host[items[0]] = items[1]
        return self.host

    def parseWorkunit(self, html, task):
        """Parse a workunit page: sets task.created and task.tasks."""
        soup = BeautifulSoup(html, 'lxml')
        """First the task creation date:"""
        for first_td in soup.find_all('td'):
            if first_td.text == 'created':
                created = first_td.find_next_sibling('td')
                # try:
                date = created.text.replace(', ', ' ') # 9 Dec 2016, 18:58:24 UTC
                task.created = datetime.datetime.strptime(date, '%d %b %Y %H:%M:%S UTC')
                # except ValueError:
                #
                #     task.created = datetime.datetime.strptime(created.text, '%d %b %Y %H:%M:%S UTC')
                break
        else:
            raise Exception('Parsing exception, could not determine create date for %s' % task)
        """Table:"""
        task.tasks = list()
        tables = soup.find_all('table', width='100%')
        if len(tables) == 0:
            logger.error('no table found %s, %s', tables, soup.prettify())
            return
        table = tables[-2]
        for tr in table.find_all('tr'):
            row = list()
            for td in tr.find_all('td'):
                row.append(td.text)
            logger.debug('row = %s, %s', len(row), row)
            if len(row) == 9:#10:
                # Rows on this page omit the workunit id column;
                # re-insert it so createFromHTML sees a full row.
                row.insert(1, task.workUnitId)
            if len(row) == 10:#11:
                t = self.Task.createFromHTML(row[:-1])
                t.created = task.created
                task.tasks.append(t)
        # fixme: hack, climatepredication seems to have a sorting bug, so we cant just return
        # when we meet an old task

    def parse(self, content):
        """Fills up the self.project with applications and tasks
        Assumes the application name is the last column"""
        #content = content.decode()
        for row in self.getRows(content):
            try:
                t = self.Task.createFromHTML(row[:-1])
            except Exception as e:
                self.logger.exception('Unable to parse %s as task: "%s"', row, e)
                continue
            # Unlike the superclass, do not stop at the first old task
            # (rows here may be unsorted); just skip it.
            if (t.deadline - datetime.datetime.utcnow()) < -datetime.timedelta(days=90):
                logger.debug('skipping task "%s" due to old deadline' % t)
                #return ;
                continue
            application = self.project.appendApplication(row[-1])
            if len(application.tasks) > 1000:
                logger.info('Max nr. of tasks reached, breaking')
                return ;
            application.tasks.append(t)
class HTMLParser_einstein(HTMLParser):
    """Variant of the generic web parser whose task rows are 11 columns long."""
    def __init__(self, *args, **kwargs):
        super(HTMLParser_einstein, self).__init__(*args, **kwargs)
        self.wantedLength = 11
        # NOTE(review): reuses the climateprediction task class -- confirm intended.
        self.Task = task.Task_web_climateprediction
class HTMLParser_primegrid(HTMLParser):
    """Parser for www.primegrid.com: badges and per-application statistics."""
    def getBadges(self):
        """Fills out project badges"""
        html = self.browser.visitPage('home.php')
        soup = BeautifulSoup(html, 'lxml')
        # Loop variable renamed from 'badge' so it no longer shadows the
        # module-level 'badge' module.
        for app_name, b in self.parseHome(soup):
            self.project.appendBadge(app_name, b)
        self.parseStatistics(soup)
    def parseHome(self, soup):
        """yields app name and Badge object"""
        for fieldvalue in self.fieldvalue(soup, 'Badges'):
            yield self.parseBadge(fieldvalue)
    def parseBadge(self, soup):
        """
        Build a (app_name, Badge_primegrid) pair from a badge anchor.

        Expects something like this:
        <a href="/show_badges.php?userid=222267">
        <img alt="PPS Sieve Bronze: More than 20,000 credits (30,339)" class="badge"
        src="/img/badges/sr2sieve_pps_bronze.png" title="PPS Sieve Bronze: More than 20,000 credits (30,339)"/>
        </a>
        """
        url = str(self.browser.name)
        url += soup.img.get('src')
        name = soup.img.get('title')
        b = badge.Badge_primegrid(name=name,
                                  url=url)
        self.logger.debug('Badge %s', b)
        return b.app_name, b
    def parseStatistics(self, soup):
        """Tries to parse the application table at home.php"""
        table = soup.find_all('table')[-2]  # Hack: stats table is second-to-last
        stat = None
        for td in table.find_all('td'):
            class_ = td.get('class')
            if class_ == ['heading']:
                if stat is not None:
                    self.project.appendStatistics(stat)  # Append previous
                stat = statistics.ProjectStatistics_primegrid()
                stat['name'] = td.text
            elif class_ == ['fieldname']:
                fieldname = td.text
            elif class_ == ['fieldvalue']:
                fieldvalue = td.text
                # assumes a 'heading' and 'fieldname' cell always precede a
                # 'fieldvalue' cell -- TODO confirm against the live page
                stat[fieldname] = fieldvalue
        if stat is not None:
            self.project.appendStatistics(stat)  # Append last
class HTMLParser_wuprop(HTMLParser):
    """Parser for wuprop.boinc-af.org: badges and the per-project runtime table."""
    def getBadges(self):
        """Fetch the home page and register any badge found on it."""
        page = self.browser.visitHome()
        soup = BeautifulSoup(page, 'lxml')
        for b in self.fieldvalue(soup, 'Badge'):
            # Local renamed from 'badge' so it no longer shadows the
            # module-level 'badge' module.
            parsed = self.parseBadge(b)
            self.project.appendBadge(badge=parsed)
    def parseBadge(self, soup):
        """
        Build a Badge_wuprop from an <img> tag.

        Expects something like this:
        <img src="img/badge/100_0_0_0_0.png"/>
        """
        url = str(self.browser.name).replace('www.', '') + '/' # http://wuprop.boinc-af.org/
        name = soup.get('src')
        url += name
        b = badge.Badge_wuprop(name=name,
                               url=url)
        self.logger.debug('Badge %s', b)
        return b
    def projectTable(self, html):
        """ Extracts projects table from wuprop.boinc-af.org/home.php

        :returns: dict keyed by user-friendly project name; each value is a
            Project populated with per-application runtime/pending statistics.
        """
        projects = dict() # Key is user_friendly_name!
        soup = BeautifulSoup(html, 'lxml')
        t = soup.find_all('table')
        for row in t[-1].find_all('tr'):
            data = row.find_all('td')
            if len(data) == 6:
                # columns: index, project, application, runtime, ..., pending
                # (the unused index column is not extracted)
                proj_name = data[1].text
                app_name = data[2].text
                runningTime = data[3].text
                pending = data[-1].text
                stat = statistics.ApplicationStatistics_wuprop(runtime=runningTime,
                                                               pending=pending)
                if proj_name not in projects:
                    projects[proj_name] = project.Project(name=proj_name)
                logger.debug('app_name %s', app_name)
                app = projects[proj_name].appendApplication(app_name, is_long=True)
                app.appendStatistics(stat)
        return projects
class HTMLParser_numberfields(HTMLParser):
    """Parser for numberfields-style home pages: badges and total credit."""
    Badge = badge.Badge_numberfields  # badge class; subclasses may override
    def getBadges(self):
        """Fetch the home page and parse badges and total credit from it."""
        page = self.browser.visitHome()
        self.parseHome(page)
    def parseHome(self, html):
        """Scan 'fieldname' cells for the 'Badges' and 'Total credit' rows."""
        soup = BeautifulSoup(html, 'lxml')
        for first_td in soup.find_all('td', class_='fieldname'):
            fieldname = first_td.text.strip()
            if fieldname == 'Badges':
                # Use a distinct name for the value cell instead of reusing
                # one variable for both the <td> and the Badge object.
                value_td = first_td.find_next_sibling('td', class_='fieldvalue')
                img = value_td.find('img')
                if img is None:
                    continue
                try:
                    url = img['src']
                    name = img['title']
                    b = self.Badge(name=name, url=url)
                    self.project.appendBadge(badge=b)
                except KeyError:
                    # Badge image lacking src/title attributes; skip it.
                    continue
            elif fieldname == 'Total credit':
                v = first_td.find_next_sibling('td', class_='fieldvalue').text
                v = v.replace(',', '')  # strip thousands separators
                self.project.credit = float(v)
class HTMLParser_nfs(HTMLParser_numberfields):
    # Identical to the numberfields parser, but produces nfs-specific badges.
    Badge = badge.Badge_nfs
class HTMLParser_rosetta(HTMLParser):
    """Generic web parser with a different task class; as a hack, appends a
    constant application name ('Rosetta') as the last column of every row."""
    def __init__(self, *args, **kwargs):
        super(HTMLParser_rosetta, self).__init__(*args, **kwargs)
        self.Task = task.Task_web_rosetta
    def parseTable(self, soup):
        for parsed_row in super(HTMLParser_rosetta, self).parseTable(soup):
            parsed_row.append('Rosetta')
            yield parsed_row
|
obtitus/py-boinc-plotter
|
pyBoincPlotter/parse.py
|
Python
|
gpl-3.0
| 21,387
|
[
"VisIt"
] |
5eff716230bdce5536509a36c54c1a226145b466b1b3fec22fb65cd0aa2d00f8
|
from coalib.core.CircularDependencyError import CircularDependencyError
def traverse_graph(start_nodes, get_successive_nodes,
                   run_on_edge=lambda prev, nxt: None):
    """
    Traverses all edges of a directed, possibly disconnected graph once.
    Detects cyclic graphs by raising a ``CircularDependencyError``.
    >>> graph = {1: [2], 2: [3, 4], 5: [3], 3: [6]}
    >>> def get_successive_nodes(node):
    ...     return graph.get(node, [])
    >>> edges = set()
    >>> def append_to_edges(prev, nxt):
    ...     edges.add((prev, nxt))
    >>> traverse_graph([1, 5], get_successive_nodes, append_to_edges)
    >>> sorted(edges)
    [(1, 2), (2, 3), (2, 4), (3, 6), (5, 3)]
    You can also use this function to detect cyclic graphs:
    >>> graph = {1: [2], 2: [3], 3: [1]}
    >>> traverse_graph([1], get_successive_nodes)
    Traceback (most recent call last):
    ...
    coalib.core.CircularDependencyError.CircularDependencyError: ...
    :param start_nodes:
        The nodes where to start traversing the graph.
    :param get_successive_nodes:
        A callable that takes in a node and returns an iterable of nodes to
        traverse next.
    :param run_on_edge:
        A callable that is run on each edge during traversing. Takes in two
        parameters, the previous- and next-node which form an edge. The default
        is an empty function.
    :raises CircularDependencyError:
        Raised when the graph is cyclic.
    """
    on_current_path = set()  # nodes on the active DFS path -> cycle detection
    expanded = set()         # nodes whose outgoing edges were already walked

    def walk(node):
        # A node's outgoing edges are walked at most once, even when the
        # node is reachable from several start nodes.
        if node in expanded:
            return
        expanded.add(node)
        on_current_path.add(node)
        for successor in get_successive_nodes(node):
            run_on_edge(node, successor)
            if successor in on_current_path:
                raise CircularDependencyError(successor)
            walk(successor)
        on_current_path.discard(node)

    for start_node in start_nodes:
        walk(start_node)
|
Shade5/coala
|
coalib/core/Graphs.py
|
Python
|
agpl-3.0
| 1,951
|
[
"VisIt"
] |
bddcf7efba2a9df4137e9a8b844bf23419abef3864b6e1efdf565809583be584
|
"""
Enterprise Offer Discount tests
"""
from regression.pages.common.utils import (
extract_discount_value_from_response,
extract_numerical_value_from_price_string
)
from regression.pages.enterprise.enterprise_const import (
DEFAULT_COURSE_PRICE,
ENT_CUSTOMER_CATALOG_UUID,
ENTERPRISE_NAME
)
from regression.pages.whitelabel import ECOM_URL
from regression.pages.whitelabel.basket_page import SingleSeatBasketPage
from regression.tests.enterprise.ent_test_base import EnterpriseTestBase
class TestDiscountEnterpriseOffer(EnterpriseTestBase):
    """
    Tests for Percentage Discount Enterprise Offers
    """
    # Prefix of the discount provider message shown on the enrollment page.
    DISCOUNT_MSG = "Discount provided by "
    def setUp(self):
        super().setUp()
        self.course_price = DEFAULT_COURSE_PRICE
        self.target_url = ECOM_URL + '/enterprise/offers'
    def test_enterprise_percentage_offer(self):
        """
        Scenario: To verify that user sees the correct discount
        percentage info and detail on enterprise landing page,
        basket page and on receipt page.
        """
        # Login user to LMS using staff credentials
        self.login_user_lms_using_api()
        # Get all enterprise offers data using api request
        offers_response = self.login_api.get_offer_request(self.target_url)
        # Get discount value from response against catalog UUID
        discount_value = extract_discount_value_from_response(
            ENT_CUSTOMER_CATALOG_UUID, offers_response
        )
        # Percentage discount: price reduced by discount_value percent.
        discounted_course_price = self.course_price - \
            (self.course_price * discount_value) / 100
        self.logout_from_lms_using_api()
        self.ecommerce_courses_page.visit()
        self.register_and_go_to_course_enrollment_page()
        # Call the fixture to unlink existing account for the user
        self.addCleanup(self.unlink_account)
        # Get course original price and course discounted price
        # NOTE(review): assumes the split() text puts the original price at
        # index 1 and the discounted price at index 3 -- confirm page layout.
        price_details = \
            self.ent_course_enrollment.get_course_price_details().split()
        # extract_numerical_value_from_price_string(price_details)
        self.assertEqual(
            self.course_price,
            extract_numerical_value_from_price_string(price_details[1])
        )
        self.assertEqual(
            discounted_course_price,
            extract_numerical_value_from_price_string(price_details[3])
        )
        self.assertIn(
            self.DISCOUNT_MSG + ENTERPRISE_NAME,
            self.ent_course_enrollment.get_course_price_details()
        )
        self.ent_course_enrollment.go_to_data_consent_page()
        self.ent_data_sharing_consent.wait_for_page()
        # Verify that accepting data consent takes user to basket page
        self.ent_data_sharing_consent.accept_data_sharing_consent()
        SingleSeatBasketPage(self.browser).wait_for_page()
        self.verify_info_is_populated_on_basket(
            discounted_course_price
        )
        self.verify_receipt_info_for_discounted_course()
|
edx/edx-e2e-tests
|
regression/tests/enterprise/test_percentage_discount_ent_offer.py
|
Python
|
agpl-3.0
| 2,983
|
[
"VisIt"
] |
cf967d2b4e074ef4ae502a338c8e075abd2058a3efcda59843427c6dfa296069
|
# -*- coding: utf-8 -*-
"""
Test for matlab problems
"""
import time
from ...pages.lms.matlab_problem import MatlabProblemPage
from ...fixtures.course import XBlockFixtureDesc
from ...fixtures.xqueue import XQueueResponseFixture
from .test_lms_problems import ProblemsTest
from textwrap import dedent
class MatlabProblemTest(ProblemsTest):
    """
    Tests that verify matlab problem "Run Code".
    """
    def get_problem(self):
        """
        Create a matlab problem for the test.
        """
        problem_data = dedent("""
            <problem markdown="null">
            <text>
            <p>
            Write MATLAB code to create the following row vector and store it in a variable named <code>V</code>.
            </p>
            <table id="a0000000466" class="equation" width="100%" cellspacing="0" cellpadding="7" style="table-layout:auto">
            <tr>
            <td class="equation">[1 1 2 3 5 8 13]</td>
            </tr>
            </table>
            <p>
            <coderesponse queuename="matlab">
            <matlabinput rows="10" cols="40" mode="" tabsize="4">
            <plot_payload>
            </plot_payload>
            </matlabinput>
            <codeparam>
            <initial_display/>
            <answer_display>
            </answer_display>
            <grader_payload>
            </grader_payload>
            </codeparam>
            </coderesponse>
            </p>
            </text>
            </problem>
        """)
        return XBlockFixtureDesc('problem', 'Test Matlab Problem', data=problem_data)
    def _goto_matlab_problem_page(self):
        """
        Open matlab problem page with assertion.
        """
        self.courseware_page.visit()
        matlab_problem_page = MatlabProblemPage(self.browser)
        # Presumably the page renders the problem name upper-cased -- the
        # fixture above uses 'Test Matlab Problem'.
        self.assertEqual(matlab_problem_page.problem_name, 'TEST MATLAB PROBLEM')
        return matlab_problem_page
    def test_run_code(self):
        """
        Test "Run Code" button functionality.
        """
        # Enter a submission, which will trigger a pre-defined response from the XQueue stub.
        self.submission = "a=1" + self.unique_id[0:5]
        self.xqueue_grade_response = {'msg': self.submission}
        matlab_problem_page = self._goto_matlab_problem_page()
        # Configure the XQueue stub's response for the text we will submit
        if self.xqueue_grade_response is not None:
            XQueueResponseFixture(self.submission, self.xqueue_grade_response).install()
        matlab_problem_page.set_response(self.submission)
        matlab_problem_page.click_run_code()
        self.assertEqual(
            u'Submitted. As soon as a response is returned, this message will be replaced by that feedback.',
            matlab_problem_page.get_grader_msg(".external-grader-message")[0]
        )
        # Wait 5 seconds for xqueue stub server grader response sent back to lms.
        time.sleep(5)
        self.assertEqual(u'', matlab_problem_page.get_grader_msg(".external-grader-message")[0])
        self.assertEqual(
            self.xqueue_grade_response.get("msg"),
            matlab_problem_page.get_grader_msg(".ungraded-matlab-result")[0]
        )
|
MakeHer/edx-platform
|
common/test/acceptance/tests/lms/test_lms_matlab_problem.py
|
Python
|
agpl-3.0
| 3,475
|
[
"VisIt"
] |
1088ae66d14f027758125b0907384bcd552420c2d6a1cd9f892aea1dd6945564
|
""" This is a test of the DIRACCAProxyProvider
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=invalid-name,wrong-import-position,protected-access
import os
import re
import sys
import shutil
# TODO: This should be modernised to use subprocess(32)
try:
import commands
except ImportError:
# Python 3's subprocess module contains a compatibility layer
import subprocess as commands
import unittest
import tempfile
import pytest
import DIRAC
from DIRAC import gLogger
from DIRAC.Core.Security.X509Chain import X509Chain # pylint: disable=import-error
from DIRAC.Resources.ProxyProvider.DIRACCAProxyProvider import DIRACCAProxyProvider
# Location of the pre-made test certificates shipped with DIRAC.
certsPath = os.path.join(os.path.dirname(DIRAC.__file__), 'Core/Security/test/certs')
# Fresh temporary CA working directory (populated in setUpClass).
testCAPath = os.path.join(tempfile.mkdtemp(dir='/tmp'), 'ca')
testCAConfigFile = os.path.join(testCAPath, 'openssl_config_ca.cnf')
# Proxy provider configured directly from a DIRAC configuration dictionary.
diracCADict = {'ProviderType': 'DIRACCA',
               'CertFile': os.path.join(certsPath, 'ca/ca.cert.pem'),
               'KeyFile': os.path.join(certsPath, 'ca/ca.key.pem'),
               'Supplied': ['O', 'OU', 'CN'],
               'Optional': ['emailAddress'],
               'DNOrder': ['O', 'OU', 'CN', 'emailAddress'],
               'OU': 'CA',
               'C': 'DN',
               'O': 'DIRACCA',
               'ProviderName': 'DIRAC_CA'}
# Proxy provider configured from an openssl-style CA configuration file.
diracCAConf = {'ProviderType': 'DIRACCA',
               'CAConfigFile': testCAConfigFile,
               'ProviderName': 'DIRAC_CA_CFG'}
class DIRACCAProviderTestCase(unittest.TestCase):
    """Base test case: copies the test CA into a temporary directory and
    rewrites its openssl configuration to point there."""
    @classmethod
    def setUpClass(cls):
        cls.failed = False
        shutil.copytree(os.path.join(certsPath, 'ca'), testCAPath)
        # Parse the CA configuration, fixing up the 'dir' setting.
        lines = []
        with open(testCAConfigFile, "r") as caCFG:
            for line in caCFG:
                if re.findall('=', re.sub(r'#.*', '', line)):
                    # Ignore comments
                    field = re.sub(r'#.*', '', line).replace(' ', '').rstrip().split('=')[0]
                    # Put the right dir
                    line = 'dir = %s #PUT THE RIGHT DIR HERE!\n' % (testCAPath) if field == 'dir' else line
                lines.append(line)
        # Write modified conf. file
        with open(testCAConfigFile, "w") as caCFG:
            caCFG.writelines(lines)
        # Sanity-check the copied CA directory; abort the run if it failed.
        status, output = commands.getstatusoutput('ls -al %s' % testCAPath)
        if status:
            gLogger.error(output)
            # sys.exit is explicit; the bare exit() builtin relies on the
            # optional 'site' module.
            sys.exit(1)
        gLogger.debug('Test path:\n', output)
    def setUp(self):
        gLogger.debug('\n')
        if self.failed:
            self.fail(self.failed)
    def tearDown(self):
        pass
    @classmethod
    def tearDownClass(cls):
        # Remove the temporary CA directory created in setUpClass.
        if os.path.exists(testCAPath):
            shutil.rmtree(testCAPath)
class testDIRACCAProvider(DIRACCAProviderTestCase):
    """Functional tests for DIRACCAProxyProvider.generateDN/getProxy."""
    @pytest.mark.slow
    def test_getProxy(self):
        """ Test 'getProxy' - try to get proxies for different users and check it
        """
        def check(proxyStr, proxyProvider, name):
            """ Check proxy
                :param str proxyStr: proxy as string
                :param str proxyProvider: proxy provider name
                :param str name: proxy name
            """
            proxyFile = os.path.join(testCAPath, proxyProvider + name.replace(' ', '') + '.pem')
            gLogger.info('Check proxy..')
            chain = X509Chain()
            result = chain.loadProxyFromString(proxyStr)
            self.assertTrue(result['OK'], '\n' + result.get('Message', 'Error message is absent.'))
            # Exercise the X509Chain API against the freshly issued proxy;
            # every call is expected to return an OK result structure.
            for result in [chain.getRemainingSecs(),
                           chain.getIssuerCert(),
                           chain.getPKeyObj(),
                           chain.getCertList(),
                           chain.getNumCertsInChain(),
                           chain.generateProxyToString(3600),
                           chain.generateProxyToFile(proxyFile, 3600),
                           chain.isProxy(),
                           chain.isLimitedProxy(),
                           chain.isValidProxy(),
                           chain.isVOMS(),
                           chain.isRFC()]:
                self.assertTrue(result['OK'], '\n' + result.get('Message', 'Error message is absent.'))
        # Run the same scenarios for both provider configuration styles.
        for proxyProvider, log in [(diracCADict, 'configuring only in DIRAC CFG'),
                                   (diracCAConf, 'read configuration file')]:
            gLogger.info('\n* Try proxy provider that %s..' % log)
            ca = DIRACCAProxyProvider()
            result = ca.setParameters(proxyProvider)
            self.assertTrue(result['OK'], '\n' + result.get('Message', 'Error message is absent.'))
            gLogger.info('* Get proxy using FullName and Email of user..')
            # (full name, email, expected success) triples
            for name, email, res in [('MrUser', 'good@mail.com', True),
                                     ('MrUser_1', 'good_1@mail.com', True),
                                     (False, 'good@mail.com', False),
                                     ('MrUser', False, True)]:
                # NOTE(review): '%' binds tighter than 'or', so the 'absent'
                # fallback here can never be selected -- confirm intent.
                gLogger.info('\nFullName: %s' % name or 'absent', 'Email: %s..' % email or 'absent')
                # Create user DN
                result = ca.generateDN(FullName=name, Email=email)
                text = 'Must be ended %s%s' % ('successful' if res else 'with error',
                                               ': %s' % result.get('Message', 'Error message is absent.'))
                self.assertEqual(result['OK'], res, text)
                if not res:
                    gLogger.info('Msg: %s' % (result['Message']))
                else:
                    userDN = result['Value']
                    gLogger.info('Created DN:', userDN)
                    result = ca.getProxy(userDN)
                    text = 'Must be ended %s%s' % ('successful' if res else 'with error',
                                                   ': %s' % result.get('Message', 'Error message is absent.'))
                    self.assertEqual(result['OK'], res, text)
                    if not res:
                        gLogger.info('Msg: %s' % (result['Message']))
                    else:
                        check(result['Value'], proxyProvider['ProviderName'], name)
            gLogger.info('\n* Get proxy using user DN..')
            # (DN, short name, expected success) triples
            for dn, name, res in [('/O=DIRAC/OU=DIRAC CA/CN=user_3/emailAddress=some@mail.org', 'user_3', True),
                                  ('/O=Dirac/OU=DIRAC CA/CN=user/emailAddress=some@mail.org', 'user', True),
                                  ('/O=Dirac/OU=Without supplied field/emailAddress=some@mail.org', 'not_suplied', False),
                                  ('/O=Dirac/OU=DIRAC CA/CN=without email', 'no_email', True),
                                  ('/some=bad/DN=', 'badDN', False),
                                  ('/BF=Bad Field/O=IN/CN=DN', 'badField', False),
                                  (False, 'absent', False)]:
                gLogger.info('\nDN:', dn or 'absent')
                try:
                    result = ca.getProxy(dn)
                except Exception as e:
                    # NOTE(review): 'result' here is left over from a previous
                    # call; the exception message is written into it -- verify.
                    result['Message'] = str(e)
                    self.assertFalse(res, e)
                text = 'Must be ended %s%s' % ('successful' if res else 'with error',
                                               ': %s' % result.get('Message', 'Error message is absent.'))
                self.assertEqual(result['OK'], res, text)
                if not res:
                    gLogger.info('Msg: %s' % (result['Message']))
                else:
                    check(result['Value'], proxyProvider['ProviderName'], name)
if __name__ == '__main__':
    # Build one suite from both test cases and exit non-zero on failure.
    loader = unittest.defaultTestLoader
    fullSuite = loader.loadTestsFromTestCase(DIRACCAProviderTestCase)
    fullSuite.addTest(loader.loadTestsFromTestCase(testDIRACCAProvider))
    outcome = unittest.TextTestRunner(verbosity=2).run(fullSuite)
    sys.exit(not outcome.wasSuccessful())
|
yujikato/DIRAC
|
src/DIRAC/Resources/ProxyProvider/test/Test_DIRACCAProxyProvider.py
|
Python
|
gpl-3.0
| 7,303
|
[
"DIRAC"
] |
6a7b8d85ea8b8d347f166e32ae9fdc2724cd7066ea107126fbe292dd99b2e6b4
|
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AddClientToClassVisit(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'cross_regional_booking_performed': 'bool',
'site_id': 'int',
'waitlist_entry_id': 'int',
'appointment_id': 'int',
'appointment_gender_preference': 'str',
'appointment_status': 'str',
'class_id': 'int',
'client_id': 'str',
'start_date_time': 'datetime',
'end_date_time': 'datetime',
'id': 'int',
'last_modified_date_time': 'datetime',
'late_cancelled': 'bool',
'location_id': 'int',
'make_up': 'bool',
'name': 'str',
'service_id': 'int',
'signed_in': 'bool',
'staff_id': 'int',
'web_signup': 'bool',
'action': 'str'
}
attribute_map = {
'cross_regional_booking_performed': 'CrossRegionalBookingPerformed',
'site_id': 'SiteId',
'waitlist_entry_id': 'WaitlistEntryId',
'appointment_id': 'AppointmentId',
'appointment_gender_preference': 'AppointmentGenderPreference',
'appointment_status': 'AppointmentStatus',
'class_id': 'ClassId',
'client_id': 'ClientId',
'start_date_time': 'StartDateTime',
'end_date_time': 'EndDateTime',
'id': 'Id',
'last_modified_date_time': 'LastModifiedDateTime',
'late_cancelled': 'LateCancelled',
'location_id': 'LocationId',
'make_up': 'MakeUp',
'name': 'Name',
'service_id': 'ServiceId',
'signed_in': 'SignedIn',
'staff_id': 'StaffId',
'web_signup': 'WebSignup',
'action': 'Action'
}
def __init__(self, cross_regional_booking_performed=None, site_id=None, waitlist_entry_id=None, appointment_id=None, appointment_gender_preference=None, appointment_status=None, class_id=None, client_id=None, start_date_time=None, end_date_time=None, id=None, last_modified_date_time=None, late_cancelled=None, location_id=None, make_up=None, name=None, service_id=None, signed_in=None, staff_id=None, web_signup=None, action=None): # noqa: E501
"""AddClientToClassVisit - a model defined in Swagger""" # noqa: E501
self._cross_regional_booking_performed = None
self._site_id = None
self._waitlist_entry_id = None
self._appointment_id = None
self._appointment_gender_preference = None
self._appointment_status = None
self._class_id = None
self._client_id = None
self._start_date_time = None
self._end_date_time = None
self._id = None
self._last_modified_date_time = None
self._late_cancelled = None
self._location_id = None
self._make_up = None
self._name = None
self._service_id = None
self._signed_in = None
self._staff_id = None
self._web_signup = None
self._action = None
self.discriminator = None
if cross_regional_booking_performed is not None:
self.cross_regional_booking_performed = cross_regional_booking_performed
if site_id is not None:
self.site_id = site_id
if waitlist_entry_id is not None:
self.waitlist_entry_id = waitlist_entry_id
if appointment_id is not None:
self.appointment_id = appointment_id
if appointment_gender_preference is not None:
self.appointment_gender_preference = appointment_gender_preference
if appointment_status is not None:
self.appointment_status = appointment_status
if class_id is not None:
self.class_id = class_id
if client_id is not None:
self.client_id = client_id
if start_date_time is not None:
self.start_date_time = start_date_time
if end_date_time is not None:
self.end_date_time = end_date_time
if id is not None:
self.id = id
if last_modified_date_time is not None:
self.last_modified_date_time = last_modified_date_time
if late_cancelled is not None:
self.late_cancelled = late_cancelled
if location_id is not None:
self.location_id = location_id
if make_up is not None:
self.make_up = make_up
if name is not None:
self.name = name
if service_id is not None:
self.service_id = service_id
if signed_in is not None:
self.signed_in = signed_in
if staff_id is not None:
self.staff_id = staff_id
if web_signup is not None:
self.web_signup = web_signup
if action is not None:
self.action = action
@property
def cross_regional_booking_performed(self):
"""Gets the cross_regional_booking_performed of this AddClientToClassVisit. # noqa: E501
When `true`, indicates that the client is paying for the visit using a pricing option from one of their associated cross-regional profiles. # noqa: E501
:return: The cross_regional_booking_performed of this AddClientToClassVisit. # noqa: E501
:rtype: bool
"""
return self._cross_regional_booking_performed
@cross_regional_booking_performed.setter
def cross_regional_booking_performed(self, cross_regional_booking_performed):
"""Sets the cross_regional_booking_performed of this AddClientToClassVisit.
When `true`, indicates that the client is paying for the visit using a pricing option from one of their associated cross-regional profiles. # noqa: E501
:param cross_regional_booking_performed: The cross_regional_booking_performed of this AddClientToClassVisit. # noqa: E501
:type: bool
"""
self._cross_regional_booking_performed = cross_regional_booking_performed
@property
def site_id(self):
"""Gets the site_id of this AddClientToClassVisit. # noqa: E501
The ID of the business from which cross-regional payment is applied. # noqa: E501
:return: The site_id of this AddClientToClassVisit. # noqa: E501
:rtype: int
"""
return self._site_id
@site_id.setter
def site_id(self, site_id):
"""Sets the site_id of this AddClientToClassVisit.
The ID of the business from which cross-regional payment is applied. # noqa: E501
:param site_id: The site_id of this AddClientToClassVisit. # noqa: E501
:type: int
"""
self._site_id = site_id
@property
def waitlist_entry_id(self):
"""Gets the waitlist_entry_id of this AddClientToClassVisit. # noqa: E501
When this value is not null, it indicates that the client is on the waiting list for the requested class. The only additional fields that are populated when this is not null are: * ClassId * ClientId You can call GET WaitlistEntries using `WaitlistEntryId` to obtain more data about this waiting list entry. # noqa: E501
:return: The waitlist_entry_id of this AddClientToClassVisit. # noqa: E501
:rtype: int
"""
return self._waitlist_entry_id
@waitlist_entry_id.setter
def waitlist_entry_id(self, waitlist_entry_id):
"""Sets the waitlist_entry_id of this AddClientToClassVisit.
When this value is not null, it indicates that the client is on the waiting list for the requested class. The only additional fields that are populated when this is not null are: * ClassId * ClientId You can call GET WaitlistEntries using `WaitlistEntryId` to obtain more data about this waiting list entry. # noqa: E501
:param waitlist_entry_id: The waitlist_entry_id of this AddClientToClassVisit. # noqa: E501
:type: int
"""
self._waitlist_entry_id = waitlist_entry_id
@property
def appointment_id(self):
"""Gets the appointment_id of this AddClientToClassVisit. # noqa: E501
The appointment’s ID. # noqa: E501
:return: The appointment_id of this AddClientToClassVisit. # noqa: E501
:rtype: int
"""
return self._appointment_id
@appointment_id.setter
def appointment_id(self, appointment_id):
"""Sets the appointment_id of this AddClientToClassVisit.
The appointment’s ID. # noqa: E501
:param appointment_id: The appointment_id of this AddClientToClassVisit. # noqa: E501
:type: int
"""
self._appointment_id = appointment_id
@property
def appointment_gender_preference(self):
"""Gets the appointment_gender_preference of this AddClientToClassVisit. # noqa: E501
The gender of staff member with whom the client prefers to book appointments. # noqa: E501
:return: The appointment_gender_preference of this AddClientToClassVisit. # noqa: E501
:rtype: str
"""
return self._appointment_gender_preference
@appointment_gender_preference.setter
def appointment_gender_preference(self, appointment_gender_preference):
"""Sets the appointment_gender_preference of this AddClientToClassVisit.
The gender of staff member with whom the client prefers to book appointments. # noqa: E501
:param appointment_gender_preference: The appointment_gender_preference of this AddClientToClassVisit. # noqa: E501
:type: str
"""
allowed_values = ["None", "Female", "Male"] # noqa: E501
if appointment_gender_preference not in allowed_values:
raise ValueError(
"Invalid value for `appointment_gender_preference` ({0}), must be one of {1}" # noqa: E501
.format(appointment_gender_preference, allowed_values)
)
self._appointment_gender_preference = appointment_gender_preference
@property
def appointment_status(self):
"""Gets the appointment_status of this AddClientToClassVisit. # noqa: E501
The status of the appointment. # noqa: E501
:return: The appointment_status of this AddClientToClassVisit. # noqa: E501
:rtype: str
"""
return self._appointment_status
@appointment_status.setter
def appointment_status(self, appointment_status):
"""Sets the appointment_status of this AddClientToClassVisit.
The status of the appointment. # noqa: E501
:param appointment_status: The appointment_status of this AddClientToClassVisit. # noqa: E501
:type: str
"""
allowed_values = ["None", "Requested", "Booked", "Completed", "Confirmed", "Arrived", "NoShow", "Cancelled", "LateCancelled"] # noqa: E501
if appointment_status not in allowed_values:
raise ValueError(
"Invalid value for `appointment_status` ({0}), must be one of {1}" # noqa: E501
.format(appointment_status, allowed_values)
)
self._appointment_status = appointment_status
@property
def class_id(self):
"""Gets the class_id of this AddClientToClassVisit. # noqa: E501
The class ID that was used to retrieve the visits. # noqa: E501
:return: The class_id of this AddClientToClassVisit. # noqa: E501
:rtype: int
"""
return self._class_id
@class_id.setter
def class_id(self, class_id):
"""Sets the class_id of this AddClientToClassVisit.
The class ID that was used to retrieve the visits. # noqa: E501
:param class_id: The class_id of this AddClientToClassVisit. # noqa: E501
:type: int
"""
self._class_id = class_id
@property
def client_id(self):
"""Gets the client_id of this AddClientToClassVisit. # noqa: E501
The ID of the client associated with the visit. # noqa: E501
:return: The client_id of this AddClientToClassVisit. # noqa: E501
:rtype: str
"""
return self._client_id
@client_id.setter
def client_id(self, client_id):
"""Sets the client_id of this AddClientToClassVisit.
The ID of the client associated with the visit. # noqa: E501
:param client_id: The client_id of this AddClientToClassVisit. # noqa: E501
:type: str
"""
self._client_id = client_id
@property
def start_date_time(self):
"""Gets the start_date_time of this AddClientToClassVisit. # noqa: E501
The time this class is scheduled to start. # noqa: E501
:return: The start_date_time of this AddClientToClassVisit. # noqa: E501
:rtype: datetime
"""
return self._start_date_time
@start_date_time.setter
def start_date_time(self, start_date_time):
"""Sets the start_date_time of this AddClientToClassVisit.
The time this class is scheduled to start. # noqa: E501
:param start_date_time: The start_date_time of this AddClientToClassVisit. # noqa: E501
:type: datetime
"""
self._start_date_time = start_date_time
@property
def end_date_time(self):
"""Gets the end_date_time of this AddClientToClassVisit. # noqa: E501
The date and time the visit ends. The Public API returns UTC dates and times. For example, a class that occurs on June 25th, 2018 at 2:15PM (EST) appears as “2018-06-25T19:15:00Z” because EST is five hours behind UTC. Date time pairs always return in the format YYYY-MM-DDTHH:mm:ssZ. # noqa: E501
:return: The end_date_time of this AddClientToClassVisit. # noqa: E501
:rtype: datetime
"""
return self._end_date_time
@end_date_time.setter
def end_date_time(self, end_date_time):
"""Sets the end_date_time of this AddClientToClassVisit.
The date and time the visit ends. The Public API returns UTC dates and times. For example, a class that occurs on June 25th, 2018 at 2:15PM (EST) appears as “2018-06-25T19:15:00Z” because EST is five hours behind UTC. Date time pairs always return in the format YYYY-MM-DDTHH:mm:ssZ. # noqa: E501
:param end_date_time: The end_date_time of this AddClientToClassVisit. # noqa: E501
:type: datetime
"""
self._end_date_time = end_date_time
    @property
    def id(self):
        """Gets the id of this AddClientToClassVisit.

        The ID of the visit.

        :return: The id of this AddClientToClassVisit.
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this AddClientToClassVisit.

        The ID of the visit.

        :param id: The id of this AddClientToClassVisit.
        :type: int
        """
        self._id = id
    @property
    def last_modified_date_time(self):
        """Gets the last_modified_date_time of this AddClientToClassVisit.

        When included in the request, only records modified on or after the
        specified `LastModifiedDate` are included in the response. The Public
        API returns UTC dates and times in the format YYYY-MM-DDTHH:mm:ssZ.

        :return: The last_modified_date_time of this AddClientToClassVisit.
        :rtype: datetime
        """
        return self._last_modified_date_time
    @last_modified_date_time.setter
    def last_modified_date_time(self, last_modified_date_time):
        """Sets the last_modified_date_time of this AddClientToClassVisit.

        When included in the request, only records modified on or after the
        specified `LastModifiedDate` are included in the response. The Public
        API returns UTC dates and times in the format YYYY-MM-DDTHH:mm:ssZ.

        :param last_modified_date_time: The last_modified_date_time of this AddClientToClassVisit.
        :type: datetime
        """
        self._last_modified_date_time = last_modified_date_time
    @property
    def late_cancelled(self):
        """Gets the late_cancelled of this AddClientToClassVisit.

        When `true`, indicates that the class has been `LateCancelled`;
        when `false`, it has not.

        :return: The late_cancelled of this AddClientToClassVisit.
        :rtype: bool
        """
        return self._late_cancelled
    @late_cancelled.setter
    def late_cancelled(self, late_cancelled):
        """Sets the late_cancelled of this AddClientToClassVisit.

        When `true`, indicates that the class has been `LateCancelled`;
        when `false`, it has not.

        :param late_cancelled: The late_cancelled of this AddClientToClassVisit.
        :type: bool
        """
        self._late_cancelled = late_cancelled
    @property
    def location_id(self):
        """Gets the location_id of this AddClientToClassVisit.

        The ID of the location where the visit took place or is to take place.

        :return: The location_id of this AddClientToClassVisit.
        :rtype: int
        """
        return self._location_id
    @location_id.setter
    def location_id(self, location_id):
        """Sets the location_id of this AddClientToClassVisit.

        The ID of the location where the visit took place or is to take place.

        :param location_id: The location_id of this AddClientToClassVisit.
        :type: int
        """
        self._location_id = location_id
    @property
    def make_up(self):
        """Gets the make_up of this AddClientToClassVisit.

        When `true`, the client can make up this session and a session is not
        deducted from the pricing option that was used to sign the client into
        the enrollment; when `false`, the client cannot make up this session.
        See "Enrollments: Make-ups" in the MINDBODY support documentation.

        :return: The make_up of this AddClientToClassVisit.
        :rtype: bool
        """
        return self._make_up
    @make_up.setter
    def make_up(self, make_up):
        """Sets the make_up of this AddClientToClassVisit.

        When `true`, the client can make up this session and a session is not
        deducted from the pricing option that was used to sign the client into
        the enrollment; when `false`, the client cannot make up this session.
        See "Enrollments: Make-ups" in the MINDBODY support documentation.

        :param make_up: The make_up of this AddClientToClassVisit.
        :type: bool
        """
        self._make_up = make_up
    @property
    def name(self):
        """Gets the name of this AddClientToClassVisit.

        The name of the class.

        :return: The name of this AddClientToClassVisit.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this AddClientToClassVisit.

        The name of the class.

        :param name: The name of this AddClientToClassVisit.
        :type: str
        """
        self._name = name
    @property
    def service_id(self):
        """Gets the service_id of this AddClientToClassVisit.

        The ID of the pricing option used to pay for the class visit.

        :return: The service_id of this AddClientToClassVisit.
        :rtype: int
        """
        return self._service_id
    @service_id.setter
    def service_id(self, service_id):
        """Sets the service_id of this AddClientToClassVisit.

        The ID of the pricing option used to pay for the class visit.

        :param service_id: The service_id of this AddClientToClassVisit.
        :type: int
        """
        self._service_id = service_id
    @property
    def signed_in(self):
        """Gets the signed_in of this AddClientToClassVisit.

        When `true`, indicates that the client has been signed in;
        when `false`, the client has not.

        :return: The signed_in of this AddClientToClassVisit.
        :rtype: bool
        """
        return self._signed_in
    @signed_in.setter
    def signed_in(self, signed_in):
        """Sets the signed_in of this AddClientToClassVisit.

        When `true`, indicates that the client has been signed in;
        when `false`, the client has not.

        :param signed_in: The signed_in of this AddClientToClassVisit.
        :type: bool
        """
        self._signed_in = signed_in
    @property
    def staff_id(self):
        """Gets the staff_id of this AddClientToClassVisit.

        The ID of the staff member who is teaching the class.

        :return: The staff_id of this AddClientToClassVisit.
        :rtype: int
        """
        return self._staff_id
    @staff_id.setter
    def staff_id(self, staff_id):
        """Sets the staff_id of this AddClientToClassVisit.

        The ID of the staff member who is teaching the class.

        :param staff_id: The staff_id of this AddClientToClassVisit.
        :type: int
        """
        self._staff_id = staff_id
    @property
    def web_signup(self):
        """Gets the web_signup of this AddClientToClassVisit.

        When `true`, indicates that the client signed up online;
        when `false`, the client was signed up by a staff member.

        :return: The web_signup of this AddClientToClassVisit.
        :rtype: bool
        """
        return self._web_signup
    @web_signup.setter
    def web_signup(self, web_signup):
        """Sets the web_signup of this AddClientToClassVisit.

        When `true`, indicates that the client signed up online;
        when `false`, the client was signed up by a staff member.

        :param web_signup: The web_signup of this AddClientToClassVisit.
        :type: bool
        """
        self._web_signup = web_signup
    @property
    def action(self):
        """Gets the action of this AddClientToClassVisit.

        The action taken.

        :return: The action of this AddClientToClassVisit.
        :rtype: str
        """
        return self._action
@action.setter
def action(self, action):
"""Sets the action of this AddClientToClassVisit.
The action taken. # noqa: E501
:param action: The action of this AddClientToClassVisit. # noqa: E501
:type: str
"""
allowed_values = ["None", "Added", "Updated", "Failed", "Removed"] # noqa: E501
if action not in allowed_values:
raise ValueError(
"Invalid value for `action` ({0}), must be one of {1}" # noqa: E501
.format(action, allowed_values)
)
self._action = action
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AddClientToClassVisit, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`; delegates to :meth:`to_str`."""
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AddClientToClassVisit):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
mindbody/API-Examples
|
SDKs/Python/swagger_client/models/add_client_to_class_visit.py
|
Python
|
bsd-2-clause
| 26,396
|
[
"VisIt"
] |
1e0c8a284f9e15ed138570dbac1b003bdc132a3db827e6199f63fbc6c61a4eb1
|
# common functions
from commonparameters import *
import numpy as np
import scipy.optimize
import subprocess
from itertools import count
import ConfigParser # import setting file
from optparse import OptionParser
import sys, os, time
from subprocess import Popen, list2cmdline
import glob
import time #check time of calculation
import datetime
import shutil
import math
import genetic
from scipy import stats
from pylab import plot,show
# ===============================================================================================
# READ FILES FUNCTIONS
# ===============================================================================================
#read experimental data from a file and return dic with format [[moleculename:deltaGhyd]]
def exp_read(namefile):
datadic = {}
try:
file = open(namefile,"r")
except:
print "FILE ERROR: %s has some problems or is not defined. Check it please" % (namefile)
exit()
for line in file:
linediv = line.split()
datadic[linediv[2]] = {"dgexp":float(linediv[0])} #format of exp data: deltaGhyd ! moleculename
file.close()
return datadic
#read parameters from a file and return dictonary with format [atsymbol: parameter value...]
def parameters_read(namefile):
paramdic = {}
try:
file = open(namefile,"r")
except:
print "FILE ERROR: %s has some problems or is not defined. Check it please" %(namefile)
exit()
for line in file:
print line
linediv = line.split()
paramdic[linediv[0]] = float(linediv[1])
file.close()
return paramdic
#read output of MOPAC
def mopacout_read(namefile):
# open file
mopacfile = open(namefile,"r")
heatformation = None
for line in mopacfile:
if KEYFHOF in line:
linediv = line.split()
#extract FINAL HEAT OF FORMATION in KCAL/MOL
heatformation = float(linediv[5])
elif KEYCA in line:
linediv = line.split()
#extract COSMO AREA in SQUARE ANGSTROMS
cosmoarea = float(linediv[3])
mopacfile.close()
if heatformation:
return heatformation,cosmoarea
else:
# try again,open file (horrible esto)
time.sleep(1.0)
mopacfile = open(namefile,"r")
heatformation = None
for line in mopacfile:
if KEYFHOF in line:
linediv = line.split()
#extract FINAL HEAT OF FORMATION in KCAL/MOL
heatformation = float(linediv[5])
elif KEYCA in line:
linediv = line.split()
#extract COSMO AREA in SQUARE ANGSTROMS
cosmoarea = float(linediv[3])
mopacfile.close()
if heatformation:
return heatformation,cosmoarea
print "ERROR: A mistake produced to try read the file " + namefile
exit()
#read COS (cosmo information) files of MOPAC
def cosmoout_read(namefile):
# open file
cosmofile = open(namefile,"r")
cosmoparamlist = []
passindicator = 0
for line in cosmofile:
if passindicator == 1:
try:
linediv=line.split()
atomicnumber = int(linediv[1])
atomiccosmoarea = float(linediv[7])
cosmoparamlist.append([atomicnumber,atomiccosmoarea])
except: #if find empty line
cosmofile.close()
if len(cosmoparamlist)==0:
print "ERROR: A mistake produced to try read COSMO output " + namefile
exit()
return cosmoparamlist
if KEYCOSMO in line:
passindicator = 1
# read .out file and extract number of electron by atoms
def electron_read(namefile):
    """Return the list of per-atom electron counts from a MOPAC .out file.

    Collection starts after the population-analysis header line and stops
    at the DIPOLE line; column 3 of each atom row is the electron count.
    """
    electronlist = []
    inputfile = open(namefile, "r")
    collecting = False
    for line in inputfile:
        if "DIPOLE" in line:
            collecting = False
        elif collecting:
            electronlist.append(float(line.split()[3])) #electron number
        if "ATOM NO.   TYPE          CHARGE      No. of ELECS.   s-Pop       p-Pop" in line:
            collecting = True
    inputfile.close()
    return electronlist
# read .out file and extract number of electron by atoms + atomic number
def atomtypeelectron_read(namefile):
    """Return [[atomsymbol, electroncount], ...] from a MOPAC .out file.

    Same scan as electron_read, but keeps the atom-type symbol (column 1)
    together with the electron count (column 3).
    """
    electronlist = []
    inputfile = open(namefile, "r")
    collecting = False
    for line in inputfile:
        if "DIPOLE" in line:
            collecting = False
        elif collecting:
            columns = line.split()
            electronlist.append([columns[1], float(columns[3])]) #atom type - electron number
        if "ATOM NO.   TYPE          CHARGE      No. of ELECS.   s-Pop       p-Pop" in line:
            collecting = True
    inputfile.close()
    return electronlist
# ===============================================================================================
# PRINT AND CONVERSION FUNCTIONS
# ===============================================================================================
#transform symbol to atomicnumber
def symbol_to_atomicnumber(symbol):
dictionary = {"ALL":0, "H":1, "C":6, "N":7, "O":8, "F":9, "P":15, "S":16, "Cl":17, "Br": 35, "I": 53}
try:
return dictionary[symbol]
except:
print "ERROR: the symbol " + symbol + " is not setting in function symbol_to_atomicnumber. Please check this"
exit()
#transform atomicnumber to symbol
def atomicnumber_to_symbol(atomicnumber):
dictionary = { 1:"H", 6:"C", 7:"N", 8:"O", 9:"F", 15:"P", 16:"S", 17:"Cl", 35:"Br", 53:"I"}
try:
return dictionary[atomicnumber]
except:
print "ERROR: the symbol " + str(atomicnumber) + " is not setting in function atomicnumber_to_symbol. Please check this"
exit()
#transform a key to atomic symbol
#keys have the format: type@Symbol.modifier, e.g. r@S.b = radius of sulfur, mod b
def key_to_symbol(key):
    """Return the element symbol embedded in a parameter key.

    "r@S.b" -> "S"; without a single ".mod" suffix the part after "@"
    is returned unchanged (e.g. "g@Cl" -> "Cl").
    """
    typeparam, symbolmod = key.split("@")
    parts = symbolmod.split(".")
    if len(parts) == 2:
        return parts[0]
    return symbolmod
#classify an atom into a modified symbol from its electron population
#enumber: number of electrons
#returns symbol + mod, example: S.b
# thresholds: X.a <= e@X.a < X.b <= e@X.b < X.c
def atomicnumber_to_symbolmod(atomicnumber, enumber, paramdic):
    """Return "Symbol.mod" where mod depends on electron-count thresholds.

    With both e@X.a and e@X.b thresholds present: a / b / c bands.
    With only e@X.a present: a / b bands. With none: always "a".
    """
    symbol = atomicnumber_to_symbol(atomicnumber)
    keya = "e@" + symbol + ".a"
    keyb = "e@" + symbol + ".b"
    if keya in paramdic and keyb in paramdic:
        if enumber > paramdic[keyb]:
            mod = "c"
        elif paramdic[keya] < enumber <= paramdic[keyb]:
            mod = "b"
        else:
            mod = "a"
    elif keya in paramdic:
        mod = "b" if enumber > paramdic[keya] else "a"
    else:
        mod = "a" #"a" is the default mod
    return symbol + "." + mod
#transform parameter dictionary to MOPAC format VDW(Cl=2.33;Br=2.50...)
def radii_to_text(paramdic):
    """Format the COSMO radii ("rc@" keys) as a MOPAC VDW(...) keyword.

    Keys are processed in sorted order because MOPAC otherwise misreads
    e.g. Cl as C. Builds the entries with join instead of appending a
    trailing ";" and stripping it; the original also contained a no-op
    `line.strip()` whose result was discarded -- removed.
    """
    entries = []
    for key, value in sorted(paramdic.items()):
        if "rc@" in key:
            symbol = key_to_symbol(key)
            entries.append(str(symbol) + "=" + "{0:.4f}".format(value))
    return "VDW(" + ";".join(entries) + ")"
#print parameters dictionary to text
def print_param(paramdic):
    """Render the parameter dict as ' name value ' pairs in sorted key order."""
    chunks = []
    for key, value in sorted(paramdic.iteritems()):
        chunks.append(" %-2s % .5f " % (key, value))
    return "".join(chunks).strip()
def print_summary(totalerror, mae, rmse, bias, r2, slope, intercept,ncycle,datadic,nptype):
    """Write the per-cycle fit summary (metrics + per-compound report) to DEFSUMMARYFILE.

    The per-compound table is sorted by absolute error, largest first.
    `nptype` selects the report layout: "claverie" splits the non-polar
    term into cavitation and dispersion; "gammasasa" reports a single
    non-polar term. Other nptype values write only the header line.
    """
    summaryfile = open(DEFSUMMARYFILE,"w")
    summaryfile.write("Cycle: %i TotError: %2.4f MAE: %2.4f RMSE: %2.4f BIAS: %2.4f R2: %1.5f Slope: %2.3e Intercept: %2.3e" % (ncycle, totalerror, mae, rmse, bias, r2, slope, intercept) + "\n")
    if nptype == "claverie":
        summaryfile.write("%-48s %s %s %s %s %s %s %s %s\n" % ("compoundname", "dgexp", "dgcalc", "error", "abserror","hof_gas","hof_cosmo","delta_hof","npterm(cavitation,disp)"))
        reportlist =[]
        for compoundname,value in datadic.iteritems():
            reportlist.append([compoundname,datadic[compoundname]["dgexp"],datadic[compoundname]["dgcalc"],(datadic[compoundname]["dgexp"]-datadic[compoundname]["dgcalc"]), abs(datadic[compoundname]["dgexp"]-datadic[compoundname]["dgcalc"]),datadic[compoundname]["hofgas"],datadic[compoundname]["hofcosmo"], datadic[compoundname]["hofcosmo"] - datadic[compoundname]["hofgas"],datadic[compoundname]["cavitation"], datadic[compoundname]["dispterm"]])
        # worst-predicted compounds first
        reportlist.sort(reverse=True, key=lambda a: a[4])
        #~ tagfirst = True
        for compoundname, dgexp, dgcalc, error, abserror, hofgas, hofcosmo, deltahof, cavitation, dispterm in reportlist:
            summaryfile.write("%-48s % +.2f % +.2f % +.2f % +.2f % +.2f %+.2f % +.2f % +.2f % +.2f % +.2f\n" % (compoundname, dgexp, dgcalc, error, abserror, hofgas, hofcosmo, deltahof, cavitation + dispterm,cavitation,dispterm))
            #~ if abserror < 3.0 and tagfirst==True: # write ============ to divide error values under 3.0 kcal/mol
                #~ summaryfile.write(10*"="+"\n")
                #~ tagfirst = False
    elif nptype == "gammasasa":
        summaryfile.write("%-48s %s %s %s %s %s %s %s %s\n" % ("compoundname", "dgexp", "dgcalc", "error", "abserror","hof_gas","hof_cosmo","delta_hof","npterm"))
        reportlist =[]
        for compoundname,value in datadic.iteritems():
            reportlist.append([compoundname,datadic[compoundname]["dgexp"],datadic[compoundname]["dgcalc"],(datadic[compoundname]["dgexp"]-datadic[compoundname]["dgcalc"]), abs(datadic[compoundname]["dgexp"]-datadic[compoundname]["dgcalc"]),datadic[compoundname]["hofgas"],datadic[compoundname]["hofcosmo"], datadic[compoundname]["hofcosmo"] - datadic[compoundname]["hofgas"],datadic[compoundname]["npterm"]])
        # worst-predicted compounds first
        reportlist.sort(reverse=True, key=lambda a: a[4])
        #~ tagfirst = True
        for compoundname, dgexp, dgcalc, error, abserror, hofgas, hofcosmo, deltahof, npterm in reportlist:
            summaryfile.write("%-48s % +.2f % +.2f % +.2f % +.2f % +.2f %+.2f % +.2f % +.2f\n" % (compoundname, dgexp, dgcalc, error, abserror, hofgas, hofcosmo, deltahof, npterm))
            #~ if abserror < 3.0 and tagfirst==True: # write ============ to divide error values under 3.0 kcal/mol
                #~ summaryfile.write(10*"="+"\n")
                #~ tagfirst = False
    summaryfile.close()
#generate list of files inside directory with extension ext
def list_files(directory,ext=DEFINPUTEXT):
    """Return the sorted list of files in *directory* whose names end with *ext*."""
    return sorted(glob.glob(directory + "/*" + ext))
#make MOPAC gas or cosmo input file taking a file as template
def inputfile_make(templatename, extrakeys, extrakeyssolv = "", paramdic = "", step = ""):
    """Generate a MOPAC input file from *templatename*.

    When *extrakeyssolv* is non-empty a COSMO (solvated) input is written
    to <step>/<base>_<step>.mop; otherwise a gas-phase input goes to
    gas/<base>_gas.mop. The KEYDEFAULT placeholder in the template is
    replaced with the keyword string (plus VDW radii when *paramdic* is
    given). The two near-identical branches of the original were merged.
    Returns 0.
    """
    template = open(templatename, "r")
    if extrakeyssolv: #COSMO calculation -> per-step directory
        dirname = step
        suffix = "_" + step + ".mop"
    else: #gas-phase calculation
        dirname = "gas"
        suffix = "_gas.mop"
    try:
        os.stat(dirname)
    except:
        os.makedirs(dirname) # create the output directory on first use
    inputfile = open(dirname + "/" + os.path.basename(templatename).replace(".mop", suffix), "w")
    if paramdic != "":
        radiitext = radii_to_text(paramdic)
    else:
        radiitext = ""
    #copy the template, expanding the keyword placeholder
    for line in template:
        if KEYDEFAULT in line:
            line = line.replace(KEYDEFAULT, extrakeys + " " + extrakeyssolv + " " + radiitext)
        inputfile.write(line)
    template.close()
    inputfile.close()
    return 0
#run calculation in serial or parallel
def exec_commands(cmds, cores = DEFCORE):
    ''' Exec commands in parallel in multiple processes.

    cmds: list of argument lists, each suitable for subprocess.Popen.
    cores: maximum number of concurrent processes.
    Exits the whole program (sys.exit(1)) if any command fails.
    '''
    if not cmds: return # empty list
    def done(p):
        return p.poll() is not None
    def success(p):
        return p.returncode == 0
    def fail():
        sys.exit(1)
    max_task = cores
    processes = []
    while True:
        while cmds and len(processes) < max_task:
            task = cmds.pop()
            processes.append(Popen(task))
        # iterate over a copy: the original removed finished processes from
        # `processes` while iterating it, which skips the following element
        for p in list(processes):
            if done(p):
                if success(p):
                    processes.remove(p)
                else:
                    fail()
        if not processes and not cmds:
            break
# ===============================================================================================
# MAIN AND SOLVATION FUNCTIONS
# ===============================================================================================
#calculation of cavitation term with simplified Pierotti equation
def calc_pierotti(rsolv,rsphere,yrel):
    """Simplified Pierotti cavitation term (converted with J2KCAL).

    rsolv: solvent radius; rsphere: solute sphere radius;
    yrel: reduced solvent density parameter.
    """
    yratio = yrel/(1-yrel)
    ratio = rsphere/rsolv
    bracket = -math.log(1-yrel) + (3*yratio)*ratio + (3*yratio+4.5*(yratio)*(yratio))*ratio*ratio
    return RCONST*TEMP*bracket * J2KCAL
#calculation of no-polar term as sum (gamma_i * area_i)
#input:
#    cosmoparamlist: [[atomicnumber_i, cosmoarea_i],...
#    paramdic: parameter dictionary ("g@", "r@"/"rc@", "rsolv@", "e@" keys)
#output:
#    "claverie": (npterm, dispterm, cavitationterm); "gammasasa"/"electronmod": npterm
def np_calc(cosmoparamlist,paramdic,nptype,yrel,electronlist=None):
    """Compute the non-polar solvation term; return type depends on nptype."""
    if nptype == "claverie": # if claverie-pierotti term is activated
        dispterm = 0
        cavitationterm = 0
        i = 0
        rsolv = paramdic["rsolv@"]
        for atomicnumber,cosmoarea in cosmoparamlist:
            symbolmod = atomicnumber_to_symbolmod(atomicnumber,electronlist[i],paramdic)
            gkey = "g@" + symbolmod
            gamma = paramdic[gkey]
            # check if there r@ param, else look for rc@, then else print warning and exit
            try:
                rkey = "r@" + symbolmod
                radii = paramdic[rkey]
            except:
                try:
                    rkey = "rc@" + symbolmod
                    radii = paramdic[rkey]
                except:
                    print "There is not radius defined (r@ or rc@) for " + symbolmod + ". Please check it"
                    exit()
            #~ dispterm += gamma*cosmoarea
            dispterm += gamma*cosmoarea*(rsolv+radii)*(rsolv+radii)/(radii*radii) #change to scale SES area to SAS: (r+Rsolv)^2/r^2
            #~ cavitationterm += calc_pierotti(rsolv,radii,yrel)*cosmoarea/((radii+rsolv)*(radii+rsolv)*PI*4/3) # sum of Claverie terms
            #~ cavitationterm += calc_pierotti(rsolv,radii+rsolv,yrel)*(cosmoarea/((radii+rsolv)*(radii+rsolv)*PI*4/3)) # sum of Claverie terms 0.42x+0.52
            cavitationterm += calc_pierotti(rsolv,radii,yrel)*(cosmoarea/((radii+rsolv)*(radii+rsolv)*PI*4/3)) # sum of Claverie terms 1.37x+0.12
            #~ cavitationterm += calc_pierotti(rsolv,radii,yrel)*(cosmoarea/((radii)*(radii)*PI*4/3)) # sum of Claverie terms 0.31x+1.21
            #~ cavitationterm += calc_pierotti(rsolv,radii+rsolv,yrel)*(cosmoarea/((radii)*(radii)*PI*4/3)) # sum of Claverie terms 0.09x+2.76
            i += 1
        npterm = dispterm + cavitationterm
        return npterm, dispterm, cavitationterm
    elif nptype == "electronmod": # if electron modification term is activated (is a test)
        npterm = 0
        i = 0
        for atomicnumber,cosmoarea in cosmoparamlist:
            # NOTE(review): `key` is never defined in this branch, so executing
            # it raises NameError -- confirm the intended parameter key.
            npterm += paramdic[key]*cosmoarea*electronlist[i]
            i += 1
        return npterm
    elif nptype == "gammasasa": # or simple no-polar term: sum_i (gamma_i x cosmoarea_i)
        dispterm = 0
        cavitationterm = 0
        i = 0
        rsolv = paramdic["rsolv@"]
        npterm = 0
        for atomicnumber,cosmoarea in cosmoparamlist:
            symbolmod = atomicnumber_to_symbolmod(atomicnumber,electronlist[i],paramdic)
            gkey = "g@" + symbolmod
            gamma = paramdic[gkey]
            #~ npterm += gamma*cosmoarea
            try:
                rkey = "rc@" + symbolmod
                radii = paramdic[rkey]
            except:
                print "There is not radius defined (r@ or rc@) for " + symbolmod + ". Please check it"
                exit()
            npterm += gamma*cosmoarea
            #~ npterm += gamma*cosmoarea*(rsolv+radii)*(rsolv+radii)/(radii*radii) #change to scale SES area to SAS: (r+Rsolv)^2/r^2
            i += 1
        return npterm
# calculation of no-polar term with only one gamma value
def np_calc_simple(cosmoarea,gammavalue):
    """Non-polar term with a single surface-tension coefficient: gamma * area."""
    npterm = cosmoarea * gammavalue
    return npterm
#error between experimental and calculated data
#    numberstep: generation/step label (also the work-directory name)
#    paramdic: parameters under test
#    datadic: per-compound data ({name: {"dgexp":..., "template":..., "hofgas":...}})
#    extrakeys/extrakeyssolv: MOPAC keywords (gas / COSMO)
#    outfile: open log file; nptype: non-polar model; yrel: Pierotti parameter
def calc_error(numberstep, paramdic, datadic, extrakeys, extrakeyssolv, outfile, nptype ,yrel=None):
    """Run COSMO calculations for all compounds and score the parameter set.

    Returns (totalerror, mae, rmse, bias, r2, slope, intercept, datadic);
    datadic is also updated in place with per-compound results.
    """
    #generate MOPAC input files corresponding to a numberstep generation
    for key, value in datadic.iteritems():
        inputfile_make(value["template"],extrakeys,extrakeyssolv,paramdic,numberstep)
    #run COSMO phase calculation
    cosmoinputlist = list_files(numberstep)
    commands = []
    for cosmoinput in cosmoinputlist:
        commands.append([MOPACPATH, cosmoinput])
    exec_commands(commands)
    gasindex = 0
    errorlist = []
    xdata = []
    ydata = []
    for key, value in datadic.iteritems():
        cosmoinput =str(numberstep)+"/"+os.path.basename(datadic[key]["template"]).replace(".mop","_" + str(numberstep) + ".mop")
        hofcosmo, areacosmo = mopacout_read(cosmoinput.replace(".mop",".out")) #read MOPAC output from .out
        cosmoparamlist = cosmoout_read(cosmoinput.replace(".mop",".cos")) #read cosmo param from .cos
        electronlist = electron_read(cosmoinput.replace(".mop",".out")) #read number of electron by atom from MOPAC output
        datadic[key]["hofcosmo"] = hofcosmo #extract HOF
        datadic[key]["areacosmo"] = areacosmo #extract AREA
        #no-polar term
        if nptype == "claverie":
            npterm, dispterm, cavitationterm = np_calc(cosmoparamlist,paramdic,nptype,yrel,electronlist)
            datadic[key]["dispterm"] = dispterm
            datadic[key]["cavitation"] = cavitationterm
        #~ elif nptype == "electronmod":
            #~ npterm = np_calc(cosmoparamlist,gammatestdic,rtestdic,rsolv,nptype,yrel,electronlist)
            #~ npdetaillist.append(npterm)
        elif nptype == "gammasasa":
            npterm = np_calc(cosmoparamlist,paramdic,nptype,yrel,electronlist)
            datadic[key]["npterm"] = npterm
        else:
            #mono gamma
            # NOTE(review): `paramtestlist`, `rtestlistcomplete` and
            # `npdetaillist` are not defined in this function -- this
            # fallback branch raises NameError if ever taken; confirm.
            gamma = paramtestlist[len(rtestlistcomplete)]
            npterm = np_calc_simple(areacosmo,gamma)
            npdetaillist.append(npterm)
        #Hf_gas
        hofgas = value["hofgas"]
        gasindex += 1
        # \delta G_calc = Hf_cosmo - Hf_gas + no-polar_term
        dgcalc = hofcosmo - hofgas + npterm
        #~
        #~ dgcalc = npterm #reemplazado para optimizar solo parte no polar
        #~ dgcalc = hofcosmo - hofgas #reemplazado para optimizar solo la parte electrostatica
        datadic[key]["dgcalc"]= dgcalc
        xdata.append(datadic[key]["dgexp"]) #experimental data as x-axis
        ydata.append(datadic[key]["dgcalc"]) #calc data as y-axis
        datadic[key]["error"] = datadic[key]["dgexp"]-datadic[key]["dgcalc"]
        errorlist.append(datadic[key]["error"])
    errorarray = np.array(errorlist)
    mae = np.mean(abs(errorarray))
    rmse = math.sqrt(np.mean((errorarray)**2))
    bias = np.mean(errorarray)
    slope, intercept, r2 = fit_lineal(xdata,ydata) #linear fit
    totalerror = mae #var to optimize
    prevline = "%-5s Err: %3.4f MAE: %3.4f RMSE: %3.4f BIAS: %3.4f R2: %1.5f " % (numberstep, totalerror, mae, rmse, bias, r2)
    outfile.write(prevline + print_param(paramdic))
    return totalerror, mae, rmse, bias, r2, slope, intercept, datadic
#calculation of metrics by element
def calc_staticsbyelement(numberstep, datadic, elementsymbol):
    """Compute error statistics restricted to compounds containing *elementsymbol*.

    Returns (mae, rmse, bias, r2, slope, intercept, errorr2, errorslope,
    errorintercept). Requires datadic entries already annotated by
    calc_error ("error", "dgexp", "dgcalc").
    """
    errorlist = []
    electronlistbyelementlist =[]
    xdata = []
    ydata = []
    for key, value in datadic.iteritems():
        cosmoinput =str(numberstep)+"/"+os.path.basename(datadic[key]["template"]).replace(".mop","_" + str(numberstep) + ".mop")
        #~ hofcosmo, areacosmo = mopacout_read(cosmoinput.replace(".mop",".out")) #read MOPAC output from .out
        cosmoparamlist = cosmoout_read(cosmoinput.replace(".mop",".cos")) #read cosmo param from .cos
        atomtypeelectronlist = atomtypeelectron_read(cosmoinput.replace(".mop",".out")) #read atomic symbol - number of electron from MOPAC output
        tag = False
        # atomtypeelectronlist = [[atomsymbol1,electronnumber1], [atomsymbol2,electronnumber2], ...]
        for atomtypeelectron in atomtypeelectronlist:
            if elementsymbol in atomtypeelectron[0]:
                errorlist.append(datadic[key]["error"])
                electronlistbyelementlist.append(atomtypeelectron[1])
                if tag == False: # store dgexp vs dgcalc point only once
                    tag = True
                    xdata.append(datadic[key]["dgexp"]) #experimental data as x-axis
                    ydata.append(datadic[key]["dgcalc"]) #calc data as y-axis
    errorarray = np.array(errorlist)
    # NOTE(review): if the fits below raise (e.g. no atoms matched, empty
    # lists), the bare except leaves mae/rmse/... unbound and the return
    # statement itself raises NameError -- confirm intended behaviour.
    try:
        mae = np.mean(abs(errorarray))
        rmse = math.sqrt(np.mean((errorarray)**2))
        bias = np.mean(errorarray)
        # calc vs experimental
        slope, intercept, r2 = fit_lineal(xdata,ydata) #linear fit
        # error vs electron number
        errorslope, errorintercept, errorr2 = fit_lineal(electronlistbyelementlist,errorlist) #linear fit
    except:
        pass
    return mae, rmse, bias, r2, slope, intercept, errorr2, errorslope, errorintercept
#
# name: check_restrictions
# Validate a candidate parameter set against physical ordering rules and
# per-parameter limits.
#
# @param paramdic: parameters to test
# @type paramdic: dictionary
# @param fixlimitdic: {key: (lower, upper)} allowed ranges
#
# @return 1 when every restriction holds, 0 otherwise
# @rtype binary
def check_restrictions(paramdic, fixlimitdic):
    #halogen radii must grow down the group: F <= Cl <= Br <= I
    if not (paramdic["rc@F.a"] <= paramdic["rc@Cl.a"] <= paramdic["rc@Br.a"] <= paramdic["rc@I.a"]):
        return 0
    #chalcogens: O strictly smaller than S
    if not (paramdic["rc@O.a"] < paramdic["rc@S.a"]):
        return 0
    #pnictogens: N <= P
    if not (paramdic["rc@N.a"] <= paramdic["rc@P.a"]):
        return 0
    #second period shrinks across the row: C >= N >= O >= F
    if not (paramdic["rc@C.a"] >= paramdic["rc@N.a"] >= paramdic["rc@O.a"] >= paramdic["rc@F.a"]):
        return 0
    #every parameter must stay inside its fixed limits
    for key, value in paramdic.iteritems():
        if not (fixlimitdic[key][0] <= value <= fixlimitdic[key][1]):
            return 0
    return 1
#
# name: make_gaussmodification
# Perturb a value by a random amount drawn inside +/- rangevalue.
# (Despite the name, the active draw is uniform; the Gaussian variant
# is kept commented out.)
#
# @param value0: centre of the perturbation
# @type value0: number
# @param rangevalue: half-width of the modification range
# @type rangevalue: number
#
# @return Modified value.
# @rtype number
#
def make_gaussmodification(value0,rangevalue):
    #gauss
    #~ return value0 + rangevalue * np.random.normal(0,0.2)
    #uniform
    delta = rangevalue * np.random.uniform(-1,1)
    return value0 + delta
#
# name: modified_values
# Build a new parameter dictionary where every free parameter is randomly
# perturbed within the range configured for its key prefix.
#
# @param paramdic: name -> value of parameters to optimize
# @type paramdic: dictionary
# @param freeparamlist: names of the parameters allowed to change
# @type freeparamlist: list
# @param rangesdic: perturbation ranges per parameter family
# @type rangesdic: dictionary
#
# @return new parameter dictionary
# @rtype dictionary
#
def modified_values(paramdic, freeparamlist, rangesdic):
    #prefix -> range name, checked in the same priority order as the
    #original if/elif chain
    prefixranges = [("r@", "radii"), ("g@", "gamma"), ("rsolv@", "rsolv"),
                    ("rc@", "cosmoradii"), ("e@", "eumbral"), ("k@", "k")]
    newparamdic = {} #store new values of parameters
    for key, value in paramdic.iteritems():
        if key not in freeparamlist:
            newparamdic[key] = value #fixed parameter: keep the old value
            continue
        for prefix, rangename in prefixranges:
            if prefix in key:
                paramrange = rangesdic[rangename]
                break
        newparamdic[key] = make_gaussmodification(value, paramrange)
    return newparamdic
#
# name: check_excludeatom
# Check whether any of a set of element symbols appears in a coordinate file.
#
# @param filename: file to check
# @type filename: string
# @param excludelist: element symbols to exclude (eg. ["S","I"])
# @type excludelist: list of string
#
# @return True when an excluded atom is found in filename, else False
# @rtype binary
def check_excludeatom(filename, excludelist):
    filein = open(filename, "r")
    found = False
    for line in filein:
        prefix = line[0:3] #symbols sit in the first columns of each line
        for symbol in excludelist:
            if (symbol + " ") in prefix:
                found = True
                break
        if found:
            break
    filein.close()
    return found
def fit_lineal(x,y):
    """Least-squares linear fit; returns (slope, intercept, R^2)."""
    regression = stats.linregress(x, y)
    rvalue = regression[2]
    return regression[0], regression[1], rvalue * rvalue
# control MC temperature
def temperature_control(temp, mcmarklist, lastmark):
    """Adapt the Monte Carlo temperature from the last 20 acceptance marks.

    temp       -- current MC temperature
    mcmarklist -- sliding window of recent marks (mutated in place:
                  `lastmark` is appended, and once 20 marks have been seen
                  the oldest is dropped)
    lastmark   -- mark of the latest step (' @PROB\\n', ' @DESC\\n', or other)

    Returns (new_temperature, mcmarklist).  Cooling (x0.95) when more than
    40% of the window were probabilistic acceptances; heating (x1.05) when
    fewer than 20% of steps were accepted at all; unchanged otherwise.
    Uses print() so the code runs on both Python 2 and 3 (the original
    Python-2-only print statement was the only change in behavior-relevant
    syntax; the printed text is identical).
    """
    limitstore = 20
    numberstoremark = float(len(mcmarklist))  # window size BEFORE appending
    mcmarklist.append(lastmark)
    if numberstoremark < limitstore:
        # Not enough history yet: keep accumulating marks.
        return temp, mcmarklist
    mcmarklist.pop(0)  # keep the window at `limitstore` entries
    numberdesc = mcmarklist.count(' @DESC\n')
    numberprob = mcmarklist.count(' @PROB\n')
    print("%s %f" % ("prob", float(numberprob + numberdesc) / numberstoremark))
    if numberprob / numberstoremark > 0.4:
        return temp * 0.95, mcmarklist
    elif (numberprob + numberdesc) / numberstoremark < 0.2:
        return temp * 1.05, mcmarklist
    return temp, mcmarklist
#Genetic Algorithm Core
#~ def runGA(ngeneration,
# ===============================================================================================
# CLASSES
# ===============================================================================================
class Datatest:
    """Container bundling one reference data set: atomic radii, gamma
    parameters and the solvent radius."""
    def __init__(self,radiilist,gammalist,rsolv):
        self.radiilist = radiilist  # list of atomic radii
        self.gammalist = gammalist  # list of gamma parameters
        self.rsolv = rsolv          # solvent radius
        # NOTE(review): `fixparam` is not a constructor argument, so this
        # reads a module-level global; if no such global exists,
        # instantiation raises NameError — confirm intent.
        self.fixparam = fixparam
|
lavecchia/cosmo
|
commonfunctions.py
|
Python
|
gpl-2.0
| 27,976
|
[
"Gaussian",
"MOPAC"
] |
6f9e61d804b68fd98250a43495bbee9bb55e156e1f7193aa3471e5b675909568
|
# Copyright (c) 2012-2014 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy import stats, special
from . import link_functions
from .likelihood import Likelihood
from .gaussian import Gaussian
from ..core.parameterization import Param
from paramz.transformations import Logexp
from ..core.parameterization import Parameterized
import itertools
class MixedNoise(Likelihood):
    """Multi-output likelihood: each output has its own Gaussian noise model.

    The sub-likelihood applied to each data row is selected by
    ``Y_metadata['output_index']`` — an array of indices into
    ``likelihoods_list``, one per row.
    """
    def __init__(self, likelihoods_list, name='mixed_noise'):
        #NOTE at the moment this likelihood only works for using a list of gaussians
        # Deliberately calls super(Likelihood, ...), i.e. *skips*
        # Likelihood.__init__ and runs the next initializer in the MRO
        # directly (Likelihood.__init__ would require a link function).
        super(Likelihood, self).__init__(name=name)
        self.link_parameters(*likelihoods_list)
        self.likelihoods_list = likelihoods_list
        self.log_concave = False
    def gaussian_variance(self, Y_metadata):
        """Per-row noise variance: rows with output_index == j get the
        variance of the j-th Gaussian in ``likelihoods_list``."""
        assert all([isinstance(l, Gaussian) for l in self.likelihoods_list])
        ind = Y_metadata['output_index'].flatten()
        variance = np.zeros(ind.size)
        for lik, j in zip(self.likelihoods_list, range(len(self.likelihoods_list))):
            variance[ind==j] = lik.variance
        return variance
    def betaY(self,Y,Y_metadata):
        """Precision-weighted targets: Y divided row-wise by the noise variance."""
        #TODO not here.
        return Y/self.gaussian_variance(Y_metadata=Y_metadata)[:,None]
    def update_gradients(self, gradients):
        # Forward the stacked gradient vector to the parameterization framework.
        self.gradient = gradients
    def exact_inference_gradients(self, dL_dKdiag, Y_metadata):
        """Gradient w.r.t. each Gaussian's variance: the sum of dL/dK_diag
        over the rows belonging to that output."""
        assert all([isinstance(l, Gaussian) for l in self.likelihoods_list])
        ind = Y_metadata['output_index'].flatten()
        return np.array([dL_dKdiag[ind==i].sum() for i in range(len(self.likelihoods_list))])
    def predictive_values(self, mu, var, full_cov=False, Y_metadata=None):
        """Add per-row observation noise to the latent predictive (co)variance.

        NOTE: `var` is modified in place via `+=` and also returned.
        """
        ind = Y_metadata['output_index'].flatten()
        _variance = np.array([self.likelihoods_list[j].variance for j in ind ])
        if full_cov:
            # Noise only affects the diagonal of the full covariance.
            var += np.eye(var.shape[0])*_variance
        else:
            var += _variance
        return mu, var
    def predictive_variance(self, mu, sigma, Y_metadata):
        """Predictive variance of the observations: noise variance + sigma^2."""
        _variance = self.gaussian_variance(Y_metadata)
        return _variance + sigma**2
    def predictive_quantiles(self, mu, var, quantiles, Y_metadata):
        """Compute predictive quantiles per output and reassemble them in the
        original row order; returns one column vector per quantile."""
        ind = Y_metadata['output_index'].flatten()
        outputs = np.unique(ind)
        Q = np.zeros( (mu.size,len(quantiles)) )
        for j in outputs:
            q = self.likelihoods_list[j].predictive_quantiles(mu[ind==j,:],
                var[ind==j,:],quantiles,Y_metadata=None)
            Q[ind==j,:] = np.hstack(q)
        return [q[:,None] for q in Q.T]
    def samples(self, gp, Y_metadata):
        """
        Returns a set of samples of observations based on a given value of the latent variable.

        :param gp: latent variable, an (N1, N2) array; each row is sampled
            with the noise model chosen by Y_metadata['output_index']
        """
        N1, N2 = gp.shape
        Ysim = np.zeros((N1,N2))
        ind = Y_metadata['output_index'].flatten()
        for j in np.unique(ind):
            flt = ind==j
            gp_filtered = gp[flt,:]
            n1 = gp_filtered.shape[0]
            lik = self.likelihoods_list[j]
            # Draw one observation per latent value through the link function.
            _ysim = np.array([np.random.normal(lik.gp_link.transf(gpj), scale=np.sqrt(lik.variance), size=1) for gpj in gp_filtered.flatten()])
            Ysim[flt,:] = _ysim.reshape(n1,N2)
        return Ysim
|
befelix/GPy
|
GPy/likelihoods/mixed_noise.py
|
Python
|
bsd-3-clause
| 3,266
|
[
"Gaussian"
] |
6cefa342049a5072291a1f4cc79817f6dd7dd047ae44ec7a53f1fb161ea760c9
|
import os
import json
import base64
from lacore.adf.elements import (Archive, Meta, Links, Certificate,
Signature, Auth)
from lacore.adf.persist import make_adf, load_archive
from behave import step
from tempfile import NamedTemporaryFile
@step(u'I have 1 available archive')
def one_archive(context):
    """Shortcut: create a single archive with the default title "foo"."""
    default = u'Given I have 1 available archive titled "foo"'
    context.execute_steps(default)
@step(u'I have 1 available archive titled "{title}"')
def one_archive_titled(context, title):
    """Write a minimal ADF archive file with the given title into
    $HOME/Longaccess/archives and keep it open as context.archive."""
    archive_dir = os.path.join(context.environ['HOME'], "Longaccess/archives")
    if not os.path.isdir(archive_dir):
        os.makedirs(archive_dir)
    docs = [Archive(title, Meta('zip', 'aes-256-ctr')),
            Auth(md5=('0'*32).decode('hex'))]
    context.archive = NamedTemporaryFile(dir=archive_dir, suffix='.adf')
    context.archive.write(make_adf(docs))
    context.archive.flush()
@step(u'the archive titled "{title}" has a link to a local copy')
def archive_copy(context, title):
    """Add a 'links' document pointing the archive at a local data file.

    The ADF file is rewritten in place; truncate() after writing so stale
    trailing bytes from the longer original content cannot survive
    (bug fix: the file was previously rewritten without truncating).
    """
    assert context.archive
    context.archive.seek(0)
    docs = load_archive(context.archive)
    context.archive.seek(0)
    docs['links'] = Links(
        local='file://' + os.path.join(context.environ['HOME'],
                                       "Longaccess/data/test"))
    context.archive.write(make_adf(list(docs.itervalues())))
    context.archive.truncate()
    context.archive.flush()
@step(u'the archive titled "{title}" is {size} KB big')
def archive_size(context, title, size):
    """Set the stored size in the archive's meta document and rewrite the file.

    NOTE(review): the step text says KB but the multiplier is 1024 * 1024
    (i.e. MB); kept as-is because existing feature files may depend on it
    — confirm the intended unit.  truncate() added after the in-place
    rewrite so a shorter document cannot leave stale trailing bytes.
    """
    assert context.archive
    context.archive.seek(0)
    docs = load_archive(context.archive)
    context.archive.seek(0)
    docs['archive'].meta.size = int(size) * 1024 * 1024
    context.archive.write(make_adf(list(docs.itervalues())))
    context.archive.truncate()
    context.archive.flush()
@step(u'the local copy for "{title}" is an empty file')
def archive_copy_empty(context, title):
    """Create an empty file at $HOME/Longaccess/data/test as the local copy."""
    assert context.archive
    data_dir = os.path.join(context.environ['HOME'], "Longaccess/data")
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
    empty_path = os.path.join(data_dir, 'test')
    with open(empty_path, 'w'):
        pass
@step(u'I have a certificate for the archive with title "{title}"')
def archive_cert(context, title):
    """Write an ADF certificate file for the given archive title and keep it
    open as context.cert."""
    cert_dir = os.path.join(context.environ['HOME'], "Longaccess/certs")
    if not os.path.isdir(cert_dir):
        os.makedirs(cert_dir)
    docs = [Archive(title, Meta('zip', 'aes-256-ctr')),
            Auth(md5=('0'*32).decode('hex')),
            Certificate()]
    context.cert = NamedTemporaryFile(dir=cert_dir, suffix='.adf', delete=False)
    context.cert.write(make_adf(docs))
    context.cert.flush()
@step(u'I have 1 certificate titled "{title}"')
def one_cert(context, title):
    """Write a signed ADF certificate and register it in context.certs under
    a fresh random id (context.certid)."""
    cert_dir = os.path.join(context.environ['HOME'], "Longaccess/certs")
    if not os.path.isdir(cert_dir):
        os.makedirs(cert_dir)
    context.certid = base64.urlsafe_b64encode(os.urandom(10))
    docs = [Archive(title, Meta('zip', 'aes-256-ctr')),
            Auth(md5=('0'*32).decode('hex')),
            Signature(aid=context.certid, uri=''),
            Certificate()]
    cert_file = NamedTemporaryFile(dir=cert_dir, suffix='.adf', delete=False)
    cert_file.write(make_adf(docs))
    cert_file.flush()
    context.certs[context.certid] = cert_file
@step(u'I have downloaded an archive containing "{folder}"')
def downloaded_archive(context, folder):
    """Prepare an archive from a directory, then copy its ADF file into the
    certs directory so it looks like a downloaded archive."""
    prepare_archive_folder(context, folder)
    from glob import glob
    from shutil import copy
    archive_glob = os.path.join(context.environ['HOME'], "Longaccess/archives/*")
    newest = glob(archive_glob).pop()
    cert_dir = os.path.join(context.environ['HOME'], "Longaccess/certs")
    if not os.path.isdir(cert_dir):
        os.makedirs(cert_dir)
    copy(newest, cert_dir)
@step(u'I have {num} pending uploads')
def num_pending_uploads(context, num):
    """Create `num` pending uploads titled upload0, upload1, ..."""
    for idx in range(int(num)):
        pending_upload(context, "upload" + str(idx))
@step(u'I have a pending upload titled "{title}"')
def pending_upload(context, title):
    """Prepare an archive, then record a pending upload whose URI points at
    the mock API."""
    prepare_archive(context, title)
    from glob import glob
    import urlparse
    archive_glob = os.path.join(context.environ['HOME'], "Longaccess/archives/*")
    newest = glob(archive_glob).pop()
    upload_dir = os.path.join(context.environ['HOME'], "Longaccess/uploads")
    if not os.path.isdir(upload_dir):
        os.makedirs(upload_dir)
    assert(context.mock_api)
    url = urlparse.urljoin(context.mock_api.url(), 'path/to/api/upload/1')
    upload_path = os.path.join(upload_dir, os.path.basename(newest))
    with open(upload_path, 'w') as out:
        out.write(json.dumps({'uri': url}))
@step(u'the upload status is "{status}"')
def upload_status(context, status):
    """Point the mock API at the fixture matching the requested status;
    unknown statuses are a no-op (as before)."""
    fixtures = {'error': 'uploadError', 'completed': 'uploadComplete'}
    if status in fixtures:
        context.mock_api.test(fixtures[status], 'longaccessmock')
@step(u'there is an archive titled "{title}"')
def exists_archive_titled(context, title):
    """Assert via the CLI that an archive with the given title is listed."""
    script = u"""
        Given the command line arguments "archive list"
        When I run console script "lacli"
        Then I see "[ ]*{}"
        """.format(title)
    context.execute_steps(script)
@step(u'there is a completed certificate')
def exists_certificate(context):
    """Check that a cert file exists and carries the expected documents and
    signature aid."""
    from glob import glob
    from lacore.adf.persist import load_archive
    cert_files = glob(os.path.join(context.environ['HOME'], "Longaccess/certs/*"))
    assert len(cert_files) > 0, "there is a certificate"
    docs = {}
    with open(cert_files[0]) as fh:
        docs = load_archive(fh)
    assert 'links' in docs
    assert 'archive' in docs
    assert hasattr(docs['signature'], 'aid')
    assert docs['signature'].aid == 'https://longaccess.com/yoyoyo'
@step(u'there are {num} pending uploads')
def pending_upload_num(context, num):
    """Count the files in the uploads directory against the expected number."""
    from glob import glob
    upload_glob = os.path.join(context.environ['HOME'], "Longaccess/uploads/*")
    found = glob(upload_glob)
    assert len(found) == int(num)
@step(u'I prepare an archive with a file "{title}"')
def prepare_archive(context, title):
    """Drive the CLI to create an archive from a folder holding one empty file."""
    script = u'''
        Given an empty folder "{t}"
        And under "{{{t}}}" an empty file "{t}"
        And the command line arguments "archive create -t "{t}" {{{t}}}"
        When I run console script "lacli"
        Then I see "archive prepared"'''.format(t=title)
    context.execute_steps(script)
@step(u'I prepare an archive with a directory "{title}"')
def prepare_archive_folder(context, title):
    """Drive the CLI to create an archive directly from a directory."""
    script = u'''
        Given the command line arguments "archive create -t "{t}" {{{t}}}"
        When I run console script "lacli"
        Then I see "archive prepared"'''.format(t=title)
    context.execute_steps(script)
|
longaccess/longaccess-client
|
features/steps/archive.py
|
Python
|
apache-2.0
| 6,361
|
[
"ADF"
] |
2e808eaddb4ab24e8d4cb9b9620d7fa56e853010566d917413e2f456b7c83e12
|
import urllib2
# Python 2 scraper: walk the ChallengePost registrant pages and report the
# pages that mention any of the listed district towns.
i = 1
# Towns of interest; a page counts as a hit if any of these names appears
# anywhere in its (comma-stripped) HTML.
places = ["Addison", "Algonquin" ,"Arlington Heights", "Barrington", "Barrington Hills" ,
"Bartlett" ,"Bensenville" ,"Bloomingdale", "Buffalo Grove" ,"Carol Stream" ,"Carpentersville" ,
"Des Plaines" ,"Dundee", "East Dundee", "Elgin" , "Elk Grove Village", "Elmhurst", "Glen Ellyn" ,
"Glendale Heights", "Hanover Park" ,"Hoffman Estates", "Itasca" ,"Lombard", "Medinah" ,"Mount Prospect",
"Oak Brook", "Oakbrook Terrace", "Palatine","Rolling Meadows","Roselle", "Schaumburg" ,"South Barrington",
"South Elgin", "Streamwood", "Villa Park", "Wheeling", "Wood Dale"]
# Pages are numbered from 1; this loop scans pages 1..51 (upper bound exclusive).
amountOfPages = 52
while i < amountOfPages:
    print "http://housestudentapps.challengepost.com/registrants"+ "?page=" + str(i)
    # Fetch one page; commas are replaced by spaces before matching.
    words = urllib2.urlopen("http://housestudentapps.challengepost.com/registrants"+ "?page=" + str(i)).read().replace(","," ")
    #print words
    for k in places:
        # NOTE(review): plain substring match — "Dundee" also matches
        # "East Dundee", and only the page number is reported, not the town.
        if k in words:
            print "found one on page " + str(i)
    i = i+ 1
|
ColdSauce/House-of-Reps-STEM-Finder
|
thingthing.py
|
Python
|
mit
| 943
|
[
"Elk"
] |
709e55febfbf0abbd9b0e0ffce295a95b03bb67cb56f561dbe4209ee1e5d01e4
|
import os
import string
# Shell commands / binary paths used by the driver loop at the bottom of
# this file (site-specific absolute paths).
gamess2qmc="gamess2qmc"
gamess="nohup nice /home/apps/gamess/rungms"
gos="/home/lkwagner/qmcsources/qmc_inherit/gos-Linux-3.2"
#We want to read the sysdef module from the current
#directory, so add this to Python's search path
import sys
sys.path.insert(0,".")
# Star import: presumably supplies atoms, basis, ecp_sec, static_options,
# simulation_time, gamess_header, gamess_symm, refjast, the *_base names,
# etc. used below — verify against sysdef.py.
from sysdef import *
def print_gamess(atoms, file, basis, ecp_section, last_pun):
    """Write a GAMESS input deck to `file`.

    atoms       -- objects with .label, .atomnum, .pos (3-vector) and a
                   .basis key into `basis`
    file        -- path of the .inp file to create
    basis       -- dict mapping an atom's basis key to its basis-set text
    ecp_section -- ECP text appended after $DATA
    last_pun    -- previous GAMESS punch file; if non-empty, the guess line
                   is emitted and everything from the first $VEC/$SCF group
                   onward is copied from it

    Fixes a resource leak: the punch file (`inf`) was opened and never
    closed, and the output file leaked on an exception; both now use
    context managers.
    """
    with open(file, "w") as f:
        f.write(gamess_header)
        if last_pun != '':
            f.write(gamess_guess_line)
        f.write(" $DATA \n \n")
        f.write(gamess_symm + "\n")
        for at in atoms:
            # One coordinate row per atom, followed by its basis set.
            row = " " + at.label + " " + str(at.atomnum) + " " \
                + str(at.pos[0]) + " " + str(at.pos[1]) + " " \
                + str(at.pos[2]) + "\n"
            f.write(row)
            f.write(basis[at.basis] + "\n")
        f.write(" $END\n")
        f.write(ecp_section)
        if last_pun != '':
            with open(last_pun, "r") as inf:
                pr = 0
                for line in inf:
                    # NOTE(review): rfind(...) > 0 misses a marker at column 0;
                    # punch-file group markers are indented so this works in
                    # practice — confirm before changing to >= 0.
                    if line.rfind("$VEC") > 0 or line.rfind("$SCF") > 0:
                        pr = 1
                    if pr == 1:
                        f.write(line)
##################################################
class gos_dynamic_options:
    """Per-iteration (dynamic) file-name options for one GOS/QMC step.

    NOTE(review): these are *class* attributes serving as defaults; the
    driver loop below overwrites them on each fresh instance, so no state
    is shared in practice — but assigning via the class would leak across
    instances.
    """
    root=""      # base name for this iteration's files
    wffile=""    # trial wave-function file fed to the optimizer
    md_wf=""     # optimized wave function consumed by the MD run
    md_read=""   # MD checkpoint to read (previous iteration), '' for none
    md_write=""  # MD checkpoint to write, '' for none
###################################################
def print_gos_opt(sopts, dopts):
    """Return the text of a GOS variance-minimization (.opt) input file.

    Side effect: sets dopts.md_wf to "<root>.wf", the optimized
    wave-function file that the subsequent MD run reads.
    sopts carries the static options from sysdef; dopts the per-iteration
    file names.  The embedded whitespace is significant to the GOS parser,
    so the template is kept verbatim.
    """
    dopts.md_wf=dopts.root+".wf"
    return """\
METHOD {
  VMC
  NBLOCK 1
  NSTEP 5
  TIMESTEP """ + str(sopts.vmc_timestep) + """
  NDECORR """ + str(sopts.vmc_ndecorr) + """
  STORECONFIG md_opt.config
  READCONFIG md_opt.config
  NCONFIG """ + str(sopts.nconfig) + """
  EREF """ + str(sopts.eref) + """
}
METHOD {
  OPTIMIZE
  ITERATIONS """ + str(sopts.iterations)+ """
  READCONFIG md_opt.config
  NCONFIG """ + str(sopts.nconfig) + """
  EREF """ + str(sopts.eref) + """
  MINFUNCTION VARIANCE
  WFOUTPUT """ + dopts.md_wf + """
  PSEUDOTEMP /scratch/lkwagner/qmcpseudo
}
INCLUDE """ + dopts.root + """.sys
TRIALFUNC { INCLUDE """ + dopts.wffile + " } \n"
##################################################
def print_gos_md(sopts, dopts):
    """Return the text of a GOS molecular-dynamics (.md) input file.

    Reads the module-global `atoms` (from sysdef's star import) to build the
    ATOMIC_WEIGHTS list.  READCHECK/STORECHECK lines are emitted only when
    dopts.md_read / dopts.md_write are non-empty.  The embedded whitespace
    is significant to the GOS parser, so the template is kept verbatim.
    """
    read=''
    write=''
    if(dopts.md_read != ''):
        read="READCHECK " + dopts.md_read + "\n"
    if(dopts.md_write !=''):
        write="STORECHECK " + dopts.md_write +"\n"
    atomweights=''
    for at in atoms:
        atomweights=atomweights + str(at.atomweight) + " "
    return """\
METHOD {
  MD
  NSTEP """ +str(sopts.md_nsteps)+ """
  TIMESTEP """ + str(sopts.md_timestep) + "\n  " + \
  read  + write + """
  ATOMIC_WEIGHTS { """ + atomweights + """ }
  EMBED {
    VMC
    NBLOCK """ + str(sopts.md_vmc_nblock) + """
    NSTEP """ + str(sopts.md_vmc_nstep) + """
    NDECORR """ + str(sopts.vmc_ndecorr) + """
    TIMESTEP """ + str(sopts.vmc_timestep) + """
    NCONFIG """ + str(sopts.md_vmc_nconfig) + """
    STORECONFIG md.config
    """ + sopts.md_vmc_extra + """
    #uncomment the following to read a configuration
    READCONFIG md.config
    EREF """ + str(sopts.eref) + """
  }
}
INCLUDE """ + dopts.root + """.sys
TRIALFUNC { INCLUDE """ + dopts.md_wf+ " } \n"
##################################################
##################################################
#log=open(log_file, "w")
# Number of driver iterations: total simulation time divided by the time
# covered by one MD run (all values come from sysdef's star import).
nsteps=int(simulation_time/(static_options.md_timestep*static_options.md_nsteps))
for i in range(0,nsteps):
    #Run GAMESS
    gamessroot=gamess_base + string.zfill(str(i),4)
    gosroot=gos_base+string.zfill(str(i),4)
    # Reuse the previous iteration's punch file as the SCF guess, or the
    # user-supplied starting punch file on the very first iteration.
    last_pun=''
    if(i > 0):
        last_pun=gamess_base+string.zfill(str(i-1), 4) + ".pun"
    elif(gamess_punch_start!= ''):
        last_pun=gamess_punch_start
    print_gamess(atoms, gamessroot + ".inp", basis, ecp_sec, last_pun)
    systext=gamess+" " + gamessroot + ' >& ' + gamessroot + ".out"
    print "executing: " + systext
    os.system(systext)
    #Convert from GAMESS to QMC
    systext=gamess2qmc +" -o " + gosroot + " " + gamessroot
    print "executing: " + systext
    os.system(systext)
    # Drop converter outputs that are not used; only the .slater file feeds
    # the Slater-Jastrow trial function built next.
    os.remove(gosroot+".hf");
    os.remove(gosroot+".jast");
    os.remove(gosroot+".jast2");
    refwf=gosroot+".refwf"
    f=open(refwf, "w");
    f.write("SLATER-JASTROW\n WF1 { INCLUDE " +\
        gosroot + ".slater }\n WF2 { INCLUDE " + refjast + " }\n")
    f.close()
    #Set dynamic options
    firstoptfile=gosroot+".opt"
    dyn_opt=gos_dynamic_options()
    dyn_opt.root=gosroot
    dyn_opt.wffile=refwf
    dyn_opt.md_write=gosroot+".mdcheck"
    if(i > 0):
        last_name=gos_base+string.zfill(str(i-1),4)
        dyn_opt.md_read=last_name + ".mdcheck"
    #Optimize the first wave function
    f=open(firstoptfile, "w")
    f.write(print_gos_opt(static_options, dyn_opt))
    f.close()
    systext=gos + " " + firstoptfile
    print "executing: " + systext
    os.system(systext)
    #Run MD
    mdfile=gosroot+".md"
    f=open(mdfile, "w")
    f.write(print_gos_md(static_options, dyn_opt))
    f.close()
    systext=gos + " " + mdfile
    print "executing: " + systext
    os.system(systext)
    # Read the propagated atomic positions back from the MD checkpoint so
    # the next GAMESS run starts from the new geometry.
    mdout=open(dyn_opt.md_write, "r")
    mdout.readline() #natoms
    mdout.readline() #current_pos
    n=0
    for at in atoms:
        line=mdout.readline()
        lsp=line.split()
        at.pos[0]=float(lsp[0])
        at.pos[1]=float(lsp[1])
        at.pos[2]=float(lsp[2])
|
willwheelera/mainline
|
src/attic/md_drive.py
|
Python
|
gpl-2.0
| 5,427
|
[
"GAMESS"
] |
629143f2d3e35d1b55c2b00a1bd543f077c370a6b769d68db86e8087e923f098
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import random
import time
import threading
import six
from six.moves import _thread as thread
from hashlib import md5
from DIRAC.Core.Utilities.ReturnValues import S_ERROR, S_OK
from DIRAC.Core.Utilities.DIRACSingleton import DIRACSingleton
__RCSID__ = "$Id$"
@six.add_metaclass(DIRACSingleton)
@six.add_metaclass(DIRACSingleton)
class LockRing(object):
    """Process-wide registry of named threading locks and events (singleton).

    Locks and events are created lazily on first request and can be looked
    up again by name.  _openAll/_setAllEvents exist solely to neutralize
    inherited state in freshly forked child processes.
    """

    def __init__(self):
        self.__locks = {}   # name -> threading.Lock / threading.RLock
        self.__events = {}  # name -> threading.Event

    def __genName(self, container):
        """Generate a random name not already present in `container`."""
        # TODO: Shouldn't this be a UUID?
        name = md5(str(time.time() + random.random()).encode()).hexdigest()
        retries = 10
        while name in container and retries:
            name = md5(str(time.time() + random.random()).encode()).hexdigest()
            retries -= 1
        return name

    def getLock(self, lockName="", recursive=False):
        """Return the named lock, creating it (RLock if recursive) on first use."""
        if not lockName:
            lockName = self.__genName(self.__locks)
        try:
            return self.__locks[lockName]
        except KeyError:
            if recursive:
                self.__locks[lockName] = threading.RLock()
            else:
                self.__locks[lockName] = threading.Lock()
            return self.__locks[lockName]

    def getEvent(self, evName=""):
        """Return the named event, creating it on first use."""
        if not evName:
            evName = self.__genName(self.__events)
        try:
            return self.__events[evName]
        except KeyError:
            self.__events[evName] = threading.Event()
            return self.__events[evName]

    def acquire(self, lockName):
        """Acquire a previously created lock; S_ERROR if the name is unknown.

        Bug fix: a missing name raises KeyError, not ValueError, so the old
        ``except ValueError`` never fired and the KeyError escaped to the
        caller instead of producing the intended S_ERROR.
        """
        try:
            self.__locks[lockName].acquire()
        except KeyError:
            return S_ERROR("No lock named %s" % lockName)
        return S_OK()

    def release(self, lockName):
        """Release a previously created lock; S_ERROR if the name is unknown.

        Bug fix: catch KeyError (missing name) instead of ValueError, which
        neither the dict lookup nor Lock.release ever raises.  Releasing an
        unlocked lock still propagates RuntimeError, as before.
        """
        try:
            self.__locks[lockName].release()
        except KeyError:
            return S_ERROR("No lock named %s" % lockName)
        return S_OK()

    def _openAll(self):
        """
        WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
        DO NOT USE EXCEPT IN JUST SPAWNED NEW CHILD PROCESSES!!!!!!!!
        NEVER IN THE PARENT PROCESS!!!!!!
        WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
        """
        # Force-release every lock; errors from already-open locks are ignored.
        for lockName in self.__locks.keys():
            try:
                self.__locks[lockName].release()
            except (RuntimeError, thread.error, KeyError):
                pass

    def _setAllEvents(self):
        """
        WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
        DO NOT USE EXCEPT IN JUST SPAWNED NEW CHILD PROCESSES!!!!!!!!
        NEVER IN THE PARENT PROCESS!!!!!!
        WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
        """
        for evName in self.__events.keys():
            try:
                self.__events[evName].set()
            except KeyError:
                pass
if __name__ == "__main__":
lr = LockRing()
lock = lr.getLock("test1")
print("ACQUIRING LOCK", lock)
lock.acquire()
print("IS THE SAME LOCK? ", lock == lr.getLock("test1"))
print("OPENING ALL LOCKS")
lr._openAll()
print("REACQUIRING LOCK", lock)
lr.acquire("test1")
print("RELEASING LOCK")
lr.release("test1")
print("IS SINGLETON", lr == LockRing())
ev = lr.getEvent("POT")
ev.set()
lr._setAllEvents()
print("ALL OK")
|
yujikato/DIRAC
|
src/DIRAC/Core/Utilities/LockRing.py
|
Python
|
gpl-3.0
| 3,081
|
[
"DIRAC"
] |
eeec0b36a314eff258c55f35ee08647409acd0e5228d787d649c85fa0013efb0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.