text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import sys
from ansible.compat.six import string_types
from ansible.compat.six.moves import builtins
from ansible import constants as C
from ansible.plugins import filter_loader, test_loader
def safe_eval(expr, locals=None, include_exceptions=False):
    '''
    Evaluate a Python expression while restricting it to a whitelist of AST
    node types and callable names.
    This is intended for allowing things like:
    with_items: a_list_variable
    Where Jinja2 would return a string but we do not want to allow it to
    call functions (outside of Jinja2, where the env is constrained). If
    the input data to this function came from an untrusted (remote) source,
    it should first be run through _clean_data_struct() to ensure the data
    is further sanitized prior to evaluation.
    Based on:
    http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
    :arg expr: expression string to evaluate.  Non-string input is returned
        unchanged (assumed to already be a templated datastructure).
    :kwarg locals: optional dict of local variables made visible to the
        evaluated expression.  Defaults to ``None`` (treated as an empty
        dict) instead of ``{}`` to avoid the shared mutable-default-argument
        pitfall.
    :kwarg include_exceptions: if True, return a ``(result, exception)``
        tuple instead of the bare result.
    '''
    # BUGFIX: a literal ``{}`` default is created once at def-time and shared
    # across all calls; substitute a fresh empty dict per call instead.
    if locals is None:
        locals = {}
    # define certain JSON types
    # eg. JSON booleans are unknown to python eval()
    JSON_TYPES = {
        'false': False,
        'null': None,
        'true': True,
    }
    # this is the whitelist of AST nodes we are going to
    # allow in the evaluation. Any node type other than
    # those listed here will raise an exception in our custom
    # visitor class defined below.
    SAFE_NODES = set(
        (
            ast.Add,
            ast.BinOp,
            ast.Call,
            ast.Compare,
            ast.Dict,
            ast.Div,
            ast.Expression,
            ast.List,
            ast.Load,
            ast.Mult,
            ast.Num,
            ast.Name,
            ast.Str,
            ast.Sub,
            ast.Tuple,
            ast.UnaryOp,
        )
    )
    # AST node types were expanded after 2.6
    if sys.version_info[:2] >= (2, 7):
        SAFE_NODES.update(set((ast.Set,)))
    # And in Python 3.4 too (True/False/None parse as NameConstant there)
    if sys.version_info[:2] >= (3, 4):
        SAFE_NODES.update(set((ast.NameConstant,)))
    # Collect the names of all loadable Jinja2 filter and test plugins so
    # they are allowed as calls.  Loop variables renamed so they do not
    # shadow the ``filter`` builtin.
    filter_list = []
    for filter_plugin in filter_loader.all():
        filter_list.extend(filter_plugin.filters().keys())
    test_list = []
    for test_plugin in test_loader.all():
        test_list.extend(test_plugin.tests().keys())
    CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list

    class CleansingNodeVisitor(ast.NodeVisitor):
        # Walks the parsed tree and raises on any node type outside
        # SAFE_NODES, or any non-whitelisted builtin used as a call target.
        def generic_visit(self, node, inside_call=False):
            if type(node) not in SAFE_NODES:
                raise Exception("invalid expression (%s)" % expr)
            elif isinstance(node, ast.Call):
                inside_call = True
            elif isinstance(node, ast.Name) and inside_call:
                # Disallow calling builtins unless explicitly whitelisted.
                if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
                    raise Exception("invalid function: %s" % node.id)
            # iterate over all child nodes
            for child_node in ast.iter_child_nodes(node):
                self.generic_visit(child_node, inside_call)
    if not isinstance(expr, string_types):
        # already templated to a datastructure, perhaps?
        if include_exceptions:
            return (expr, None)
        return expr
    cnv = CleansingNodeVisitor()
    try:
        parsed_tree = ast.parse(expr, mode='eval')
        cnv.visit(parsed_tree)
        compiled = compile(parsed_tree, expr, 'eval')
        # JSON_TYPES doubles as the globals dict, so no builtins leak in;
        # copy ``locals`` so the caller's dict is never mutated.
        result = eval(compiled, JSON_TYPES, dict(locals))
        if include_exceptions:
            return (result, None)
        else:
            return result
    except SyntaxError as e:
        # special handling for syntax errors, we just return
        # the expression string back as-is to support late evaluation
        if include_exceptions:
            return (expr, None)
        return expr
    except Exception as e:
        if include_exceptions:
            return (expr, e)
        return expr
| wenottingham/ansible | lib/ansible/template/safe_eval.py | Python | gpl-3.0 | 4,604 | [
"VisIt"
] | 3d563d925331c92d62bf4b4cf596142643cb4d8eaaea0e73c1da846a1eb6cf3b |
"""
Module to measure spectral cross-correlation...
"""
import numpy as np
import lmfit
def fit_lag(arr1, arr2, kind='linear'):
    """Fit the lag between ``arr1`` and ``arr2`` by minimizing the chi2
    residuals with lmfit; returns the lmfit minimizer result.

    ``kind`` is accepted for interface compatibility but is not used here.
    Raises ValueError if the two arrays differ in size.
    """
    if arr1.size != arr2.size:
        raise ValueError("Size mismatch")

    def _residuals(params):
        # Residual vector for the trial lag currently held by the fitter.
        return chi2(arr1, arr2, params['lag'].value)

    params = lmfit.Parameters()
    params['lag'] = lmfit.Parameter(value=0.0)
    return lmfit.minimize(_residuals, params, args=())
def shift(data, deltax, phase=0):
    """
    FFT-based sub-pixel image shift
    http://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation/content/html/efficient_subpixel_registration.html

    Returns the (complex-valued) inverse FFT of the phase-ramped spectrum;
    take the real part for a real input signal.
    """
    npts = data.size
    # Frequency-ordered sample indices matching np.fft's layout.
    freq_idx = np.fft.ifftshift(
        np.linspace(-np.fix(npts / 2), np.ceil(npts / 2) - 1, npts))
    # Shift theorem: a shift by deltax in the signal domain is a linear
    # phase ramp in the Fourier domain.
    ramp = np.exp(-1j * 2 * np.pi * deltax * freq_idx / npts)
    spectrum = np.fft.fft(data) * ramp * np.exp(-1j * phase)
    return np.fft.ifft(spectrum)
def chi2(arr1, arr2, lag):
    """Return the per-point residual vector between ``arr1`` and a copy of
    ``arr2`` shifted by ``lag`` samples (sub-pixel, via ``shift``).

    Samples pushed outside the valid index range by the shift are zeroed
    on both sides so only overlapping data are compared.  Residuals are
    scaled by ``ngood**-0.5``.  Raises ValueError if the shifted array
    contains NaNs.
    """
    xv = np.arange(arr1.size)
    # Sub-pixel FFT shift; keep only the real part for the comparison.
    shifted = np.real(shift(arr2, lag))
    # Zero out samples that the shift wrapped outside the original range.
    shifted[(xv - lag > xv.max()) + (xv - lag < xv.min())] = 0
    arr1cp = arr1 * (xv >= lag) * (xv <= (xv + lag).max())
    ngood = (shifted != 0).sum()
    # Hoisted so the count is computed once (was evaluated twice before).
    n_input_ok = (arr1cp != 0).sum()
    if n_input_ok != ngood:
        # Diagnostic only: the two overlap masks disagree (e.g. exact zeros
        # in the data).  Single-argument print() is valid on Python 2 and 3;
        # the bare print statement used previously is a Python-3 syntax error.
        print("There are %i OK in input and %i ok in comparison" % (n_input_ok, ngood))
    if np.any(np.isnan(shifted)):
        raise ValueError("Uncaught NAN")
    return (arr1cp - shifted) / ngood**0.5
if __name__ == "__main__":
print "Running test code"
from pylab import *
xvals = np.linspace(-1,1,100)
inds = np.arange(xvals.size)
# gaussian
test_spec_1 = exp(-xvals**2/(2*0.25**2))
# power-law
test_spec_2 = np.abs(np.fft.fftshift(
np.fft.fft(np.random.randn(xvals.size)*xvals**-2 +
np.random.randn(xvals.size)*xvals**-2)*1j ))
test_spec_2 *= test_spec_1.sum()/test_spec_2.sum()
figure(1)
clf()
plot(xvals,test_spec_1)
plot(xvals,test_spec_2)
# total s/n = 100
noise = test_spec_1.sum() / 200.
#noise = test_spec_1.sum() / 1000.
noise1 = np.random.randn(xvals.size)*noise
test_spec_1n = test_spec_1+noise1
noise2 = np.random.randn(xvals.size)*noise
test_spec_2n = test_spec_2+noise2
figure(2)
clf()
plot(xvals,test_spec_1n)
plot(xvals,test_spec_2n)
figure(3)
clf()
plot(xvals,np.correlate(test_spec_1n,test_spec_1n,mode='same')/np.correlate(np.ones(xvals.size),np.ones(xvals.size),mode='same'))
plot(xvals,np.correlate(test_spec_2n,test_spec_2n,mode='same')/np.correlate(np.ones(xvals.size),np.ones(xvals.size),mode='same'))
figure(4)
clf()
plot(xvals,np.correlate(noise1,noise1,mode='same')/np.correlate(np.ones(xvals.size),np.ones(xvals.size),mode='same'))
plot(xvals,np.correlate(noise2,noise2,mode='same')/np.correlate(np.ones(xvals.size),np.ones(xvals.size),mode='same'))
chi2_1_wrap = np.array([((test_spec_1-roll(test_spec_1n,ii))**2).sum() for ii in (xvals.size/2-inds)])
chi2_2_wrap = np.array([((test_spec_2-roll(test_spec_2n,ii))**2).sum() for ii in (xvals.size/2-inds)])
figure(5)
clf()
plot(xvals.size-inds,chi2_1_wrap)
plot(xvals.size-inds,chi2_2_wrap)
ylabel("$\\chi^2$")
xlabel("Lag")
chi2_1_chop = np.array(
[((test_spec_1[:xvals.size-ii]-test_spec_1n[ii:])**2).sum() for ii in xrange(xvals.size/2)]+
[((test_spec_1[ii:]-test_spec_1n[:xvals.size-ii])**2).sum() for ii in xrange(xvals.size/2,0,-1)]
)
chi2_2_chop = np.array(
[((test_spec_2[:xvals.size-ii]-test_spec_2n[ii:])**2).sum() for ii in xrange(xvals.size/2)]+
[((test_spec_2[ii:]-test_spec_2n[:xvals.size-ii])**2).sum() for ii in xrange(xvals.size/2,0,-1)]
)
figure(6)
clf()
plot(xvals.size/2-inds,chi2_1_chop/(100-np.abs(np.linspace(50,-50,100))))
plot(xvals.size/2-inds,chi2_2_chop/(100-np.abs(np.linspace(50,-50,100))))
ylabel("$\\chi^2$")
xlabel("Lag")
chi2_smallshifts_1n = [(chi2(test_spec_1,test_spec_1n,xx)**2).sum() for xx in np.linspace(-5,5)]
chi2_smallshifts_2n = [(chi2(test_spec_2,test_spec_2n,xx)**2).sum() for xx in np.linspace(-5,5)]
figure(7)
clf()
plot(np.linspace(-5,5),chi2_smallshifts_1n)
plot(np.linspace(-5,5),chi2_smallshifts_2n)
ylabel("$\\chi^2$")
xlabel("Lag")
print "Best-fit shifts (real is zero): ",linspace(-5,5)[argmin(chi2_smallshifts_1n)],linspace(-5,5)[argmin(chi2_smallshifts_2n)]
from scipy.interpolate import interp1d
offset_spectra_1n = np.array([interp1d(inds, test_spec_1n, bounds_error=False, kind='cubic')(inds+lag) for lag in linspace(0,1,5)])
offset_spectra_2n = np.array([interp1d(inds, test_spec_2n, bounds_error=False, kind='cubic')(inds+lag) for lag in linspace(0,1,5)])
#offset_spectra_1n = np.array([interp(inds, (inds+lag), test_spec_1n) for lag in linspace(0,1,5)])
#offset_spectra_2n = np.array([interp(inds, (inds+lag), test_spec_2n) for lag in linspace(0,1,5)])
figure(8)
clf()
plot(xvals,offset_spectra_1n.T)
plot(xvals,offset_spectra_2n.T)
figure(9)
clf()
plot(xvals,(test_spec_1-offset_spectra_1n).T)
plot(xvals,(test_spec_2-offset_spectra_2n).T)
import scipy
import scipy.signal
xc_1n = scipy.signal.correlate(test_spec_1,test_spec_1n,mode='same')
xc_2n = scipy.signal.correlate(test_spec_2,test_spec_2n,mode='same')
figure(10)
clf()
plot(xc_1n)
plot(xc_2n)
print argmax(xc_1n),argmax(xc_2n)
shifted_1n = np.interp(inds, inds+1.75, test_spec_1n)
shifted_2n = np.interp(inds, inds+1.75, test_spec_2n)
fit1n = fit_lag(test_spec_1,shifted_1n)
fit2n = fit_lag(test_spec_2,shifted_2n)
print fit1n.params
print fit2n.params
xc_1n = scipy.signal.correlate(test_spec_1,shifted_1n,mode='same')
xc_2n = scipy.signal.correlate(test_spec_2,shifted_2n,mode='same')
figure(11)
clf()
plot(xc_1n)
plot(xc_2n)
print argmax(xc_1n),argmax(xc_2n)
def fftcorr(s1,s2,pad=0):
fs1 = np.fft.fft(s1-s1.mean())
fs2 = np.fft.fft(s2[::-1]-s2.mean())
mult = fs1*fs2
if pad:
mult = np.concatenate([np.zeros(pad),mult,np.zeros(pad)])
xc = np.fft.ifft(mult)
return np.fft.fftshift(np.abs(xc))
test_spec_1_padded = np.concatenate([np.zeros(300), test_spec_1, np.zeros(300)])
test_spec_2_padded = np.concatenate([np.zeros(300), test_spec_2, np.zeros(300)])
shifted_1n_padded = np.concatenate([np.zeros(300), shifted_1n, np.zeros(300)])
shifted_2n_padded = np.concatenate([np.zeros(300), shifted_2n, np.zeros(300)])
xc_1np = fftcorr(test_spec_1,shifted_1n,pad=300)
xc_2np = fftcorr(test_spec_2,shifted_2n,pad=300)
figure(12)
clf()
plot(xc_1np)
plot(xc_2np)
print argmax(xc_1np),argmax(xc_2np)
figure(13)
clf()
plot(xvals, test_spec_1)
plot(xvals, shift(test_spec_1,5.5))
plot(xvals, test_spec_1-shift(test_spec_1,5.5))
plot(xvals, shift(test_spec_1,55))
show()
| ufoym/agpy | agpy/cross_correlation.py | Python | mit | 7,294 | [
"Gaussian"
] | 05e918a77a6d73ff2b7547620df0f5a3c26dfb62685526f89b85030bd3862193 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import skbio
from skbio.util._decorator import classproperty, overrides
from skbio.util._decorator import stable
from ._nucleotide_mixin import NucleotideMixin, _motifs as _parent_motifs
from ._grammared_sequence import GrammaredSequence, DisableSubclassingMeta
class DNA(GrammaredSequence, NucleotideMixin,
          metaclass=DisableSubclassingMeta):
    r"""Store DNA sequence data and optional associated metadata.
    Only characters in the IUPAC DNA character set [1]_ are supported.
    Parameters
    ----------
    sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
        Characters representing the DNA sequence itself.
    metadata : dict, optional
        Arbitrary metadata which applies to the entire sequence.
    positional_metadata : Pandas DataFrame consumable, optional
        Arbitrary per-character metadata. For example, quality data from
        sequencing reads. Must be able to be passed directly to the Pandas
        DataFrame constructor.
    interval_metadata : IntervalMetadata
        Arbitrary interval metadata which applies to intervals within
        a sequence to store interval features (such as genes on the
        DNA sequence).
    lowercase : bool or str, optional
        If ``True``, lowercase sequence characters will be converted to
        uppercase characters in order to be valid IUPAC DNA characters. If
        ``False``, no characters will be converted. If a str, it will be
        treated as a key into the positional metadata of the object. All
        lowercase characters will be converted to uppercase, and a ``True``
        value will be stored in a boolean array in the positional metadata
        under the key.
    validate : bool, optional
        If ``True``, validation will be performed to ensure that all sequence
        characters are in the IUPAC DNA character set. If ``False``, validation
        will not be performed. Turning off validation will improve runtime
        performance. If invalid characters are present, however, there is
        **no guarantee that operations performed on the resulting object will
        work or behave as expected.** Only turn off validation if you are
        certain that the sequence characters are valid. To store sequence data
        that is not IUPAC-compliant, use ``Sequence``.
    See Also
    --------
    RNA
    GrammaredSequence
    Notes
    -----
    Subclassing is disabled for DNA, because subclassing makes
    it possible to change the alphabet, and certain methods rely on the
    IUPAC alphabet. If a custom sequence alphabet is needed, inherit directly
    from ``GrammaredSequence``.
    References
    ----------
    .. [1] Nomenclature for incompletely specified bases in nucleic acid
       sequences: recommendations 1984.
       Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
       A Cornish-Bowden
    Examples
    --------
    >>> from skbio import DNA
    >>> DNA('ACCGAAT')
    DNA
    --------------------------
    Stats:
        length: 7
        has gaps: False
        has degenerates: False
        has definites: True
        GC-content: 42.86%
    --------------------------
    0 ACCGAAT
    Convert lowercase characters to uppercase:
    >>> DNA('AcCGaaT', lowercase=True)
    DNA
    --------------------------
    Stats:
        length: 7
        has gaps: False
        has degenerates: False
        has definites: True
        GC-content: 42.86%
    --------------------------
    0 ACCGAAT
    """

    # --- IUPAC grammar definition (consumed by GrammaredSequence) ---------

    @classproperty
    @overrides(NucleotideMixin)
    def complement_map(cls):
        # Watson-Crick pairs plus the complements of the IUPAC degenerate
        # codes; gap characters complement to themselves.
        comp_map = {
            'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
            'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
            'H': 'D', 'V': 'B', 'N': 'N'
        }
        comp_map.update({c: c for c in cls.gap_chars})
        return comp_map

    @classproperty
    @overrides(GrammaredSequence)
    def definite_chars(cls):
        # The four unambiguous nucleotide characters.
        return set("ACGT")

    @classproperty
    @overrides(GrammaredSequence)
    def degenerate_map(cls):
        # Each IUPAC degenerate code mapped to the set of definite
        # characters it can stand for.
        return {
            "R": set("AG"), "Y": set("CT"), "M": set("AC"), "K": set("TG"),
            "W": set("AT"), "S": set("GC"), "B": set("CGT"), "D": set("AGT"),
            "H": set("ACT"), "V": set("ACG"), "N": set("ACGT")
        }

    @classproperty
    @overrides(GrammaredSequence)
    def default_gap_char(cls):
        return '-'

    @classproperty
    @overrides(GrammaredSequence)
    def gap_chars(cls):
        return set('-.')

    @property
    def _motifs(self):
        # Module-level motif registry (defined at the bottom of this file).
        return _motifs

    @stable(as_of="0.4.0")
    def transcribe(self):
        """Transcribe DNA into RNA.
        DNA sequence is assumed to be the coding strand. Thymine (T) is
        replaced with uracil (U) in the transcribed sequence.
        Returns
        -------
        RNA
            Transcribed sequence.
        See Also
        --------
        translate
        translate_six_frames
        Notes
        -----
        DNA sequence's metadata, positional, and interval
        metadata are included in the transcribed RNA sequence.
        Examples
        --------
        Transcribe DNA into RNA:
        >>> from skbio import DNA
        >>> dna = DNA('TAACGTTA')
        >>> dna
        DNA
        --------------------------
        Stats:
            length: 8
            has gaps: False
            has degenerates: False
            has definites: True
            GC-content: 25.00%
        --------------------------
        0 TAACGTTA
        >>> dna.transcribe()
        RNA
        --------------------------
        Stats:
            length: 8
            has gaps: False
            has degenerates: False
            has definites: True
            GC-content: 25.00%
        --------------------------
        0 UAACGUUA
        """
        # T -> U on the raw byte string; all other characters are shared
        # between the DNA and RNA alphabets.
        seq = self._string.replace(b'T', b'U')
        metadata = None
        if self.has_metadata():
            metadata = self.metadata
        positional_metadata = None
        if self.has_positional_metadata():
            positional_metadata = self.positional_metadata
        interval_metadata = None
        if self.has_interval_metadata():
            interval_metadata = self.interval_metadata
        # turn off validation because `seq` is guaranteed to be valid
        return skbio.RNA(seq, metadata=metadata,
                         positional_metadata=positional_metadata,
                         interval_metadata=interval_metadata,
                         validate=False)

    @stable(as_of="0.4.0")
    def translate(self, *args, **kwargs):
        """Translate DNA sequence into protein sequence.
        DNA sequence is assumed to be the coding strand. DNA sequence is first
        transcribed into RNA and then translated into protein.
        Parameters
        ----------
        args : tuple
            Positional arguments accepted by ``RNA.translate``.
        kwargs : dict
            Keyword arguments accepted by ``RNA.translate``.
        Returns
        -------
        Protein
            Translated sequence.
        See Also
        --------
        RNA.reverse_transcribe
        RNA.translate
        translate_six_frames
        transcribe
        Notes
        -----
        DNA sequence's metadata are included in the translated protein
        sequence. Positional metadata are not included.
        Examples
        --------
        Translate DNA into protein using NCBI's standard genetic code (table ID
        1, the default genetic code in scikit-bio):
        >>> from skbio import DNA
        >>> dna = DNA('ATGCCACTTTAA')
        >>> dna.translate()
        Protein
        --------------------------
        Stats:
            length: 4
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: True
        --------------------------
        0 MPL*
        Translate the same DNA sequence using a different NCBI genetic code
        (table ID 3, the yeast mitochondrial code) and specify that translation
        must terminate at the first stop codon:
        >>> dna.translate(3, stop='require')
        Protein
        --------------------------
        Stats:
            length: 3
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: False
        --------------------------
        0 MPT
        """
        # Delegate: DNA -> RNA -> Protein.
        return self.transcribe().translate(*args, **kwargs)

    @stable(as_of="0.4.0")
    def translate_six_frames(self, *args, **kwargs):
        """Translate DNA into protein using six possible reading frames.
        DNA sequence is assumed to be the coding strand. DNA sequence is first
        transcribed into RNA and then translated into protein. The six possible
        reading frames are:
        * 1 (forward)
        * 2 (forward)
        * 3 (forward)
        * -1 (reverse)
        * -2 (reverse)
        * -3 (reverse)
        Translated sequences are yielded in this order.
        Parameters
        ----------
        args : tuple
            Positional arguments accepted by ``RNA.translate_six_frames``.
        kwargs : dict
            Keyword arguments accepted by ``RNA.translate_six_frames``.
        Yields
        ------
        Protein
            Translated sequence in the current reading frame.
        See Also
        --------
        RNA.translate_six_frames
        translate
        transcribe
        Notes
        -----
        This method is faster than (and equivalent to) performing six
        independent translations using, for example:
        ``(seq.translate(reading_frame=rf)
        for rf in GeneticCode.reading_frames)``
        DNA sequence's metadata are included in each translated protein
        sequence. Positional metadata are not included.
        Examples
        --------
        Translate DNA into protein using the six possible reading frames and
        NCBI's standard genetic code (table ID 1, the default genetic code in
        scikit-bio):
        >>> from skbio import DNA
        >>> dna = DNA('ATGCCACTTTAA')
        >>> for protein in dna.translate_six_frames():
        ...     protein
        ...     print('')
        Protein
        --------------------------
        Stats:
            length: 4
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: True
        --------------------------
        0 MPL*
        <BLANKLINE>
        Protein
        --------------------------
        Stats:
            length: 3
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: False
        --------------------------
        0 CHF
        <BLANKLINE>
        Protein
        --------------------------
        Stats:
            length: 3
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: False
        --------------------------
        0 ATL
        <BLANKLINE>
        Protein
        --------------------------
        Stats:
            length: 4
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: False
        --------------------------
        0 LKWH
        <BLANKLINE>
        Protein
        --------------------------
        Stats:
            length: 3
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: True
        --------------------------
        0 *SG
        <BLANKLINE>
        Protein
        --------------------------
        Stats:
            length: 3
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: False
        --------------------------
        0 KVA
        <BLANKLINE>
        """
        # Delegate to RNA's six-frame translator after transcription.
        return self.transcribe().translate_six_frames(*args, **kwargs)

    @overrides(GrammaredSequence)
    def _repr_stats(self):
        """Define custom statistics to display in the sequence's repr."""
        # Extend the base stats with GC-content, formatted as a percentage.
        stats = super(DNA, self)._repr_stats()
        stats.append(('GC-content', '{:.2%}'.format(self.gc_content())))
        return stats
# Module-level motif registry: start from the shared nucleotide motifs and
# bind them to DNA so `DNA(...).find_motifs(...)` works.
_motifs = _parent_motifs.copy()
# Leave this at the bottom
_motifs.interpolate(DNA, "find_motifs")
| gregcaporaso/scikit-bio | skbio/sequence/_dna.py | Python | bsd-3-clause | 12,736 | [
"scikit-bio"
] | 5c5f6ca61ec81fcc4080a7458e2e66163803ef9150a8c5a2ce212b4bb6c28fba |
#!/usr/bin/env python
# VTK regression test: exercises vtkImageReslice's InformationInput by
# centering an image at (0,0,0), then restoring its original information.
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# In this example, an image is centered at (0,0,0) before a
# rotation is applied to ensure that the rotation occurs about
# the center of the image.
reader = vtk.vtkPNGReader()
reader.SetDataSpacing(0.8,0.8,1.5)
reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/fullhead15.png")
reader.Update()
# first center the image at (0,0,0)
reslice = vtk.vtkImageReslice()
reslice.SetResliceAxesDirectionCosines([0,1,0,-1,0,0,0,0,1])
reslice.SetInputConnection(reader.GetOutputPort())
reslice.SetInformationInput(reader.GetOutput())
# reset the image back to the way it was (you don't have
# to do this, it is just put in as an example)
information2 = vtk.vtkImageChangeInformation()
information2.SetInputConnection(reslice.GetOutputPort())
information2.SetInformationInputData(reader.GetOutput())
# Display with a fixed window/level appropriate for 15-bit CT data.
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(information2.GetOutputPort())
viewer.SetColorWindow(2000)
viewer.SetColorLevel(1000)
viewer.Render()
# --- end of script --
| hlzz/dotfiles | graphics/VTK-7.0.0/Imaging/Core/Testing/Python/ResliceInformationInput.py | Python | bsd-3-clause | 1,137 | [
"VTK"
] | e93f6799ac199816ae54b8b262297ecdd06e49a06e737c2421c8d65237371a1b |
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
import CogHQLoader
from toontown.toonbase import ToontownGlobals
from direct.gui import DirectGui
from toontown.toonbase import TTLocalizer
from toontown.toon import Toon
from direct.fsm import State
from direct.actor.Actor import Actor
import FactoryExterior
import FactoryInterior
import SellbotHQExterior
import SellbotHQBossBattle
from pandac.PandaModules import DecalEffect, NodePath
from direct.interval.IntervalGlobal import *
from direct.interval.LerpInterval import LerpHprInterval
from panda3d.core import Vec3
aspectSF = 0.7227
class SellbotCogHQLoader(CogHQLoader.CogHQLoader):
notify = DirectNotifyGlobal.directNotify.newCategory('SellbotCogHQLoader')
def __init__(self, hood, parentFSMState, doneEvent):
CogHQLoader.CogHQLoader.__init__(self, hood, parentFSMState, doneEvent)
self.fsm.addState(State.State('factoryExterior', self.enterFactoryExterior, self.exitFactoryExterior, ['quietZone', 'factoryInterior', 'cogHQExterior']))
for stateName in ['start', 'cogHQExterior', 'quietZone']:
state = self.fsm.getStateNamed(stateName)
state.addTransition('factoryExterior')
self.fsm.addState(State.State('factoryInterior', self.enterFactoryInterior, self.exitFactoryInterior, ['quietZone', 'factoryExterior']))
for stateName in ['quietZone']:
state = self.fsm.getStateNamed(stateName)
state.addTransition('factoryInterior')
self.musicFile = 'phase_9/audio/bgm/encntr_suit_HQ_nbrhood.ogg'
self.cogHQExteriorModelPath = 'phase_9/models/cogHQ/SellbotHQExterior'
self.cogHQLobbyModelPath = 'phase_9/models/cogHQ/SellbotHQLobby'
self.factoryExteriorModelPath = 'phase_9/models/cogHQ/SellbotFactoryExterior'
self.geom = None
self.spot1Sequence = None
self.spot2Sequence = None
self.spot3Sequence = None
self.spot4Sequence = None
self.spot5Sequence = None
self.spot6Sequence = None
def load(self, zoneId):
CogHQLoader.CogHQLoader.load(self, zoneId)
Toon.loadSellbotHQAnims()
def unloadPlaceGeom(self):
if self.geom:
self.geom.removeNode()
self.geom = None
CogHQLoader.CogHQLoader.unloadPlaceGeom(self)
def loadPlaceGeom(self, zoneId):
self.notify.info('loadPlaceGeom: %s' % zoneId)
zoneId = zoneId - zoneId % 100
if zoneId == ToontownGlobals.SellbotHQ:
self.geom = loader.loadModel(self.cogHQExteriorModelPath)
factoryExteriorPOV = loader.loadModel('phase_9/models/cogHQ/SellbotFactoryExterior')
factoryExteriorPOV.reparentTo(self.geom)
factoryExteriorPOV.setPosHpr(400.62, -139.52, 15.22, 272.73, 0, 0)
factoryExteriorPOV.setScale(0.5)
dgLinkTunnel = self.geom.find('**/Tunnel1')
dgLinkTunnel.setName('linktunnel_dg_5316_DNARoot')
factoryLinkTunnel = self.geom.find('**/Tunnel2')
factoryLinkTunnel.setName('linktunnel_sellhq_11200_DNARoot')
cogSignModel = loader.loadModel('phase_4/models/props/sign_sellBotHeadHQ')
cogSign = cogSignModel.find('**/sign_sellBotHeadHQ')
cogSignSF = 23
dgSign = cogSign.copyTo(dgLinkTunnel)
dgSign.setPosHprScale(0.0, -291.5, 29, 180.0, 0.0, 0.0, cogSignSF, cogSignSF, cogSignSF * aspectSF)
dgSign.node().setEffect(DecalEffect.make())
dgText = DirectGui.OnscreenText(text=TTLocalizer.DaisyGardens[-1], font=ToontownGlobals.getSuitFont(), pos=(0, -0.3), scale=TTLocalizer.SCHQLdgText, mayChange=False, parent=dgSign)
dgText.setDepthWrite(0)
factorySign = cogSign.copyTo(factoryLinkTunnel)
factorySign.setPosHprScale(148.625, -155, 27, -90.0, 0.0, 0.0, cogSignSF, cogSignSF, cogSignSF * aspectSF)
factorySign.node().setEffect(DecalEffect.make())
factoryTypeText = DirectGui.OnscreenText(text=TTLocalizer.Sellbot, font=ToontownGlobals.getSuitFont(), pos=TTLocalizer.SellbotFactoryPosPart1, scale=TTLocalizer.SellbotFactoryScalePart1, mayChange=False, parent=factorySign)
factoryTypeText.setDepthWrite(0)
factoryText = DirectGui.OnscreenText(text=TTLocalizer.Factory, font=ToontownGlobals.getSuitFont(), pos=TTLocalizer.SellbotFactoryPosPart2, scale=TTLocalizer.SellbotFactoryScalePart2, mayChange=False, parent=factorySign)
factoryText.setDepthWrite(0)
doors = self.geom.find('**/doors')
door0 = doors.find('**/door_0')
door1 = doors.find('**/door_1')
door2 = doors.find('**/door_2')
door3 = doors.find('**/door_3')
for door in [door0, door1, door2, door3]:
doorFrame = door.find('**/doorDoubleFlat/+GeomNode')
door.find('**/doorFrameHoleLeft').wrtReparentTo(doorFrame)
door.find('**/doorFrameHoleRight').wrtReparentTo(doorFrame)
doorTrigger = door.find('**/door_trigger*')
doorTrigger.setY(doorTrigger.getY() - 1.5) # Fixes the misplaced door trigger.
doorFrame.node().setEffect(DecalEffect.make())
self.botcam1 = Actor("phase_9/models/char/BotCam-zero.bam",{"botcamneutral":"phase_9/models/char/BotCam-neutral.bam"})
self.bossroom = Actor("phase_9/models/cogHQ/BossRoomPOV.bam")
self.botcam1.reparentTo(self.geom)
self.botcam1.setPos(-0.01,-39.3,24)
self.botcam1.loop('botcamneutral')
self.bossroom.reparentTo(self.geom)
self.bossroom.setPos(42,25,298)
self.bossroom.setScale(0.1)
self.oldFactoryPOV = Actor("phase_9/models/cogHQ/SellbotLegFactoryOld.bam")
self.oldFactoryPOV.reparentTo(self.geom)
self.oldFactoryPOV.setPos(-525,-150,17)
self.oldFactoryPOV.setScale(0.2)
self.spotLights = self.geom.find('**/SpotLights')
self.spot1Sequence = Sequence(LerpHprInterval(self.spotLights.find('**/Spot1'), 7, Vec3(0, 1, 10), startHpr=Vec3(0, 1, -5)), LerpHprInterval(self.spotLights.find('**/Spot1'), 7, Vec3(0, 1, -5), startHpr=Vec3(0, 1, 10)))
self.spot2Sequence = Sequence(LerpHprInterval(self.spotLights.find('**/Spot2'), 7, Vec3(0, 1, 10), startHpr=Vec3(0, 1, -5)), LerpHprInterval(self.spotLights.find('**/Spot2'), 7, Vec3(0, 1, -5), startHpr=Vec3(0, 1, 10)))
self.spot3Sequence = Sequence(LerpHprInterval(self.spotLights.find('**/Spot3'), 7, Vec3(0, 1, 10), startHpr=Vec3(0, 1, -5)), LerpHprInterval(self.spotLights.find('**/Spot3'), 7, Vec3(0, 1, -5), startHpr=Vec3(0, 1, 10)))
self.spot4Sequence = Sequence(LerpHprInterval(self.spotLights.find('**/Spot4'), 7, Vec3(0, 1, 10), startHpr=Vec3(0, 1, -5)), LerpHprInterval(self.spotLights.find('**/Spot4'), 7, Vec3(0, 1, -5), startHpr=Vec3(0, 1, 10)))
self.spot5Sequence = Sequence(LerpHprInterval(self.spotLights.find('**/Spot5'), 7, Vec3(0, 1, 10), startHpr=Vec3(0, 1, -5)), LerpHprInterval(self.spotLights.find('**/Spot5'), 7, Vec3(0, 1, -5), startHpr=Vec3(0, 1, 10)))
self.spot6Sequence = Sequence(LerpHprInterval(self.spotLights.find('**/Spot6'), 7, Vec3(0, 1, 10), startHpr=Vec3(0, 1, -5)), LerpHprInterval(self.spotLights.find('**/Spot6'), 7, Vec3(0, 1, -5), startHpr=Vec3(0, 1, 10)))
self.spot1Sequence.loop()
self.spot2Sequence.loop()
self.spot3Sequence.loop()
self.spot4Sequence.loop()
self.spot5Sequence.loop()
self.spot6Sequence.loop()
elif zoneId == ToontownGlobals.SellbotFactoryExt:
self.geom = loader.loadModel(self.factoryExteriorModelPath)
factoryLinkTunnel = self.geom.find('**/tunnel_group2')
factoryLinkTunnel.setName('linktunnel_sellhq_11000_DNARoot')
factoryLinkTunnel.find('**/tunnel_sphere').setName('tunnel_trigger')
cogSignModel = loader.loadModel('phase_4/models/props/sign_sellBotHeadHQ')
cogSign = cogSignModel.find('**/sign_sellBotHeadHQ')
cogSignSF = 23
elevatorSignSF = 15
hqSign = cogSign.copyTo(factoryLinkTunnel)
hqSign.setPosHprScale(0.0, -353, 27.5, -180.0, 0.0, 0.0, cogSignSF, cogSignSF, cogSignSF * aspectSF)
hqSign.node().setEffect(DecalEffect.make())
hqTypeText = DirectGui.OnscreenText(text=TTLocalizer.Sellbot, font=ToontownGlobals.getSuitFont(), pos=(0, -0.25), scale=0.075, mayChange=False, parent=hqSign)
hqTypeText.setDepthWrite(0)
hqText = DirectGui.OnscreenText(text=TTLocalizer.Headquarters, font=ToontownGlobals.getSuitFont(), pos=(0, -0.34), scale=0.1, mayChange=False, parent=hqSign)
hqText.setDepthWrite(0)
junkyardPOV = loader.loadModel("phase_9/models/cogHQ/SellbotHQExterior")
junkyardPOV.reparentTo(self.geom)
junkyardPOV.setPos(-200,-635,0)
junkyardPOV.setH(-275)
junkyardPOV.setScale(0.5)
frontDoor = self.geom.find('**/doorway1')
fdSign = cogSign.copyTo(frontDoor)
fdSign.setPosHprScale(62.74, -87.99, 17.26, 2.72, 0.0, 0.0, elevatorSignSF, elevatorSignSF, elevatorSignSF * aspectSF)
fdSign.node().setEffect(DecalEffect.make())
fdTypeText = DirectGui.OnscreenText(text=TTLocalizer.Factory, font=ToontownGlobals.getSuitFont(), pos=(0, -0.25), scale=TTLocalizer.SCHQLfdTypeText, mayChange=False, parent=fdSign)
fdTypeText.setDepthWrite(0)
fdText = DirectGui.OnscreenText(text=TTLocalizer.SellbotFrontEntrance, font=ToontownGlobals.getSuitFont(), pos=(0, -0.34), scale=TTLocalizer.SCHQLdgText, mayChange=False, parent=fdSign)
fdText.setDepthWrite(0)
sideDoor = self.geom.find('**/doorway2')
sdSign = cogSign.copyTo(sideDoor)
sdSign.setPosHprScale(-164.78, 26.28, 17.25, -89.89, 0.0, 0.0, elevatorSignSF, elevatorSignSF, elevatorSignSF * aspectSF)
sdSign.node().setEffect(DecalEffect.make())
sdTypeText = DirectGui.OnscreenText(text=TTLocalizer.Factory, font=ToontownGlobals.getSuitFont(), pos=(0, -0.25), scale=0.075, mayChange=False, parent=sdSign)
sdTypeText.setDepthWrite(0)
sdText = DirectGui.OnscreenText(text=TTLocalizer.SellbotSideEntrance, font=ToontownGlobals.getSuitFont(), pos=(0, -0.34), scale=0.1, mayChange=False, parent=sdSign)
sdText.setDepthWrite(0)
elif zoneId == ToontownGlobals.SellbotLobby:
if base.config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: COGHQ: Visit SellbotLobby')
self.geom = loader.loadModel(self.cogHQLobbyModelPath)
front = self.geom.find('**/frontWall')
front.node().setEffect(DecalEffect.make())
door = self.geom.find('**/door_0')
parent = door.getParent()
door.wrtReparentTo(front)
doorFrame = door.find('**/doorDoubleFlat/+GeomNode')
door.find('**/doorFrameHoleLeft').wrtReparentTo(doorFrame)
door.find('**/doorFrameHoleRight').wrtReparentTo(doorFrame)
doorFrame.node().setEffect(DecalEffect.make())
door.find('**/leftDoor').wrtReparentTo(parent)
door.find('**/rightDoor').wrtReparentTo(parent)
self.geom.flattenStrong()
else:
self.notify.warning('loadPlaceGeom: unclassified zone %s' % zoneId)
CogHQLoader.CogHQLoader.loadPlaceGeom(self, zoneId)
def unload(self):
CogHQLoader.CogHQLoader.unload(self)
Toon.unloadSellbotHQAnims()
self.spot1Sequence.finish()
self.spot2Sequence.finish()
self.spot3Sequence.finish()
self.spot4Sequence.finish()
self.spot5Sequence.finish()
self.spot6Sequence.finish()
    def enterFactoryExterior(self, requestStatus):
        # Switch the active place to the factory exterior, enter it, and show
        # the title card for the zone being entered.
        self.placeClass = FactoryExterior.FactoryExterior
        self.enterPlace(requestStatus)
        self.hood.spawnTitleText(requestStatus['zoneId'])
    def exitFactoryExterior(self):
        # Cancel any pending title-card task and hide it before leaving the
        # place; placeClass is cleared so a stale class is never reused.
        taskMgr.remove('titleText')
        self.hood.hideTitleText()
        self.exitPlace()
        self.placeClass = None
    def enterFactoryInterior(self, requestStatus):
        # Factory interior shows no zone title card, unlike the exterior.
        self.placeClass = FactoryInterior.FactoryInterior
        self.enterPlace(requestStatus)
    def exitFactoryInterior(self):
        # Leave the factory interior place and drop the cached place class.
        self.exitPlace()
        self.placeClass = None
    def getExteriorPlaceClass(self):
        # Place class used for the outdoor Sellbot HQ courtyard.
        return SellbotHQExterior.SellbotHQExterior
    def getBossPlaceClass(self):
        # Place class used for the VP boss battle zone.
        return SellbotHQBossBattle.SellbotHQBossBattle
| silly-wacky-3-town-toon/SOURCE-COD | toontown/coghq/SellbotCogHQLoader.py | Python | apache-2.0 | 12,750 | [
"VisIt"
] | 1a8a10833b8b0903acb837f4438f61ddebd2edc8abedda0558e0bc99a511969c |
""" SSH (Virtual) Computing Element
For a given IP/host it will send jobs directly through ssh
**Configuration Parameters**
Configuration for the SSHComputingElement submission can be done via the configuration system.
BatchSystem:
Underlying batch system that is going to be used to orchestrate executable files. The Batch System has to be
accessible from the LocalCE. By default, the LocalComputingElement submits directly on the host via the Host class.
SharedArea:
Area used to store executable/output/error files if they are not aready defined via BatchOutput, BatchError,
InfoArea, ExecutableArea and/or WorkArea. The path should be absolute.
BatchOutput:
Area where the job outputs are stored.
If not defined: SharedArea + '/data' is used.
If not absolute: SharedArea + path is used.
BatchError:
Area where the job errors are stored.
If not defined: SharedArea + '/data' is used.
If not absolute: SharedArea + path is used.
ExecutableArea:
Area where the executable files are stored if necessary.
If not defined: SharedArea + '/data' is used.
If not absolute: SharedArea + path is used.
SSHHost:
SSH host name
SSHUser:
SSH user login
SSHPassword:
SSH password
SSHPort:
Port number if not standard, e.g. for the gsissh access
SSHKey:
Location of the ssh private key for no-password connection
SSHOptions:
Any other SSH options to be used
SSHTunnel:
String defining the use of intermediate SSH host. Example::
ssh -i /private/key/location -l final_user final_host
SSHType:
SSH (default) or gsissh
**Code Documentation**
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import six
import os
import json
import stat
import shutil
import errno
from urllib.parse import urlparse
from urllib.parse import quote
from urllib.parse import unquote
from shlex import quote as shlex_quote
import DIRAC
from DIRAC import S_OK, S_ERROR
from DIRAC import gLogger
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.Resources.Computing.PilotBundle import bundleProxy, writeScript
from DIRAC.Resources.Computing.BatchSystems.executeBatch import executeBatchContent
from DIRAC.Core.Utilities.List import uniqueElements
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.Core.Utilities.List import breakListIntoChunks
class SSH(object):
"""SSH class encapsulates passing commands and files through an SSH tunnel
to a remote host. It can use either ssh or gsissh access. The final host
where the commands will be executed and where the files will copied/retrieved
can be reached through an intermediate host if SSHTunnel parameters is defined.
SSH constructor parameters are defined in a SSH accessible Computing Element
in the Configuration System:
- SSHHost: SSH host name
- SSHUser: SSH user login
- SSHPassword: SSH password
- SSHPort: port number if not standard, e.g. for the gsissh access
- SSHKey: location of the ssh private key for no-password connection
- SSHOptions: any other SSH options to be used
- SSHTunnel: string defining the use of intermediate SSH host. Example:
'ssh -i /private/key/location -l final_user final_host'
- SSHType: ssh ( default ) or gsissh
The class public interface includes two methods:
sshCall( timeout, command_sequence )
scpCall( timeout, local_file, remote_file, upload = False/True )
"""
def __init__(self, host=None, parameters=None):
self.host = host
if parameters is None:
parameters = {}
if not host:
self.host = parameters.get("SSHHost", "")
self.user = parameters.get("SSHUser", "")
self.password = parameters.get("SSHPassword", "")
self.port = parameters.get("SSHPort", "")
self.key = parameters.get("SSHKey", "")
self.options = parameters.get("SSHOptions", "")
self.sshTunnel = parameters.get("SSHTunnel", "")
self.sshType = parameters.get("SSHType", "ssh")
if self.port:
self.options += " -p %s" % self.port
if self.key:
self.options += " -i %s" % self.key
self.options = self.options.strip()
self.log = gLogger.getSubLogger("SSH")
def __ssh_call(self, command, timeout):
try:
import pexpect
expectFlag = True
except BaseException:
from DIRAC.Core.Utilities.Subprocess import shellCall
expectFlag = False
if not timeout:
timeout = 999
if expectFlag:
ssh_newkey = "Are you sure you want to continue connecting"
try:
child = pexpect.spawn(command, timeout=timeout, encoding="utf-8")
i = child.expect([pexpect.TIMEOUT, ssh_newkey, pexpect.EOF, "assword: "])
if i == 0: # Timeout
return S_OK((-1, child.before, "SSH login failed"))
elif i == 1: # SSH does not have the public key. Just accept it.
child.sendline("yes")
child.expect("assword: ")
i = child.expect([pexpect.TIMEOUT, "assword: "])
if i == 0: # Timeout
return S_OK((-1, str(child.before) + str(child.after), "SSH login failed"))
elif i == 1:
child.sendline(self.password)
child.expect(pexpect.EOF)
return S_OK((0, child.before, ""))
elif i == 2:
# Passwordless login, get the output
return S_OK((0, child.before, ""))
if self.password:
child.sendline(self.password)
child.expect(pexpect.EOF)
return S_OK((0, child.before, ""))
return S_ERROR((-2, child.before, ""))
except Exception as x:
res = (-1, "Encountered exception %s: %s" % (Exception, str(x)))
return S_ERROR(res)
else:
# Try passwordless login
result = shellCall(timeout, command)
# print ( "!!! SSH command: %s returned %s\n" % (command, result) )
if result["Value"][0] == 255:
return S_ERROR((-1, "Cannot connect to host %s" % self.host, ""))
return result
def sshCall(self, timeout, cmdSeq):
"""Execute remote command via a ssh remote call
:param int timeout: timeout of the command
:param cmdSeq: list of command components
:type cmdSeq: python:list
"""
command = cmdSeq
if isinstance(cmdSeq, list):
command = " ".join(cmdSeq)
pattern = "__DIRAC__"
if self.sshTunnel:
command = command.replace("'", '\\\\\\"')
command = command.replace("$", "\\\\\\$")
command = '/bin/sh -c \' %s -q %s -l %s %s "%s \\"echo %s; %s\\" " \' ' % (
self.sshType,
self.options,
self.user,
self.host,
self.sshTunnel,
pattern,
command,
)
else:
# command = command.replace( '$', '\$' )
command = '%s -q %s -l %s %s "echo %s; %s"' % (
self.sshType,
self.options,
self.user,
self.host,
pattern,
command,
)
self.log.debug("SSH command: %s" % command)
result = self.__ssh_call(command, timeout)
self.log.debug("SSH command result %s" % str(result))
if not result["OK"]:
return result
# Take the output only after the predefined pattern
ind = result["Value"][1].find("__DIRAC__")
if ind == -1:
return result
status, output, error = result["Value"]
output = output[ind + 9 :]
if output.startswith("\r"):
output = output[1:]
if output.startswith("\n"):
output = output[1:]
result["Value"] = (status, output, error)
return result
    def scpCall(self, timeout, localFile, remoteFile, postUploadCommand="", upload=True):
        """Perform file copy through an SSH magic.

        :param int timeout: timeout of the command
        :param str localFile: local file path, serves as source for uploading and destination for downloading.
                              Can take 'Memory' as value, in this case the downloaded contents is returned
                              as result['Value']
        :param str remoteFile: remote file full path
        :param str postUploadCommand: command executed on the remote side after file upload
        :param bool upload: upload if True, download otherwise
        """
        # shlex_quote aims to prevent any security issue or problems with filepath containing spaces
        # it returns a shell-escaped version of the filename
        localFile = shlex_quote(localFile)
        remoteFile = shlex_quote(remoteFile)
        if upload:
            if self.sshTunnel:
                # the command goes through two shells, hence the multi-level
                # dollar escaping
                remoteFile = remoteFile.replace("$", r"\\\\\$")
                postUploadCommand = postUploadCommand.replace("$", r"\\\\\$")
                command = '/bin/sh -c \'cat %s | %s -q %s %s@%s "%s \\"cat > %s; %s\\""\' ' % (
                    localFile,
                    self.sshType,
                    self.options,
                    self.user,
                    self.host,
                    self.sshTunnel,
                    remoteFile,
                    postUploadCommand,
                )
            else:
                command = "/bin/sh -c \"cat %s | %s -q %s %s@%s 'cat > %s; %s'\" " % (
                    localFile,
                    self.sshType,
                    self.options,
                    self.user,
                    self.host,
                    remoteFile,
                    postUploadCommand,
                )
        else:
            # download: 'Memory' means return the contents in the call result
            # instead of writing a local file
            finalCat = "| cat > %s" % localFile
            if localFile.lower() == "memory":
                finalCat = ""
            if self.sshTunnel:
                remoteFile = remoteFile.replace("$", "\\\\\\$")
                command = '/bin/sh -c \'%s -q %s -l %s %s "%s \\"cat %s\\"" %s\'' % (
                    self.sshType,
                    self.options,
                    self.user,
                    self.host,
                    self.sshTunnel,
                    remoteFile,
                    finalCat,
                )
            else:
                remoteFile = remoteFile.replace("$", r"\$")
                command = "/bin/sh -c '%s -q %s -l %s %s \"cat %s\" %s'" % (
                    self.sshType,
                    self.options,
                    self.user,
                    self.host,
                    remoteFile,
                    finalCat,
                )
        self.log.debug("SSH copy command: %s" % command)
        return self.__ssh_call(command, timeout)
class SSHComputingElement(ComputingElement):
    """Computing Element submitting jobs over SSH to a host where a batch
    system plugin orchestrates the execution (see module docstring)."""

    #############################################################################
    def __init__(self, ceUniqueID):
        """Standard constructor."""
        super(SSHComputingElement, self).__init__(ceUniqueID)
        self.ceType = "SSH"
        self.execution = "SSHCE"  # execution context passed to the control script
        self.submittedJobs = 0  # jobs submitted by this CE instance so far
        self.outputTemplate = ""  # output path template, filled lazily
        self.errorTemplate = ""  # error path template, filled lazily
############################################################################
def setProxy(self, proxy, valid=0):
"""
Set and prepare proxy to use
:param str proxy: proxy to use
:param int valid: proxy validity period
:return: S_OK/S_ERROR
"""
ComputingElement.setProxy(self, proxy, valid)
if self.ceParameters.get("SSHType", "ssh") == "gsissh":
result = self._prepareProxy()
if not result["OK"]:
gLogger.error("SSHComputingElement: failed to set up proxy", result["Message"])
return result
return S_OK()
#############################################################################
def _addCEConfigDefaults(self):
"""Method to make sure all necessary Configuration Parameters are defined"""
# First assure that any global parameters are loaded
ComputingElement._addCEConfigDefaults(self)
# Now batch system specific ones
if "ExecQueue" not in self.ceParameters:
self.ceParameters["ExecQueue"] = self.ceParameters.get("Queue", "")
if "SharedArea" not in self.ceParameters:
# . isn't a good location, move to $HOME
self.ceParameters["SharedArea"] = "$HOME"
if "BatchOutput" not in self.ceParameters:
self.ceParameters["BatchOutput"] = "data"
if "BatchError" not in self.ceParameters:
self.ceParameters["BatchError"] = "data"
if "ExecutableArea" not in self.ceParameters:
self.ceParameters["ExecutableArea"] = "data"
if "InfoArea" not in self.ceParameters:
self.ceParameters["InfoArea"] = "info"
if "WorkArea" not in self.ceParameters:
self.ceParameters["WorkArea"] = "work"
def _reset(self):
"""Process CE parameters and make necessary adjustments"""
batchSystemName = self.ceParameters.get("BatchSystem", "Host")
if "BatchSystem" not in self.ceParameters:
self.ceParameters["BatchSystem"] = batchSystemName
result = self.loadBatchSystem(batchSystemName)
if not result["OK"]:
self.log.error("Failed to load the batch system plugin", batchSystemName)
return result
self.user = self.ceParameters["SSHUser"]
self.queue = self.ceParameters["Queue"]
self.submitOptions = self.ceParameters.get("SubmitOptions", "")
if "ExecQueue" not in self.ceParameters or not self.ceParameters["ExecQueue"]:
self.ceParameters["ExecQueue"] = self.ceParameters.get("Queue", "")
self.execQueue = self.ceParameters["ExecQueue"]
self.log.info("Using queue: ", self.queue)
self.sharedArea = self.ceParameters["SharedArea"]
self.batchOutput = self.ceParameters["BatchOutput"]
if not self.batchOutput.startswith("/"):
self.batchOutput = os.path.join(self.sharedArea, self.batchOutput)
self.batchError = self.ceParameters["BatchError"]
if not self.batchError.startswith("/"):
self.batchError = os.path.join(self.sharedArea, self.batchError)
self.infoArea = self.ceParameters["InfoArea"]
if not self.infoArea.startswith("/"):
self.infoArea = os.path.join(self.sharedArea, self.infoArea)
self.executableArea = self.ceParameters["ExecutableArea"]
if not self.executableArea.startswith("/"):
self.executableArea = os.path.join(self.sharedArea, self.executableArea)
self.workArea = self.ceParameters["WorkArea"]
if not self.workArea.startswith("/"):
self.workArea = os.path.join(self.sharedArea, self.workArea)
self.submitOptions = self.ceParameters.get("SubmitOptions", "")
self.removeOutput = True
if "RemoveOutput" in self.ceParameters:
if self.ceParameters["RemoveOutput"].lower() in ["no", "false", "0"]:
self.removeOutput = False
self.preamble = self.ceParameters.get("Preamble", "")
result = self._prepareRemoteHost()
if not result["OK"]:
return result
return S_OK()
def _prepareRemoteHost(self, host=None):
"""Prepare remote directories and upload control script"""
ssh = SSH(host=host, parameters=self.ceParameters)
# Make remote directories
dirTuple = tuple(
uniqueElements(
[self.sharedArea, self.executableArea, self.infoArea, self.batchOutput, self.batchError, self.workArea]
)
)
nDirs = len(dirTuple)
cmd = "mkdir -p %s; " * nDirs % dirTuple
cmd = "bash -c '%s'" % cmd
self.log.verbose("Creating working directories on %s" % self.ceParameters["SSHHost"])
result = ssh.sshCall(30, cmd)
if not result["OK"]:
self.log.error("Failed creating working directories", "(%s)" % result["Message"][1])
return result
status, output, _error = result["Value"]
if status == -1:
self.log.error("Timeout while creating directories")
return S_ERROR(errno.ETIME, "Timeout while creating directories")
if "cannot" in output:
self.log.error("Failed to create directories", "(%s)" % output)
return S_ERROR(errno.EACCES, "Failed to create directories")
# Upload the control script now
result = self._generateControlScript()
if not result["OK"]:
self.log.warn("Failed generating control script")
return result
localScript = result["Value"]
self.log.verbose(
"Uploading %s script to %s" % (self.batchSystem.__class__.__name__, self.ceParameters["SSHHost"])
)
remoteScript = "%s/execute_batch" % self.sharedArea
result = ssh.scpCall(30, localScript, remoteScript, postUploadCommand="chmod +x %s" % remoteScript)
if not result["OK"]:
self.log.warn("Failed uploading control script: %s" % result["Message"][1])
return result
status, output, _error = result["Value"]
if status != 0:
if status == -1:
self.log.warn("Timeout while uploading control script")
return S_ERROR("Timeout while uploading control script")
self.log.warn("Failed uploading control script: %s" % output)
return S_ERROR("Failed uploading control script")
# Delete the generated control script locally
try:
os.remove(localScript)
except OSError:
self.log.warn("Failed removing the generated control script locally")
return S_ERROR("Failed removing the generated control script locally")
# Chmod the control scripts
# self.log.verbose( 'Chmod +x control script' )
# result = ssh.sshCall( 10, "chmod +x %s/%s" % ( self.sharedArea, self.controlScript ) )
# if not result['OK']:
# self.log.warn( 'Failed chmod control script: %s' % result['Message'][1] )
# return result
# status, output, _error = result['Value']
# if status != 0:
# if status == -1:
# self.log.warn( 'Timeout while chmod control script' )
# return S_ERROR( 'Timeout while chmod control script' )
# else:
# self.log.warn( 'Failed uploading chmod script: %s' % output )
# return S_ERROR( 'Failed uploading chmod script' )
return S_OK()
def _generateControlScript(self):
"""Generates a control script from a BatchSystem and a script called executeBatch
:return: a path containing the script generated
"""
# Get the batch system module to use
batchSystemDir = os.path.join(os.path.dirname(DIRAC.__file__), "Resources", "Computing", "BatchSystems")
batchSystemScript = os.path.join(batchSystemDir, "%s.py" % self.batchSystem.__class__.__name__)
# Get the executeBatch.py content: an str variable composed of code content that has to be extracted
# The control script is generated from the batch system module and this variable
controlScript = os.path.join(batchSystemDir, "control_script.py")
try:
shutil.copyfile(batchSystemScript, controlScript)
with open(controlScript, "a") as cs:
cs.write(executeBatchContent)
except IOError:
return S_ERROR("IO Error trying to generate control script")
return S_OK("%s" % controlScript)
    def __executeHostCommand(self, command, options, ssh=None, host=None):
        """Run the uploaded control script remotely with *command* as its method.

        The options dictionary is JSON-encoded and URL-quoted so that it
        survives the shell round-trip; the remote side prints a marker line
        followed by a quoted JSON structure which is decoded here.

        :param str command: control script method name (e.g. 'submitJob')
        :param dict options: options passed to the control script
        :param ssh: optional prepared SSH object to reuse
        :param str host: optional host overriding the configured SSHHost
        :return: S_OK(decoded result)/S_ERROR
        """
        if not ssh:
            ssh = SSH(host=host, parameters=self.ceParameters)
        options["BatchSystem"] = self.batchSystem.__class__.__name__
        options["Method"] = command
        options["SharedDir"] = self.sharedArea
        options["OutputDir"] = self.batchOutput
        options["ErrorDir"] = self.batchError
        options["WorkDir"] = self.workArea
        options["InfoDir"] = self.infoArea
        options["ExecutionContext"] = self.execution
        options["User"] = self.user
        options["Queue"] = self.queue
        options = json.dumps(options)
        options = quote(options)
        # Try whatever python interpreter is available on the remote side
        cmd = (
            "bash --login -c 'python %s/execute_batch %s || python3 %s/execute_batch %s || python2 %s/execute_batch %s'"
            % (self.sharedArea, options, self.sharedArea, options, self.sharedArea, options)
        )
        self.log.verbose("CE submission command: %s" % cmd)
        result = ssh.sshCall(120, cmd)
        if not result["OK"]:
            self.log.error("%s CE job submission failed" % self.ceType, result["Message"])
            return result
        sshStatus = result["Value"][0]
        sshStdout = result["Value"][1]
        sshStderr = result["Value"][2]
        # Examine results of the job submission
        if sshStatus == 0:
            output = sshStdout.strip().replace("\r", "").strip()
            try:
                # the marker string below is 42 characters long, so the slice
                # drops exactly the marker and keeps the quoted JSON payload
                index = output.index("============= Start output ===============")
                output = output[index + 42 :]
            except Exception:
                return S_ERROR("Invalid output from remote command: %s" % output)
            try:
                output = unquote(output)
                result = json.loads(output)
                # the control script reports remote exceptions as a string
                if isinstance(result, six.string_types) and result.startswith("Exception:"):
                    return S_ERROR(result)
                return S_OK(result)
            except Exception:
                return S_ERROR("Invalid return structure from job submission")
        else:
            return S_ERROR("\n".join([sshStdout, sshStderr]))
def submitJob(self, executableFile, proxy, numberOfJobs=1):
# self.log.verbose( "Executable file path: %s" % executableFile )
if not os.access(executableFile, 5):
os.chmod(executableFile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
# if no proxy is supplied, the executable can be submitted directly
# otherwise a wrapper script is needed to get the proxy to the execution node
# The wrapper script makes debugging more complicated and thus it is
# recommended to transfer a proxy inside the executable if possible.
if proxy:
self.log.verbose("Setting up proxy for payload")
wrapperContent = bundleProxy(executableFile, proxy)
name = writeScript(wrapperContent, os.getcwd())
submitFile = name
else: # no proxy
submitFile = executableFile
result = self._submitJobToHost(submitFile, numberOfJobs)
if proxy:
os.remove(submitFile)
return result
    def _submitJobToHost(self, executableFile, numberOfJobs, host=None):
        """Submit prepared executable to the given host.

        :param str executableFile: local path of the executable
        :param int numberOfJobs: number of copies to submit
        :param str host: optional host overriding the configured SSHHost
        :return: S_OK(list of job ID URLs)/S_ERROR
        """
        ssh = SSH(host=host, parameters=self.ceParameters)
        # Copy the executable
        submitFile = os.path.join(self.executableArea, os.path.basename(executableFile))
        result = ssh.scpCall(30, executableFile, submitFile, postUploadCommand="chmod +x %s" % submitFile)
        if not result["OK"]:
            return result

        # One short random stamp per requested job; it identifies the job on
        # the remote side
        jobStamps = []
        for _i in range(numberOfJobs):
            jobStamps.append(makeGuid()[:8])

        numberOfProcessors = self.ceParameters.get("NumberOfProcessors", 1)
        wholeNode = self.ceParameters.get("WholeNode", False)
        # numberOfNodes is treated as a string as it can contain values such as "2-4"
        # where 2 would represent the minimum number of nodes to allocate, and 4 the maximum
        numberOfNodes = self.ceParameters.get("NumberOfNodes", "1")
        self.numberOfGPUs = self.ceParameters.get("NumberOfGPUs")

        # Collect command options
        commandOptions = {
            "Executable": submitFile,
            "NJobs": numberOfJobs,
            "SubmitOptions": self.submitOptions,
            "JobStamps": jobStamps,
            "WholeNode": wholeNode,
            "NumberOfProcessors": numberOfProcessors,
            "NumberOfNodes": numberOfNodes,
            "Preamble": self.preamble,
            "NumberOfGPUs": self.numberOfGPUs,
        }
        if host:
            commandOptions["SSHNodeHost"] = host

        resultCommand = self.__executeHostCommand("submitJob", commandOptions, ssh=ssh, host=host)
        if not resultCommand["OK"]:
            return resultCommand
        result = resultCommand["Value"]
        if result["Status"] != 0:
            return S_ERROR("Failed job submission: %s" % result["Message"])
        else:
            batchIDs = result["Jobs"]
            if batchIDs:
                batchSystemName = self.batchSystem.__class__.__name__.lower()
                # Job ID URLs carry CE type, batch system, CE name and, when a
                # node host was targeted, the host itself
                if host is None:
                    jobIDs = [
                        "%s%s://%s/%s" % (self.ceType.lower(), batchSystemName, self.ceName, _id) for _id in batchIDs
                    ]
                else:
                    jobIDs = [
                        "%s%s://%s/%s/%s" % (self.ceType.lower(), batchSystemName, self.ceName, host, _id)
                        for _id in batchIDs
                    ]
            else:
                return S_ERROR("No jobs IDs returned")
        result = S_OK(jobIDs)
        self.submittedJobs += len(batchIDs)
        return result
def killJob(self, jobIDList):
"""Kill a bunch of jobs"""
if isinstance(jobIDList, six.string_types):
jobIDList = [jobIDList]
return self._killJobOnHost(jobIDList)
def _killJobOnHost(self, jobIDList, host=None):
"""Kill the jobs for the given list of job IDs"""
jobDict = {}
for job in jobIDList:
stamp = os.path.basename(urlparse(job).path)
jobDict[stamp] = job
stampList = list(jobDict)
commandOptions = {"JobIDList": stampList, "User": self.user}
resultCommand = self.__executeHostCommand("killJob", commandOptions, host=host)
if not resultCommand["OK"]:
return resultCommand
result = resultCommand["Value"]
if result["Status"] != 0:
return S_ERROR("Failed job kill: %s" % result["Message"])
if result["Failed"]:
return S_ERROR("%d jobs failed killing" % len(result["Failed"]))
return S_OK(len(result["Successful"]))
def _getHostStatus(self, host=None):
"""Get jobs running at a given host"""
resultCommand = self.__executeHostCommand("getCEStatus", {}, host=host)
if not resultCommand["OK"]:
return resultCommand
result = resultCommand["Value"]
if result["Status"] != 0:
return S_ERROR("Failed to get CE status: %s" % result["Message"])
return S_OK(result)
def getCEStatus(self):
"""Method to return information on running and pending jobs."""
result = S_OK()
result["SubmittedJobs"] = self.submittedJobs
result["RunningJobs"] = 0
result["WaitingJobs"] = 0
resultHost = self._getHostStatus()
if not resultHost["OK"]:
return resultHost
result["RunningJobs"] = resultHost["Value"].get("Running", 0)
result["WaitingJobs"] = resultHost["Value"].get("Waiting", 0)
if "AvailableCores" in resultHost["Value"]:
result["AvailableCores"] = resultHost["Value"]["AvailableCores"]
self.log.verbose("Waiting Jobs: ", result["WaitingJobs"])
self.log.verbose("Running Jobs: ", result["RunningJobs"])
return result
    def getJobStatus(self, jobIDList):
        """Get the status information for the given list of jobs.

        :param jobIDList: list of job ID URLs
        :return: S_OK({jobID: status})/S_ERROR
        """
        return self._getJobStatusOnHost(jobIDList)
def _getJobStatusOnHost(self, jobIDList, host=None):
"""Get the status information for the given list of jobs"""
resultDict = {}
jobDict = {}
for job in jobIDList:
stamp = os.path.basename(urlparse(job).path)
jobDict[stamp] = job
stampList = list(jobDict)
for jobList in breakListIntoChunks(stampList, 100):
resultCommand = self.__executeHostCommand("getJobStatus", {"JobIDList": jobList}, host=host)
if not resultCommand["OK"]:
return resultCommand
result = resultCommand["Value"]
if result["Status"] != 0:
return S_ERROR("Failed to get job status: %s" % result["Message"])
for stamp in result["Jobs"]:
resultDict[jobDict[stamp]] = result["Jobs"][stamp]
return S_OK(resultDict)
    def _getJobOutputFiles(self, jobID, host=None):
        """Get output file names for the specific CE.

        :param str jobID: job ID URL as built by _submitJobToHost
        :param str host: NOTE(review): this parameter is immediately overwritten
                         from the jobID below and therefore has no effect - confirm
        :return: S_OK((jobStamp, host, outputPath, errorPath))/S_ERROR
        """
        jobStamp = os.path.basename(urlparse(jobID).path)
        host = urlparse(jobID).hostname
        if "OutputTemplate" in self.ceParameters:
            self.outputTemplate = self.ceParameters["OutputTemplate"]
            self.errorTemplate = self.ceParameters["ErrorTemplate"]
        if self.outputTemplate:
            output = self.outputTemplate % jobStamp
            error = self.errorTemplate % jobStamp
        elif "OutputTemplate" in self.ceParameters:
            # NOTE(review): this branch duplicates the assignments above and can
            # only be reached when the configured template is an empty string,
            # in which case the %-formatting below would fail - confirm intent
            self.outputTemplate = self.ceParameters["OutputTemplate"]
            self.errorTemplate = self.ceParameters["ErrorTemplate"]
            output = self.outputTemplate % jobStamp
            error = self.errorTemplate % jobStamp
        elif hasattr(self.batchSystem, "getJobOutputFiles"):
            # numberOfNodes is treated as a string as it can contain values such as "2-4"
            # where 2 would represent the minimum number of nodes to allocate, and 4 the maximum
            numberOfNodes = self.ceParameters.get("NumberOfNodes", "1")
            commandOptions = {
                "JobIDList": [jobStamp],
                "OutputDir": self.batchOutput,
                "ErrorDir": self.batchError,
                "NumberOfNodes": numberOfNodes,
            }
            resultCommand = self.__executeHostCommand("getJobOutputFiles", commandOptions, host=host)
            if not resultCommand["OK"]:
                return resultCommand
            result = resultCommand["Value"]
            if result["Status"] != 0:
                return S_ERROR("Failed to get job output files: %s" % result["Message"])
            # cache the templates reported by the remote batch system
            if "OutputTemplate" in result:
                self.outputTemplate = result["OutputTemplate"]
                self.errorTemplate = result["ErrorTemplate"]
            output = result["Jobs"][jobStamp]["Output"]
            error = result["Jobs"][jobStamp]["Error"]
        else:
            # default naming scheme: <dir>/<stamp>.out / .err
            output = "%s/%s.out" % (self.batchOutput, jobStamp)
            error = "%s/%s.err" % (self.batchError, jobStamp)
        return S_OK((jobStamp, host, output, error))
def getJobOutput(self, jobID, localDir=None):
"""Get the specified job standard output and error files. If the localDir is provided,
the output is returned as file in this directory. Otherwise, the output is returned
as strings.
"""
self.log.verbose("Getting output for jobID", jobID)
result = self._getJobOutputFiles(jobID)
if not result["OK"]:
return result
jobStamp, host, outputFile, errorFile = result["Value"]
if localDir:
localOutputFile = "%s/%s.out" % (localDir, jobStamp)
localErrorFile = "%s/%s.err" % (localDir, jobStamp)
else:
localOutputFile = "Memory"
localErrorFile = "Memory"
# Take into account the SSHBatch possible SSHHost syntax
host = host.split("/")[0]
ssh = SSH(host=host, parameters=self.ceParameters)
result = ssh.scpCall(30, localOutputFile, outputFile, upload=False)
if not result["OK"]:
return result
result = ssh.scpCall(30, localErrorFile, errorFile, upload=False)
if not result["OK"]:
return result
if localDir:
output = localOutputFile
error = localErrorFile
else:
output = result["Value"][1]
error = result["Value"][1]
return S_OK((output, error))
| ic-hep/DIRAC | src/DIRAC/Resources/Computing/SSHComputingElement.py | Python | gpl-3.0 | 32,794 | [
"DIRAC"
] | 3a6c6bdaace8debf6ef41fb9baddf3548cf8949f8b7345f45011425a50c3208b |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/yc/code/calibre/calibre/src/calibre/gui2/convert/txt_input.ui'
#
# Created: Thu Oct 25 16:54:55 2012
# by: PyQt4 UI code generator 4.8.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # PyQt4 builds exposing QString (string API v1): use its UTF-8 decoder
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # string API v2: strings are already unicode, pass them through unchanged
    _fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(518, 353)
Form.setWindowTitle(_("Form"))
self.verticalLayout_3 = QtGui.QVBoxLayout(Form)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.groupBox_3 = QtGui.QGroupBox(Form)
self.groupBox_3.setTitle(_("Structure"))
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.gridLayout = QtGui.QGridLayout(self.groupBox_3)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_2 = QtGui.QLabel(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setText(_("Paragraph style:"))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
self.opt_paragraph_type = QtGui.QComboBox(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.opt_paragraph_type.sizePolicy().hasHeightForWidth())
self.opt_paragraph_type.setSizePolicy(sizePolicy)
self.opt_paragraph_type.setObjectName(_fromUtf8("opt_paragraph_type"))
self.gridLayout.addWidget(self.opt_paragraph_type, 0, 1, 1, 1)
self.label_3 = QtGui.QLabel(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())
self.label_3.setSizePolicy(sizePolicy)
self.label_3.setText(_("Formatting style:"))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 1, 0, 1, 1)
self.opt_formatting_type = QtGui.QComboBox(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.opt_formatting_type.sizePolicy().hasHeightForWidth())
self.opt_formatting_type.setSizePolicy(sizePolicy)
self.opt_formatting_type.setObjectName(_fromUtf8("opt_formatting_type"))
self.gridLayout.addWidget(self.opt_formatting_type, 1, 1, 1, 1)
self.verticalLayout_3.addWidget(self.groupBox_3)
self.groupBox_2 = QtGui.QGroupBox(Form)
self.groupBox_2.setTitle(_("Common"))
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_2)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.opt_preserve_spaces = QtGui.QCheckBox(self.groupBox_2)
self.opt_preserve_spaces.setText(_("Preserve &spaces"))
self.opt_preserve_spaces.setObjectName(_fromUtf8("opt_preserve_spaces"))
self.verticalLayout_2.addWidget(self.opt_preserve_spaces)
self.opt_txt_in_remove_indents = QtGui.QCheckBox(self.groupBox_2)
self.opt_txt_in_remove_indents.setText(_("Remove indents at the beginning of lines"))
self.opt_txt_in_remove_indents.setObjectName(_fromUtf8("opt_txt_in_remove_indents"))
self.verticalLayout_2.addWidget(self.opt_txt_in_remove_indents)
self.verticalLayout_3.addWidget(self.groupBox_2)
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setTitle(_("Markdown"))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(self.groupBox)
self.label.setText(_("<p>Markdown is a simple markup language for text files, that allows for advanced formatting. To learn more visit <a href=\"http://daringfireball.net/projects/markdown\">markdown</a>."))
self.label.setWordWrap(True)
self.label.setOpenExternalLinks(True)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.opt_markdown_disable_toc = QtGui.QCheckBox(self.groupBox)
self.opt_markdown_disable_toc.setText(_("Do not insert Table of Contents into output text when using markdown"))
self.opt_markdown_disable_toc.setObjectName(_fromUtf8("opt_markdown_disable_toc"))
self.verticalLayout.addWidget(self.opt_markdown_disable_toc)
self.verticalLayout_3.addWidget(self.groupBox)
spacerItem = QtGui.QSpacerItem(20, 213, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        # All user-visible strings are already wrapped in _() inside setupUi,
        # so there is nothing left to retranslate here.
        pass
| yeyanchao/calibre | src/calibre/gui2/convert/txt_input_ui.py | Python | gpl-3.0 | 5,620 | [
"VisIt"
] | edbb56d9c5249f65164e4bb2c93ed12a9bdfbb170764646bc2f0ea6da7d2a346 |
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Convolution layer tests
"""
import numpy as np
from neon import NervanaObject
from neon.backends import gen_backend
from neon.layers import Sequential, Conv, Pooling, BranchNode, Affine, Tree
from neon.initializers.initializer import Gaussian, Constant
from neon.transforms import Rectlin
# Shared layer hyper-parameters used by the model specs below.
init1 = Gaussian(scale=0.01)
relu = Rectlin()
bias = Constant(0)
# Common keyword bundles passed to Conv/Affine constructors.
common = dict(activation=relu, init=init1, bias=bias)
commonp1 = dict(activation=relu, init=init1, bias=bias, padding=1)
commonp3s2 = dict(activation=relu, init=init1, bias=bias, padding=3, strides=2)
# Max-pooling config: 2x2 window, padding 1, stride 1.
pool2s1p1 = dict(fshape=2, padding=1, strides=1)
batch_size = 64
def make_tree(trunk, branch1, branch2, alphas):
    """Build a shared-trunk Tree model plus unshared reference copies.

    Each element of trunk/branch1/branch2 is a {'layer': cls, 'config': kwargs}
    spec. Returns (tree_model, ref_trunk, ref_branch1, ref_branch2).
    """
    def build(specs):
        # Instantiate one layer object per spec.
        return [spec['layer'](**spec['config']) for spec in specs]
    # Shared (Tree) version: both branches hang off a common branch node.
    shared_trunk = build(trunk)
    bnode = [BranchNode(name='bnode')]
    shared_b1 = build(branch1)
    shared_b2 = build(branch2)
    tree = Tree([shared_trunk + bnode + shared_b1, bnode + shared_b2], alphas)
    # Unshared reference copies used for the comparison run.
    ref_trunk = build(trunk)
    ref_b1 = build(branch1)
    ref_b2 = build(branch2)
    return (tree, ref_trunk, ref_b1, ref_b2)
def test_branch_model():
    """Compare a shared-trunk Tree model against equivalent unshared
    Sequential models: fprop outputs and the bprop deltas at the input
    must agree to within numerical precision (1e-9).
    """
    NervanaObject.be = gen_backend("gpu", batch_size=64)
    be = NervanaObject.be
    trunk = [{'layer': Conv, 'config': dict(fshape=(5, 5, 16), **common)},
             {'layer': Pooling, 'config': dict(op='max', **pool2s1p1)}]
    branch1 = [{'layer': Conv, 'config': dict(fshape=(5, 5, 32), **common)},
               {'layer': Pooling, 'config': dict(op='max', **pool2s1p1)},
               {'layer': Affine, 'config': dict(nout=200, **common)},
               {'layer': Affine, 'config': dict(nout=10, init=init1, activation=relu)}]
    branch2 = [{'layer': Conv, 'config': dict(fshape=(3, 3, 32), **common)},
               {'layer': Pooling, 'config': dict(op='max', **pool2s1p1)},
               {'layer': Affine, 'config': dict(nout=256, **common)},
               {'layer': Affine, 'config': dict(nout=10, init=init1, activation=relu)}]
    alphas = [1, 1]
    neon_layer, t, b1, b2 = make_tree(trunk, branch1, branch2, alphas)
    inshape = (16, 32, 32)
    insize = np.prod(inshape)
    # Let's force bprop deltas computation for
    inpa = np.random.random((insize, batch_size))
    inp = be.array(inpa)
    neon_layer.configure(inshape)
    neon_layer.allocate()
    neon_layer.allocate_deltas()
    neon_out = [i.get() for i in neon_layer.fprop(inp)]
    # Reference: three unshared Sequential containers (trunk + two branches).
    ref_layers = [Sequential(t), Sequential(b1), Sequential(b2)]
    ref_layers[0].configure(inshape)
    ref_layers[1].configure(ref_layers[0].out_shape)
    ref_layers[2].configure(ref_layers[0].out_shape)
    [r.allocate() for r in ref_layers]
    [r.allocate_deltas() for r in ref_layers]
    # Now copy the weights so both models start from identical parameters.
    ref_all_layers = ref_layers[0].layers + ref_layers[1].layers + ref_layers[2].layers
    ref_weight_layers = [l for l in ref_all_layers if l.has_params]
    neon_weight_layers = neon_layer.layers_to_optimize
    for rl, nl in zip(ref_weight_layers, neon_weight_layers):
        rl.set_params(nl.W.get())
    # Forward prop
    inp_middle = ref_layers[0].fprop(inp)
    ref_out = [r.fprop(inp_middle).get() for r in ref_layers[1:]]
    for h, r in zip(neon_out, ref_out):
        difference = np.max(np.abs(h-r))
        assert difference < 1e-9
    # Back prop
    erra = [np.random.random(ll.shape) for ll in neon_out]
    err = [be.array(e) for e in erra]
    input_layer = neon_layer.layers[0].layers[0]  # reference the trunk, then the root
    input_layer.prev_layer = True
    input_layer.set_deltas([be.iobuf(inshape)])
    neon_layer.bprop(err)
    errp = input_layer.deltas.get()
    for i, r in enumerate(ref_layers):
        r.layers[0].prev_layer = True
        _inshape = inshape if i == 0 else ref_layers[0].out_shape
        r.layers[0].set_deltas([be.iobuf(_inshape)])
    joined_err = be.iobuf(ref_layers[0].out_shape)
    # Branches are bprop'd in reverse order; wrap zip in list() so
    # reversed() also works on Python 3 (zip returns an iterator there).
    branch_errs = [r.bprop(e, a) for r, e, a in reversed(list(zip(ref_layers[1:], err, alphas)))]
    joined_err[:] = branch_errs[0] + branch_errs[1]
    err_ref = ref_layers[0].bprop(joined_err).get()
    difference = np.max(np.abs(err_ref - errp))
    # print as a function call: valid on both Python 2 and 3
    # (the original "print difference" statement is Python-2 only).
    print(difference)
    assert difference < 1e-9
if __name__ == '__main__':
    # Allow running this test directly without a test runner.
    test_branch_model()
| Bam4d/neon | tests/test_branch_layer.py | Python | apache-2.0 | 5,155 | [
"Gaussian"
] | e4afefa893183dd370116aefb00d8c975ac73cc804981d89e3a2f9b36baf2342 |
# Copyright (c) 2006-2011, 2013-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015-2016 Cara Vinson <ceridwenv@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""astroid manager: avoid multiple astroid build of a same module when
possible by providing a class responsible to get astroid representation
from various source and using a cache of built modules)
"""
import os
import sys
import zipimport
import six
from astroid import exceptions
from astroid.interpreter._import import spec
from astroid import modutils
from astroid import transforms
from astroid import util
def safe_repr(obj):
    """Return ``repr(obj)``, or the placeholder ``'???'`` when repr raises."""
    try:
        result = repr(obj)
    except Exception:  # pylint: disable=broad-except
        # A broken __repr__ must never break our own error reporting.
        result = '???'
    return result
class AstroidManager(object):
    """the astroid manager, responsible to build astroid from files
    or modules.
    Use the Borg pattern.
    """
    name = 'astroid loader'
    brain = {}
    def __init__(self):
        # Borg pattern: every instance shares the class-level ``brain`` dict,
        # so state initialized below is shared by all managers.
        self.__dict__ = AstroidManager.brain
        if not self.__dict__:
            # NOTE: cache entries are added by the [re]builder
            self.astroid_cache = {}
            self._mod_file_cache = {}
            self._failed_import_hooks = []
            self.always_load_extensions = False
            self.optimize_ast = False
            self.extension_package_whitelist = set()
            self._transform = transforms.TransformVisitor()
            # Export these APIs for convenience
            self.register_transform = self._transform.register_transform
            self.unregister_transform = self._transform.unregister_transform
    def visit_transforms(self, node):
        """Visit the transforms and apply them to the given *node*."""
        return self._transform.visit(node)
    def ast_from_file(self, filepath, modname=None, fallback=True, source=False):
        """given a module name, return the astroid object"""
        try:
            # Prefer the .py source next to a compiled file, if any.
            filepath = modutils.get_source_file(filepath, include_no_ext=True)
            source = True
        except modutils.NoSourceFile:
            pass
        if modname is None:
            try:
                modname = '.'.join(modutils.modpath_from_file(filepath))
            except ImportError:
                modname = filepath
        if modname in self.astroid_cache and self.astroid_cache[modname].file == filepath:
            return self.astroid_cache[modname]
        if source:
            from astroid.builder import AstroidBuilder
            return AstroidBuilder(self).file_build(filepath, modname)
        elif fallback and modname:
            return self.ast_from_module_name(modname)
        raise exceptions.AstroidBuildingError(
            'Unable to build an AST for {path}.', path=filepath)
    def _build_stub_module(self, modname):
        # Empty module used as a placeholder (e.g. for __main__ or
        # extensions we refuse to load).
        from astroid.builder import AstroidBuilder
        return AstroidBuilder(self).string_build('', modname)
    def _build_namespace_module(self, modname, path):
        from astroid.builder import build_namespace_package_module
        return build_namespace_package_module(modname, path)
    def _can_load_extension(self, modname):
        # C extensions are only imported (executed!) when explicitly allowed:
        # globally, for stdlib modules, or via the whitelist (any prefix of
        # the dotted name may be whitelisted).
        if self.always_load_extensions:
            return True
        if modutils.is_standard_module(modname):
            return True
        parts = modname.split('.')
        return any(
            '.'.join(parts[:x]) in self.extension_package_whitelist
            for x in range(1, len(parts) + 1))
    def ast_from_module_name(self, modname, context_file=None):
        """given a module name, return the astroid object"""
        if modname in self.astroid_cache:
            return self.astroid_cache[modname]
        if modname == '__main__':
            return self._build_stub_module(modname)
        old_cwd = os.getcwd()
        if context_file:
            os.chdir(os.path.dirname(context_file))
        try:
            found_spec = self.file_from_module_name(modname, context_file)
            # pylint: disable=no-member
            if found_spec.type == spec.ModuleType.PY_ZIPMODULE:
                # pylint: disable=no-member
                module = self.zip_import_data(found_spec.location)
                if module is not None:
                    return module
            elif found_spec.type in (spec.ModuleType.C_BUILTIN,
                                     spec.ModuleType.C_EXTENSION):
                # pylint: disable=no-member
                if (found_spec.type == spec.ModuleType.C_EXTENSION
                        and not self._can_load_extension(modname)):
                    return self._build_stub_module(modname)
                try:
                    module = modutils.load_module_from_name(modname)
                except Exception as ex:  # pylint: disable=broad-except
                    util.reraise(exceptions.AstroidImportError(
                        'Loading {modname} failed with:\n{error}',
                        modname=modname, path=found_spec.location, error=ex))
                return self.ast_from_module(module, modname)
            elif found_spec.type == spec.ModuleType.PY_COMPILED:
                raise exceptions.AstroidImportError(
                    "Unable to load compiled module {modname}.",
                    # pylint: disable=no-member
                    modname=modname, path=found_spec.location)
            elif found_spec.type == spec.ModuleType.PY_NAMESPACE:
                return self._build_namespace_module(modname,
                                                    # pylint: disable=no-member
                                                    found_spec.submodule_search_locations)
            # pylint: disable=no-member
            if found_spec.location is None:
                raise exceptions.AstroidImportError(
                    "Can't find a file for module {modname}.",
                    modname=modname)
            # pylint: disable=no-member
            return self.ast_from_file(found_spec.location, modname, fallback=False)
        except exceptions.AstroidBuildingError as e:
            # Give registered failure hooks a chance to resolve the import.
            for hook in self._failed_import_hooks:
                try:
                    return hook(modname)
                except exceptions.AstroidBuildingError:
                    pass
            raise e
        finally:
            os.chdir(old_cwd)
    def zip_import_data(self, filepath):
        """Build a module AST from a path inside a .zip/.egg archive,
        or return None when *filepath* does not point into one."""
        if zipimport is None:
            return None
        from astroid.builder import AstroidBuilder
        builder = AstroidBuilder(self)
        for ext in ('.zip', '.egg'):
            try:
                eggpath, resource = filepath.rsplit(ext + os.path.sep, 1)
            except ValueError:
                continue
            try:
                importer = zipimport.zipimporter(eggpath + ext)
                zmodname = resource.replace(os.path.sep, '.')
                if importer.is_package(resource):
                    zmodname = zmodname + '.__init__'
                module = builder.string_build(importer.get_source(resource),
                                              zmodname, filepath)
                return module
            except Exception:  # pylint: disable=broad-except
                continue
        return None
    def file_from_module_name(self, modname, contextfile):
        # Results (including import *errors*) are memoized per
        # (modname, contextfile); cached errors are re-raised below.
        try:
            value = self._mod_file_cache[(modname, contextfile)]
            traceback = sys.exc_info()[2]
        except KeyError:
            try:
                value = modutils.file_info_from_modpath(
                    modname.split('.'), context_file=contextfile)
                traceback = sys.exc_info()[2]
            except ImportError as ex:
                value = exceptions.AstroidImportError(
                    'Failed to import module {modname} with error:\n{error}.',
                    modname=modname, error=ex)
                traceback = sys.exc_info()[2]
            self._mod_file_cache[(modname, contextfile)] = value
        if isinstance(value, exceptions.AstroidBuildingError):
            six.reraise(exceptions.AstroidBuildingError,
                        value, traceback)
        return value
    def ast_from_module(self, module, modname=None):
        """given an imported module, return the astroid object"""
        modname = modname or module.__name__
        if modname in self.astroid_cache:
            return self.astroid_cache[modname]
        try:
            # some builtin modules don't have __file__ attribute
            filepath = module.__file__
            if modutils.is_python_source(filepath):
                return self.ast_from_file(filepath, modname)
        except AttributeError:
            pass
        from astroid.builder import AstroidBuilder
        return AstroidBuilder(self).module_build(module, modname)
    def ast_from_class(self, klass, modname=None):
        """get astroid for the given class"""
        if modname is None:
            try:
                modname = klass.__module__
            except AttributeError:
                util.reraise(exceptions.AstroidBuildingError(
                    'Unable to get module for class {class_name}.',
                    cls=klass, class_repr=safe_repr(klass), modname=modname))
        modastroid = self.ast_from_module_name(modname)
        return modastroid.getattr(klass.__name__)[0]  # XXX
    def infer_ast_from_something(self, obj, context=None):
        """infer astroid for the given class"""
        # Accept either a class or an instance (use its class then).
        if hasattr(obj, '__class__') and not isinstance(obj, type):
            klass = obj.__class__
        else:
            klass = obj
        try:
            modname = klass.__module__
        except AttributeError:
            util.reraise(exceptions.AstroidBuildingError(
                'Unable to get module for {class_repr}.',
                cls=klass, class_repr=safe_repr(klass)))
        except Exception as ex:  # pylint: disable=broad-except
            util.reraise(exceptions.AstroidImportError(
                'Unexpected error while retrieving module for {class_repr}:\n'
                '{error}', cls=klass, class_repr=safe_repr(klass), error=ex))
        try:
            name = klass.__name__
        except AttributeError:
            util.reraise(exceptions.AstroidBuildingError(
                'Unable to get name for {class_repr}:\n',
                cls=klass, class_repr=safe_repr(klass)))
        except Exception as ex:  # pylint: disable=broad-except
            util.reraise(exceptions.AstroidImportError(
                'Unexpected error while retrieving name for {class_repr}:\n'
                '{error}', cls=klass, class_repr=safe_repr(klass), error=ex))
        # take care, on living object __module__ is regularly wrong :(
        modastroid = self.ast_from_module_name(modname)
        if klass is obj:
            for inferred in modastroid.igetattr(name, context):
                yield inferred
        else:
            for inferred in modastroid.igetattr(name, context):
                yield inferred.instantiate_class()
    def register_failed_import_hook(self, hook):
        """Registers a hook to resolve imports that cannot be found otherwise.
        `hook` must be a function that accepts a single argument `modname` which
        contains the name of the module or package that could not be imported.
        If `hook` can resolve the import, must return a node of type `astroid.Module`,
        otherwise, it must raise `AstroidBuildingError`.
        """
        self._failed_import_hooks.append(hook)
    def cache_module(self, module):
        """Cache a module if no module with the same name is known yet."""
        self.astroid_cache.setdefault(module.name, module)
    def clear_cache(self, astroid_builtin=None):
        """Drop all cached module ASTs and rebuild the builtins proxies."""
        # XXX clear transforms
        self.astroid_cache.clear()
        # force bootstrap again, else we may ends up with cache inconsistency
        # between the manager and CONST_PROXY, making
        # unittest_lookup.LookupTC.test_builtin_lookup fail depending on the
        # test order
        import astroid.raw_building
        astroid.raw_building._astroid_bootstrapping(
            astroid_builtin=astroid_builtin)
| arju88nair/projectCulminate | venv/lib/python3.5/site-packages/astroid/manager.py | Python | apache-2.0 | 12,332 | [
"VisIt"
] | 6ee26a1d88f3dd42783a5c2db0a5285e51e7f13ff1f6f87b422f3a09e6976905 |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import os
import platform
import re
import ast
from setuptools import find_packages, setup
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext as _build_ext
# Bootstrap setup.py with numpy
# Huge thanks to coldfix's solution
# http://stackoverflow.com/a/21621689/579416
class build_ext(_build_ext):
    """build_ext that defers the numpy include-dir lookup until numpy has
    been installed via setup_requires (bootstrap trick, see link above)."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Prevent numpy from thinking it is still in its setup process:
        __builtins__.__NUMPY_SETUP__ = False
        import numpy
        self.include_dirs.append(numpy.get_include())
# version parsing from __init__ pulled from Flask's setup.py
# https://github.com/mitsuhiko/flask/blob/master/setup.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('skbio/__init__.py', 'rb') as f:
    hit = _version_re.search(f.read().decode('utf-8')).group(1)
    version = str(ast.literal_eval(hit))
# Trove classifiers, one per line; stripped into a list below.
classes = """
    Development Status :: 1 - Planning
    License :: OSI Approved :: BSD License
    Topic :: Software Development :: Libraries
    Topic :: Scientific/Engineering
    Topic :: Scientific/Engineering :: Bio-Informatics
    Programming Language :: Python
    Programming Language :: Python :: 2
    Programming Language :: Python :: 2.7
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.3
    Programming Language :: Python :: 3.4
    Operating System :: Unix
    Operating System :: POSIX
    Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
description = ('Data structures, algorithms and educational '
               'resources for bioinformatics.')
with open('README.rst') as f:
    long_description = f.read()
# Dealing with Cython: build from .pyx only when USE_CYTHON is set,
# otherwise compile the pre-generated .c sources.
USE_CYTHON = os.environ.get('USE_CYTHON', False)
ext = '.pyx' if USE_CYTHON else '.c'
# There's a bug in some versions of Python 3.4 that propagates
# -Werror=declaration-after-statement to extensions, instead of just affecting
# the compilation of the interpreter. See http://bugs.python.org/issue21121 for
# details. This acts as a workaround until the next Python 3 release -- thanks
# Wolfgang Maier (wolma) for the workaround!
ssw_extra_compile_args = ['-Wno-error=declaration-after-statement']
# Users with i686 architectures have reported that adding this flag allows
# SSW to be compiled. See https://github.com/biocore/scikit-bio/issues/409 and
# http://stackoverflow.com/q/26211814/3776794 for details.
if platform.machine() == 'i686':
    ssw_extra_compile_args.append('-msse2')
extensions = [
    Extension("skbio.stats.__subsample",
              ["skbio/stats/__subsample" + ext]),
    Extension("skbio.alignment._ssw_wrapper",
              ["skbio/alignment/_ssw_wrapper" + ext,
               "skbio/alignment/_lib/ssw.c"],
              extra_compile_args=ssw_extra_compile_args)
]
if USE_CYTHON:
    from Cython.Build import cythonize
    extensions = cythonize(extensions)
setup(name='scikit-bio',
      version=version,
      license='BSD',
      description=description,
      long_description=long_description,
      author="scikit-bio development team",
      author_email="gregcaporaso@gmail.com",
      maintainer="scikit-bio development team",
      maintainer_email="gregcaporaso@gmail.com",
      url='http://scikit-bio.org',
      test_suite='nose.collector',
      packages=find_packages(),
      ext_modules=extensions,
      cmdclass={'build_ext': build_ext},
      setup_requires=['numpy'],
      install_requires=['numpy', 'matplotlib',
                        'scipy', 'pandas', 'future', 'six',
                        'natsort', 'IPython', 'decorator',
                        'CacheControl[FileCache]'],
      extras_require={'test': ["nose", "pep8", "flake8", "python-dateutil"],
                      'doc': ["Sphinx == 1.2.2", "sphinx-bootstrap-theme"]},
      classifiers=classifiers,
      package_data={
          'skbio.io.tests': ['data/*'],
          'skbio.stats.tests': ['data/*'],
          'skbio.stats.distance.tests': ['data/*'],
          'skbio.stats.ordination.tests': ['data/*']
          }
      )
| Achuth17/scikit-bio | setup.py | Python | bsd-3-clause | 4,506 | [
"scikit-bio"
] | 4f9b94906f77281f1557d00bae615ebdc4bc4e3fcbf22dbccc7b57eb825da54f |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Works for Abinit:
"""
from __future__ import unicode_literals, division, print_function
import os
import shutil
import time
import abc
import collections
import numpy as np
import six
import copy
from six.moves import filter
from monty.collections import AttrDict
from monty.itertools import chunks
from monty.functools import lazy_property
from monty.fnmatch import WildCard
from monty.dev import deprecated
from pydispatch import dispatcher
from pymatgen.core.units import EnergyArray
from . import wrappers
from .nodes import Dependency, Node, NodeError, NodeResults, check_spectator
from .tasks import (Task, AbinitTask, ScfTask, NscfTask, DfptTask, PhononTask, DdkTask,
BseTask, RelaxTask, DdeTask, BecTask, ScrTask, SigmaTask,
EphTask, CollinearThenNonCollinearScfTask)
from .utils import Directory
from .netcdf import ETSF_Reader, NetcdfReader
from .abitimer import AbinitTimerParser
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"Work",
"BandStructureWork",
"RelaxWork",
"G0W0Work",
"QptdmWork",
"SigmaConvWork",
"BseMdfWork",
"PhononWork",
]
class WorkResults(NodeResults):
    """Results associated to a :class:`Work`."""
    JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
    @classmethod
    def from_node(cls, work):
        """Initialize an instance from a :class:`Work` instance."""
        new = super(WorkResults, cls).from_node(work)
        # Register every file found in outdir in GridFs.
        # Warning: assuming binary files.
        files = {}
        for path in work.outdir.list_filepaths():
            files[os.path.basename(path)] = path
        new.register_gridfs_files(**files)
        return new
class WorkError(NodeError):
    """Base class for the exceptions raised by :class:`Work` objects."""
class BaseWork(six.with_metaclass(abc.ABCMeta, Node)):
    """
    Abstract base class for a node that contains several tasks.
    Provides a subprocess.Popen-like interface that broadcasts to the
    tasks, core-accounting helpers, and the S_OK signal machinery.
    """
    Error = WorkError
    Results = WorkResults
    # interface modeled after subprocess.Popen
    @abc.abstractproperty
    def processes(self):
        """Return a list of objects that support the `subprocess.Popen` protocol."""
    def poll(self):
        """
        Check if all child processes have terminated. Set and return returncode attribute.
        """
        return [task.poll() for task in self]
    def wait(self):
        """
        Wait for child processed to terminate. Set and return returncode attribute.
        """
        return [task.wait() for task in self]
    def communicate(self, input=None):
        """
        Interact with processes: Send data to stdin. Read data from stdout and
        stderr, until end-of-file is reached.
        Wait for process to terminate. The optional input argument should be a
        string to be sent to the child processed, or None, if no data should be
        sent to the children.
        communicate() returns a list of tuples (stdoutdata, stderrdata).
        """
        return [task.communicate(input) for task in self]
    @property
    def returncodes(self):
        """
        The children return codes, set by poll() and wait() (and indirectly by communicate()).
        A None value indicates that the process hasn't terminated yet.
        A negative value -N indicates that the child was terminated by signal N (Unix only).
        """
        return [task.returncode for task in self]
    @property
    def ncores_reserved(self):
        """
        Returns the number of cores reserved in this moment.
        A core is reserved if it's still not running but
        we have submitted the task to the queue manager.
        """
        return sum(task.manager.num_cores for task in self if task.status == task.S_SUB)
    @property
    def ncores_allocated(self):
        """
        Returns the number of CPUs allocated in this moment.
        A core is allocated if it's running a task or if we have
        submitted a task to the queue manager but the job is still pending.
        """
        return sum(task.manager.num_cores for task in self if task.status in [task.S_SUB, task.S_RUN])
    @property
    def ncores_used(self):
        """
        Returns the number of cores used in this moment.
        A core is used if there's a job that is running on it.
        """
        return sum(task.manager.num_cores for task in self if task.status == task.S_RUN)
    def fetch_task_to_run(self):
        """
        Returns the first task that is ready to run or
        None if no task can be submitted at present"
        Raises:
            `StopIteration` if all tasks are done.
        """
        # All the tasks are done so raise an exception
        # that will be handled by the client code.
        if all(task.is_completed for task in self):
            raise StopIteration("All tasks completed.")
        for task in self:
            if task.can_run:
                return task
        # No task found, this usually happens when we have dependencies.
        # Beware of possible deadlocks here!
        logger.warning("Possible deadlock in fetch_task_to_run!")
        return None
    def fetch_alltasks_to_run(self):
        """
        Returns a list with all the tasks that can be submitted.
        Empty list if not task has been found.
        """
        return [task for task in self if task.can_run]
    @abc.abstractmethod
    def setup(self, *args, **kwargs):
        """Method called before submitting the calculations."""
    def _setup(self, *args, **kwargs):
        # Internal wrapper around the user-provided setup hook.
        self.setup(*args, **kwargs)
    def connect_signals(self):
        """
        Connect the signals within the work.
        The :class:`Work` is responsible for catching the important signals raised from
        its task and raise new signals when some particular condition occurs.
        """
        for task in self:
            dispatcher.connect(self.on_ok, signal=task.S_OK, sender=task)
    def disconnect_signals(self):
        """
        Disable the signals within the work. This function reverses the process of `connect_signals`
        """
        for task in self:
            try:
                dispatcher.disconnect(self.on_ok, signal=task.S_OK, sender=task)
            except dispatcher.errors.DispatcherKeyError as exc:
                # Already disconnected: nothing to do.
                logger.debug(str(exc))
    @property
    def all_ok(self):
        """True if every task in the work has reached status S_OK."""
        return all(task.status == task.S_OK for task in self)
    #@check_spectator
    def on_ok(self, sender):
        """
        This callback is called when one task reaches status `S_OK`.
        It executes on_all_ok when all task in self have reached `S_OK`.
        """
        logger.debug("in on_ok with sender %s" % sender)
        if self.all_ok:
            if self.finalized:
                return AttrDict(returncode=0, message="Work has been already finalized")
            else:
                # Set finalized here, because on_all_ok might change it (e.g. Relax + EOS in a single work)
                self.finalized = True
                try:
                    results = AttrDict(**self.on_all_ok())
                except Exception as exc:
                    self.history.critical("on_all_ok raises %s" % str(exc))
                    self.finalized = False
                    raise
                # Signal to possible observers that the `Work` reached S_OK
                self.history.info("Work %s is finalized and broadcasts signal S_OK" % str(self))
                if self._finalized:
                    self.send_signal(self.S_OK)
                return results
        return AttrDict(returncode=1, message="Not all tasks are OK!")
    #@check_spectator
    def on_all_ok(self):
        """
        This method is called once the `Work` is completed i.e. when all the tasks
        have reached status S_OK. Subclasses should provide their own implementation
        Returns:
            Dictionary that must contain at least the following entries:
                returncode:
                    0 on success.
                message:
                    a string that should provide a human-readable description of what has been performed.
        """
        return dict(returncode=0, message="Calling on_all_ok of the base class!")
    def get_results(self, **kwargs):
        """
        Method called once the calculations are completed.
        The base version returns a dictionary task_name: TaskResults for each task in self.
        """
        results = self.Results.from_node(self)
        return results
class NodeContainer(six.with_metaclass(abc.ABCMeta)):
    """
    Mixin classes for `Work` and `Flow` objects providing helper functions
    to register tasks in the container. The helper function call the
    `register` method of the container.
    """
    # TODO: Abstract protocol for containers
    @abc.abstractmethod
    def register_task(self, *args, **kwargs):
        """
        Register a task in the container.
        """
        # TODO: shall flow.register_task return a Task or a Work?
    # Helper functions: each one forwards to register_task with the
    # appropriate task_class.
    def register_scf_task(self, *args, **kwargs):
        """Register a Scf task."""
        kwargs["task_class"] = ScfTask
        return self.register_task(*args, **kwargs)
    def register_collinear_then_noncollinear_scf_task(self, *args, **kwargs):
        """Register a Scf task that perform a SCF run first with nsppol = 2 and then nspinor = 2"""
        kwargs["task_class"] = CollinearThenNonCollinearScfTask
        return self.register_task(*args, **kwargs)
    def register_nscf_task(self, *args, **kwargs):
        """Register a nscf task."""
        kwargs["task_class"] = NscfTask
        return self.register_task(*args, **kwargs)
    def register_relax_task(self, *args, **kwargs):
        """Register a task for structural optimization."""
        kwargs["task_class"] = RelaxTask
        return self.register_task(*args, **kwargs)
    def register_phonon_task(self, *args, **kwargs):
        """Register a phonon task."""
        kwargs["task_class"] = PhononTask
        return self.register_task(*args, **kwargs)
    def register_ddk_task(self, *args, **kwargs):
        """Register a ddk task."""
        kwargs["task_class"] = DdkTask
        return self.register_task(*args, **kwargs)
    def register_scr_task(self, *args, **kwargs):
        """Register a screening task."""
        kwargs["task_class"] = ScrTask
        return self.register_task(*args, **kwargs)
    def register_sigma_task(self, *args, **kwargs):
        """Register a sigma task."""
        kwargs["task_class"] = SigmaTask
        return self.register_task(*args, **kwargs)
    # TODO: Remove
    def register_dde_task(self, *args, **kwargs):
        """Register a Dde task."""
        kwargs["task_class"] = DdeTask
        return self.register_task(*args, **kwargs)
    def register_bec_task(self, *args, **kwargs):
        """Register a BEC task."""
        kwargs["task_class"] = BecTask
        return self.register_task(*args, **kwargs)
    def register_bse_task(self, *args, **kwargs):
        """Register a Bethe-Salpeter task."""
        kwargs["task_class"] = BseTask
        return self.register_task(*args, **kwargs)
    def register_eph_task(self, *args, **kwargs):
        """Register an electron-phonon task."""
        kwargs["task_class"] = EphTask
        return self.register_task(*args, **kwargs)
    def walknset_vars(self, task_class=None, *args, **kwargs):
        """
        Set the values of the ABINIT variables in the input files of the nodes
        Args:
            task_class: If not None, only the input files of the tasks belonging
                to class `task_class` are modified.
        Example:
            flow.walknset_vars(ecut=10, kptopt=4)
        """
        def change_task(task):
            # Filter predicate: restrict to task_class when one is given.
            if task_class is not None and task.__class__ is not task_class: return False
            return True
        if self.is_work:
            for task in self:
                if not change_task(task): continue
                task.set_vars(*args, **kwargs)
        elif self.is_flow:
            for task in self.iflat_tasks():
                if not change_task(task): continue
                task.set_vars(*args, **kwargs)
        else:
            raise TypeError("Don't know how to set variables for object class %s" % self.__class__.__name__)
class Work(BaseWork, NodeContainer):
"""
A Work is a list of (possibly connected) tasks.
"""
    def __init__(self, workdir=None, manager=None):
        """
        Args:
            workdir: Path to the working directory.
            manager: :class:`TaskManager` object.
        """
        super(Work, self).__init__()
        # Tasks are stored in registration order.
        self._tasks = []
        if workdir is not None:
            self.set_workdir(workdir)
        if manager is not None:
            self.set_manager(manager)
def set_manager(self, manager):
"""Set the :class:`TaskManager` to use to launch the :class:`Task`."""
self.manager = manager.deepcopy()
for task in self:
task.set_manager(manager)
    @property
    def flow(self):
        """The flow containing this :class:`Work`."""
        # _flow is set once by set_flow when the work is registered in a flow.
        return self._flow
def set_flow(self, flow):
"""Set the flow associated to this :class:`Work`."""
if not hasattr(self, "_flow"):
self._flow = flow
else:
if self._flow != flow:
raise ValueError("self._flow != flow")
    @lazy_property
    def pos(self):
        """The position of self in the :class:`Flow`"""
        # Linear scan over the flow; lazy_property caches the result.
        for i, work in enumerate(self.flow):
            if self == work:
                return i
        raise ValueError("Cannot find the position of %s in flow %s" % (self, self.flow))
@property
def pos_str(self):
"""String representation of self.pos"""
return "w" + str(self.pos)
    def set_workdir(self, workdir, chroot=False):
        """Set the working directory. Cannot be set more than once unless chroot is True"""
        if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
            raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
        self.workdir = os.path.abspath(workdir)
        # Directories with (input|output|temporary) data.
        # The work will use these directories to connect
        # itself to other works and/or to produce new data
        # that will be used by its children.
        self.indir = Directory(os.path.join(self.workdir, "indata"))
        self.outdir = Directory(os.path.join(self.workdir, "outdata"))
        self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
def chroot(self, new_workdir):
self.set_workdir(new_workdir, chroot=True)
for i, task in enumerate(self):
new_tdir = os.path.join(self.workdir, "t" + str(i))
task.set_workdir(new_tdir, chroot=True)
    def __len__(self):
        """Number of tasks registered in the work."""
        return len(self._tasks)
    def __iter__(self):
        """Iterate over the tasks in registration order."""
        return self._tasks.__iter__()
    def __getitem__(self, slice):
        """Return the task (or list of tasks) selected by *slice*."""
        return self._tasks[slice]
    def chunks(self, chunk_size):
        """Yield successive chunks of tasks of length chunk_size."""
        for tasks in chunks(self, chunk_size):
            yield tasks
def opath_from_ext(self, ext):
"""
Returns the path of the output file with extension ext.
Use it when the file does not exist yet.
"""
return self.indir.path_in("in_" + ext)
    def opath_from_ext(self, ext):
        """
        Returns the path of the output file with extension ext
        (i.e. a file named "out_<ext>" in the work's outdata directory).
        Use it when the file does not exist yet.
        """
        return self.outdir.path_in("out_" + ext)
    @property
    def processes(self):
        """List of the Popen-like processes of the tasks in the work."""
        return [task.process for task in self]
@property
def all_done(self):
    """True if all the :class:`Task` objects in the :class:`Work` are done."""
    return all(t.status >= t.S_DONE for t in self)
@property
def isnc(self):
    """True if norm-conserving calculation."""
    return all(t.isnc for t in self)
@property
def ispaw(self):
    """True if PAW calculation."""
    return all(t.ispaw for t in self)
@property
def status_counter(self):
    """
    Returns a `Counter` object that counts the number of tasks with
    given status (use the string representation of the status as key).
    """
    # Counter consumes the generator directly; equivalent to incrementing per task.
    return collections.Counter(str(task.status) for task in self)
def allocate(self, manager=None):
    """
    This function is called once we have completed the initialization
    of the :class:`Work`. It sets the manager of each task (if not already done)
    and defines the working directories of the tasks.

    Args:
        manager: :class:`TaskManager` object or None
    """
    for i, task in enumerate(self):
        if not hasattr(task, "manager"):
            # Set the manager: use the one provided in input else the one of the work.
            # (idiom fix: the original abused a conditional expression for its side effects)
            task.set_manager(manager if manager is not None else self.manager)

        task_workdir = os.path.join(self.workdir, "t" + str(i))

        if not hasattr(task, "workdir"):
            task.set_workdir(task_workdir)
        else:
            # A previously-assigned workdir must agree with the computed one.
            if task.workdir != task_workdir:
                raise ValueError("task.workdir != task_workdir: %s, %s" % (task.workdir, task_workdir))
def register(self, obj, deps=None, required_files=None, manager=None, task_class=None):
    """
    Registers a new :class:`Task` and add it to the internal list, taking into account possible dependencies.

    Args:
        obj: :class:`AbinitInput` instance (or an already-built :class:`Task`).
        deps: Dictionary specifying the dependency of this node.
            None means that this obj has no dependency.
        required_files: List of strings with the path of the files used by the task.
            Note that the files must exist when the task is registered.
            Use the standard approach based on Works, Tasks and deps
            if the files will be produced in the future.
        manager:
            The :class:`TaskManager` responsible for the submission of the task. If manager is None, we use
            the `TaskManager` specified during the creation of the :class:`Work`.
        task_class: Task subclass to instantiate. Default: :class:`AbinitTask`

    Returns:
        :class:`Task` object
    """
    # The task workdir can only be computed if the work already has one.
    task_workdir = None
    if hasattr(self, "workdir"):
        task_workdir = os.path.join(self.workdir, "t" + str(len(self)))

    if isinstance(obj, Task):
        # Already a Task instance: register it as-is.
        task = obj
    else:
        # Set the class
        if task_class is None:
            task_class = AbinitTask
        # Build the task from the input (workdir/manager may still be None at this point).
        task = task_class.from_input(obj, task_workdir, manager)

    self._tasks.append(task)

    # Handle possible dependencies.
    if deps is not None:
        deps = [Dependency(node, exts) for node, exts in deps.items()]
        task.add_deps(deps)

    # Handle possible required files (must already exist on disk).
    if required_files is not None:
        task.add_required_files(required_files)

    return task

# Needed by NodeContainer
register_task = register
def path_in_workdir(self, filename):
    """Create the absolute path of filename in the working directory."""
    workdir = self.workdir
    return os.path.join(workdir, filename)
def setup(self, *args, **kwargs):
    """
    Method called before running the calculations.
    The default implementation is empty.
    """
    # Hook for subclasses; intentionally a no-op here.
def build(self, *args, **kwargs):
    """Creates the top level directory."""
    # Create the (input|output|temporary) directories of the work.
    for directory in (self.indir, self.outdir, self.tmpdir):
        directory.makedirs()

    # Build dirs and files of each task.
    for task in self:
        task.build(*args, **kwargs)

    # Connect signals within the work.
    self.connect_signals()
@property
def status(self):
    """
    Returns the status of the work i.e. the minimum of the status of the tasks.
    """
    min_status = self.get_all_status(only_min=True)
    return min_status
def get_all_status(self, only_min=False):
    """
    Returns a list with the status of the tasks in self.

    Args:
        only_min: If True, the minimum of the status is returned.
    """
    if len(self) == 0:
        # The work will be created in the future: report S_INIT.
        statuses = [self.S_INIT]
    else:
        # Refresh the task statuses before collecting them.
        self.check_status()
        statuses = [task.status for task in self]

    return min(statuses) if only_min else statuses
def check_status(self):
    """Check the status of the tasks."""
    # Recompute the status of the tasks, ignoring OK and LOCKED ones.
    for task in self:
        if task.status not in (task.S_OK, task.S_LOCKED):
            task.check_status()

    # Take into account possible dependencies (list, not generators).
    for task in self:
        if task.status == task.S_LOCKED:
            continue
        deps_ok = all(s == task.S_OK for s in task.deps_status)
        if task.status < task.S_SUB and deps_ok:
            task.set_status(task.S_READY, "Status set to Ready")
def rmtree(self, exclude_wildcard=""):
    """
    Remove all files and directories in the working directory.

    Args:
        exclude_wildcard: Optional string with shell-style patterns separated by `|`.
            Files matching one of the patterns will be preserved.
            example: exclude_wildcard="*.nc|*.txt" preserves all the files
            whose extension is in ["nc", "txt"].
    """
    if not exclude_wildcard:
        shutil.rmtree(self.workdir)
        return

    keep = WildCard(exclude_wildcard)
    for dirpath, _, fnames in os.walk(self.workdir):
        for fname in fnames:
            if not keep.match(fname):
                os.remove(os.path.join(dirpath, fname))
def rm_indatadir(self):
    """Remove the indata directory of each task."""
    for task in self:
        task.rm_indatadir()
def rm_outdatadir(self):
    """Remove the outdata directory of each task."""
    for task in self:
        # Fix: the original called the misspelled `task.rm_outatadir()`, which would
        # raise AttributeError at runtime; its docstring also wrongly said "indata".
        task.rm_outdatadir()
def rm_tmpdatadir(self):
    """Remove the tmpdata directory of each task."""
    for task in self:
        task.rm_tmpdatadir()
def move(self, dest, isabspath=False):
    """
    Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
    The destination path must not already exist. If the destination already exists
    but is not a directory, it may be overwritten depending on os.rename() semantics.

    By default, dest is located in the parent directory of self.workdir; use isabspath=True
    to specify an absolute path.
    """
    if not isabspath:
        parent = os.path.dirname(self.workdir)
        dest = os.path.join(parent, dest)

    shutil.move(self.workdir, dest)
def submit_tasks(self, wait=False):
    """
    Submits the tasks in self; optionally blocks until they complete.

    TODO: change name.
    """
    for task in self:
        task.start()

    if not wait:
        return
    for task in self:
        task.wait()
def start(self, *args, **kwargs):
    """
    Start the work. Calls build and _setup first, then submit the tasks.
    Non-blocking call unless wait is set to True
    """
    # `wait` is popped so it is not forwarded to _setup/build.
    wait = kwargs.pop("wait", False)

    # Initial setup
    self._setup(*args, **kwargs)

    # Build dirs and files.
    self.build(*args, **kwargs)

    # Submit tasks (does not block unless wait=True).
    self.submit_tasks(wait=wait)
def read_etotals(self, unit="Ha"):
    """
    Reads the total energy from the GSR file produced by the task.

    Return a numpy array with the total energies in Hartree
    The array element is set to np.inf if an exception is raised while reading the GSR file.
    """
    if not self.all_done:
        raise self.Error("Some task is still in running/submitted state")

    etotals = []
    for task in self:
        # Open the GSR file and read etotal (Hartree)
        gsr_path = task.outdir.has_abiext("GSR")
        etot = np.inf
        if gsr_path:
            try:
                with ETSF_Reader(gsr_path) as r:
                    etot = r.read_value("etotal")
            except Exception:
                # Fix: the docstring promises np.inf on read errors, but the original
                # let the exception propagate. Keep the documented best-effort contract.
                etot = np.inf

        etotals.append(etot)

    return EnergyArray(etotals, "Ha").to(unit)
def parse_timers(self):
    """
    Parse the TIMER section reported in the ABINIT output files.

    Returns:
        :class:`AbinitTimerParser` object
    """
    # Keep only the output files that actually exist on disk.
    paths = [task.output_file.path for task in self]
    filenames = [p for p in paths if os.path.exists(p)]

    parser = AbinitTimerParser()
    parser.parse(filenames)
    return parser
class BandStructureWork(Work):
    """Work for band structure calculations: GS-SCF + NSCF band structure + optional DOS runs."""

    def __init__(self, scf_input, nscf_input, dos_inputs=None, workdir=None, manager=None):
        """
        Args:
            scf_input: Input for the SCF run
            nscf_input: Input for the NSCF run defining the band structure calculation.
            dos_inputs: Input(s) for the DOS. DOS is computed only if dos_inputs is not None.
            workdir: Working directory.
            manager: :class:`TaskManager` object.
        """
        super(BandStructureWork, self).__init__(workdir=workdir, manager=manager)

        # Register the GS-SCF run.
        self.scf_task = self.register_scf_task(scf_input)

        # Register the NSCF run and its dependency.
        self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})

        # Add DOS computation(s) if requested. Each DOS task depends on the SCF density.
        self.dos_tasks = []
        if dos_inputs is not None:
            if not isinstance(dos_inputs, (list, tuple)):
                dos_inputs = [dos_inputs]
            for dos_input in dos_inputs:
                dos_task = self.register_nscf_task(dos_input, deps={self.scf_task: "DEN"})
                self.dos_tasks.append(dos_task)

    def plot_ebands(self, **kwargs):
        """
        Plot the band structure. kwargs are passed to the plot method of :class:`ElectronBands`.

        Returns:
            `matplotlib` figure
        """
        with self.nscf_task.open_gsr() as gsr:
            return gsr.ebands.plot(**kwargs)

    def plot_ebands_with_edos(self, dos_pos=0, method="gaussian", step=0.01, width=0.1, **kwargs):
        """
        Plot the band structure and the DOS.

        Args:
            dos_pos: Index of the task from which the DOS should be obtained (note: 0 refers to the first DOS task).
            method: String defining the method for the computation of the DOS.
            step: Energy step (eV) of the linear mesh.
            width: Standard deviation (eV) of the gaussian.
            kwargs: Keyword arguments passed to `plot_with_edos` method to customize the plot.

        Returns:
            `matplotlib` figure.
        """
        # Bands from the NSCF run, DOS from the selected dense-k-mesh run.
        with self.nscf_task.open_gsr() as gsr:
            gs_ebands = gsr.ebands

        with self.dos_tasks[dos_pos].open_gsr() as gsr:
            dos_ebands = gsr.ebands

        edos = dos_ebands.get_edos(method=method, step=step, width=width)
        return gs_ebands.plot_with_edos(edos, **kwargs)

    def plot_edoses(self, dos_pos=None, method="gaussian", step=0.01, width=0.1, **kwargs):
        """
        Plot the electron DOS(es).

        Args:
            dos_pos: Index of the task from which the DOS should be obtained.
                None if all DOSes should be displayed. Accepts integer or list of integers.
            method: String defining the method for the computation of the DOS.
            step: Energy step (eV) of the linear mesh.
            width: Standard deviation (eV) of the gaussian.
            kwargs: Keyword arguments passed to `plot` method to customize the plot.

        Returns:
            `matplotlib` figure.
        """
        # Normalize dos_pos to a list so a single index can be passed.
        if dos_pos is not None and not isinstance(dos_pos, (list, tuple)): dos_pos = [dos_pos]

        from abipy.electrons.ebands import ElectronDosPlotter
        plotter = ElectronDosPlotter()
        for i, task in enumerate(self.dos_tasks):
            if dos_pos is not None and i not in dos_pos: continue
            with task.open_gsr() as gsr:
                edos = gsr.ebands.get_edos(method=method, step=step, width=width)
                ngkpt = task.get_inpvar("ngkpt")
                plotter.add_edos("ngkpt %s" % str(ngkpt), edos)

        return plotter.plot(**kwargs)
class RelaxWork(Work):
    """
    Work for structural relaxations. The first task relaxes the atomic position
    while keeping the unit cell parameters fixed. The second task uses the final
    structure to perform a structural relaxation in which both the atomic positions
    and the lattice parameters are optimized.
    """
    def __init__(self, ion_input, ioncell_input, workdir=None, manager=None, target_dilatmx=None):
        """
        Args:
            ion_input: Input for the relaxation of the ions (cell is fixed)
            ioncell_input: Input for the relaxation of the ions and the unit cell.
            workdir: Working directory.
            manager: :class:`TaskManager` object.
            target_dilatmx: Optional target for the ABINIT dilatmx variable. If given,
                the ioncell task is iteratively restarted with reduced dilatmx until
                the target is reached (see on_ok).
        """
        super(RelaxWork, self).__init__(workdir=workdir, manager=manager)

        self.ion_task = self.register_relax_task(ion_input)

        # Note:
        # 1) It would be nice to restart from the WFK file but ABINIT crashes due to the
        #    different unit cell parameters if paral_kgb == 1
        #paral_kgb = ion_input[0]["paral_kgb"]
        #if paral_kgb == 1:

        #deps = {self.ion_task: "WFK"}  # --> FIXME: Problem in rwwf
        #deps = {self.ion_task: "DEN"}
        deps = None

        self.ioncell_task = self.register_relax_task(ioncell_input, deps=deps)

        # Lock ioncell_task as ion_task should communicate to ioncell_task that
        # the calculation is OK and pass the final structure.
        self.ioncell_task.lock(source_node=self)
        # Guard so the structure transfer in on_ok happens only once.
        self.transfer_done = False

        self.target_dilatmx = target_dilatmx

    #@check_spectator
    def on_ok(self, sender):
        """
        This callback is called when one task reaches status S_OK.
        If sender == self.ion_task, we update the initial structure
        used by self.ioncell_task and we unlock it so that the job can be submitted.
        """
        logger.debug("in on_ok with sender %s" % sender)

        if sender == self.ion_task and not self.transfer_done:
            # Get the relaxed structure from ion_task
            ion_structure = self.ion_task.get_final_structure()

            # Transfer it to the ioncell task (we do it only once).
            self.ioncell_task._change_structure(ion_structure)
            self.transfer_done = True

            # Unlock ioncell_task so that we can submit it.
            self.ioncell_task.unlock(source_node=self)

        elif sender == self.ioncell_task and self.target_dilatmx:
            # Iteratively tighten dilatmx and restart the ioncell task until the
            # target value is reached.
            actual_dilatmx = self.ioncell_task.get_inpvar('dilatmx', 1.)
            if self.target_dilatmx < actual_dilatmx:
                self.ioncell_task.reduce_dilatmx(target=self.target_dilatmx)
                logger.info('Converging dilatmx. Value reduce from {} to {}.'
                            .format(actual_dilatmx, self.ioncell_task.get_inpvar('dilatmx')))
                self.ioncell_task.reset_from_scratch()

        return super(RelaxWork, self).on_ok(sender)

    def plot_ion_relaxation(self, **kwargs):
        """
        Plot the history of the ion relaxation (fixed cell).
        kwargs are passed to the plot method of :class:`HistFile`

        Return `matplotlib` figure or None if hist file is not found.
        """
        with self.ion_task.open_hist() as hist:
            return hist.plot(**kwargs) if hist else None

    def plot_ioncell_relaxation(self, **kwargs):
        """
        Plot the history of the ion-cell relaxation.
        kwargs are passed to the plot method of :class:`HistFile`

        Return `matplotlib` figure or None if hist file is not found.
        """
        with self.ioncell_task.open_hist() as hist:
            return hist.plot(**kwargs) if hist else None
class G0W0Work(Work):
    """
    Work for general G0W0 calculations.
    All input can be either single inputs or lists of inputs
    """
    def __init__(self, scf_inputs, nscf_inputs, scr_inputs, sigma_inputs,
                 workdir=None, manager=None):
        """
        Args:
            scf_inputs: Input for the SCF run, if it is a list add all but only link
                to the last input (used for convergence studies on the KS band gap)
            nscf_inputs: Input for the NSCF run, if it is a list add all but only
                link to the last (i.e. additional DOS and BANDS)
            scr_inputs: Input for the screening run
            sigma_inputs: List of :class:`AbinitInput` for the self-energy run.
                if scr and sigma are lists of the same length every sigma gets its own screening
                if there is only one screening all sigma's link to this one
            workdir: Working directory of the calculation.
            manager: :class:`TaskManager` object.
        """
        super(G0W0Work, self).__init__(workdir=workdir, manager=manager)

        # Pair each sigma with its own screening only when both are lists of equal length.
        if isinstance(sigma_inputs, list) and isinstance(scr_inputs, list) and len(sigma_inputs) == len(scr_inputs):
            spread_scr = True
        else:
            spread_scr = False

        self.sigma_tasks = []

        # Register the GS-SCF run.
        # register all scf_inputs but link the nscf only to the last scf in the list
        # multiple scf_inputs can be provided to perform convergence studies
        if isinstance(scf_inputs, list):
            for scf_input in scf_inputs:
                self.scf_task = self.register_scf_task(scf_input)
        else:
            self.scf_task = self.register_scf_task(scf_inputs)

        # Register the NSCF run(s); self.nscf_task ends up pointing at the last one.
        if isinstance(nscf_inputs, list):
            for nscf_input in nscf_inputs:
                self.nscf_task = nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
        else:
            self.nscf_task = nscf_task = self.register_nscf_task(nscf_inputs, deps={self.scf_task: "DEN"})

        # Register the SCR and SIGMA run(s).
        if spread_scr:
            # One dedicated screening task per sigma task.
            for scr_input, sigma_input in zip(scr_inputs, sigma_inputs):
                scr_task = self.register_scr_task(scr_input, deps={nscf_task: "WFK"})
                sigma_task = self.register_sigma_task(sigma_input, deps={nscf_task: "WFK", scr_task: "SCR"})
                self.sigma_tasks.append(sigma_task)
        else:
            # One screening task shared by all sigma tasks.
            scr_task = self.register_scr_task(scr_inputs, deps={nscf_task: "WFK"})
            for sigma_input in sigma_inputs:
                task = self.register_sigma_task(sigma_input, deps={nscf_task: "WFK", scr_task: "SCR"})
                self.sigma_tasks.append(task)
class SigmaConvWork(Work):
    """
    Work for self-energy convergence studies.
    """
    def __init__(self, wfk_node, scr_node, sigma_inputs, workdir=None, manager=None):
        """
        Args:
            wfk_node: The node who has produced the WFK file or filepath pointing to the WFK file.
            scr_node: The node who has produced the SCR file or filepath pointing to the SCR file.
            sigma_inputs: List of :class:`AbinitInput` for the self-energy runs.
            workdir: Working directory of the calculation.
            manager: :class:`TaskManager` object.
        """
        # Cast to node instances.
        wfk_node = Node.as_node(wfk_node)
        scr_node = Node.as_node(scr_node)

        super(SigmaConvWork, self).__init__(workdir=workdir, manager=manager)

        # Accept a single input as well as a list of inputs.
        if not isinstance(sigma_inputs, (list, tuple)):
            sigma_inputs = [sigma_inputs]

        # Register one sigma task per input; each depends on the WFK and SCR nodes.
        for sigma_input in sigma_inputs:
            self.register_sigma_task(sigma_input, deps={wfk_node: "WFK", scr_node: "SCR"})
class BseMdfWork(Work):
    """
    Work for simple BSE calculations in which the self-energy corrections
    are approximated by the scissors operator and the screening is modeled
    with the model dielectric function.
    """
    def __init__(self, scf_input, nscf_input, bse_inputs, workdir=None, manager=None):
        """
        Args:
            scf_input: Input for the SCF run.
            nscf_input: Input for the NSCF run.
            bse_inputs: List of Inputs for the BSE run.
            workdir: Working directory of the calculation.
            manager: :class:`TaskManager`.
        """
        super(BseMdfWork, self).__init__(workdir=workdir, manager=manager)

        # Register the GS-SCF run.
        self.scf_task = self.register_scf_task(scf_input)

        # Construct the input for the NSCF run.
        self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})

        # Construct the input(s) for the BSE run; each one depends on the NSCF wavefunctions.
        if not isinstance(bse_inputs, (list, tuple)):
            bse_inputs = [bse_inputs]

        for bse_input in bse_inputs:
            self.register_bse_task(bse_input, deps={self.nscf_task: "WFK"})

    def get_mdf_robot(self):
        """Builds and returns a :class:`MdfRobot` for analyzing the results in the MDF files."""
        # NOTE(review): `abilab.robots` looks suspicious — other helpers in this module
        # import from the `abipy` package; confirm this import path is correct.
        from abilab.robots import MdfRobot
        robot = MdfRobot()
        # Skip the first two tasks (SCF + NSCF); the remaining ones are BSE tasks.
        for task in self[2:]:
            mdf_path = task.outdir.has_abiext(robot.EXT)
            if mdf_path:
                robot.add_file(str(task), mdf_path)
        return robot

    #def plot_conv_mdf(self, **kwargs)
    #    with self.get_mdf_robot() as robot:
    #        robot.get_mdf_plooter()
    #    plotter.plot(**kwargs)
class QptdmWork(Work):
    """
    This work parallelizes the calculation of the q-points of the screening.
    It also provides the callback `on_all_ok` that calls mrgscr to merge
    all the partial screening files produced.
    """
    def create_tasks(self, wfk_file, scr_input):
        """
        Create the SCR tasks and register them in self.

        Args:
            wfk_file: Path to the ABINIT WFK file to use for the computation of the screening.
            scr_input: Input for the screening calculation.
        """
        assert len(self) == 0
        wfk_file = self.wfk_file = os.path.abspath(wfk_file)

        # Build a temporary work in the tmpdir that will use a shell manager
        # to run ABINIT in order to get the list of q-points for the screening.
        shell_manager = self.manager.to_shell_manager(mpi_procs=1)
        w = Work(workdir=self.tmpdir.path_join("_qptdm_run"), manager=shell_manager)

        fake_input = scr_input.deepcopy()
        fake_task = w.register(fake_input)
        w.allocate()
        w.build()

        # Create the symbolic link and add the magic value
        # nqptdm = -1 to the input to get the list of q-points.
        fake_task.inlink_file(wfk_file)
        fake_task.set_vars({"nqptdm": -1})
        fake_task.start_and_wait()

        # Parse the netcdf file produced by the fake run to get the q-points.
        with NetcdfReader(fake_task.outdir.has_abiext("qptdms.nc")) as reader:
            qpoints = reader.read_value("reduced_coordinates_of_kpoints")
        #print(qpoints)
        #w.rmtree()

        # Now we can register the task for the different q-points
        for qpoint in qpoints:
            qptdm_input = scr_input.deepcopy()
            qptdm_input.set_vars(nqptdm=1, qptdm=qpoint)
            new_task = self.register_scr_task(qptdm_input, manager=self.manager)
            # Add the garbage collector.
            if self.flow.gc is not None:
                new_task.set_gc(self.flow.gc)

        self.allocate()

    def merge_scrfiles(self, remove_scrfiles=True):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgscr` in sequential on the local machine to produce
        the final SCR file in the outdir of the `Work`.
        If remove_scrfiles is True, the partial SCR files are removed after the merge.
        """
        scr_files = list(filter(None, [task.outdir.has_abiext("SCR") for task in self]))
        logger.debug("will call mrgscr to merge %s:\n" % str(scr_files))
        assert len(scr_files) == len(self)

        mrgscr = wrappers.Mrgscr(manager=self[0].manager, verbose=1)
        final_scr = mrgscr.merge_qpoints(self.outdir.path, scr_files, out_prefix="out")

        if remove_scrfiles:
            for scr_file in scr_files:
                try:
                    os.remove(scr_file)
                except IOError:
                    # Best effort: the partial file may already be gone.
                    pass

        return final_scr

    #@check_spectator
    def on_all_ok(self):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgscr` in sequential on the local machine to produce
        the final SCR file in the outdir of the `Work`.
        """
        final_scr = self.merge_scrfiles()
        return self.Results(node=self, returncode=0, message="mrgscr done", final_scr=final_scr)
@deprecated(message="This class is deprecated and will be removed in pymatgen 4.0. Use PhononWork")
def build_oneshot_phononwork(scf_input, ph_inputs, workdir=None, manager=None, work_class=None):
    """
    Returns a work for the computation of phonon frequencies

    ph_inputs is a list of input for Phonon calculation in which all the independent perturbations
    are explicitly computed i.e.

        * rfdir 1 1 1
        * rfatpol 1 natom

    Args:
        scf_input: Input for the GS-SCF run.
        ph_inputs: Single input (or list of inputs) for the phonon calculation(s).
        workdir: Working directory.
        manager: :class:`TaskManager` object.
        work_class: Work subclass to instantiate; defaults to OneShotPhononWork.

    .. warning::

        This work is mainly used for simple calculations, e.g. convergence studies.
        Use :class:`PhononWork` for better efficiency.
    """
    work_class = OneShotPhononWork if work_class is None else work_class
    work = work_class(workdir=workdir, manager=manager)
    scf_task = work.register_scf_task(scf_input)

    ph_inputs = [ph_inputs] if not isinstance(ph_inputs, (list, tuple)) else ph_inputs
    for phinp in ph_inputs:
        # Check rfdir and rfatpol: all 3 directions and all atoms must be perturbed.
        rfdir = np.array(phinp.get("rfdir", [0, 0, 0]))
        if len(rfdir) != 3 or any(rfdir != (1, 1, 1)):
            raise ValueError("Expecting rfdir == (1, 1, 1), got %s" % rfdir)
        rfatpol = np.array(phinp.get("rfatpol", [1, 1]))
        if len(rfatpol) != 2 or any(rfatpol != (1, len(phinp.structure))):
            raise ValueError("Expecting rfatpol == (1, natom), got %s" % rfatpol)
        # cannot use PhononTask here because the Task is not able to deal with multiple phonon calculations
        ph_task = work.register(phinp, deps={scf_task: "WFK"})

    return work
class OneShotPhononWork(Work):
    """
    Simple and very inefficient work for the computation of the phonon frequencies
    It consists of a GS task and a DFPT calculations for all the independent perturbations.
    The main advantage is that one has direct access to the phonon frequencies that
    can be computed at the end of the second task without having to call anaddb.

    Use ``build_oneshot_phononwork`` to construct this work from the input files.
    """
    # NOTE(review): this decorator deprecates read_phonons only, although the message
    # speaks of the whole class (cf. build_oneshot_phononwork) — confirm placement.
    @deprecated(message="This class is deprecated and will be removed in pymatgen 4.0. Use PhononWork")
    def read_phonons(self):
        """
        Read phonon frequencies from the output file.

        Return:
            List of namedtuples. Each `namedtuple` has the following attributes:

                - qpt: ndarray with the q-point in reduced coordinates.
                - freqs: ndarray with 3 x Natom phonon frequencies in meV
        """
        # Example of the text block parsed below:
        #
        #  Phonon wavevector (reduced coordinates) :  0.00000  0.00000  0.00000
        # Phonon energies in Hartree :
        #   1.089934E-04  4.990512E-04  1.239177E-03  1.572715E-03  1.576801E-03
        #   1.579326E-03
        # Phonon frequencies in cm-1    :
        # -  2.392128E+01  1.095291E+02  2.719679E+02  3.451711E+02  3.460677E+02
        # -  3.466221E+02
        BEGIN = " Phonon wavevector (reduced coordinates) :"
        END = " Phonon frequencies in cm-1 :"

        # First task is the GS run; the remaining ones are the phonon tasks.
        ph_tasks, qpts, phfreqs = self[1:], [], []

        for task in ph_tasks:
            # Parse output file.
            # NOTE(review): `omegas` is only bound after a BEGIN line has been seen;
            # a file containing END but no BEGIN would raise NameError below — confirm.
            with open(task.output_file.path, "r") as fh:
                qpt, inside = None, 0
                for line in fh:
                    if line.startswith(BEGIN):
                        qpts.append([float(s) for s in line[len(BEGIN):].split()])
                        inside, omegas = 1, []
                    elif line.startswith(END):
                        break
                    elif inside:
                        # Skip the "Phonon energies in Hartree" header line, then
                        # accumulate the frequency values (Hartree).
                        inside += 1
                        if inside > 2:
                            omegas.extend((float(s) for s in line.split()))
                else:
                    # for-else: runs when the loop finishes without break, i.e. END was never found.
                    raise ValueError("Cannot find %s in file %s" % (END, task.output_file.path))
            phfreqs.append(omegas)

        # Use namedtuple to store q-point and frequencies in meV
        phonon = collections.namedtuple("phonon", "qpt freqs")
        return [phonon(qpt=qpt, freqs=freqs_meV) for qpt, freqs_meV in zip(qpts, EnergyArray(phfreqs, "Ha").to("meV") )]

    def get_results(self, **kwargs):
        """Add the parsed phonon frequencies to the results produced by the base class."""
        results = super(OneShotPhononWork, self).get_results()
        phonons = self.read_phonons()
        results.update(phonons=phonons)

        return results
class MergeDdb(object):
    """Mixin class for Works that have to merge the DDB files produced by the tasks."""

    def merge_ddb_files(self):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgddb` in sequential on the local machine to produce
        the final DDB file in the outdir of the `Work`.

        Returns:
            path to the output DDB file

        Raises:
            RuntimeError: if no DDB file produced by a DfptTask is found.
        """
        ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in self
                                       if isinstance(task, DfptTask)]))

        self.history.info("Will call mrgddb to merge %s:\n" % str(ddb_files))
        # DDB files are always produced so this should never happen!
        if not ddb_files:
            # Fix: the original format string lacked the %s placeholder, so the
            # `% self` interpolation raised TypeError instead of this RuntimeError.
            raise RuntimeError("Cannot find any DDB file to merge by the task of %s" % self)

        # Final DDB file will be produced in the outdir of the work.
        out_ddb = self.outdir.path_in("out_DDB")
        if len(ddb_files) == 1:
            # Avoid the merge. Just copy the DDB file to the outdir of the work.
            shutil.copy(ddb_files[0], out_ddb)
        else:
            # Call mrgddb
            desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
            mrgddb = wrappers.Mrgddb(manager=self[0].manager, verbose=0)
            mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc)

        return out_ddb
class PhononWork(Work, MergeDdb):
    """
    This work usually consists of one GS + nirred Phonon tasks where nirred is
    the number of irreducible perturbations for a given q-point.
    It provides the callback method (on_all_ok) that calls mrgddb to merge the partial DDB files produced
    """

    @classmethod
    def from_scf_task(cls, scf_task, qpoints, tolerance=None, manager=None):
        """
        Construct a `PhononWork` from a :class:`ScfTask` object.
        The input file for phonons is automatically generated from the input of the ScfTask.
        Each phonon task depends on the WFK file produced by scf_task.

        Args:
            scf_task: ScfTask object.
            qpoints: q-points in reduced coordinates. Accepts single q-point or list of q-points
            tolerance: dict {varname: value} with the tolerance to be used in the DFPT run.
                Defaults to {"tolvrs": 1.0e-10}.
            manager: :class:`TaskManager` object.
        """
        if not isinstance(scf_task, ScfTask):
            raise TypeError("task %s does not inherit from ScfTask" % scf_task)

        # Normalize to a (nqpt, 3) array so a single q-point is also accepted.
        qpoints = np.reshape(qpoints, (-1,3))

        new = cls(manager=manager)
        for qpt in qpoints:
            # One input (hence one task) per irreducible perturbation at this q-point.
            multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=tolerance)
            for ph_inp in multi:
                new.register_phonon_task(ph_inp, deps={scf_task: "WFK"})

        return new

    @classmethod
    def from_scf_input(cls, scf_input, qpoints, tolerance=None, manager=None):
        """
        Similar to `from_scf_task`, the difference is that this method requires
        an input for SCF calculation instead of a ScfTask. All the tasks (Scf + Phonon)
        are packed in a single Work whereas in the previous case we usually have multiple works.
        """
        qpoints = np.reshape(qpoints, (-1,3))

        new = cls(manager=manager)
        # The SCF task is registered inside this same work.
        scf_task = new.register_scf_task(scf_input)

        for qpt in qpoints:
            multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=tolerance)
            for ph_inp in multi:
                new.register_phonon_task(ph_inp, deps={scf_task: "WFK"})

        return new

    def merge_pot1_files(self):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgdvdb` in sequential on the local machine to produce
        the final DVDB file in the outdir of the `Work`.

        Returns:
            path to the output DVDB file. None if no DFPT POT file is found.
        """
        # Collect the first-order potential files produced by the DFPT tasks.
        pot1_files = []
        for task in self:
            if not isinstance(task, DfptTask): continue
            pot1_files.extend(task.outdir.list_filepaths(wildcard="*_POT*"))

        # prtpot=0 disables the output of the DFPT POT files so an empty list is not fatal here.
        if not pot1_files: return None

        self.history.info("Will call mrgdvdb to merge %s:\n" % str(pot1_files))

        # Final DVDB file will be produced in the outdir of the work.
        out_dvdb = self.outdir.path_in("out_DVDB")

        if len(pot1_files) == 1:
            # Avoid the merge. Just copy the POT file to the outdir of the work.
            shutil.copy(pot1_files[0], out_dvdb)
        else:
            mrgdvdb = wrappers.Mrgdvdb(manager=self[0].manager, verbose=0)
            mrgdvdb.merge(self.outdir.path, pot1_files, out_dvdb)

        return out_dvdb

    #@check_spectator
    def on_all_ok(self):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgddb` in sequential on the local machine to produce
        the final DDB file in the outdir of the `Work`.
        """
        # Merge DDB files.
        out_ddb = self.merge_ddb_files()

        # Merge DVDB files.
        out_dvdb = self.merge_pot1_files()

        results = self.Results(node=self, returncode=0, message="DDB merge done")
        results.register_gridfs_files(DDB=(out_ddb, "t"))

        return results
class BecWork(Work, MergeDdb):
    """
    Work for the computation of the Born effective charges.

    This work consists of DDK tasks and phonon + electric field perturbation
    It provides the callback method (on_all_ok) that calls mrgddb to merge the
    partial DDB files produced by the work.
    """

    @classmethod
    def from_scf_task(cls, scf_task, ddk_tolerance=None):
        """
        Build a BecWork from a ground-state task.

        Args:
            scf_task: :class:`ScfTask` that produced the WFK file.
            ddk_tolerance: Optional tolerance dict forwarded to make_ddk_inputs.
        """
        # NOTE(review): the error message mentions GsTask but the check is against ScfTask.
        if not isinstance(scf_task, ScfTask):
            raise TypeError("task %s does not inherit from GsTask" % scf_task)

        new = cls() #manager=scf_task.manager)

        # DDK calculations
        multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)

        ddk_tasks = []
        for ddk_inp in multi_ddk:
            ddk_task = new.register_ddk_task(ddk_inp, deps={scf_task: "WFK"})
            ddk_tasks.append(ddk_task)

        # Build the list of inputs for electric field perturbation and phonons
        # Each bec task is connected to all the previous DDK task and to the scf_task.
        bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
        bec_deps.update({scf_task: "WFK"})

        bec_inputs = scf_task.input.make_bec_inputs() #tolerance=efile
        for bec_inp in bec_inputs:
            new.register_bec_task(bec_inp, deps=bec_deps)

        return new

    def on_all_ok(self):
        """
        This method is called when all the tasks reach S_OK.
        It runs `mrgddb` in sequential on the local machine to produce
        the final DDB file in the outdir of the `Work`.
        """
        # Merge DDB files.
        out_ddb = self.merge_ddb_files()

        results = self.Results(node=self, returncode=0, message="DDB merge done")
        results.register_gridfs_files(DDB=(out_ddb, "t"))

        return results
| aykol/pymatgen | pymatgen/io/abinit/works.py | Python | mit | 53,283 | [
"ABINIT",
"Gaussian",
"NetCDF",
"pymatgen"
] | bd20da71957810dbb83240aed91f4243fee3754a77d913e4aeb8e82b6813530f |
#### PATTERN | DE | INFLECT ##############################################
# -*- coding: utf-8 -*-
# Copyright (c) 2012 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
##########################################################################
# Regular expressions-based rules for German word inflection:
# - pluralization and singularization of nouns and adjectives,
# - conjugation of verbs,
# - attributive and predicative of adjectives,
# - comparative and superlative of adjectives.
# Accuracy (measured on CELEX German morphology word forms):
# 75% for gender()
# 72% for pluralize()
# 84% for singularize() (for nominative)
# 87% for Verbs.find_lemma()
# 87% for Verbs.find_lexeme()
# 98% for predicative
import os
import sys
import re
# Directory of this module; used below to make the top-level package importable.
try:
    MODULE = os.path.dirname(os.path.realpath(__file__))
except NameError:
    # __file__ is undefined in some embedded/frozen interpreters; fall back to "".
    # (Fix: narrowed the original bare `except:`, which also swallowed
    # SystemExit and KeyboardInterrupt.)
    MODULE = ""
sys.path.insert(0, os.path.join(MODULE, "..", "..", "..", ".."))
from pattern.text import Verbs as _Verbs
from pattern.text import (
INFINITIVE, PRESENT, PAST, FUTURE,
FIRST, SECOND, THIRD,
SINGULAR, PLURAL, SG, PL,
INDICATIVE, IMPERATIVE, SUBJUNCTIVE,
PROGRESSIVE,
PARTICIPLE, GERUND
)
sys.path.pop(0)
# Penn-style part-of-speech tags used throughout this module.
VERB, NOUN, ADJECTIVE, ADVERB = "VB", "NN", "JJ", "RB"

# NOTE(review): only plain lowercase vowels; umlauts (ä/ö/ü) and uppercase input
# are not covered by is_vowel — confirm callers lowercase their input first.
VOWELS = "aeiouy"
re_vowel = re.compile(r"a|e|i|o|u|y", re.I)


def is_vowel(ch):
    """Return True if ch is one of the lowercase vowels "aeiouy"."""
    # Fix: was a lambda assignment; PEP 8 prefers def for named functions.
    return ch in VOWELS
#### ARTICLE #############################################################
# German inflection of depends on gender, role and number + the determiner
# (if any).
# Inflection gender.
# Masculine is the most common, so it is the default for all functions.
MASCULINE, FEMININE, NEUTER, PLURAL = \
    MALE, FEMALE, NEUTRAL, PLURAL = \
        M, F, N, PL = "m", "f", "n", "p"

# Inflection role (case):
# - nom = subject, acc = direct object, dat = indirect object, gen = possession.
NOMINATIVE, ACCUSATIVE, DATIVE, GENITIVE = SUBJECT, OBJECT, INDIRECT, PROPERTY = \
    "nominative", "accusative", "dative", "genitive"

# Definite article ("the") keyed on (gender, role).
article_definite = {
    ("m", "nom"): "der", ("f", "nom"): "die", ("n", "nom"): "das", ("p", "nom"): "die",
    ("m", "acc"): "den", ("f", "acc"): "die", ("n", "acc"): "das", ("p", "acc"): "die",
    ("m", "dat"): "dem", ("f", "dat"): "der", ("n", "dat"): "dem", ("p", "dat"): "den",
    ("m", "gen"): "des", ("f", "gen"): "der", ("n", "gen"): "des", ("p", "gen"): "der",
}

# Indefinite article ("a/an") keyed on (gender, role).
article_indefinite = {
    ("m", "nom"): "ein", ("f", "nom"): "eine", ("n", "nom"): "ein", ("p", "nom"): "eine",
    ("m", "acc"): "einen", ("f", "acc"): "eine", ("n", "acc"): "ein", ("p", "acc"): "eine",
    ("m", "dat"): "einem", ("f", "dat"): "einer", ("n", "dat"): "einem", ("p", "dat"): "einen",
    ("m", "gen"): "eines", ("f", "gen"): "einer", ("n", "gen"): "eines", ("p", "gen"): "einer",
}


def definite_article(word, gender=MALE, role=SUBJECT):
    """Returns the definite article (der/die/das/die) for a given word."""
    # Accept long forms ("masculine", "nominative") by truncating to the dict keys.
    key = (gender[:1].lower(), role[:3].lower())
    return article_definite.get(key)


def indefinite_article(word, gender=MALE, role=SUBJECT):
    """Returns the indefinite article (ein) for a given word."""
    key = (gender[:1].lower(), role[:3].lower())
    return article_indefinite.get(key)

DEFINITE = "definite"
INDEFINITE = "indefinite"


def article(word, function=INDEFINITE, gender=MALE, role=SUBJECT):
    """Returns the indefinite (ein) or definite (der/die/das/die) article for
    the given word."""
    if function == DEFINITE:
        chosen = definite_article(word, gender, role)
        # Preserve the original `and/or` semantics: fall through to the
        # indefinite article if the definite lookup yields a falsy result.
        if chosen:
            return chosen
    return indefinite_article(word, gender, role)

_article = article


def referenced(word, article=INDEFINITE, gender=MALE, role=SUBJECT):
    """Returns a string with the article + the word."""
    return "%s %s" % (_article(word, article, gender, role), word)
#### GENDER ##############################################################
# Noun suffixes that reliably indicate grammatical gender (used first).
gender_masculine = (
    "ant", "ast", "ich", "ig", "ismus", "ling", "or", "us"
)
gender_feminine = (
    "a", "anz", "ei", "enz", "heit", "ie", "ik", "in", "keit", "schaf", "sion", "sis",
    u"tät", "tion", "ung", "ur"
)
gender_neuter = (
    "chen", "icht", "il", "it", "lein", "ma", "ment", "tel", "tum", "um", "al", "an", "ar",
    u"ät", "ent", "ett", "ier", "iv", "o", "on", "nis", "sal"
)
# Fallback for gender(): two-letter word endings mapped to the gender they
# most often co-occur with ("majority vote"), consulted only when none of
# the suffix rules above matched.
gender_majority_vote = {
    MASCULINE: (
        "ab", "af", "ag", "ak", "am", "an", "ar", "at", "au", "ch", "ck", "eb", "ef", "eg",
        "el", "er", "es", "ex", "ff", "go", "hn", "hs", "ib", "if", "ig", "ir", "kt", "lf",
        "li", "ll", "lm", "ls", "lt", "mi", "nd", "nk", "nn", "nt", "od", "of", "og", "or",
        "pf", "ph", "pp", "ps", "rb", "rd", "rf", "rg", "ri", "rl", "rm", "rr", "rs", "rt",
        "rz", "ss", "st", "tz", "ub", "uf", "ug", "uh", "un", "us", "ut", "xt", "zt"
    ),
    FEMININE: (
        "be", "ce", "da", "de", "dt", "ee", "ei", "et", "eu", "fe", "ft", "ge", "he", "hr",
        "ht", "ia", "ie", "ik", "in", "it", "iz", "ka", "ke", "la", "le", "me", "na", "ne",
        "ng", "nz", "on", "pe", "ra", "re", "se", "ta", "te", "ue", "ur", "ve", "ze"
    ),
    NEUTER: (
        "ad", "al", "as", "do", "ed", "eh", "em", "en", "hl", "id", "il", "im", "io", "is",
        "iv", "ix", "ld", "lk", "lo", "lz", "ma", "md", "mm", "mt", "no", "ns", "ol", "om",
        "op", "os", "ot", "pt", "rk", "rn", "ro", "to", "tt", "ul", "um", "uz"
    )
}
def gender(word, pos=NOUN):
    """Returns the gender (MALE, FEMALE or NEUTRAL) for nouns (majority vote).
    Returns None for words that are not nouns.
    """
    w = word.lower()
    if pos != NOUN:
        return None
    # Reliable suffix rules first (baseline = 32%), in the original
    # masculine -> feminine -> neuter order.
    for suffixes, g in ((gender_masculine, MASCULINE),
                        (gender_feminine, FEMININE),
                        (gender_neuter, NEUTER)):
        if w.endswith(suffixes):
            return g
    # Fall back to a majority vote on the word's final two characters.
    for g, suffixes in gender_majority_vote.items():
        if w.endswith(suffixes):
            return g
    return None
#### PLURALIZE ###########################################################
# Ordered (singular suffix, plural suffix) replacement pairs for nouns.
# pluralize() scans this list front-to-back and applies the FIRST suffix
# that matches, so keep the existing order when editing.
plural_inflections = [
    ("aal", u"äle"), ("aat", "aaten"), ("abe", "aben"), ("ach", u"ächer"), ("ade", "aden"),
    ("age", "agen"), ("ahn", "ahnen"), ("ahr", "ahre"), ("akt", "akte"), ("ale", "alen"),
    ("ame", "amen"), ("amt", u"ämter"), ("ane", "anen"), ("ang", u"änge"), ("ank", u"änke"),
    ("ann", u"änner"), ("ant", "anten"), ("aph", "aphen"), ("are", "aren"), ("arn", "arne"),
    ("ase", "asen"), ("ate", "aten"), ("att", u"ätter"), ("atz", u"ätze"),
    # NOTE(review): "äume" below lacks the u"" prefix used elsewhere in this
    # table; harmless on Python 3, but inconsistent — confirm for Python 2.
    ("aum", "äume"),
    ("aus", u"äuser"), ("bad", u"bäder"), ("bel", "bel"), ("ben", "ben"), ("ber", "ber"),
    ("bot", "bote"), ("che", "chen"), ("chs", "chse"), ("cke", "cken"), ("del", "del"),
    ("den", "den"), ("der", "der"), ("ebe", "ebe"), ("ede", "eden"), ("ehl", "ehle"),
    ("ehr", "ehr"), ("eil", "eile"), ("eim", "eime"), ("eis", "eise"), ("eit", "eit"),
    ("ekt", "ekte"), ("eld", "elder"), ("ell", "elle"), ("ene", "enen"), ("enz", "enzen"),
    ("erd", "erde"), ("ere", "eren"), ("erk", "erke"), ("ern", "erne"), ("ert", "erte"),
    ("ese", "esen"), ("ess", "esse"), ("est", "este"), ("etz", "etze"), ("eug", "euge"),
    ("eur", "eure"), ("fel", "fel"), ("fen", "fen"), ("fer", "fer"), ("ffe", "ffen"),
    ("gel", "gel"), ("gen", "gen"), ("ger", "ger"), ("gie", "gie"), ("hen", "hen"),
    ("her", "her"), ("hie", "hien"), ("hle", "hlen"), ("hme", "hmen"), ("hne", "hnen"),
    ("hof", u"höfe"), ("hre", "hren"), ("hrt", "hrten"), ("hse", "hsen"), ("hte", "hten"),
    ("ich", "iche"), ("ick", "icke"), ("ide", "iden"), ("ieb", "iebe"), ("ief", "iefe"),
    ("ieg", "iege"), ("iel", "iele"), ("ien", "ium"), ("iet", "iete"), ("ife", "ifen"),
    ("iff", "iffe"), ("ift", "iften"), ("ige", "igen"), ("ika", "ikum"), ("ild", "ilder"),
    ("ilm", "ilme"), ("ine", "inen"), ("ing", "inge"), ("ion", "ionen"), ("ise", "isen"),
    ("iss", "isse"), ("ist", "isten"), ("ite", "iten"), ("itt", "itte"), ("itz", "itze"),
    ("ium", "ium"), ("kel", "kel"), ("ken", "ken"), ("ker", "ker"), ("lag", u"läge"),
    ("lan", u"läne"), ("lar", "lare"), ("lei", "leien"), ("len", "len"), ("ler", "ler"),
    ("lge", "lgen"), ("lie", "lien"), ("lle", "llen"), ("mel", "mel"), ("mer", "mer"),
    ("mme", "mmen"), ("mpe", "mpen"), ("mpf", "mpfe"), ("mus", "mus"), ("mut", "mut"),
    ("nat", "nate"), ("nde", "nden"), ("nen", "nen"), ("ner", "ner"), ("nge", "ngen"),
    ("nie", "nien"), ("nis", "nisse"), ("nke", "nken"), ("nkt", "nkte"), ("nne", "nnen"),
    ("nst", "nste"), ("nte", "nten"), ("nze", "nzen"), ("ock", u"öcke"), ("ode", "oden"),
    ("off", "offe"), ("oge", "ogen"), ("ohn", u"öhne"), ("ohr", "ohre"), ("olz", u"ölzer"),
    ("one", "onen"), ("oot", "oote"), ("opf", u"öpfe"), ("ord", "orde"), ("orm", "ormen"),
    ("orn", u"örner"), ("ose", "osen"), ("ote", "oten"), ("pel", "pel"), ("pen", "pen"),
    # NOTE(review): u"raün" below looks garbled (Frau => Frauen would suggest
    # "rauen"); it mirrors the (u"raün", "rau") entry in singular_inflections,
    # so it is left untouched — confirm against the upstream training data.
    ("per", "per"), ("pie", "pien"), ("ppe", "ppen"), ("rag", u"räge"), ("rau", u"raün"),
    ("rbe", "rben"), ("rde", "rden"), ("rei", "reien"), ("rer", "rer"), ("rie", "rien"),
    ("rin", "rinnen"), ("rke", "rken"), ("rot", "rote"), ("rre", "rren"), ("rte", "rten"),
    ("ruf", "rufe"), ("rzt", "rzte"), ("sel", "sel"), ("sen", "sen"), ("ser", "ser"),
    ("sie", "sien"), ("sik", "sik"), ("sse", "ssen"), ("ste", "sten"), ("tag", "tage"),
    ("tel", "tel"), ("ten", "ten"), ("ter", "ter"), ("tie", "tien"), ("tin", "tinnen"),
    ("tiv", "tive"), ("tor", "toren"), ("tte", "tten"), ("tum", "tum"), ("tur", "turen"),
    ("tze", "tzen"), ("ube", "uben"), ("ude", "uden"), ("ufe", "ufen"), ("uge", "ugen"),
    ("uhr", "uhren"), ("ule", "ulen"), ("ume", "umen"), ("ung", "ungen"), ("use", "usen"),
    ("uss", u"üsse"), ("ute", "uten"), ("utz", "utz"), ("ver", "ver"), ("weg", "wege"),
    ("zer", "zer"), ("zug", u"züge"), (u"ück", u"ücke")
]
def pluralize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom=None):
    """Returns the plural of a given word.
    The inflection is based on probability rather than gender and role.

    word:   the (singular) word to inflect.
    pos:    part-of-speech tag; only NOUN is inflected, anything else is
            returned lowercased + capitalized but otherwise unchanged.
    gender, role: accepted for API symmetry; not used by the heuristics.
    custom: optional {word: plural} overrides, checked before any rule.
    """
    # None sentinel instead of the original mutable {} default, so no dict
    # is shared between calls (behavior for all existing callers unchanged).
    if custom is None:
        custom = {}
    w = word.lower().capitalize()
    # Custom dictionary of known plurals takes precedence.
    if word in custom:
        return custom[word]
    if pos == NOUN:
        # Known suffix -> plural-suffix replacements; first match wins.
        for a, b in plural_inflections:
            if w.endswith(a):
                return w[:-len(a)] + b
        # Default rules (baseline = 69%).
        if w.startswith("ge"):
            return w
        if w.endswith("gie"):
            return w
        if w.endswith("e"):
            return w + "n"
        if w.endswith("ien"):
            return w[:-2] + "um"
        # Suffixes whose plural is identical to the singular.
        if w.endswith(("au", "ein", "eit", "er", "en", "el", "chen", "mus", u"tät", "tik", "tum", "u")):
            return w
        if w.endswith(("ant", "ei", "enz", "ion", "ist", "or", "schaft", "tur", "ung")):
            return w + "en"
        if w.endswith("in"):
            return w + "nen"
        if w.endswith("nis"):
            return w + "se"
        if w.endswith(("eld", "ild", "ind")):
            return w + "er"
        if w.endswith("o"):
            return w + "s"
        if w.endswith("a"):
            return w[:-1] + "en"
        # Inflect common umlaut vowels: Kopf => Köpfe.
        if w.endswith(("all", "and", "ang", "ank", "atz", "auf", "ock", "opf", "uch", "uss")):
            umlaut = w[-3]
            umlaut = umlaut.replace("a", u"ä")
            umlaut = umlaut.replace("o", u"ö")
            umlaut = umlaut.replace("u", u"ü")
            return w[:-3] + umlaut + w[-2:] + "e"
        # Remaining umlaut patterns with a longer replacement.
        for a, b in (
                ("ag", u"äge"),
                ("ann", u"änner"),
                ("aum", u"äume"),
                ("aus", u"äuser"),
                ("zug", u"züge")):
            if w.endswith(a):
                return w[:-len(a)] + b
        # Last resort: append -e.
        return w + "e"
    return w
#### SINGULARIZE #########################################################
# Ordered (plural suffix, singular suffix) replacement pairs for nouns.
# singularize() scans front-to-back and applies the FIRST suffix that
# matches, so longer/more specific suffixes deliberately come first;
# keep the existing order when editing.
singular_inflections = [
    ("innen", "in"), (u"täten", u"tät"), ("ahnen", "ahn"), ("enten", "ent"), (u"räser", "ras"),
    ("hrten", "hrt"), (u"ücher", "uch"), (u"örner", "orn"), (u"änder", "and"), (u"ürmer", "urm"),
    ("ahlen", "ahl"), ("uhren", "uhr"), (u"ätter", "att"), ("suren", "sur"), ("chten", "cht"),
    ("kuren", "kur"), ("erzen", "erz"), (u"güter", "gut"), ("soren", "sor"), (u"änner", "ann"),
    (u"äuser", "aus"), ("taten", "tat"), ("isten", "ist"), (u"bäder", "bad"), (u"ämter", "amt"),
    ("eiten", "eit"), ("raten", "rat"), ("ormen", "orm"), ("ionen", "ion"), ("nisse", "nis"),
    (u"ölzer", "olz"), ("ungen", "ung"), (u"läser", "las"), (u"ächer", "ach"), ("urten", "urt"),
    ("enzen", "enz"), ("aaten", "aat"), ("aphen", "aph"), (u"öcher", "och"), (u"türen", u"tür"),
    ("sonen", "son"), (u"ühren", u"ühr"), (u"ühner", "uhn"), ("toren", "tor"), (u"örter", "ort"),
    ("anten", "ant"), (u"räder", "rad"), ("turen", "tur"), (u"äuler", "aul"), (u"änze", "anz"),
    ("tten", "tte"), ("mben", "mbe"), (u"ädte", "adt"), ("llen", "lle"), ("ysen", "yse"),
    ("rben", "rbe"), ("hsen", "hse"), (u"raün", "rau"), ("rven", "rve"), ("rken", "rke"),
    (u"ünge", "ung"), (u"üten", u"üte"), ("usen", "use"), ("tien", "tie"), (u"läne", "lan"),
    ("iben", "ibe"), ("ifen", "ife"), ("ssen", "sse"), ("gien", "gie"), ("eten", "ete"),
    ("rden", "rde"), (u"öhne", "ohn"), (u"ärte", "art"), ("ncen", "nce"), (u"ünde", "und"),
    ("uben", "ube"), ("lben", "lbe"), (u"üsse", "uss"), ("agen", "age"), (u"räge", "rag"),
    ("ogen", "oge"), ("anen", "ane"), ("sken", "ske"), ("eden", "ede"), (u"össe", "oss"),
    (u"ürme", "urm"), ("ggen", "gge"), (u"üren", u"üre"), ("nten", "nte"), (u"ühle", u"ühl"),
    (u"änge", "ang"), ("mmen", "mme"), ("igen", "ige"), ("nken", "nke"), (u"äcke", "ack"),
    ("oden", "ode"), ("oben", "obe"), (u"ähne", "ahn"), (u"änke", "ank"), ("inen", "ine"),
    ("seen", "see"), (u"äfte", "aft"), ("ulen", "ule"), (u"äste", "ast"), ("hren", "hre"),
    (u"öcke", "ock"), ("aben", "abe"), (u"öpfe", "opf"), ("ugen", "uge"), ("lien", "lie"),
    (u"ände", "and"), (u"ücke", u"ück"), ("asen", "ase"), ("aden", "ade"), ("dien", "die"),
    ("aren", "are"), ("tzen", "tze"), (u"züge", "zug"), (u"üfte", "uft"), ("hien", "hie"),
    ("nden", "nde"), (u"älle", "all"), ("hmen", "hme"), ("ffen", "ffe"), ("rmen", "rma"),
    ("olen", "ole"), ("sten", "ste"), ("amen", "ame"), (u"höfe", "hof"), (u"üste", "ust"),
    ("hnen", "hne"), (u"ähte", "aht"), ("umen", "ume"), ("nnen", "nne"), ("alen", "ale"),
    ("mpen", "mpe"), ("mien", "mie"), ("rten", "rte"), ("rien", "rie"), (u"äute", "aut"),
    ("uden", "ude"), ("lgen", "lge"), ("ngen", "nge"), ("iden", "ide"), (u"ässe", "ass"),
    ("osen", "ose"), ("lken", "lke"), ("eren", "ere"), (u"üche", "uch"), (u"lüge", "lug"),
    ("hlen", "hle"), ("isen", "ise"), (u"ären", u"äre"), (u"töne", "ton"), ("onen", "one"),
    ("rnen", "rne"), (u"üsen", u"üse"), (u"haün", "hau"), ("pien", "pie"), ("ihen", "ihe"),
    (u"ürfe", "urf"), ("esen", "ese"), (u"ätze", "atz"), ("sien", "sie"), (u"läge", "lag"),
    ("iven", "ive"), (u"ämme", "amm"), (u"äufe", "auf"), ("ppen", "ppe"), ("enen", "ene"),
    ("lfen", "lfe"), (u"äume", "aum"), ("nien", "nie"), ("unen", "une"), ("cken", "cke"),
    # NOTE(review): ("sis", "sen") below maps toward the LONGER form, unlike
    # every neighboring identity pair — possibly a reversed entry (Basen =>
    # Basis would suggest ("sen", "sis")); confirm against upstream data.
    ("oten", "ote"), ("mie", "mie"), ("rie", "rie"), ("sis", "sen"), ("rin", "rin"),
    ("ein", "ein"), ("age", "age"), ("ern", "ern"), ("ber", "ber"), ("ion", "ion"),
    ("inn", "inn"), ("ben", "ben"), (u"äse", u"äse"), ("eis", "eis"), ("hme", "hme"),
    ("iss", "iss"), ("hen", "hen"), ("fer", "fer"), ("gie", "gie"), ("fen", "fen"),
    ("her", "her"), ("ker", "ker"), ("nie", "nie"), ("mer", "mer"), ("ler", "ler"),
    ("men", "men"), ("ass", "ass"), ("ner", "ner"), ("per", "per"), ("rer", "rer"),
    ("mus", "mus"), ("abe", "abe"), ("ter", "ter"), ("ser", "ser"), (u"äle", "aal"),
    ("hie", "hie"), ("ger", "ger"), ("tus", "tus"), ("gen", "gen"), ("ier", "ier"),
    ("ver", "ver"), ("zer", "zer"),
]
# Hard-coded singular exceptions, checked before any suffix rule.
singular = {
    u"Löwen": u"Löwe",
}
def singularize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom=None):
    """Returns the singular of a given word.
    The inflection is based on probability rather than gender and role.

    word:   the (plural) word to inflect.
    pos:    part-of-speech tag; only NOUN is inflected, anything else is
            returned lowercased + capitalized but otherwise unchanged.
    gender, role: accepted for API symmetry; not used by the heuristics.
    custom: optional {word: singular} overrides, checked before any rule.
    """
    # None sentinel instead of the original mutable {} default, so no dict
    # is shared between calls (behavior for all existing callers unchanged).
    if custom is None:
        custom = {}
    w = word.lower().capitalize()
    if word in custom:
        return custom[word]
    # Hard-coded exceptions (e.g. Löwen => Löwe).
    if word in singular:
        return singular[word]
    if pos == NOUN:
        # Known plural-suffix -> singular-suffix replacements; first match wins.
        for a, b in singular_inflections:
            if w.endswith(a):
                return w[:-len(a)] + b
        # Default rule: strip known plural suffixes (baseline = 51%).
        for suffix in ("nen", "en", "n", "e", "er", "s"):
            if w.endswith(suffix):
                w = w[:-len(suffix)]
                break
        # Corrections (these add about 1% accuracy):
        if w.endswith(("rr", "rv", "nz")):
            return w + "e"
        return w
    return w
#### VERB CONJUGATION ####################################################
# The verb table was trained on CELEX and contains the top 2000 most
# frequent verbs.
# Verb prefixes that never separate from the stem (be-stehen => bestand).
prefix_inseparable = (
    "be", "emp", "ent", "er", "ge", "miss", u"über", "unter", "ver", "voll", "wider", "zer"
)
# Verb prefixes that split off in conjugated forms (auf-stehen => stand auf).
prefix_separable = (
    "ab", "an", "auf", "aus", "bei", "durch", "ein", "fort", "mit", "nach", "vor", "weg",
    u"zurück", "zusammen", "zu", "dabei", "daran", "da", "empor", "entgegen", "entlang",
    "fehl", "fest", u"gegenüber", "gleich", "herab", "heran", "herauf", "heraus", "herum",
    # NOTE(review): "weg" below repeats the entry on the first line of this
    # tuple; harmless for startswith() checks, but looks unintentional.
    "her", "hinweg", "hinzu", "hin", "los", "nieder", "statt", "umher", "um", "weg",
    "weiter", "wieder", "zwischen"
) + ( # There are many more...
    "dort", "fertig", "frei", "gut", "heim", "hoch", "klein", "klar", "nahe", "offen", "richtig"
)
# Combined tuple used by find_lemma() for prefix stripping.
prefixes = prefix_inseparable + prefix_separable
def encode_sz(s):
    """Replace each Eszett ligature (ß) in *s* with "ss"."""
    encoded = s.replace(u"ß", "ss")
    return encoded


def decode_sz(s):
    """Replace each "ss" in *s* with an Eszett ligature (ß).

    Not an exact inverse of encode_sz(): words that legitimately contain
    "ss" are converted as well.
    """
    decoded = s.replace("ss", u"ß")
    return decoded
class Verbs(_Verbs):
    """German verb lexicon backed by de-verbs.txt, with rule-based
    fallbacks (find_lemma / find_lexeme) for out-of-vocabulary verbs.
    """

    def __init__(self):
        # format: column indices of de-verbs.txt that make up a lexeme;
        # default: slots filled from another slot when the column is empty.
        # NOTE(review): the exact column semantics come from the data file
        # and pattern.text.Verbs — cannot be verified from this module alone.
        _Verbs.__init__(self, os.path.join(MODULE, "de-verbs.txt"),
                        language="de",
                        format=[0, 1, 2, 3, 4, 5, 8, 17, 18, 19, 20, 21,
                                24, 52, 54, 53, 55, 56, 58, 59, 67, 68, 70, 71],
                        default={6: 4, 22: 20, 57: 55, 60: 58, 69: 67, 72: 70}
                        )

    def find_lemma(self, verb):
        """ Returns the base form of the given inflected verb, using a rule-based approach.
        """
        v = verb.lower()
        # Common prefixes: be-finden and emp-finden probably inflect like
        # finden.
        if not (v.startswith("ge") and v.endswith("t")):  # Probably gerund.
            for prefix in prefixes:
                if v.startswith(prefix) and v[len(prefix):] in self.inflections:
                    return prefix + self.inflections[v[len(prefix):]]
        # Common suffixes: setze nieder => niedersetzen.
        # A verb containing a space carries a separated prefix as its
        # second token; it is re-attached in front of the result below.
        b, suffix = " " in v and v.split()[:2] or (v, "")
        # Infinitive -ln: trommeln.
        if b.endswith(("ln", "rn")):
            return b
        # Lemmatize regular inflections (first matching ending is stripped).
        for x in ("test", "est", "end", "ten", "tet", "en", "et", "te", "st", "e", "t"):
            if b.endswith(x):
                b = b[:-len(x)]
                break
        # Subjunctive: hielte => halten, schnitte => schneiden.
        for x, y in (
            ("ieb", "eib"), ("ied", "eid"), ("ief",
                                             "auf"), ("ieg", "eig"), ("iel", "alt"),
            ("ien", "ein"), ("iess", "ass"), (u"ieß",
                                              u"aß"), ("iff", "eif"), ("iss", "eiss"),
                (u"iß", u"eiß"), ("it", "eid"), ("oss", "iess"), (u"öss", "iess")):
            if b.endswith(x):
                b = b[:-len(x)] + y
                break
        # Undo double substitutions produced by the rules above.
        b = b.replace("eeiss", "eiss")
        b = b.replace("eeid", "eit")
        # Subjunctive: wechselte => wechseln
        if not b.endswith(("e", "l")) and not (b.endswith("er") and len(b) >= 3 and not b[-3] in VOWELS):
            b = b + "e"
        # abknallst != abknalln => abknallen
        if b.endswith(("hl", "ll", "ul", "eil")):
            b = b + "e"
        # Strip ge- from (likely) gerund:
        if b.startswith("ge") and v.endswith("t"):
            b = b[2:]
        # Corrections (these add about 1.5% accuracy):
        if b.endswith(("lnde", "rnde")):
            b = b[:-3]
        if b.endswith(("ae", "al", u"öe", u"üe")):
            b = b.rstrip("e") + "te"
        if b.endswith(u"äl"):
            b = b + "e"
        # Re-attach a separated prefix and the infinitive -n.
        return suffix + b + "n"

    def find_lexeme(self, verb):
        """ For a regular verb (base form), returns the forms using a rule-based approach.
        """
        v = verb.lower()
        # Stem = infinitive minus -en, -ln, -rn.
        # NOTE(review): b0 (and x2 below) are assigned but never used.
        b = b0 = re.sub("en$", "", re.sub("ln$", "l", re.sub("rn$", "r", v)))
        # Split common prefixes.
        x, x1, x2 = "", "", ""
        for prefix in prefix_separable:
            if v.startswith(prefix):
                b, x = b[len(prefix):], prefix
                # x1 is the separated-prefix suffix appended to finite forms.
                x1 = (" " + x).rstrip()
                x2 = x + "ge"
                break
        # Present tense 1sg and subjunctive -el: handeln => ich handle, du
        # handlest.
        pl = b.endswith("el") and b[:-2] + "l" or b
        # Present tense 1pl -el: handeln => wir handeln
        pw = v.endswith(("ln", "rn")) and v or b + "en"
        # Present tense ending in -d or -t gets -e:
        pr = b.endswith(("d", "t")) and b + "e" or b
        # Present tense 2sg gets -st, unless stem ends with -s or -z.
        p2 = pr.endswith(("s", "z")) and pr + "t" or pr + "st"
        # Present participle: spiel + -end, arbeiten + -d:
        pp = v.endswith(("en", "ln", "rn")) and v + "d" or v + "end"
        # Past tense regular:
        pt = encode_sz(pr) + "t"
        # Past participle: haushalten => hausgehalten
        ge = (v.startswith(prefix_inseparable)
              or b.endswith(("r", "t"))) and pt or "ge" + pt
        ge = x and x + "ge" + pt or ge
        # Present subjunctive: stem + -e, -est, -en, -et:
        s1 = encode_sz(pl)
        # Past subjunctive: past (usually with Umlaut) + -e, -est, -en, -et:
        s2 = encode_sz(pt)
        # Construct the lexeme:
        lexeme = a = [
            v,
            pl + "e" + x1, p2 + x1, pr + "t" + x1, pw +
            x1, pr + "t" + x1, pp,  # present
            pt + "e" + x1, pt + "est" + x1, pt + "e" +
            x1, pt + "en" + x1, pt + "et" + x1, ge,  # past
            # imperative
            b + "e" + x1, pr + "t" + x1, x + pw,
            s1 + "e" + x1, s1 + "est" + x1, s1 + "en" +
            x1, s1 + "et" + x1,  # subjunctive I
            s2 + "e" + x1, s2 + "est" + x1, s2 + "en" + x1, s2 +
            "et" + x1  # subjunctive II
        ]
        # Encode Eszett (ß) and attempt to retrieve from the lexicon.
        # Decode Eszett for present and imperative.
        if encode_sz(v) in self:
            a = self[encode_sz(v)]
            a = [decode_sz(v) for v in a[:7]] + a[7:13] + [decode_sz(v)
                                                           for v in a[13:20]] + a[20:]
        # Since the lexicon does not contain imperative for all verbs, don't simply return it.
        # Instead, update the rule-based lexeme with inflections from the
        # lexicon.
        return [a[i] or lexeme[i] for i in range(len(a))]

    def tenses(self, verb, parse=True):
        """Returns a list of possible tenses for the given inflected verb."""
        tenses = _Verbs.tenses(self, verb, parse)
        if len(tenses) == 0:
            # Retry with the separable prefix split off:
            # auswirkte => wirkte aus
            for prefix in prefix_separable:
                if verb.startswith(prefix):
                    tenses = _Verbs.tenses(
                        self, verb[len(prefix):] + " " + prefix, parse)
                    break
        return tenses
# Singleton lexicon instance + convenient module-level aliases.
verbs = Verbs()
conjugate, lemma, lexeme, tenses = \
    verbs.conjugate, verbs.lemma, verbs.lexeme, verbs.tenses
#### ATTRIBUTIVE & PREDICATIVE ###########################################
# Adjective declension endings, keyed on (gender code, first three letters
# of the role), like the article tables above.
# Strong inflection: no article.
adjectives_strong = {
    ("m", "nom"): "er", ("f", "nom"): "e", ("n", "nom"): "es", ("p", "nom"): "e",
    ("m", "acc"): "en", ("f", "acc"): "e", ("n", "acc"): "es", ("p", "acc"): "e",
    ("m", "dat"): "em", ("f", "dat"): "er", ("n", "dat"): "em", ("p", "dat"): "en",
    ("m", "gen"): "en", ("f", "gen"): "er", ("n", "gen"): "en", ("p", "gen"): "er",
}
# Mixed inflection: after indefinite article ein & kein and possessive
# determiners.
adjectives_mixed = {
    ("m", "nom"): "er", ("f", "nom"): "e", ("n", "nom"): "es", ("p", "nom"): "en",
    ("m", "acc"): "en", ("f", "acc"): "e", ("n", "acc"): "es", ("p", "acc"): "en",
    ("m", "dat"): "en", ("f", "dat"): "en", ("n", "dat"): "en", ("p", "dat"): "en",
    ("m", "gen"): "en", ("f", "gen"): "en", ("n", "gen"): "en", ("p", "gen"): "en",
}
# Weak inflection: after definite article.
adjectives_weak = {
    ("m", "nom"): "e", ("f", "nom"): "e", ("n", "nom"): "e", ("p", "nom"): "en",
    ("m", "acc"): "en", ("f", "acc"): "e", ("n", "acc"): "e", ("p", "acc"): "en",
    ("m", "dat"): "en", ("f", "dat"): "en", ("n", "dat"): "en", ("p", "dat"): "en",
    ("m", "gen"): "en", ("f", "gen"): "en", ("n", "gen"): "en", ("p", "gen"): "en",
}
# Uninflected + exceptions.
adjective_attributive = {
    "etwas": "etwas",
    "genug": "genug",
    "viel": "viel",
    "wenig": "wenig"
}
def attributive(adjective, gender=MALE, role=SUBJECT, article=None):
    """For a predicative adjective, returns the attributive form (lowercase).
    In German, the attributive is formed with -e, -em, -en, -er or -es,
    depending on gender (masculine, feminine, neuter or plural) and role
    (nominative, accusative, dative, genitive).
    """
    w = adjective.lower()
    g = gender[:1].lower()
    c = role[:3].lower()
    a = article.lower() if article else None
    # Uninflected exceptions (etwas, genug, viel, wenig).
    if w in adjective_attributive:
        return adjective_attributive[w]
    # No article (or an article-like word that behaves the same way)
    # => strong inflection.
    if a is None \
            or a in ("mir", "dir", "ihm") \
            or a in ("ein", "etwas", "mehr") \
            or a.startswith(("all", "mehrer", "wenig", "viel")):
        return w + adjectives_strong.get((g, c), "")
    # Indefinite article / possessive determiner => mixed inflection.
    if a.startswith(("ein", "kein")) \
            or a.startswith(("mein", "dein", "sein", "ihr", "Ihr", "unser", "euer")):
        return w + adjectives_mixed.get((g, c), "")
    # Definite article and demonstratives => weak inflection.
    if a in ("arm", "alt", "all", "der", "die", "das", "den", "dem", "des") \
            or a.startswith((
                "derselb", "derjenig", "jed", "jeglich", "jen", "manch",
                "dies", "solch", "welch")):
        return w + adjectives_weak.get((g, c), "")
    # Default to strong inflection.
    return w + adjectives_strong.get((g, c), "")
def predicative(adjective):
    """Returns the predicative adjective (lowercase).
    In German, the attributive form preceding a noun is always used:
    "ein kleiner Junge" => strong, masculine, nominative,
    "eine schöne Frau" => mixed, feminine, nominative,
    "der kleine Prinz" => weak, masculine, nominative, etc.
    The predicative is useful for lemmatization.
    """
    word = adjective.lower()
    if len(word) <= 3:
        return word
    for ending in ("em", "en", "er", "es", "e"):
        if not word.endswith(ending):
            continue
        # Strip the declension ending, but never below three characters.
        stem = word[:max(-len(ending), 3 - len(word))]
        if stem.endswith("bl"):    # plausibles => plausibel
            stem = stem[:-1] + "el"
        elif stem.endswith("pr"):  # propres => proper
            stem = stem[:-1] + "er"
        return stem
    return word

#### COMPARATIVE & SUPERLATIVE ###########################################
COMPARATIVE = "er"
SUPERLATIVE = "st"


def grade(adjective, suffix=COMPARATIVE):
    """Returns the comparative or superlative form of the given (inflected)
    adjective."""
    base = predicative(adjective)
    # groß => großt, schön => schönst: drop the superlative "s" after a
    # sibilant stem.
    if suffix == SUPERLATIVE and base.endswith(("s", u"ß")):
        suffix = suffix[1:]
    # Splice the grade suffix between the stem and the original declension:
    # große => großere, schönes => schöneres.
    return adjective[:len(base)] + suffix + adjective[len(base):]


def comparative(adjective):
    """Returns the comparative (-er) of the given adjective."""
    return grade(adjective, COMPARATIVE)


def superlative(adjective):
    """Returns the superlative (-st) of the given adjective."""
    return grade(adjective, SUPERLATIVE)
# print(comparative(u"schönes"))
# print(superlative(u"schönes"))
# print(superlative(u"große"))
| shubhangiKishore/pattern | pattern/text/de/inflect.py | Python | bsd-3-clause | 32,013 | [
"ASE"
] | d897b4bec9d89106e1fa68a685559df2f7ef0951c015edce18c186e9dc41885a |
from crystal_dashboard.dashboards.crystal.projects import tabs as projects_tabs
from django.utils.translation import ugettext_lazy as _
from horizon import tabs
class IndexView(tabs.TabbedTableView):
    """Tabbed index page listing Crystal projects."""
    page_title = _("Projects")
    tab_group_class = projects_tabs.ProjectsTabs
    template_name = 'crystal/projects/index.html'

    def get_data(self, request, context, *args, **kwargs):
        # No extra template context needed yet; pass through unchanged.
        return context
| Crystal-SDS/dashboard | crystal_dashboard/dashboards/crystal/projects/views.py | Python | gpl-3.0 | 457 | [
"CRYSTAL"
] | 20204a2a1211f4b74fc495b2596bf4910cd6f26e955f645815e2f142c0a1d6f7 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from six.moves import range
import itertools
import numpy as np
from numpy.testing import(
dec,
assert_equal,
assert_array_almost_equal,
assert_,
assert_array_equal,
assert_warns,
assert_raises,
)
from nose.plugins.attrib import attr
import warnings
import MDAnalysis
import MDAnalysis as mda
import MDAnalysis.core.selection
from MDAnalysis.lib.distances import distance_array
from MDAnalysis.core.topologyobjects import TopologyGroup
from MDAnalysis.core.selection import Parser
from MDAnalysis import SelectionError
from MDAnalysis.tests.datafiles import (
PSF, DCD,
PRMpbc, TRJpbc_bz2,
PSF_NAMD, PDB_NAMD,
GRO, NUCL, NUCLsel, TPR, XTC,
TRZ_psf, TRZ,
PDB_full,
PDB_icodes,
)
from MDAnalysisTests import parser_not_found, make_Universe
class TestSelectionsCHARMM(object):
    """Selection-language tests against the CHARMM AdK (PSF/DCD) system."""

    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def setUp(self):
        """Set up the standard AdK system in implicit solvent.
        Geometry here is orthogonal
        """
        self.universe = MDAnalysis.Universe(PSF, DCD)

    def tearDown(self):
        self.universe.trajectory.close()
        del self.universe

    def test_segid(self):
        sel = self.universe.select_atoms('segid 4AKE')
        assert_equal(sel.n_atoms, 3341, "failed to select segment 4AKE")
        assert_array_equal(sorted(sel.indices),
                           sorted(self.universe.s4AKE.atoms.indices),
                           "selected segment 4AKE is not the same as auto-generated segment s4AKE")

    def test_protein(self):
        sel = self.universe.select_atoms('protein')
        assert_equal(sel.n_atoms, 3341, "failed to select protein")
        assert_array_equal(sorted(sel.indices),
                           sorted(self.universe.s4AKE.atoms.indices),
                           "selected protein is not the same as auto-generated protein segment s4AKE")

    def test_backbone(self):
        sel = self.universe.select_atoms('backbone')
        assert_equal(sel.n_atoms, 855)

    def test_resid_single(self):
        sel = self.universe.select_atoms('resid 100')
        assert_equal(sel.n_atoms, 7)
        assert_equal(sel.residues.resnames, ['GLY'])

    def test_resid_range(self):
        sel = self.universe.select_atoms('resid 100:105')
        assert_equal(sel.n_atoms, 89)
        assert_equal(sel.residues.resnames,
                     ['GLY', 'ILE', 'ASN', 'VAL', 'ASP', 'TYR'])

    def test_selgroup(self):
        sel = self.universe.select_atoms('not resid 100')
        sel2 = self.universe.select_atoms('not group notr100', notr100=sel)
        assert_equal(sel2.n_atoms, 7)
        assert_equal(sel2.residues.resnames, ['GLY'])

    def test_fullselgroup(self):
        sel1 = self.universe.select_atoms('resid 101')
        sel2 = self.universe.select_atoms('resid 100')
        sel3 = sel1.select_atoms('fullgroup r100', r100=sel2)
        # 'fullgroup' ignores the enclosing selection (sel1), so the result
        # must be the complete r100 group: residue 100 (GLY, 7 atoms).
        # (Fixed: these assertions previously re-checked sel2, which made
        # the test a tautology and left sel3 unused.)
        assert_equal(sel3.n_atoms, 7)
        assert_equal(sel3.residues.resnames, ['GLY'])

    # resnum selections are boring here because we haven't really a mechanism yet
    # to assign the canonical PDB resnums
    def test_resnum_single(self):
        sel = self.universe.select_atoms('resnum 100')
        assert_equal(sel.n_atoms, 7)
        assert_equal(sel.residues.resids, [100])
        assert_equal(sel.residues.resnames, ['GLY'])

    def test_resnum_range(self):
        sel = self.universe.select_atoms('resnum 100:105')
        assert_equal(sel.n_atoms, 89)
        assert_equal(sel.residues.resids, range(100, 106))
        assert_equal(sel.residues.resnames,
                     ['GLY', 'ILE', 'ASN', 'VAL', 'ASP', 'TYR'])

    def test_resname(self):
        sel = self.universe.select_atoms('resname LEU')
        assert_equal(sel.n_atoms, 304,
                     "Failed to find all 'resname LEU' atoms.")
        assert_equal(sel.n_residues, 16,
                     "Failed to find all 'resname LEU' residues.")
        assert_array_equal(sorted(sel.indices),
                           sorted(self.universe.s4AKE.LEU.atoms.indices),
                           "selected 'resname LEU' atoms are not the same as auto-generated s4AKE.LEU")

    def test_name(self):
        sel = self.universe.select_atoms('name CA')
        assert_equal(sel.n_atoms, 214)

    def test_atom(self):
        sel = self.universe.select_atoms('atom 4AKE 100 CA')
        assert_equal(len(sel), 1)
        assert_equal(sel.resnames, ['GLY'])
        assert_array_almost_equal(
            sel.positions,
            np.array([[20.38685226, -3.44224262, -5.92158318]],
                     dtype=np.float32))

    def test_atom_empty(self):
        sel = self.universe.select_atoms('atom 4AKE 100 XX')  # Does not exist
        assert_equal(len(sel), 0)

    def test_type(self):
        sel = self.universe.select_atoms("type 1")
        assert_equal(len(sel), 253)

    def test_and(self):
        sel = self.universe.select_atoms('resname GLY and resid 100')
        assert_equal(len(sel), 7)

    def test_or(self):
        sel = self.universe.select_atoms('resname LYS or resname ARG')
        assert_equal(sel.n_residues, 31)

    def test_not(self):
        sel = self.universe.select_atoms('not backbone')
        assert_equal(len(sel), 2486)

    def test_around(self):
        sel = self.universe.select_atoms('around 4.0 bynum 1943')
        assert_equal(len(sel), 32)

    def test_sphlayer(self):
        sel = self.universe.select_atoms('sphlayer 4.0 6.0 bynum 1281')
        assert_equal(len(sel), 66)

    def test_sphzone(self):
        sel = self.universe.select_atoms('sphzone 6.0 bynum 1281')
        assert_equal(len(sel), 86)

    def test_cylayer(self):
        sel = self.universe.select_atoms('cylayer 4.0 6.0 10 -10 bynum 1281')
        assert_equal(len(sel), 88)

    def test_cyzone(self):
        sel = self.universe.select_atoms('cyzone 6.0 10 -10 bynum 1281')
        assert_equal(len(sel), 166)

    def test_point(self):
        # Cross-check the 'point' selection against an explicit PBC-aware
        # distance calculation.
        ag = self.universe.select_atoms('point 5.0 5.0 5.0 3.5')
        d = distance_array(np.array([[5.0, 5.0, 5.0]], dtype=np.float32),
                           self.universe.atoms.positions,
                           box=self.universe.dimensions)
        idx = np.where(d < 3.5)[1]
        assert_equal(set(ag.indices), set(idx))

    def test_prop(self):
        sel = self.universe.select_atoms('prop y <= 16')
        sel2 = self.universe.select_atoms('prop abs z < 8')
        assert_equal(len(sel), 3194)
        assert_equal(len(sel2), 2001)

    def test_bynum(self):
        "Tests the bynum selection, also from AtomGroup instances (Issue 275)"
        sel = self.universe.select_atoms('bynum 5')
        assert_equal(sel[0].index, 4)
        sel = self.universe.select_atoms('bynum 1:10')
        assert_equal(len(sel), 10)
        assert_equal(sel[0].index, 0)
        assert_equal(sel[-1].index, 9)
        # bynum is 1-based and relative to the whole universe, even when
        # applied to a sub-group.
        subsel = sel.select_atoms('bynum 5')
        assert_equal(subsel[0].index, 4)
        subsel = sel.select_atoms('bynum 2:5')
        assert_equal(len(subsel), 4)
        assert_equal(subsel[0].index, 1)
        assert_equal(subsel[-1].index, 4)

    def test_byres(self):
        sel = self.universe.select_atoms('byres bynum 0:5')
        assert_equal(len(sel), len(self.universe.residues[0].atoms))

    def test_same_resname(self):
        """Test the 'same ... as' construct (Issue 217)"""
        sel = self.universe.select_atoms("same resname as resid 10 or resid 11")
        assert_equal(len(sel), 331,
                     ("Found a wrong number of atoms with same resname as "
                      "resids 10 or 11"))
        target_resids = np.array([ 7, 8, 10, 11, 12, 14, 17, 25, 32, 37, 38,
                                   42, 46, 49, 55, 56, 66, 73, 80, 85, 93, 95,
                                   99, 100, 122, 127, 130, 144, 150, 176, 180,
                                   186, 188, 189, 194, 198, 203, 207, 214])
        assert_array_equal(sel.residues.resids, target_resids,
                           ("Found wrong residues with same resname as "
                            "resids 10 or 11"))

    def test_same_segment(self):
        """Test the 'same ... as' construct (Issue 217)"""
        # Partition the residues over three fresh segments, then check that
        # 'same segment as' recovers each partition exactly.
        SNew_A = self.universe.add_Segment(segid='A')
        SNew_B = self.universe.add_Segment(segid='B')
        SNew_C = self.universe.add_Segment(segid='C')
        self.universe.residues[:100].segments = SNew_A
        self.universe.residues[100:150].segments = SNew_B
        self.universe.residues[150:].segments = SNew_C
        target_resids = np.arange(100) + 1
        sel = self.universe.select_atoms("same segment as resid 10")
        assert_equal(len(sel), 1520, "Found a wrong number of atoms in the same segment of resid 10")
        assert_array_equal(sel.residues.resids,
                           target_resids, "Found wrong residues in the same segment of resid 10")
        target_resids = np.arange(100, 150) + 1
        sel = self.universe.select_atoms("same segment as resid 110")
        assert_equal(len(sel), 797,
                     "Found a wrong number of atoms in the same segment of resid 110")
        assert_array_equal(sel.residues.resids, target_resids,
                           "Found wrong residues in the same segment of resid 110")
        target_resids = np.arange(150, self.universe.atoms.n_residues) + 1
        sel = self.universe.select_atoms("same segment as resid 160")
        assert_equal(len(sel), 1024,
                     "Found a wrong number of atoms in the same segment of resid 160")
        assert_array_equal(sel.residues.resids, target_resids,
                           "Found wrong residues in the same segment of resid 160")

    def test_empty_same(self):
        ag = self.universe.select_atoms('resname MET')
        # No GLY, so 'as resname GLY' is empty
        ag2 = ag.select_atoms('same mass as resname GLY')
        assert_(len(ag2) == 0)

    def test_empty_selection(self):
        """Test that empty selection can be processed (see Issue 12)"""
        # no Trp in AdK
        assert_equal(len(self.universe.select_atoms('resname TRP')), 0)

    def test_parenthesized_expression(self):
        sel = self.universe.select_atoms(
            '( name CA or name CB ) and resname LEU')
        assert_equal(len(sel), 32)

    def test_no_space_around_parentheses(self):
        """Test that no space is needed around parentheses (Issue 43)."""
        # note: will currently be ERROR because it throws a ParseError
        sel = self.universe.select_atoms('(name CA or name CB) and resname LEU')
        assert_equal(len(sel), 32)

    # TODO:
    # test for checking ordering and multiple comma-separated selections
    def test_concatenated_selection(self):
        E151 = self.universe.s4AKE.atoms.select_atoms('resid 151')
        # note that this is not quite phi... HN should be C of prec. residue
        phi151 = E151.atoms.select_atoms('name HN', 'name N', 'name CA', 'name CB')
        assert_equal(len(phi151), 4)
        assert_equal(phi151[0].name, 'HN',
                     "wrong ordering in selection, should be HN-N-CA-CB")

    def test_global(self):
        """Test the `global` modifier keyword (Issue 268)"""
        ag = self.universe.select_atoms("resname LYS and name NZ")
        # Lys amines within 4 angstrom of the backbone.
        ag1 = self.universe.select_atoms(
            "resname LYS and name NZ and around 4 backbone")
        ag2 = ag.select_atoms("around 4 global backbone")
        assert_array_equal(ag2.indices, ag1.indices)
class TestSelectionsAMBER(object):
    """Basic selection keywords on an AMBER topology/trajectory pair."""

    def setUp(self):
        """Set up AMBER system"""
        self.universe = MDAnalysis.Universe(PRMpbc, TRJpbc_bz2)

    def tearDown(self):
        self.universe.trajectory.close()
        del self.universe

    def test_protein(self):
        protein = self.universe.select_atoms('protein')
        assert_equal(protein.n_atoms, 22, "failed to select protein")

    def test_backbone(self):
        backbone = self.universe.select_atoms('backbone')
        assert_equal(backbone.n_atoms, 7)

    def test_type(self):
        # 'type' matches the force-field atom type column.
        hydrogens = self.universe.select_atoms('type HC')
        assert_equal(len(hydrogens), 6)
        assert_equal(hydrogens.names, ['HH31', 'HH32', 'HH33', 'HB1', 'HB2', 'HB3'])
class TestSelectionsNAMD(object):
    """Selection keywords on a NAMD PSF/PDB system with non-standard residues."""

    def setUp(self):
        """Set up NAMD system"""
        self.universe = MDAnalysis.Universe(PSF_NAMD, PDB_NAMD)

    def tearDown(self):
        self.universe.trajectory.close()
        del self.universe

    def test_protein(self):
        # must include non-standard residues
        peptide = self.universe.select_atoms(
            'protein or resname HAO or resname ORT')
        assert_equal(peptide.n_atoms, self.universe.atoms.n_atoms,
                     "failed to select peptide")
        assert_equal(peptide.n_residues, 6,
                     "failed to select all peptide residues")

    def test_resid_single(self):
        residue = self.universe.select_atoms('resid 12')
        assert_equal(residue.n_atoms, 26)
        assert_equal(residue.residues.resnames, ['HAO'])

    def test_type(self):
        hydrogens = self.universe.select_atoms('type H')
        assert_equal(len(hydrogens), 5)
        # note 4th HH
        assert_array_equal(hydrogens.names, ['HN', 'HN', 'HN', 'HH', 'HN'])
class TestSelectionsGRO(object):
    """Selections on a GRO file (implicit types, charges, masses, ...)."""

    def setUp(self):
        """Set up GRO system (implicit types, charges, masses, ...)"""
        self.universe = MDAnalysis.Universe(GRO)

    def tearDown(self):
        # Release the Universe between tests, consistent with the other
        # selection test classes in this module (was previously missing).
        del self.universe

    @dec.slow
    def test_protein(self):
        sel = self.universe.select_atoms('protein')
        assert_equal(sel.n_atoms, 3341, "failed to select protein")

    @dec.slow
    def test_backbone(self):
        sel = self.universe.select_atoms('backbone')
        assert_equal(sel.n_atoms, 855)

    @dec.slow
    def test_resid_single(self):
        sel = self.universe.select_atoms('resid 100')
        assert_equal(sel.n_atoms, 7)
        assert_equal(sel.residues.resnames, ['GLY'])

    @dec.slow
    def test_same_coordinate(self):
        """Test the 'same ... as' construct (Issue 217)"""
        sel = self.universe.select_atoms("same x as bynum 1 or bynum 10")
        assert_equal(len(sel), 12,
                     "Found a wrong number of atoms with same x as ids 1 or 10")
        # 0-based indices of all atoms sharing an x coordinate with atom 1 or 10.
        target_ids = np.array([    0,     8,     9,   224,   643,  3515,
                               11210, 14121, 18430, 25418, 35811, 43618])
        assert_array_equal(sel.indices, target_ids,
                           "Found wrong atoms with same x as ids 1 or 10")

    def test_cylayer(self):
        """Cylinder layer selections with tricilinic periodicity (Issue 274)"""
        atgp = self.universe.select_atoms('name OW')
        sel = atgp.select_atoms('cylayer 10 20 20 -20 bynum 3554')
        assert_equal(len(sel), 1155)

    def test_cyzone(self):
        """Cylinder zone selections with tricilinic periodicity (Issue 274)"""
        atgp = self.universe.select_atoms('name OW')
        sel = atgp.select_atoms('cyzone 20 20 -20 bynum 3554')
        assert_equal(len(sel), 1556)
class TestSelectionsXTC(object):
    """Selections needing a solvated system with multiple fragments."""

    def setUp(self):
        self.universe = MDAnalysis.Universe(TPR, XTC)

    def tearDown(self):
        # Release the Universe between tests, consistent with the other
        # selection test classes in this module (was previously missing).
        del self.universe

    def test_same_fragment(self):
        """Test the 'same ... as' construct (Issue 217)"""
        # This test comes here because it's a system with solvent,
        # and thus multiple fragments.
        sel = self.universe.select_atoms("same fragment as bynum 1")
        assert_equal(
            len(sel), 3341,
            "Found a wrong number of atoms on the same fragment as id 1")
        assert_array_equal(
            sel.indices, self.universe.atoms[0].fragment.indices,
            "Found a different set of atoms when using the 'same fragment as' construct vs. the .fragment prperty")
class TestSelectionsNucleicAcids(object):
    """Nucleic-acid selection keywords on a single-strand RNA system."""

    def setUp(self):
        self.universe = MDAnalysis.Universe(NUCL)

    def tearDown(self):
        # Release the Universe between tests, consistent with the other
        # selection test classes in this module (was previously missing).
        del self.universe

    def test_nucleic(self):
        rna = self.universe.select_atoms("nucleic")
        assert_equal(rna.n_atoms, 739)
        assert_equal(rna.n_residues, 23)

    def test_nucleic_all(self):
        u = mda.Universe(NUCLsel)
        sel = u.select_atoms('nucleic')
        assert_(len(sel) == 34)

    def test_nucleicbackbone(self):
        rna = self.universe.select_atoms("nucleicbackbone")
        assert_equal(rna.n_residues, 23)
        assert_equal(rna.n_atoms, rna.n_residues * 5 - 1)
        # -1 because this is a single strand of RNA and on P is missing at the 5' end
        # todo: need checks for other selection resnames such as DT DA DG DC DU

    def test_nucleicbase(self):
        rna = self.universe.select_atoms("nucleicbase")
        assert_equal(rna.n_residues, 23)
        assert_equal(rna.n_atoms, 214)

    def test_nucleicsugar(self):
        rna = self.universe.select_atoms("nucleicsugar")
        assert_equal(rna.n_residues, 23)
        assert_equal(rna.n_atoms, rna.n_residues * 5)
class BaseDistanceSelection(object):
    """Both KDTree and distmat selections on orthogonal system

    Selections to check:
     - Around
     - SphericalLayer
     - SphericalZone
     - Point

    Cylindrical methods don't use KDTree
    """
    # (apply-method, periodic?) combinations exercised by each generator test.
    methods = [('kdtree', False),
               ('distmat', True),
               ('distmat', False)]

    @staticmethod
    def choosemeth(sel, meth, periodic):
        """Hack in the desired apply method and periodicity on *sel*."""
        if meth == 'kdtree':
            sel.apply = sel._apply_KDTree
        elif meth == 'distmat':
            sel.apply = sel._apply_distmat
        sel.periodic = bool(periodic)
        return sel

    def _check_around(self, meth, periodic):
        sel = Parser.parse('around 5.0 resid 1', self.u.atoms)
        sel = self.choosemeth(sel, meth, periodic)
        result = sel.apply(self.u.atoms)
        r1 = self.u.select_atoms('resid 1')
        # Reference: brute-force distance matrix against all resid-1 atoms.
        # (Removed an unused center-of-geometry computation from the original.)
        box = self.u.dimensions if periodic else None
        d = distance_array(self.u.atoms.positions, r1.positions,
                           box=box)
        ref = set(np.where(d < 5.0)[0])
        # Around doesn't include atoms from the reference group
        ref.difference_update(set(r1.indices))
        assert_(ref == set(result.indices))

    def test_around(self):
        for meth, periodic in self.methods:
            yield self._check_around, meth, periodic

    def _check_spherical_layer(self, meth, periodic):
        sel = Parser.parse('sphlayer 2.4 6.0 resid 1', self.u.atoms)
        sel = self.choosemeth(sel, meth, periodic)
        result = sel.apply(self.u.atoms)
        r1 = self.u.select_atoms('resid 1')
        # sphlayer is measured from the center of geometry of the reference.
        cog = r1.center_of_geometry().reshape(1, 3)
        box = self.u.dimensions if periodic else None
        d = distance_array(self.u.atoms.positions, cog, box=box)
        ref = set(np.where((d > 2.4) & (d < 6.0))[0])
        assert_(ref == set(result.indices))

    def test_spherical_layer(self):
        for meth, periodic in self.methods:
            yield self._check_spherical_layer, meth, periodic

    def _check_spherical_zone(self, meth, periodic):
        sel = Parser.parse('sphzone 5.0 resid 1', self.u.atoms)
        sel = self.choosemeth(sel, meth, periodic)
        result = sel.apply(self.u.atoms)
        r1 = self.u.select_atoms('resid 1')
        cog = r1.center_of_geometry().reshape(1, 3)
        box = self.u.dimensions if periodic else None
        d = distance_array(self.u.atoms.positions, cog, box=box)
        ref = set(np.where(d < 5.0)[0])
        assert_(ref == set(result.indices))

    def test_spherical_zone(self):
        for meth, periodic in self.methods:
            yield self._check_spherical_zone, meth, periodic

    def _check_point(self, meth, periodic):
        sel = Parser.parse('point 5.0 5.0 5.0 3.0', self.u.atoms)
        sel = self.choosemeth(sel, meth, periodic)
        result = sel.apply(self.u.atoms)
        box = self.u.dimensions if periodic else None
        # Note the argument order: the point is the first group, so the
        # atom indices come out of axis 1 of the distance matrix.
        d = distance_array(np.array([[5.0, 5.0, 5.0]], dtype=np.float32),
                           self.u.atoms.positions,
                           box=box)
        ref = set(np.where(d < 3.0)[1])
        assert_(ref == set(result.indices))

    def test_point(self):
        for meth, periodic in self.methods:
            yield self._check_point, meth, periodic
class TestOrthogonalDistanceSelections(BaseDistanceSelection):
    # Runs the BaseDistanceSelection checks on an orthogonal (TRZ) box and
    # adds a cylinder-zone check (cyzone has no KDTree implementation).
    @dec.skipif(parser_not_found('TRZ'),
                'TRZ parser not available. Are you using python 3?')
    def setUp(self):
        self.u = mda.Universe(TRZ_psf, TRZ)

    def tearDown(self):
        del self.u

    def _check_cyzone(self, meth, periodic):
        # NOTE(review): `meth` is accepted for generator-signature symmetry
        # but unused here -- cyzone only has a distance-matrix backend.
        sel = Parser.parse('cyzone 5 4 -4 resid 2', self.u.atoms)
        sel.periodic = periodic
        result = sel.apply(self.u.atoms)
        other = self.u.select_atoms('resid 2')
        pos = other.center_of_geometry()
        # Minimum-image displacement vectors from the cylinder axis origin.
        vecs = self.u.atoms.positions - pos
        if periodic:
            box = self.u.dimensions[:3]
            vecs -= box * np.rint(vecs / box)
        # Inside the cylinder: |z| < 4 and radial distance < 5.
        mask = (vecs[:,2] > -4) & (vecs[:,2] < 4)
        radii = vecs[:,0] ** 2 + vecs[:, 1] ** 2
        mask &= radii < 5**2
        ref = set(self.u.atoms[mask].indices)
        assert_(ref == set(result.indices))

    def test_cyzone(self):
        # Skip the first (kdtree) entry; cyzone is distmat-only.
        for meth, periodic in self.methods[1:]:
            yield self._check_cyzone, meth, periodic
class TestTriclinicDistanceSelections(BaseDistanceSelection):
    # Re-runs all BaseDistanceSelection checks on a triclinic (GRO) box.
    def setUp(self):
        self.u = mda.Universe(GRO)

    def tearDown(self):
        del self.u
class TestTriclinicSelections(object):
    """Non-KDTree based selections

    This system has triclinic geometry so won't use KDTree based selections
    """
    def setUp(self):
        self.u = mda.Universe(GRO)
        self.box = self.u.dimensions

    def tearDown(self):
        del self.u

    def test_around(self):
        r1 = self.u.select_atoms('resid 1')
        ag = self.u.select_atoms('around 5.0 resid 1')
        # Reference: brute-force periodic distance matrix to resid 1 atoms.
        d = distance_array(self.u.atoms.positions, r1.positions, box=self.box)
        idx = set(np.where(d < 5.0)[0])
        # Around doesn't include atoms from the reference group
        idx.difference_update(set(r1.indices))
        assert_(idx == set(ag.indices))

    def test_sphlayer(self):
        r1 = self.u.select_atoms('resid 1')
        # sphlayer measures from the reference center of geometry.
        cog = r1.center_of_geometry().reshape(1, 3)
        ag = self.u.select_atoms('sphlayer 2.4 6.0 resid 1')
        d = distance_array(self.u.atoms.positions, cog, box=self.box)
        idx = set(np.where((d > 2.4) & (d < 6.0))[0])
        assert_(idx == set(ag.indices))

    def test_sphzone(self):
        r1 = self.u.select_atoms('resid 1')
        cog = r1.center_of_geometry().reshape(1, 3)
        ag = self.u.select_atoms('sphzone 5.0 resid 1')
        d = distance_array(self.u.atoms.positions, cog, box=self.box)
        idx = set(np.where(d < 5.0)[0])
        assert_(idx == set(ag.indices))

    def test_point_1(self):
        # The example selection
        ag = self.u.select_atoms('point 5.0 5.0 5.0 3.5')
        # Point is the first group, so atom indices come from axis 1.
        d = distance_array(np.array([[5.0, 5.0, 5.0]], dtype=np.float32),
                           self.u.atoms.positions,
                           box=self.box)
        idx = np.where(d < 3.5)[1]
        assert_equal(set(ag.indices), set(idx))

    def test_point_2(self):
        # Same as test_point_1 but applied to a subgroup of atoms.
        ag1 = self.u.atoms[:10000]
        ag2 = ag1.select_atoms('point 5.0 5.0 5.0 3.5')
        d = distance_array(np.array([[5.0, 5.0, 5.0]], dtype=np.float32),
                           ag1.positions,
                           box=self.box)
        idx = np.where(d < 3.5)[1]
        assert_equal(set(ag2.indices), set(idx))
class TestPropSelection(object):
    """Tests for 'prop' selections comparing mass/charge against 1.5."""
    # Map the singular selection keyword to the plural AtomGroup attribute.
    plurals = {'mass': 'masses',
               'charge': 'charges'}
    # numpy comparison implementing each selection operator.
    op_funcs = {
        '<': np.less,
        '<=': np.less_equal,
        '>': np.greater,
        '>=': np.greater_equal,
        '==': np.equal,
        '!=': np.not_equal
    }
    # Operator obtained when the two operands of a comparison are swapped.
    opposites = {
        '==': '==', '!=': '!=',
        '>': '<=', '<=': '>',
        '<': '>=', '>=': '<',
    }

    @staticmethod
    def gen_sel_strings(prop, oper):
        """Generate all possible combinations of spaces in selection strings

        ie:
          'prop x < 1.5'
          'prop x< 1.5'
          'prop x <1.5'
          'prop x<1.5'
        """
        for spc1, spc2 in itertools.product([' ', ''], repeat=2):
            yield 'prop {prop}{spc1}{oper}{spc2}1.5'.format(
                prop=prop, spc1=spc1, oper=oper, spc2=spc2)

    def _check_oper(self, prop, ag, oper):
        # Shared implementation for all six comparison operators: the
        # selection-language result must match a direct numpy comparison.
        func = self.op_funcs[oper]
        ref = set(ag[func(getattr(ag, self.plurals[prop]), 1.5)].indices)
        for selstr in self.gen_sel_strings(prop, oper):
            sel = ag.select_atoms(selstr)
            assert_equal(set(sel.indices), ref)

    def _check_lt(self, prop, ag):
        self._check_oper(prop, ag, '<')

    def _check_le(self, prop, ag):
        self._check_oper(prop, ag, '<=')

    def _check_gt(self, prop, ag):
        self._check_oper(prop, ag, '>')

    def _check_ge(self, prop, ag):
        self._check_oper(prop, ag, '>=')

    def _check_eq(self, prop, ag):
        self._check_oper(prop, ag, '==')

    def _check_ne(self, prop, ag):
        self._check_oper(prop, ag, '!=')

    def _check_flip(self, prop, ag, op):
        # 'prop 1.5 <flipped op> x' must select the same atoms as
        # 'prop x <op> 1.5'.
        func = self.op_funcs[op]
        # reference group, doing things forwards
        ref = ag[func(getattr(ag, self.plurals[prop]), 1.5)]
        selstr = 'prop 1.5 {op} {prop}'.format(
            op=self.opposites[op], prop=prop)
        sel = ag.select_atoms(selstr)
        assert_equal(set(ref.indices), set(sel.indices))

    def test_props(self):
        # Give half the atoms mass/charge exactly 1.5 so ==/!= have matches.
        u = make_Universe(('masses', 'charges'))
        u.atoms[::2].masses = 1.5
        u.atoms[::2].charges = 1.5
        for prop in ['mass', 'charge']:
            for ag in [u.atoms, u.atoms[:100]]:
                yield self._check_lt, prop, ag
                yield self._check_le, prop, ag
                yield self._check_gt, prop, ag
                yield self._check_ge, prop, ag
                yield self._check_eq, prop, ag
                yield self._check_ne, prop, ag
                # check flipping operators
                for op in ('<', '>', '<=', '>=', '==', '!='):
                    yield self._check_flip, prop, ag, op
class TestBondedSelection(object):
    # Tests for the 'bonded' selection keyword.
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def setUp(self):
        self.u = mda.Universe(PSF, DCD)

    def tearDown(self):
        del self.u

    def test_bonded_1(self):
        # Atoms of type 2 that are directly bonded to any atom named N.
        ag = self.u.select_atoms('type 2 and bonded name N')
        assert_(len(ag) == 3)

    @staticmethod
    def test_nobonds_warns():
        # A universe whose bond table exists but is empty should warn
        # rather than fail when a 'bonded' selection is attempted.
        u = make_Universe(('names',))
        # empty bond topology attr
        batt = mda.core.topologyattrs.Bonds([])
        u.add_TopologyAttr(batt)
        assert_warns(UserWarning,
                     u.select_atoms, 'bonded name AAA')
class TestSelectionErrors(object):
    # Each malformed selection string below must raise SelectionError.
    def setUp(self):
        self.u = make_Universe(('names', 'masses',
                                'resids', 'resnames', 'resnums'))

    def tearDown(self):
        del self.u

    def selection_fail(self, selstr):
        # Generator-test helper: assert the string is rejected.
        assert_raises(SelectionError, self.u.select_atoms,
                      selstr)

    def test_expected_errors(self):
        for selstr in [
                'name and H',  # string selection
                'name )',
                'resid abcd',  # resid arg parsing selection
                'resnum 7a7',  # rangeselection arg parsing
                'resid 1-',
                'prop chicken == tasty',
                'prop chicken <= 7.4',
                'prop mass ^^ 12.0',
                'same this as resid 1',  # same selection
                'same resid resname mass 5.0',  # same / expect
                'name H and',  # check all tokens used
                'naem H',  # unknown (deliberately misspelt) operator
                'resid and name C',  # rangesel not finding vals
                'resnum ',
                'bynum or protein',
                'prop mass < 4.0 hello',  # unused token
                'prop mass > 10. and group this',  # missing group
                'prop mass > 10. and fullgroup this',  # missing fullgroup
        ]:
            yield self.selection_fail, selstr
def test_segid_and_resid():
    """A combined segid+resid selection equals applying the two in sequence."""
    u = make_Universe(('segids', 'resids'))
    combined = u.select_atoms('segid SegB and resid 1-100')
    sequential = u.select_atoms('segid SegB').select_atoms('resid 1-100')
    assert_array_equal(combined.indices, sequential.indices)
class TestImplicitOr(object):
    # Listing several values after one keyword ('name A B C') must behave
    # exactly like chaining the single-value selections with 'or'.
    def setUp(self):
        self.u = make_Universe(('names', 'types',
                                'resids', 'resnums',
                                'resnames', 'segids'))

    def tearDown(self):
        del self.u

    def _check_sels(self, ref, sel):
        # Generator-test helper: explicit-'or' reference vs implicit form.
        ref = self.u.select_atoms(ref)
        sel = self.u.select_atoms(sel)
        assert_array_equal(ref.indices, sel.indices)

    def test_string_selections(self):
        for ref, sel in (
                ('name NameABA or name NameACA or name NameADA',
                 'name NameABA NameACA NameADA'),
                ('type TypeE or type TypeD or type TypeB',
                 'type TypeE TypeD TypeB'),
                ('resname RsC or resname RsY', 'resname RsC RsY'),
                ('name NameAB* or name NameACC', 'name NameAB* NameACC'),
                ('(name NameABC or name NameABB) and (resname RsD or resname RsF)',
                 'name NameABC NameABB and resname RsD RsF'),
                ('segid SegA or segid SegC', 'segid SegA SegC'),
        ):
            yield self._check_sels, ref, sel

    def test_range_selections(self):
        # All these selections just use numeric types,
        # So loop over what type of selections,
        # And apply the same numeric constraints to all
        for seltype in ['resid', 'resnum', 'bynum']:
            for ref, sel in (
                    ('{typ} 1 or {typ} 2', '{typ} 1 2'),
                    ('{typ} 1:10 or {typ} 22', '{typ} 1:10 22'),
                    ('{typ} 1:10 or {typ} 20:30', '{typ} 1:10 20:30'),
                    ('{typ} 1-5 or {typ} 7', '{typ} 1-5 7'),
                    ('{typ} 1-5 or {typ} 7:10 or {typ} 12',
                     '{typ} 1-5 7:10 12'),
                    ('{typ} 1 or {typ} 3 or {typ} 5:10', '{typ} 1 3 5:10'),
            ):
                yield (self._check_sels,
                       ref.format(typ=seltype),
                       sel.format(typ=seltype))
class TestICodeSelection(object):
    # Resid selections with PDB insertion codes, e.g. 'resid 163A' and
    # ranges that mix plain resids with icode-qualified ones.
    def setUp(self):
        self.u = mda.Universe(PDB_icodes)

    def tearDown(self):
        del self.u

    def test_select_icode(self):
        ag = self.u.select_atoms('resid 163A')
        assert_(len(ag) == 7)
        assert_array_equal(ag.ids, np.arange(7) + 1230)

    def test_select_resid_implicit_icode(self):
        # A bare resid matches only the residue with an empty icode.
        ag = self.u.select_atoms('resid 163')
        assert_(len(ag) == 6)
        assert_array_equal(ag.ids, np.arange(6) + 1224)

    def test_select_icode_range_1(self):
        # testing range within a single resid integer value
        u = self.u
        ag = u.select_atoms('resid 163B-163D')
        # do it manually without selection language...
        ref = u.residues[u.residues.resids == 163]
        ref = ref[(ref.icodes >= 'B') & (ref.icodes <= 'D')]
        ref = ref.atoms
        assert_array_equal(ag.ids, ref.ids)
        assert_(len(ag) == 19)
        assert_array_equal(ag.ids, np.arange(19) + 1237)

    def test_select_icode_range_2(self):
        # Range starting at an icode and ending at a plain resid:
        # start residue filtered by icode, middle taken whole, end residue
        # restricted to the empty icode.
        u = self.u
        ag = u.select_atoms('resid 163B-165')
        resids = u.residues.resids
        start = u.residues[resids == 163]
        start = start[start.icodes >= 'B']
        mid = u.residues[resids == 164]
        end = u.residues[resids == 165]
        end = end[end.icodes == '']
        ref = start.atoms + mid.atoms + end.atoms
        assert_array_equal(ag.ids, ref.ids)

    def test_select_icode_range_3(self):
        # same as #2 but with no "middle" icodes
        u = self.u
        ag = u.select_atoms('resid 163B-164')
        resids = u.residues.resids
        start = u.residues[resids == 163]
        start = start[start.icodes >= 'B']
        end = u.residues[resids == 164]
        end = end[end.icodes == '']
        ref = start.atoms + end.atoms
        assert_array_equal(ag.ids, ref.ids)

    def test_select_icode_range_4(self):
        # Range starting at a plain resid and ending at an icode.
        u = self.u
        ag = u.select_atoms('resid 160-163G')
        resids = u.residues.resids
        start = u.residues[resids == 160]
        start = start[start.icodes >= '']
        mid = u.residues[(resids == 161) | (resids == 162)]
        end = u.residues[resids == 163]
        end = end[end.icodes <= 'G']
        ref = start.atoms + mid.atoms + end.atoms
        assert_array_equal(ag.ids, ref.ids)

    def test_select_icode_range_5(self):
        # same as #4 but with no "middle" icodes in range
        u = self.u
        ag = u.select_atoms('resid 162-163G')
        resids = u.residues.resids
        start = u.residues[resids == 162]
        start = start[start.icodes >= '']
        end = u.residues[resids == 163]
        end = end[end.icodes <= 'G']
        ref = start.atoms + end.atoms
        assert_array_equal(ag.ids, ref.ids)

    def test_missing_icodes_VE(self):
        # trying a selection with icodes in a Universe without raises VA
        u = make_Universe(('resids',))
        assert_raises(ValueError, u.select_atoms, 'resid 10A')

    def test_missing_icodes_range_VE(self):
        u = make_Universe(('resids',))
        assert_raises(ValueError, u.select_atoms, 'resid 10A-12')
| alejob/mdanalysis | testsuite/MDAnalysisTests/core/test_atomselections.py | Python | gpl-2.0 | 35,692 | [
"Amber",
"MDAnalysis",
"NAMD"
] | d706e3808cae7ac2dfdf9a77b5037c87521c6b3507b01de08451d9dc8f18f4ac |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Upp(CMakePackage):
    """
    The Unified Post Processor (UPP) software package is a software
    package designed to generate useful products from raw model
    output.
    """

    homepage = "https://github.com/NOAA-EMC/UPP"
    git = "https://github.com/NOAA-EMC/UPP.git"
    url = "https://github.com/NOAA-EMC/UPP/archive/refs/tags/upp_v10.0.10.tar.gz"

    maintainers = ['kgerheiser', 'edwardhartnett', 'Hang-Lei-NOAA']

    version('10.0.10', sha256='0c96a88d0e79b554d5fcee9401efcf4d6273da01d15e3413845274f73d70b66e')
    # 10.0.9 is built from the git tag because it needs its submodules.
    version('10.0.9', tag='upp_v10.0.9', submodules=True)

    # Descriptions make the variants self-documenting in `spack info`
    # (undocumented variants are flagged by `spack audit packages`).
    variant('openmp', default=True, description='Enable OpenMP threading')
    variant('postexec', default=True, description='Build the standalone post executable')
    variant('wrf-io', default=False, description='Enable WRF I/O support')
    variant('docs', default=False, description='Build the Doxygen documentation')

    # Core dependencies.
    depends_on('mpi')
    depends_on('netcdf-fortran')
    depends_on('bacio')
    depends_on('crtm')
    depends_on('g2')
    depends_on('g2tmpl')
    depends_on('ip')
    # Only needed when building the post executable.
    depends_on('nemsio', when='+postexec')
    depends_on('sfcio', when='+postexec')
    depends_on('sigio', when='+postexec')
    depends_on('sp', when='+postexec')
    depends_on('w3nco', when='+postexec')
    depends_on('wrf-io', when='+wrf-io')
    depends_on('doxygen', when='+docs')

    def cmake_args(self):
        """Translate the variants into the package's CMake options."""
        args = [
            self.define_from_variant('OPENMP', 'openmp'),
            self.define_from_variant('BUILD_POSTEXEC', 'postexec'),
            self.define_from_variant('BUILD_WITH_WRFIO', 'wrf-io'),
            self.define_from_variant('ENABLE_DOCS', 'docs')
        ]
        return args
| LLNL/spack | var/spack/repos/builtin/packages/upp/package.py | Python | lgpl-2.1 | 1,747 | [
"NetCDF"
] | d29fab6885a84fc1bcc28b74ad55a310836adb6d7c3120450cd9e41351c6e628 |
from django.db import models
from mezzanine.utils.models import AdminThumbMixin
from mezzanine.utils.models import upload_to
from mezzanine.core.fields import RichTextField
from django.contrib.auth.models import User
from mezzanine.core.models import Displayable, Orderable, RichText, Ownable,\
Slugged
from django.utils.translation import ugettext_lazy as _
from django.db.models.fields.related import ForeignKey
import datetime
from sfpirgapp.fields import MyImageField
from mezzanine.pages.fields import MenusField
from django.conf import settings
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
from mezzanine.core.models import CONTENT_STATUS_DRAFT
#from django import forms
# Monkey-patch Django's User model so admin dropdowns list users
# alphabetically by username.
User._meta.ordering=["username"]
class PageLike(Orderable, Displayable, RichText, AdminThumbMixin):
    """Abstract base mixing ordering, publishing, rich text content and a
    featured image into the page-like models of this app."""
    # NOTE(review): purpose not evident here -- presumably a denormalised
    # title cache maintained elsewhere; confirm before relying on it.
    titles = models.CharField(editable=False, max_length=1000, null=True)
    # Restrict visibility to authenticated users when set.
    login_required = models.BooleanField(_("Login required"),
        default=False,
        help_text=_("If checked, only logged in users can view this page"))
    featured_image = MyImageField(verbose_name=_("Featured Image"),
        upload_to=upload_to("images", "uploads/images"),
        format="Image", max_length=255, null=True, blank=True)
    # Field shown as thumbnail in the admin list view.
    admin_thumb_field = "featured_image"
    search_fields = ('title', 'content')

    class Meta:
        abstract = True
class Profile(models.Model, AdminThumbMixin):
    """Per-user profile data (one-to-one with Django's User)."""
    # Optional link to the organization the user belongs to; kept on
    # organization deletion by nulling the reference.
    organization = ForeignKey('Organization', null=True, blank=True,
                              on_delete=models.SET_NULL,)
    user = models.OneToOneField(User)
    date_of_birth = models.DateField(null=True, blank=True)
    # Job title / area of study, not the page title.
    title = models.CharField(null=True, blank=True, max_length=255)
    bio = RichTextField(null=True, blank=True)
    photo = MyImageField(verbose_name="Photo",
        upload_to=upload_to("sfpirgapp.Profile.photo", "uploads/profile-photos"),
        format="Image", max_length=255, null=True, blank=True,
        help_text='User photo')
    admin_thumb_field = "photo"
    # Mailing-list opt-in, defaults to subscribed.
    on_mailing_list = models.BooleanField(default=True, verbose_name='Would you like to be added to our mailing list?',
            help_text='Would you like to be added to our mailing list to receive periodic information about social and environmental justice happenings on and off campus?')
class Testimonial(PageLike):
    """A user-submitted experience story, filed under a Category."""
    # Author link is optional; anonymous submissions keep the name fields.
    user = ForeignKey(User, null=True, blank=True, verbose_name=_("Author"), related_name="testimonials",
                      on_delete=models.SET_NULL,)
    category = ForeignKey('Category', related_name='testimonials')
    author_full_name = models.CharField(verbose_name='Your Full Name', max_length=255, null=True, blank=True)
    author_title = models.CharField(verbose_name='Your area of study or job title', max_length=255 ,null=True, blank=True)
    author_email = models.EmailField('Email', max_length=255, null=True, blank=True)

    def get_author_full_name(self):
        """Prefer the explicit name field, then the linked user's name."""
        if self.author_full_name:
            return self.author_full_name
        if self.user:
            return self.user.get_full_name() or self.user.username
        return ''

    def get_author_title(self):
        """Prefer the explicit title, then the linked user's profile title."""
        # NOTE(review): accessing self.user.profile raises if the related
        # Profile row is missing -- confirm every user gets a profile.
        return self.author_title or (self.user and self.user.profile and self.user.profile.title) or ''

    @models.permalink
    def get_absolute_url(self):
        return ('testimonial', (), {'slug': self.slug})

    class Meta:
        verbose_name = 'Experience'
        verbose_name_plural = 'Experiences'
class DummyTable(models.Model):
    # Minimal concrete model whose only purpose is to manufacture an
    # always-empty queryset (see DummyEmptyResultSet in this module).
    pass
def DummyEmptyResultSet():
    # pk=-1 can never match an auto-generated primary key, so this lazily
    # evaluates to an empty queryset whenever it is iterated.
    return DummyTable.objects.filter(pk=-1)
class Category(PageLike, Ownable):
    """A user-owned grouping page for testimonials and action groups."""
    class Meta:
        verbose_name = u'Category'
        verbose_name_plural = u'Categories'

    @models.permalink
    def get_absolute_url(self):
        return ('category', (), {'slug': self.slug})
class ActionGroupRequest(models.Model):
    """An application form submission asking to start a new Action Group;
    staff review it and flip ``is_processed`` once handled."""
    title = models.CharField('Proposed Action Group Name', max_length=255)
    contact_person = models.CharField('Main Contact Person', max_length=255, null=True, blank=True)
    contact_email = models.EmailField('Email', max_length=255, null=True, blank=True)
    contact_phone = models.CharField('Phone', max_length=255, null=True, blank=True)
    # Only the local part is collected; the domain is fixed to sfpirg.ca.
    group_email = models.CharField('Desired email address for Action Group',
                                   max_length=255,
                                   null=True,
                                   blank=True,
                                   help_text='Desired email address for Action Group: (___________________@sfpirg.ca)')
    basis_of_unity = models.TextField('General Basis of Unity / Objective',
                                      null=True,
                                      blank=True)
    goals = models.TextField('Main Goal(s)',
                             null=True,
                             blank=True)
    timeline = models.TextField('Plans and Timeline', null=True, blank=True,
                                help_text='Specific Plans and timeline for the semester (please be as concrete as possible)')
    oneliner = models.TextField('One-liner for SFPIRG promotional materials', null=True, blank=True)
    twoliner = models.TextField('One paragraph for SFPIRG website', null=True, blank=True)
    potential_members = models.TextField('Potential members of your group', null=True, blank=True,
                                         help_text='Please include the members of your potential Action Group: (NAME, PHONE, EMAIL)')
    on_mailing_list = models.BooleanField('Would you like to be added to our mailing list?',
                                          default=True,
                                          help_text='Would you like to be added to our mailing list to receive periodic information about social and environmental justice happenings on and off campus?')
    # Set by staff once the request has been turned into an ActionGroup.
    is_processed = models.BooleanField('Request already processed', default=False)

    @property
    def content(self):
        # Combined blurb: the one-liner and the website paragraph, skipping
        # whichever is missing.
        parts = []
        if self.oneliner:
            parts.append(self.oneliner)
        if self.twoliner:
            parts.append(self.twoliner)
        return '\n'.join(parts)
class ActionGroup(PageLike, Ownable):
    """An SFPIRG action group page; published only once approved."""
    # The side_menu template expects parent/children attributes; action
    # groups are flat, so fake an empty tree.
    parent = None  # To make it compatible with the side_menu template
    children = DummyEmptyResultSet()  # To make it compatible with the side_menu template
    category = ForeignKey(Category, related_name='action_groups')
    announcements = RichTextField(null=True, blank=True,
                                  verbose_name='Announcements',
                                  help_text='Use this section to let people know about any upcoming events, volunteer opportunities, new initiatives - or just anything you want to draw attention to.')
    meetings = RichTextField(null=True, blank=True,
                             verbose_name='Meetings',
                             help_text='Let people know when & where you meet if you have regular meeting times. Don\'t forget you can book the SFPIRG lounge or meeting room to host your meetings.')
    contact_name = models.CharField('Main Contact Person',
                                    null=True, blank=True, max_length=255)
    contact_email = models.EmailField(null=True, blank=True, max_length=255,
                                      verbose_name='Contact Email')
    contact_phone = models.CharField(null=True, blank=True, max_length=255,
                                     verbose_name='Contact Telephone')
    group_email = models.EmailField(null=True, blank=True, max_length=255,
                                    verbose_name='Group Email')
    goals = RichTextField('Main Goal(s)', null=True, blank=True)
    timeline = RichTextField('Plans and Timeline', null=True, blank=True,
                             help_text='Specific Plans and timeline for the semester (please be as concrete as possible)')
    oneliner = RichTextField('One-liner for SFPIRG promotional materials', null=True, blank=True)
    twoliner = RichTextField('One paragraph for SFPIRG website', null=True, blank=True)
    potential_members = RichTextField('Potential members of your group', null=True, blank=True,
                                      help_text='Please include the members of your potential Action Group: (NAME, PHONE, EMAIL)')
    links = RichTextField(null=True, blank=True,
                          verbose_name='Links',
                          help_text='Either to your website, or anywhere else you want to direct people to')
    facebook_url = models.URLField(null=True, blank=True, max_length=255)
    # Either a bare handle, an '@handle' or a full URL; see twitter_url().
    twitter = models.CharField(null=True, blank=True, max_length=255)
    google_plus_url = models.URLField(null=True, blank=True, max_length=255)
    mailing_list_url = models.URLField(null=True, blank=True, max_length=255,
                                       verbose_name='Link to Mailing List',
                                       help_text='You can create a free html email newsletter using mailchimp (www.mailchimp.com). Then people can automatically subscribe to your news. If you already have one, put in your Mailchimp List page address here. Visit mailchimp.com to get it quick')
    # Staff approval gate; mirrored into ``status`` by save().
    is_approved = models.BooleanField(default=False)
    in_menus = MenusField("Show in menus", blank=True, null=True)

    @property
    def richtextpage(self):
        # Templates written for Mezzanine pages expect a .richtextpage.
        return self

    def in_menu_template(self, template_name):
        """Return True if this group should appear in the given menu template."""
        if self.in_menus is not None:
            # ``unicode`` is a Python 2 idiom; in_menus stores menu ids.
            for i, _l, t in settings.PAGE_MENU_TEMPLATES:
                if not unicode(i) in self.in_menus and t == template_name:
                    return False
        return True

    @models.permalink
    def get_absolute_url(self):
        return ('action-group', (), {'slug': self.slug})

    def twitter_url(self):
        """Build a full Twitter URL from the ``twitter`` field, which may
        hold a full URL, an '@handle' or a bare handle."""
        if not self.twitter:
            return ''
        if self.twitter.startswith(('http://', 'https://')):
            return self.twitter
        handle = self.twitter[1:] if self.twitter.startswith('@') else self.twitter
        return 'http://twitter.com/%s' % handle

    def save(self, *args, **kwargs):
        """Keep the publishing status in lockstep with staff approval."""
        if self.is_approved:
            self.status = CONTENT_STATUS_PUBLISHED
        else:
            self.status = CONTENT_STATUS_DRAFT
        return super(ActionGroup, self).save(*args, **kwargs)
class Address(models.Model):
    """A simple mailing address."""
    city = models.CharField(max_length=255)
    street = models.CharField(max_length=255)
    # Optional second street line.
    street2 = models.CharField(max_length=255, default='', blank=True, null=True)
    postal_code = models.CharField(max_length=255)

    class Meta:
        verbose_name_plural = u'Addresses'

    def __unicode__(self):
        # Single-line rendering; street2 collapses to '' when unset.
        return '%s %s %s %s' % (self.street, (self.street2 or ''), self.city, self.postal_code)

    __str__ = __unicode__
class Organization(Slugged):
    """A community organization applying to work with SFPIRG."""
    mailing_city = models.CharField(max_length=255, verbose_name='City')
    mailing_street = models.CharField(max_length=255, verbose_name='Street Address')
    mailing_street2 = models.CharField(max_length=255, default='', blank=True, null=True, verbose_name='Street Address (2nd line)')
    mailing_postal_code = models.CharField(max_length=255, verbose_name='Postal Code')
    mandate = RichTextField(null=True, blank=True,
                            verbose_name="Organization's Goal",
                            help_text="What is your organization's goal or mandate?")
    communities = RichTextField(null=True, blank=True,
                                verbose_name='Communities you work with',
                                help_text='What community or communities do you represent or work with?')
    sources_of_funding = RichTextField(verbose_name="Organization's sources of funding",
                                       help_text="What are your organization's principal sources of funding?")
    is_registered = models.BooleanField(default=False, verbose_name='Are you a registered non-profit?')
    website = models.URLField(null=True, blank=True, verbose_name='Website URL',
                              help_text='Website must begin with "http://"')
    # Primary contact; additional contacts live in the Liaison model.
    contact_name = models.CharField(null=True, blank=True, max_length=255, verbose_name='Contact Name',
                                    help_text='Who can SFPIRG contact with questions about this project?')
    contact_position = models.CharField(null=True, blank=True, max_length=255, verbose_name='Contact Position',
                                        help_text='What position do they hold in the organization?')
    contact_email = models.EmailField(null=True, blank=True, max_length=255, verbose_name='Contact Email')
    contact_alt_email = models.EmailField(null=True, blank=True, max_length=255, verbose_name='Contact Alternative Email')
    contact_phone = models.CharField(null=True, blank=True, max_length=255, verbose_name='Contact Phone Number')
class Liaison(models.Model):
    """A named contact person belonging to an Organization."""
    name = models.CharField(max_length=255, verbose_name='Contact Name',
                            help_text='Who can SFPIRG contact with questions about this project?')
    position = models.CharField(max_length=255, verbose_name='Contact Position',
                                help_text='What position do they hold in the organization?')
    email = models.EmailField(max_length=255, verbose_name='Contact Email')
    alt_email = models.EmailField(max_length=255, blank=True, null=True, verbose_name='Alternative Email')
    phone = models.CharField(max_length=255, verbose_name='Contact Phone Number')
    organization = ForeignKey(Organization, related_name='liaisons')

    class Meta:
        ordering = ['name']

    def __unicode__(self):
        return self.name
    # Python 2 convention: reuse the unicode representation for str().
    __str__ = __unicode__

    def as_p(self):
        """Return the liaison's details as one HTML <p> with <br/> separators.

        NOTE(review): when ``alt_email`` is NULL the literal text 'None' is
        rendered after the email address -- confirm whether that is intended.
        """
        retval = '<br/>'.join(['Name: %s' % self.name,
                               'Position: %s' % self.position,
                               'Email: %s %s' % (self.email, self.alt_email),
                               'Phone: %s' % self.phone])
        return '<p>%s</p>' % retval
class ProjectType(Slugged):
    # Pure taxonomy model: only the title/slug inherited from Slugged.
    pass
class ProjectSubject(Slugged):
    # Pure taxonomy model: only the title/slug inherited from Slugged.
    pass
class Project(Displayable, AdminThumbMixin):
    """An Action Research Exchange project posted on behalf of an organization.

    ``save()`` enforces a one-way status progression:
    submitted -> approved -> underway -> finished -> completed successfully;
    inconsistent flag combinations are silently not saved.
    """
    user = ForeignKey(User)
    liaison = ForeignKey(Liaison, blank=True, null=True,
                         on_delete=models.SET_NULL,
                         help_text='Who can SFPIRG contact with questions about this project?')
    time_per_week = RichTextField(blank=True, null=True,
                                  verbose_name='How much time per week can the Contact/Liaison devote to the student?')
    support_method = RichTextField(blank=True, null=True,
                                   verbose_name='How will the Contact/Liaison provide direction and support to the project?')
    logo = MyImageField(verbose_name="Project Image",
                        upload_to=upload_to("sfpirgapp.project", "uploads/project-images"),
                        format="Image", max_length=255, null=True, blank=True,
                        help_text='Please upload an image to represent the project, or your logo. If you do not have one, do not worry, just leave this section blank.')
    # Field shown as the thumbnail in the admin change list.
    admin_thumb_field = "logo"
    project_type = models.ManyToManyField(ProjectType, help_text='(Please select all that apply)')
    project_type_other = models.CharField(blank=True, null=True, max_length=255, verbose_name='Other Description',
                                          help_text='If you checked "other", please briefly describe your project type')
    project_subject = models.ManyToManyField(ProjectSubject, verbose_name='Project Issues',
                                             help_text='(Please select all that apply)')
    project_subject_other = models.CharField(blank=True, null=True, max_length=255, verbose_name='Other Issues',
                                             help_text='If you checked "other", please briefly describe your project subject')
    length = models.CharField(null=True, blank=True, max_length=255, verbose_name='Project Duration',
                              help_text=('(Please indicate how many months you expect this project to take; '
                                         'keeping in mind that if your project will take longer than one semester '
                                         'to complete the pool of students who can undertake it will be limited '
                                         'to grad students and students who undertake the project independently/not '
                                         'for course credit. Semesters run from Sept-Dec, Jan-Apr & May-Aug.)'))
    description_long = RichTextField(blank=True, null=True,
                                     verbose_name='About this Project',
                                     help_text='(What is the central research question you want answered or what project would you like help with? Please provide a detailed description of your project here.)')
    results_plan = RichTextField(blank=True, null=True, verbose_name='Use of Project Results',
                                 help_text='(How do you plan to use the results of this project? For example, do you plan to publish it or will it be kept internal to your organization?)')
    larger_goal = RichTextField(blank=True, null=True, verbose_name='Deliverables',
                                help_text='(What do you want as specific deliverables for this project? For example, you might want a 10 page research paper on a topic, plus an executive summary, plus a power-point presentation to your organization\'s board of directors.)')
    researcher_qualities = RichTextField(blank=True, null=True, verbose_name='The Student Researcher Must Possess',
                                         help_text='(What skills or attributes do you hope the student researcher will possess?)')
    date_created = models.DateTimeField(auto_now_add=True)
    date_start = models.DateField('Approval Date', blank=True, null=True)
    # Lifecycle flags; their consistency is enforced in save().
    is_submitted = models.BooleanField(default=False)
    is_approved = models.BooleanField(default=False)
    is_underway = models.BooleanField(default=False)
    is_finished = models.BooleanField(default=False)
    is_completed_successfully = models.BooleanField(default=False)
    category = ForeignKey(Category, related_name='arx_projects')
    in_menus = MenusField("Show in menus", blank=True, null=True)
    admin_notes = models.TextField(blank=True, null=True,
                                   help_text='Internal Admin notes, not shown to the front-end users')

    # Fields indexed by Mezzanine's keyword search.
    search_fields = ('title', 'description_long', 'results_plan', 'larger_goal', 'researcher_qualities')

    def get_description(self):
        return self.description_long

    def set_description(self, value):
        # Writes are deliberately ignored: `description` always mirrors
        # description_long.
        pass

    description = property(get_description, set_description)

    @property
    def richtextpage(self):
        # Presumably lets templates that expect a `richtextpage` variable
        # render this object unchanged -- confirm against the templates.
        return self

    def in_menu_template(self, template_name):
        """Return False only if this project is explicitly excluded from the
        menu rendered by ``template_name``; True otherwise (including when
        ``in_menus`` is unset)."""
        if self.in_menus is not None:
            for i, l, t in settings.PAGE_MENU_TEMPLATES:
                if not unicode(i) in self.in_menus and t == template_name:
                    return False
        return True

    @property
    def organization_title(self):
        """Title of the posting user's organization, or '[None]' when any
        link in user -> profile -> organization is missing."""
        try:
            return self.user.profile.organization.title
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            return '[None]'

    @property
    def featured_image(self):
        return self.logo

    @property
    def content(self):
        return self.description_long

    @models.permalink
    def get_absolute_url(self):
        return ('arx-project', (), {'slug': self.slug})

    @models.permalink
    def get_apply_url(self):
        return ('arx-project-apply', (), {'slug': self.slug})

    @property
    def formatted_project_subject(self):
        """Comma-separated subject titles; a lone 'Other' collapses to the
        free-text ``project_subject_other`` value."""
        subjects = [subj.title for subj in self.project_subject.all()]
        if subjects == ['Other']:
            return self.project_subject_other
        return ', '.join(subjects)

    def save(self, *args, **kwargs):
        """Save only consistent lifecycle states; silently skip otherwise.

        NOTE(review): ``date_start`` is refreshed to "now" on *every* save of
        an approved project, not just on first approval -- confirm intended.
        """
        # Can't save a state that violates consistency
        if self.is_approved and not self.is_submitted:
            return
        if ((self.is_completed_successfully or self.is_finished or self.is_underway)
                and not (self.is_approved and self.is_submitted)):
            return
        if ((self.is_completed_successfully or self.is_finished)
                and not (self.is_approved and self.is_submitted and self.is_underway)):
            return
        if self.is_completed_successfully and not self.is_finished:
            return
        if self.is_approved:
            self.date_start = datetime.datetime.now()
        return super(Project, self).save(*args, **kwargs)
class Application(models.Model):
    """A student's application to work on a Project."""
    name = models.CharField('Your Name', max_length=255)
    email = models.EmailField(null=True, blank=True, max_length=255, verbose_name='Your Email')
    project = ForeignKey(Project)
    # NOTE(review): utcnow produces a naive datetime (no tzinfo) -- confirm
    # this matches the project's USE_TZ setting.
    timestamp = models.DateTimeField(default=datetime.datetime.utcnow)
    message = models.TextField()

    def __unicode__(self):
        return '%s: %s (%s...)' % (self.email, self.project.title, self.message[:20])
class Settings(models.Model):
    """Simple name/value store, lazily seeded from Django settings on first
    lookup of a missing name."""
    name = models.CharField(max_length=255)
    value = models.CharField(max_length=255)

    def __unicode__(self):
        return self.name

    @classmethod
    def get_setting(cls, name, default_value=None):
        """Return the stored value for ``name``.

        If no row exists, fall back to the same-named Django settings
        attribute (or ``default_value`` or '-'), persist that fallback as a
        new row, and return it.
        """
        # Returns the first matching row, if any.
        for rec in cls.objects.filter(name=name):
            return rec.value
        retval = getattr(settings, name, default_value or '-')
        cls.objects.create(name=name, value=retval)
        return retval
| orlenko/sfpirg | sfpirgapp/models.py | Python | bsd-2-clause | 21,029 | [
"VisIt"
] | ba7931d4ada477f40fa202a309466237d46547d37e27fa26aaf62d8a5b02f6fd |
import numpy as np
import matplotlib.pyplot as plt
from util import utils
import mpfit
import scipy.optimize as optimize
from scipy import pi,sqrt,exp
from scipy.special import erf
import scipy.integrate as integrate
#This program tries to fit the white dwarf and bright spot component of the light curve. The bright spot is not modeled correctly. DwarfFit.py is a more sophisticated program that only fits the white dwarf component.
# Input: per-frame PSF-fit parameters saved by the photometry pipeline.
npzDataFile = '/Scratch/dataProcessing/SDSS_J0926/Dec8fitpsfBlue.npz'
# Per-eclipse tuning (frame slice, initial guesses, per-point sigmas):
#Dec8 eclipse 1: [0:300], TFp0=(1,.7,10.5,-2.5,1,2,1,-5,-1.5,1,1,2), sigma= [10]*110+[5]*130+[10]*60
#Dec8 eclipse 2: [600:900], TFp0=(1,2,10.5,-2.5,1,1,.8,-3,-1.5,1,1,2), sigma= [10]*60+[1]*130+[10]*110
cutoffmin = 600
cutoffmax = 900
DataFile = np.load(npzDataFile)
params = DataFile['params']
jd = DataFile['jd']
#params[:,0] are the height offset of the gaussian (so the number of background pulses)
amps = params[:,1]
widths = params[:,4]
xpos = params[:,2]
ypos = params[:,3]
# Integrated flux of a 2-D Gaussian PSF: 2*pi*amplitude*width^2.
curve = 2*pi*amps*widths**2
jd = jd[cutoffmin:cutoffmax]
# Time since the first selected frame, in units of 1e-3 JD.
x = (jd-jd[0])*1000
# Light curve normalized so its mean is 10.
data = curve[cutoffmin:cutoffmax]/np.average(curve[cutoffmin:cutoffmax])*10
def zero_if_negative(x):
    """Clamp negative values to zero.

    Scalars: returns 0 if x < 0, otherwise x unchanged.
    Sequences: mutates x *in place* and, in addition to zeroing negatives,
    zeroes every element outside the index window [50, 250] regardless of
    sign -- presumably matched to the 300-frame eclipse slices selected by
    cutoffmin/cutoffmax above (confirm before reusing elsewhere).  Returns x.
    """
    import numbers  # local import keeps the module header untouched
    if isinstance(x, numbers.Number):
        # numbers.Number covers int/long/float/complex on both Python 2 and
        # Python 3; the old explicit (int, long, float, complex) tuple
        # raised NameError on Python 3, where `long` no longer exists.
        if x < 0:
            x = 0
    else:
        for idx in range(len(x)):
            if x[idx] < 0 or idx < 50 or idx > 250:
                x[idx] = 0
    return x
#theta <= pi/2 which implies mu = cos(theta) >= 0
#epsilon is a limb-darkening coefficient
#trapezoidally weighted averaging?
#try singular-value decomposition (SVD)= all components contribute linearly to the light curve
def WhiteDwarf(x, a, b, c, amp, epsilon):
    """Linearly limb-darkened white-dwarf light curve.

    The phase angle is the linear ramp a*x + b; mu = cos(phase) is clipped
    by zero_if_negative (which, for array input, also zeroes samples outside
    its fixed index window).  Intensity follows the linear limb-darkening
    law mu*(1 - epsilon + epsilon*mu), scaled by amp and offset by c.
    """
    phase = a * x + b
    mu = zero_if_negative(np.cos(phase))
    limb_darkening = 1 - epsilon + epsilon * mu
    return amp * mu * limb_darkening + c
# d is the distance along the line defining the bright spot
#beta and gamma are power-law exponents which allow some flexibility in how the brightness varies
#l is a scalelength
def BrightSpot(x, BSa, BSb, l, beta, gamma):
    """Surface brightness of the bright spot at position x.

    d = BSa*x + BSb is the (clipped) distance along the line defining the
    spot; brightness follows the profile (d/l)**beta * exp(-(d/l)**gamma)
    with scale length l.  (The maximum of this profile sits at
    d = l*(beta/gamma)**(1/gamma).)
    """
    distance = zero_if_negative(BSa * x + BSb)
    scaled = distance / l
    return scaled ** beta * exp(-scaled ** gamma)
def BSIntegrate(x, width, BSa, BSb, c, BSamp, l, beta, gamma):
    """Bright-spot light curve.

    For each sample of x, numerically integrate the spot's surface
    brightness over the trailing window [x_i - width, x_i], then scale by
    BSamp and offset by c.  Returns a list with one intensity per sample.
    """
    intensities = []
    for upper in x:
        lower = upper - width
        area, _abserr = integrate.quad(BrightSpot, lower, upper,
                                       args=(BSa, BSb, l, beta, gamma))
        intensities.append(BSamp * area + c)
    return intensities
def TotalFunction(x, a, b, c, amp, epsilon, width, BSa, BSb, BSamp, l, beta, gamma):
    """Full eclipse model (12 free parameters): white dwarf plus integrated
    bright spot.  The shared vertical offset c is applied once, through the
    white-dwarf term; the bright-spot integral is added with zero offset."""
    white_dwarf = WhiteDwarf(x, a, b, c, amp, epsilon)
    bright_spot = BSIntegrate(x, width, BSa, BSb, 0, BSamp, l, beta, gamma)
    return white_dwarf + bright_spot
def fitTotalFunction(x, data):
    """Least-squares fit of TotalFunction to the light curve.

    Starts from the module-level initial guess TFp0 and uses a hard-coded
    per-point sigma vector (60/130/110 layout, matching the eclipse-2 slice
    per the comment above) that weights the 130 mid-eclipse points ten
    times more heavily than the wings.  Returns the optimal parameters.
    """
    weights = [10] * 60 + [1] * 130 + [10] * 110
    popt, _pcov = optimize.curve_fit(TotalFunction, x, data, p0=TFp0, sigma=weights)
    print(TFp0)
    return popt
# Initial guess layout:
#                 a, b, c, amp,epsilon,width,BSa,BSb,BSamp,l,beta,gamma
TFp0=(1,2,10.5,-2.5,1,1,.8,-3,-1.5,1,1,2)
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.plot(x,BrightSpot(x, BSp0[0],BSp0[1],BSp0[2],BSp0[3],BSp0[4],BSp0[5],BSp0[6]),'r')
#ax.plot(a0List,BSIntensity/np.average(BSIntensity)*10,'k')
TFparams = fitTotalFunction(x,data)
print TFparams
#TFparams=TFp0
# Evaluate the model at the initial guess (TF0) and at the fitted
# parameters (TF), plus the white-dwarf component alone (WD).
TF0 = TotalFunction(x, TFp0[0],TFp0[1],TFp0[2],TFp0[3],TFp0[4],TFp0[5],TFp0[6],TFp0[7],TFp0[8],TFp0[9],TFp0[10],TFp0[11])
TF = TotalFunction(x, TFparams[0],TFparams[1],TFparams[2],TFparams[3],TFparams[4],TFparams[5],TFparams[6],TFparams[7],TFparams[8],TFparams[9],TFparams[10],TFparams[11])
WD = WhiteDwarf(x, TFparams[0],TFparams[1],TFparams[2],TFparams[3],TFparams[4])
# Mid-eclipse time from the fitted total light curve, converted back to JD
# (x is in units of 1e-3 JD since the first frame).
index_min=TF.argmin()
x_min=x[index_min]
eclipseTime = x_min/1000.+jd[0]
print eclipseTime
# Same, from the white-dwarf component only.
WDindex_min=WD.argmin()
WDx_min=x[WDindex_min]
WDeclipseTime = WDx_min/1000.+jd[0]
print WDeclipseTime
# Same, from the model evaluated at the initial guess.
index_min0=TF0.argmin()
x_min0=x[index_min0]
eclipseTime0 = x_min0/1000.+jd[0]
print eclipseTime0
ax.plot([jd[60],jd[190]],[np.average(data)]*2,'o')
ax.set_title(npzDataFile + ' eclipse 1, Fitted Min Of Total Light Curve = %f\n "error" = White Dwarf Fit Min-Total Light Fit Min = %f'%(eclipseTime0,np.abs(WDeclipseTime-eclipseTime)))
ax.plot(jd,TF,'r', label = 'Total Light Curve Fitted')
ax.plot(jd,WD,'b', label = 'White Dwarf Light Curve Fitted')
ax.plot(jd,BSIntegrate(x, TFparams[5],TFparams[6],TFparams[7],TFparams[2],TFparams[8],TFparams[9],TFparams[10],TFparams[11]),'g', label = 'Bright Spot Light Curve Fitted')
ax.plot(jd,WhiteDwarf(x, TFp0[0],TFp0[1],TFp0[2],TFp0[3],TFp0[4]),'b--',label = 'dashed lines use initial parameters')
ax.plot(jd,BSIntegrate(x, TFp0[5],TFp0[6],TFp0[7],TFp0[2],TFp0[8],TFp0[9],TFp0[10],TFp0[11]),'g--')
ax.plot(jd,TF0,'r--')
ax.legend()
ax.plot(jd,data,'k')
plt.show()
| bmazin/ARCONS-pipeline | examples/Pal2012-sdss/DwarfBrightSpotFit.py | Python | gpl-2.0 | 4,811 | [
"Gaussian"
] | 74da7a6a85d371c173dd2ab42a5199d622a61696a8ac31b9807cd63c3dc3d486 |
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# License: BSD 3 clause
from __future__ import division
import itertools
import numpy as np
from warnings import warn
from abc import ABCMeta, abstractmethod
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array
from ..utils.validation import DataConversionWarning
from .base import BaseEnsemble, _partition_estimators
__all__ = ["RandomForestClassifier",
           "RandomForestRegressor",
           "ExtraTreesClassifier",
           "ExtraTreesRegressor"]

# Largest int32 value; upper bound for the per-tree random seeds drawn in fit.
MAX_INT = np.iinfo(np.int32).max
def _parallel_build_trees(trees, forest, X, y, sample_weight, verbose):
    """Private function used to build a batch of trees within a job.

    Without bootstrap, each tree is fitted on the full (X, y).  With
    bootstrap, a resample is simulated by multiplying the sample weights by
    per-sample draw counts; `tree.indices_` records which samples were
    in-bag (used later for OOB scoring).
    """
    for position, tree in enumerate(trees):
        if verbose > 1:
            print("building tree %d of %d" % (position + 1, len(trees)))

        if not forest.bootstrap:
            tree.fit(X, y,
                     sample_weight=sample_weight,
                     check_input=False)
            continue

        n_samples = X.shape[0]
        if sample_weight is None:
            curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
        else:
            curr_sample_weight = sample_weight.copy()

        # Draw n_samples indices with replacement; weight each sample by
        # how many times it was drawn (zero weight == out-of-bag).
        random_state = check_random_state(tree.random_state)
        indices = random_state.randint(0, n_samples, n_samples)
        sample_counts = np.bincount(indices, minlength=n_samples)
        curr_sample_weight *= sample_counts

        tree.fit(X, y,
                 sample_weight=curr_sample_weight,
                 check_input=False)
        tree.indices_ = sample_counts > 0.

    return trees
def _parallel_predict_proba(trees, X, n_classes, n_outputs):
    """Private function used to compute a batch of predictions within a job.

    Sums per-tree class probabilities.  When a tree saw only a subset of
    the classes, its columns are scattered into the full-width array via
    `tree.classes_`.
    """
    n_samples = X.shape[0]

    if n_outputs == 1:
        proba = np.zeros((n_samples, n_classes))

        for tree in trees:
            tree_proba = tree.predict_proba(X)
            if n_classes == tree.n_classes_:
                proba += tree_proba
            else:
                proba[:, tree.classes_] += \
                    tree_proba[:, range(len(tree.classes_))]

        return proba

    proba = [np.zeros((n_samples, n_classes[k])) for k in range(n_outputs)]

    for tree in trees:
        tree_proba = tree.predict_proba(X)
        for k in range(n_outputs):
            if n_classes[k] == tree.n_classes_[k]:
                proba[k] += tree_proba[k]
            else:
                # NOTE(review): `tree.classes_` is not indexed by k here,
                # unlike `tree.n_classes_[k]` above -- confirm against the
                # multi-output subset-of-classes case.
                proba[k][:, tree.classes_] += \
                    tree_proba[k][:, range(len(tree.classes_))]

    return proba
def _parallel_predict_regression(trees, X):
    """Private function used to compute a batch of predictions within a job.

    Returns the elementwise sum of the per-tree predictions (0 for an
    empty batch); the caller divides by the total tree count to average.
    """
    total = 0
    for tree in trees:
        total = total + tree.predict(X)
    return total
def _parallel_apply(tree, X):
    """Private helper function for parallelizing calls to apply in a forest."""
    leaf_indices = tree.tree_.apply(X)
    return leaf_indices
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
                                    _LearntSelectorMixin)):
    """Base class for forests of trees.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0):
        super(BaseForest, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params)

        self.bootstrap = bootstrap
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose

    def apply(self, X):
        """Apply trees in the forest to X, return leaf indices.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input data.

        Returns
        -------
        X_leaves : array_like, shape = [n_samples, n_estimators]
            For each datapoint x in X and for each tree in the forest,
            return the index of the leaf x ends up in.
        """
        X = check_array(X, dtype=DTYPE)

        # Threading backend: tree_.apply releases the GIL in Cython.
        results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                           backend="threading")(
            delayed(_parallel_apply)(tree, X) for tree in self.estimators_)

        return np.array(results).T

    def fit(self, X, y, sample_weight=None):
        """Build a forest of trees from the training set (X, y).

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The training input samples.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)

        # Convert data
        # ensure_2d=False because there are actually unit test checking we fail
        # for 1d. FIXME make this consistent in the future.
        X = check_array(X, dtype=DTYPE, ensure_2d=False)

        # Remap output
        n_samples, self.n_features_ = X.shape

        y = np.atleast_1d(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warn("A column-vector y was passed when a 1d array was"
                 " expected. Please change the shape of y to "
                 "(n_samples, ), for example using ravel().",
                 DataConversionWarning, stacklevel=2)

        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))

        self.n_outputs_ = y.shape[1]

        # Subclass hook: classifiers remap labels to [0, n_classes) here.
        y = self._validate_y(y)

        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)

        # Check parameters
        self._validate_estimator()

        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available"
                             " if bootstrap=True")

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(self)

        # Each tree gets its own integer seed drawn from the forest's RNG so
        # the trees are decorrelated but the whole fit is reproducible.
        trees = []
        for i in range(self.n_estimators):
            tree = self._make_estimator(append=False)
            tree.set_params(random_state=random_state.randint(MAX_INT))
            trees.append(tree)

        # Free allocated memory, if any
        self.estimators_ = None

        # Parallel loop: we use the threading backend as the Cython code for
        # fitting the trees is internally releasing the Python GIL making
        # threading always more efficient than multiprocessing in that case.
        all_trees = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_build_trees)(
                trees[starts[i]:starts[i + 1]],
                self,
                X,
                y,
                sample_weight,
                verbose=self.verbose)
            for i in range(n_jobs))

        # Reduce
        self.estimators_ = list(itertools.chain(*all_trees))

        if self.oob_score:
            self._set_oob_score(X, y)

        # Decapsulate classes_ attributes
        if hasattr(self, "classes_") and self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]

        return self

    @abstractmethod
    def _set_oob_score(self, X, y):
        """Calculate out of bag predictions and score."""

    def _validate_y(self, y):
        # Default implementation
        return y

    @property
    def feature_importances_(self):
        """Return the feature importances (the higher, the more important the
        feature).

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise ValueError("Estimator not fitted, "
                             "call `fit` before `feature_importances_`.")

        # Average of the per-tree importances.
        return sum(tree.feature_importances_
                   for tree in self.estimators_) / self.n_estimators
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
                                          ClassifierMixin)):
    """Base class for forest of trees-based classifiers.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0):
        super(ForestClassifier, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)

    def _set_oob_score(self, X, y):
        n_classes_ = self.n_classes_
        n_samples = y.shape[0]

        oob_decision_function = []
        oob_score = 0.0
        predictions = []

        for k in xrange(self.n_outputs_):
            predictions.append(np.zeros((n_samples,
                                         n_classes_[k])))

        for estimator in self.estimators_:
            # OOB samples are those NOT drawn into this estimator's
            # bootstrap sample (indices_ marks the in-bag samples).
            mask = np.ones(n_samples, dtype=np.bool)
            mask[estimator.indices_] = False
            p_estimator = estimator.predict_proba(X[mask, :])

            if self.n_outputs_ == 1:
                p_estimator = [p_estimator]

            for k in xrange(self.n_outputs_):
                predictions[k][mask, :] += p_estimator[k]

        for k in xrange(self.n_outputs_):
            if (predictions[k].sum(axis=1) == 0).any():
                warn("Some inputs do not have OOB scores. "
                     "This probably means too few trees were used "
                     "to compute any reliable oob estimates.")

            # Normalize summed probabilities into a decision function,
            # then score accuracy of the argmax class against y.
            decision = (predictions[k] /
                        predictions[k].sum(axis=1)[:, np.newaxis])
            oob_decision_function.append(decision)
            oob_score += np.mean(y[:, k] ==
                                 np.argmax(predictions[k], axis=1), axis=0)

        if self.n_outputs_ == 1:
            self.oob_decision_function_ = oob_decision_function[0]
        else:
            self.oob_decision_function_ = oob_decision_function

        self.oob_score_ = oob_score / self.n_outputs_

    def _validate_y(self, y):
        # Remap each output's labels to consecutive integers, recording the
        # original labels in classes_ for decoding at predict time.
        y = np.copy(y)

        self.classes_ = []
        self.n_classes_ = []

        for k in xrange(self.n_outputs_):
            classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
            self.classes_.append(classes_k)
            self.n_classes_.append(classes_k.shape[0])

        return y

    def predict(self, X):
        """Predict class for X.

        The predicted class of an input sample is computed as the majority
        prediction of the trees in the forest.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes.
        """
        # ensure_2d=False because there are actually unit test checking we fail
        # for 1d.
        X = check_array(X, ensure_2d=False)
        n_samples = len(X)
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)

        else:
            predictions = np.zeros((n_samples, self.n_outputs_))

            for k in xrange(self.n_outputs_):
                predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
                                                                    axis=1),
                                                          axis=0)

            return predictions

    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the mean predicted class probabilities of the trees in the forest.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        # Check data
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = check_array(X, dtype=DTYPE)

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(self)

        # Parallel loop
        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_predict_proba)(
                self.estimators_[starts[i]:starts[i + 1]],
                X,
                self.n_classes_,
                self.n_outputs_)
            for i in range(n_jobs))

        # Reduce: sum the per-job probability sums, then divide by the
        # number of trees to obtain the mean.
        proba = all_proba[0]

        if self.n_outputs_ == 1:
            for j in xrange(1, len(all_proba)):
                proba += all_proba[j]

            proba /= len(self.estimators_)

        else:
            for j in xrange(1, len(all_proba)):
                for k in xrange(self.n_outputs_):
                    proba[k] += all_proba[j][k]

            for k in xrange(self.n_outputs_):
                proba[k] /= self.n_estimators

        return proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample is computed as
        the log of the mean predicted class probabilities of the trees in the
        forest.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(proba)

        else:
            for k in xrange(self.n_outputs_):
                proba[k] = np.log(proba[k])

            return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
    """Base class for forest of trees-based regressors.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0):
        super(ForestRegressor, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)

    def predict(self, X):
        """Predict regression target for X.

        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y: array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted values.
        """
        # Check data
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = check_array(X, dtype=DTYPE)

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(self)

        # Parallel loop: each job returns the sum of its trees' predictions.
        all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_predict_regression)(
                self.estimators_[starts[i]:starts[i + 1]], X)
            for i in range(n_jobs))

        # Reduce: mean over all trees.
        y_hat = sum(all_y_hat) / len(self.estimators_)

        return y_hat

    def _set_oob_score(self, X, y):
        # Accumulate each sample's OOB predictions together with the number
        # of contributing trees, then average and score with R^2.
        n_samples = y.shape[0]

        predictions = np.zeros((n_samples, self.n_outputs_))
        n_predictions = np.zeros((n_samples, self.n_outputs_))

        for estimator in self.estimators_:
            mask = np.ones(n_samples, dtype=np.bool)
            mask[estimator.indices_] = False
            p_estimator = estimator.predict(X[mask, :])

            if self.n_outputs_ == 1:
                p_estimator = p_estimator[:, np.newaxis]

            predictions[mask, :] += p_estimator
            n_predictions[mask, :] += 1

        if (n_predictions == 0).any():
            warn("Some inputs do not have OOB scores. "
                 "This probably means too few trees were used "
                 "to compute any reliable oob estimates.")
            # Avoid division by zero for samples with no OOB prediction.
            n_predictions[n_predictions == 0] = 1

        predictions /= n_predictions
        self.oob_prediction_ = predictions

        if self.n_outputs_ == 1:
            self.oob_prediction_ = \
                self.oob_prediction_.reshape((n_samples, ))

        self.oob_score_ = 0.0

        for k in xrange(self.n_outputs_):
            self.oob_score_ += r2_score(y[:, k],
                                        predictions[:, k])

        self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
    """A random forest classifier.

    A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and use averaging to
    improve the predictive accuracy and control over-fitting.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.

    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
        Note: this parameter is tree-specific.

    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_samples_leaf`` is not None.
        Note: this parameter is tree-specific.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves.  A split is
        discarded if after the split, one of the leaves would contain less then
        ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.

    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.

    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    Attributes
    ----------
    `estimators_`: list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    `classes_`: array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).

    `n_classes_`: int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).

    `feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).

    `oob_score_` : float
        Score of the training dataset obtained using an out-of-bag estimate.

    `oob_decision_function_` : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.

    References
    ----------

    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.

    See also
    --------
    DecisionTreeClassifier, ExtraTreesClassifier
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 min_density=None,
                 compute_importances=None):
        # estimator_params lists the names forwarded to each sub-tree when
        # BaseEnsemble._make_estimator clones the base estimator.
        super(RandomForestClassifier, self).__init__(
            base_estimator=DecisionTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)

        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes

        # Both parameters below are accepted only for backward compatibility
        # and have no effect; they emit deprecation warnings.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)

        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
class RandomForestRegressor(ForestRegressor):
    """A random forest regressor.
    A random forest is a meta estimator that fits a number of regression
    decision trees on various sub-samples of the dataset and uses averaging
    to improve the predictive accuracy and control over-fitting.
    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error.
        Note: this parameter is tree-specific.
    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less
        than ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.
    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.
    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    Attributes
    ----------
    `estimators_`: list of DecisionTreeRegressor
        The collection of fitted sub-estimators.
    `feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
    `oob_score_` : float
        Score of the training dataset obtained using an out-of-bag estimate.
    `oob_prediction_` : array of shape = [n_samples]
        Prediction computed with out-of-bag estimate on the training set.
    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
    See also
    --------
    DecisionTreeRegressor, ExtraTreesRegressor
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 min_density=None,
                 compute_importances=None):
        # Ensemble-level arguments go to the base class; the names in
        # estimator_params are forwarded to each DecisionTreeRegressor.
        super(RandomForestRegressor, self).__init__(
            base_estimator=DecisionTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)
        # Tree-specific parameters, picked up via estimator_params above.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        # Deprecated constructor arguments: accepted for backward
        # compatibility, their only effect is the deprecation warning.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)
        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
class ExtraTreesClassifier(ForestClassifier):
    """An extra-trees classifier.
    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
    and control over-fitting.
    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
        Note: this parameter is tree-specific.
    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less
        than ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.
    bootstrap : boolean, optional (default=False)
        Whether bootstrap samples are used when building trees.
    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    Attributes
    ----------
    `estimators_`: list of DecisionTreeClassifier
        The collection of fitted sub-estimators.
    `classes_`: array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).
    `n_classes_`: int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).
    `feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
    `oob_score_` : float
        Score of the training dataset obtained using an out-of-bag estimate.
    `oob_decision_function_` : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.
    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    See also
    --------
    sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
    RandomForestClassifier : Ensemble Classifier based on trees with optimal
        splits.
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 min_density=None,
                 compute_importances=None):
        # Ensemble-level arguments go to the base class; the names in
        # estimator_params are forwarded to each ExtraTreeClassifier.
        super(ExtraTreesClassifier, self).__init__(
            base_estimator=ExtraTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes", "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)
        # Tree-specific parameters, picked up via estimator_params above.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        # Deprecated constructor arguments: accepted for backward
        # compatibility, their only effect is the deprecation warning.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)
        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
class ExtraTreesRegressor(ForestRegressor):
    """An extra-trees regressor.
    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
    and control over-fitting.
    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error.
        Note: this parameter is tree-specific.
    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less
        than ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.
    bootstrap : boolean, optional (default=False)
        Whether bootstrap samples are used when building trees.
        Note: this parameter is tree-specific.
    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    Attributes
    ----------
    `estimators_`: list of DecisionTreeRegressor
        The collection of fitted sub-estimators.
    `feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
    `oob_score_` : float
        Score of the training dataset obtained using an out-of-bag estimate.
    `oob_prediction_` : array of shape = [n_samples]
        Prediction computed with out-of-bag estimate on the training set.
    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    See also
    --------
    sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
    RandomForestRegressor: Ensemble regressor using trees with optimal splits.
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 min_density=None,
                 compute_importances=None):
        # Ensemble-level arguments go to the base class; the names in
        # estimator_params are forwarded to each ExtraTreeRegressor.
        super(ExtraTreesRegressor, self).__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)
        # Tree-specific parameters, picked up via estimator_params above.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        # Deprecated constructor arguments: accepted for backward
        # compatibility, their only effect is the deprecation warning.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)
        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
class RandomTreesEmbedding(BaseForest):
    """An ensemble of totally random trees.
    An unsupervised transformation of a dataset to a high-dimensional
    sparse representation. A datapoint is coded according to which leaf of
    each tree it is sorted into. Using a one-hot encoding of the leaves,
    this leads to a binary coding with as many ones as trees in the forest.
    The dimensionality of the resulting representation is approximately
    ``n_estimators * 2 ** max_depth``.
    Parameters
    ----------
    n_estimators : int
        Number of trees in the forest.
    max_depth : int
        The maximum depth of each tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less
        than ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.
    sparse_output: bool, optional (default=True)
        Whether or not to return a sparse CSR matrix, as default behavior,
        or to return a dense array compatible with dense pipeline operators.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    Attributes
    ----------
    `estimators_`: list of DecisionTreeClassifier
        The collection of fitted sub-estimators.
    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    .. [2] Moosmann, F. and Triggs, B. and Jurie, F.  "Fast discriminative
           visual codebooks using randomized clustering forests"
           NIPS 2007
    """
    def __init__(self,
                 n_estimators=10,
                 max_depth=5,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_leaf_nodes=None,
                 sparse_output=True,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 min_density=None):
        # Bootstrap and OOB scoring are hard-wired off: the embedding is
        # unsupervised, so there is no generalization error to estimate.
        super(RandomTreesEmbedding, self).__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=False,
            oob_score=False,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)
        # criterion/max_features are fixed, not user-settable: each split
        # looks at a single feature (max_features=1), which is what makes
        # the trees "totally random".
        self.criterion = 'mse'
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = 1
        self.max_leaf_nodes = max_leaf_nodes
        self.sparse_output = sparse_output
        # Deprecated constructor argument: accepted for backward
        # compatibility, its only effect is the deprecation warning.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)
    def _set_oob_score(self, X, y):
        # Out-of-bag estimation is meaningless without bootstrap sampling.
        raise NotImplementedError("OOB score not supported by tree embedding")
    def fit(self, X, y=None, sample_weight=None):
        """Fit estimator.
        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            Input data used to build forests.
        y : ignored
            Present for pipeline API consistency; fit_transform replaces it
            with a random target.
        sample_weight : array-like, shape=(n_samples,) or None
            Per-sample weights forwarded to the underlying forest fit.
        Returns
        -------
        self : object
            Returns self.
        """
        self.fit_transform(X, y, sample_weight=sample_weight)
        return self
    def fit_transform(self, X, y=None, sample_weight=None):
        """Fit estimator and transform dataset.
        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            Input data used to build forests.
        y : ignored
            Overwritten below by a uniform random target.
        sample_weight : array-like, shape=(n_samples,) or None
            Per-sample weights forwarded to the underlying forest fit.
        Returns
        -------
        X_transformed: sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        # ensure_2d=False because there are actually unit test checking we fail
        # for 1d.
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
        rnd = check_random_state(self.random_state)
        # The regressor trees need *some* target to grow on; a random one
        # keeps the splits independent of any supervised signal.
        y = rnd.uniform(size=X.shape[0])
        super(RandomTreesEmbedding, self).fit(X, y,
                                              sample_weight=sample_weight)
        # One-hot encode the per-tree leaf indices returned by apply().
        self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
        return self.one_hot_encoder_.fit_transform(self.apply(X))
    def transform(self, X):
        """Transform dataset.
        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            Input data to be transformed.
        Returns
        -------
        X_transformed: sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        # Uses the encoder fitted in fit_transform; fit must be called first.
        return self.one_hot_encoder_.transform(self.apply(X))
| eickenberg/scikit-learn | sklearn/ensemble/forest.py | Python | bsd-3-clause | 54,513 | [
"Brian"
] | 8f7273f67a4224423cfe025a3b214d40c7babf3c80e05d2b93df77e1c076de12 |
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from camelot.admin.action import ActionStep
from camelot.core.exception import CancelRequest
class UpdateProgress( ActionStep ):
    """
    Inform the user about the progress the application is making
    while executing an action. This ActionStep is not blocking. So it can
    be used inside transactions and will result in a minimum of delay when
    yielded. Each time an object is yielded, the progress dialog will be
    updated.

    .. image:: /_static/controls/progress_dialog.png

    :param value: the current step
    :param maximum: the maximum number of steps that will be executed. set it
        to 0 to display a busy indicator instead of a progres bar
    :param text: the text to be displayed inside the progres bar
    :param detail: the text to be displayed below the progres bar, this text is
        appended to the text already there
    :param clear_details: clear the details text already there before putting
        the new detail text.
    """

    # Non-blocking: the GUI thread applies this step without waiting for a
    # reply from the model thread.
    blocking = False

    def __init__( self,
                  value=0,
                  maximum=0,
                  text=None,
                  detail=None,
                  clear_details=False ):
        super(UpdateProgress, self).__init__()
        self._value = value
        self._maximum = maximum
        self._text = text
        self._detail = detail
        self._clear_details = clear_details

    def __unicode__( self ):
        return u'Update Progress {0._value:03d}/{0._maximum:03d} {0._text}'.format( self )

    def gui_run( self, gui_context ):
        """This method will update the progress dialog, if such dialog exists
        within the GuiContext

        :param gui_context: a :class:`camelot.admin.action.GuiContext` instance
        :raises CancelRequest: when the user has pressed cancel on the dialog
        """
        progress_dialog = gui_context.progress_dialog
        if progress_dialog:
            if progress_dialog.wasCanceled():
                # Reset the dialog before raising, so the cancelled state is
                # not seen again by a subsequent action step.
                progress_dialog.reset()
                raise CancelRequest()
            progress_dialog.setMaximum( self._maximum )
            progress_dialog.setValue( self._value )
            # Identity comparison against None (was `!= None`), per PEP 8 and
            # robust against text objects that override __eq__.
            if self._text is not None:
                progress_dialog.setLabelText( unicode(self._text) )
            # NOTE(review): _detail and _clear_details are stored in __init__
            # but never applied here -- confirm whether the dialog's detail
            # area should be updated as the docstring implies.
| jeroendierckx/Camelot | camelot/view/action_steps/update_progress.py | Python | gpl-2.0 | 3,204 | [
"VisIt"
] | 8f692df7420aba0bcbd887ae3227061aa454ea7968ba19960fbcfe0072df4085 |
from rpl.tools.api import test_bench_api
__author__ = 'ab27'
import json, logging, os, sys
import _winreg
import numpy as np
import matplotlib.pyplot as plt
from rpl.tools.api import test_bench_api as tba
import utils.part_info as pinf
import matplotlib.pyplot as plt
from matplotlib.tri import Triangulation
from matplotlib.patches import Wedge
####
# Define reused physical constants
g = 9.807 # m/s**2 - ensure that units are always consistent with this!
hatch_types = {'Hatch_Assembly_Cargo', 'Hatch_Assembly_Driver_Commander'}
#######
# Setup for VU test bench environment
def query_analysis_tools():
    """Read the META AnalysisTools registry hive into a nested dict.

    Returns a dict mapping each tool sub-key name to a dict of that key's
    registry value names and data (e.g. install locations).
    """
    root_path = r'SOFTWARE\Wow6432Node\META\AnalysisTools'
    root = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, root_path, 0,
                           _winreg.KEY_READ | _winreg.KEY_WOW64_32KEY)
    tools = {}
    # QueryInfoKey()[0] is the number of sub-keys under the opened key.
    for key_index in range(_winreg.QueryInfoKey(root)[0]):
        tool_name = _winreg.EnumKey(root, key_index)
        tool_key = _winreg.OpenKey(root, tool_name)
        values = {}
        # QueryInfoKey()[1] is the number of values stored on the key.
        for value_index in range(_winreg.QueryInfoKey(tool_key)[1]):
            value_name, value_data = _winreg.EnumValue(tool_key, value_index)[:2]
            values[value_name] = value_data
        tools[tool_name] = values
    return tools
# TODO: Uncomment next line when deploying to VU environment
#tools = query_analysis_tools()
#sys.path.append(os.path.join(tools['hatch_force']['InstallLocation']))
def excepthook(*args):
    """Log any uncaught exception through the root logger.

    Intended as a replacement for ``sys.excepthook``; ``args`` is the usual
    ``(exc_type, exc_value, traceback)`` triple, passed straight through to
    the logging machinery as ``exc_info``.
    """
    root_logger = logging.getLogger()
    root_logger.error('Uncaught exception: ', exc_info=args)
def get_parts_of_interest(part_type="all"):
    """
    Specify the parts of interest for fetching data via API
    Return separate dictionaries for separate hatch and spring types- allows script to adapt
    based on what components are actually present

    :param part_type: "all" (default) to return the query spec for every
        supported component type, or the name of a single supported type.
    :returns: dict mapping component type name -> query specification dict
    :raises ValueError: if ``part_type`` is not a supported component type.
    """
    hatch_prms = {"Property": ['hinge_max_articulation_angle'],
                  "ModelMetric": ['LATCH_POSITION', 'TOTAL_MASS'],
                  "Datum": ['CG_CSYS']}
    # TODO: Don't use cg_location; unreliable for parametric hatches. Get position from CG_CSYS
    # for now
    # TODO: uses separate coord system markings for Cmdr vs Cargo hatches for now
    # NOTE: .copy() is shallow, so the "Property"/"ModelMetric" lists stay
    # shared between the hatch specs; only the "Datum" entry is replaced.
    htc_cmdr = hatch_prms.copy()
    htc_cmdr['Datum'] = ['CG_CSYS', 'EXT_SPRING_3']
    htc_cargo = hatch_prms.copy()
    htc_cargo["Datum"] = ['CG_CSYS', 'EXT_SPRING_1']
    poss_parts = {"Hatch_Assembly_Driver_Commander": htc_cmdr,
                  "Hatch_Assembly_Cargo": htc_cargo,
                  "Spring_Torsion_Laminated":
                      {"Property": ['installed_preload_angle', 'laminae_thickness',
                                    'laminae_width', 'max_shear_stress', 'number_of_laminae',
                                    'shear_modulus']},
                  "Spring_Axial_Helical":
                      {"Property": ['coil_diameter', 'free_length', 'number_of_coils',
                                    'shear_modulus', 'wire_diameter']}}
    if part_type == "all":
        return poss_parts
    try:
        return {part_type: poss_parts[part_type]}
    except KeyError:
        # ValueError is a subclass of the generic Exception raised before,
        # so existing except-clauses keep working while callers can now
        # catch the failure precisely.
        raise ValueError(
            "Requested an unrecognized component type in get_parts_of_interest")
class Assembly_Info(object):
    """Top-level view of the vehicle assembly relevant to hatch forces.

    Gathers, per hatch found in the design:
    - which hatch and spring types are present,
    - how they are connected,
    - relative geometry of parts (spring positions relative to hatch).

    The total opening torque has three contributions: the hatch mass, any
    torsion bar/laminated springs, and any axial spring inputs.
    """
    def __init__(self):
        self.hatches = []
        self.get_hatches()
        # Fail fast if the queried design contains no hatch components.
        assert len(self.hatches) > 0, "No hatches were found in this assembly."
    def get_hatches(self):
        """Query the test bench API and build one Hatch_Info per hatch."""
        for hatch_type in hatch_types:
            # One query per hatch type: commander and cargo hatches expose
            # different datum markers, so they cannot share a single query.
            type_data = tba.get_data(get_parts_of_interest(hatch_type))[hatch_type]
            self.hatches.extend(pinf.Hatch_Info(data, name)
                                for name, data in type_data.items())
    def get_axial_springs(self):
        # TODO: May need to reorganize this in order to get springs per hatch.
        # Func org will depend on connectivity file data org and how api
        # queries.
        pass
    def get_torsion_bars(self):
        pass
def hatch_torque_t30(hatch):
    """Torque needed to open the hatch against its own weight alone.

    Evaluated at each opening angle in ``hatch.angle_range``; returns a
    numpy array with one torque value per angle.
    """
    # TODO: this assumes all calculations are in a certain unit and no
    # conversion is required.
    # NOTE(review): depends on the module-level globals `g` and `phi`; `phi`
    # is only assigned inside the __main__ block -- confirm before importing
    # this function from another module.
    moment_arms = []
    for opening_angle in hatch.angle_range:
        moment_arms.append(hatch.r_cg * np.cos(hatch.ang_closed + opening_angle + phi))
    return hatch.mass * g * np.array(moment_arms)
def plt_geom(hatch_geom):
    """
    Plot the 2D cross section of a hatch, showing:
    - Side view of hatch
    - Marked CG point in 2D projection
    - Hinge position, marked

    :param hatch_geom: dict with coordinate arrays 'x', 'y', 'z' and a
        triangle connectivity array 'tris'. A homogeneous-coordinate
        (n, 4) 'nodes' array is added to the dict as a side effect.
    """
    # Merge xyz vectors into a padded node array first, so any later coord
    # transformations can be applied with a single 4x4 matrix product.
    pad_vec = np.ones(hatch_geom['x'].shape)
    hatch_geom['nodes'] = np.vstack((hatch_geom['x'], hatch_geom['y'],
                                     hatch_geom['z'], pad_vec)).T
    plt.figure()
    # BUG FIX: the y coordinates and triangle connectivity previously came
    # from the global `this_geom` instead of the `hatch_geom` argument, so
    # the function only worked by accident inside the __main__ loop.
    tr = Triangulation(hatch_geom['nodes'][:, 0], hatch_geom['nodes'][:, 1],
                       triangles=hatch_geom['tris'])
    # Multiplying tr.x by 0.0 fills the whole mesh with a single color.
    plt.tricontourf(tr, tr.x * 0.0)
    plt.gca().set_aspect('equal')
    plt.show()
if __name__ == "__main__":
#############
# User specified settings from separate file
with open(r"settings.js", "r") as f:
SETTINGS = json.load(f)
## Set up logging
logging.info("\n"+50*"_"+'\nTest bench started\n'+50*"_")
# Uncaught exceptions and assertions should write error to log
sys.excepthook = excepthook
# Initialize API with settings
tba.load_settings(SETTINGS)
logging.debug("Data loader API initialized")
phi = 0.0
assembly = Assembly_Info()
geo = {}
for t in hatch_types:
geo.update(tba.load_geometry({t}))
show3d = False
if show3d:
from mayavi import mlab
from itertools import cycle
hatch_colors = cycle([(.8, .4, .4),
(.4, .8, .4),
(.4, .4, .8),
(.8, .8, .4)])
for h in assembly.hatches:
hatch_torques = hatch_torque_t30(h)
this_geom = geo[h.part_id]
pad_vec = np.ones(this_geom['x'].shape)
this_geom['nodes'] = np.vstack((this_geom['x'],this_geom['y'],
this_geom['z'], pad_vec)).T
# Coordinates should align with hinge csys
transformed_crds = np.dot(this_geom['nodes'], h.hinge_csys)
if show3d:
# Plot cg pt projected onto desired plane in blue
mlab.points3d(*h.cg_pt, scale_factor=0.1, color=(0, 0, 1))
# Plot CG point (in 3d) from cg_csys in green
mlab.points3d(*h.cg_csys[:-1, 3], scale_factor=0.1, color=(0, 1, 0))
# Plot hinge csys center pt (in 3d) in white
mlab.points3d(*h.hinge_csys[:-1, 3], scale_factor=0.1)
# Plot hatch geom in an appropriate color
h_geo = geo[h.part_id]
mlab.triangular_mesh(h_geo['x'], h_geo['y'], h_geo['z'], h_geo['tris'],
color=hatch_colors.next())
# Plot lines corresponding to at very least the xy plane that we're projecting cg
# points onto
pln_csys = h.hinge_csys
pln_orig = pln_csys[:-1, 3]
v_x = np.dot(pln_csys[:3, :3], np.array([1, 0, 0]))
v_y = np.dot(pln_csys[:3, :3], np.array([0, 1, 0]))
v_z = np.dot(pln_csys[:3, :3], np.array([0, 0, 1]))
# vec = orig--> orig + vec
x_line_end = pln_orig + v_x
y_line_end = pln_orig + v_y
z_line_end = pln_orig + v_z
mlab.plot3d(*zip(pln_orig, x_line_end), line_width=2, color=(1, 0, 0))
mlab.plot3d(*zip(pln_orig, y_line_end), line_width=2, color=(0, 1, 0))
mlab.plot3d(*zip(pln_orig, z_line_end), line_width=2, color=(0, 0, 1))
else:
# Plot hatch opening force (max)
plt.figure()
# TODO: plot visualization should be relative to the marked hinge coord system
tr = Triangulation(-this_geom['x'], this_geom['y'], triangles=this_geom['tris'])
# Plot filled contours, and mult tr.x by 0 because that makes everything be 1 color.
plt.tricontourf(tr, tr.x*0.0)
plt.gca().set_aspect('equal')
# Superimpose interesting hatch points on this figure
# Want the coords in 2d, so...
# Imperfect HACK in case the z coord isn't the one == 0
# Plot the CG point overlaid on the plane. Marker is blue circle
print "cg pt", h.cg_pt[:2]
plt.plot(-h.cg_pt[0], h.cg_pt[1], marker='o', color='b')
# Plot the hinge center pt. Marker is red triangle
print "hcsys all", h.hinge_csys[:-1, 3]
plt.plot(-h.hinge_csys[0, 3], h.hinge_csys[1, 3], marker='^', color='r')
if show3d:
mlab.show()
else:
plt.show() | pombredanne/metamorphosys-desktop | metamorphosys/META/analysis_tools/PYTHON_RICARDO/output_closures/scripts/hatch_visualization.py | Python | mit | 10,391 | [
"Mayavi"
] | 12e8b0f980de3137020ae8bee4a4a404e1fc6c0ea4690f26bd8505be92951e70 |
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2014 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.6.1"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
try:
result = self._resolve()
except ImportError:
# See the nice big comment in MovedModule.__getattr__.
raise AttributeError("%s could not be imported " % self.name)
setattr(obj, self.name, result) # Invokes __set__.
# This is a bit ugly, but it avoids running this again.
delattr(obj.__class__, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
# It turns out many Python frameworks like to traverse sys.modules and
# try to load various attributes. This causes problems if this is a
# platform-specific module on the wrong platform, like _winreg on
# Unixes. Therefore, we silently pretend unimportable modules do not
# have any attributes. See issues #51, #53, #56, and #63 for the full
# tales of woe.
#
# First, if possible, avoid loading the module just to look at __file__,
# __name__, or __path__.
if (attr in ("__file__", "__name__", "__path__") and
self.mod not in sys.modules):
raise AttributeError(attr)
try:
_module = self._resolve()
except ImportError:
raise AttributeError(attr)
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
#MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("cPickle", "rdkit._py2_pickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "xmlrpclib", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
sys.modules[__name__ + ".moves." + attr.name] = attr
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
sys.modules[__name__ + ".moves.urllib_parse"] = sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
sys.modules[__name__ + ".moves.urllib_error"] = sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
sys.modules[__name__ + ".moves.urllib_request"] = sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
sys.modules[__name__ + ".moves.urllib_response"] = sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
sys.modules[__name__ + ".moves.urllib_robotparser"] = sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
parse = sys.modules[__name__ + ".moves.urllib_parse"]
error = sys.modules[__name__ + ".moves.urllib_error"]
request = sys.modules[__name__ + ".moves.urllib_request"]
response = sys.modules[__name__ + ".moves.urllib_response"]
robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"]
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib")
def add_move(move):
    """Add an item to six.moves.

    ``move`` is a MovedAttribute or MovedModule instance; it becomes
    available as ``six.moves.<move.name>``.
    """
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves.

    First drops the lazy descriptor from the _MovedItems class; if the
    attribute was already resolved it lives in the module instance's
    __dict__ instead. Raises AttributeError if neither location has it.
    """
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
# Alias the builtin next() where it exists (Python >= 2.6); otherwise fall
# back to calling the iterator's .next() method directly.
try:
    advance_iterator = next
except NameError:
    def advance_iterator(it):
        return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
def iterbytes(buf):
return (ord(byte) for byte in buf)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, *bases):
    """Return a throwaway base class named ``NewBase`` built by ``meta``.

    Subclassing the result gives the subclass the metaclass ``meta`` on both
    Python 2 and Python 3, without using either version's incompatible
    native metaclass syntax.
    """
    namespace = {}
    return meta("NewBase", bases, namespace)
def add_metaclass(metaclass):
    """Class decorator that re-creates the decorated class under ``metaclass``.

    The class body is copied as-is, except for descriptors the VM installs
    automatically (``__dict__``/``__weakref__``) and any ``__slots__`` member
    descriptors, which would otherwise be duplicated on the rebuilt class.
    """
    def wrapper(cls):
        body = dict(cls.__dict__)
        for implicit in ('__dict__', '__weakref__'):
            body.pop(implicit, None)
        slots = body.get('__slots__')
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            for slot_name in slots:
                body.pop(slot_name)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
# added as part of the RDKit port
if PY3:
    def cmp(t1, t2):
        """Python-2-style three-way comparison: -1, 0 or 1."""
        # Canonical idiom: bool subtraction yields -1/0/1.
        return (t1 > t2) - (t1 < t2)
else:
    cmp = cmp
| soerendip42/rdkit | rdkit/six.py | Python | bsd-3-clause | 23,870 | [
"RDKit"
] | 66b15d6535fdf1abaa7332c5604a45dbaaf1b2fa624393d80d96dd9eb7867768 |
import random
import time

import psycopg2
import Image
import ImageDraw
from visual import *
#-------- database code ------------
db = psycopg2.connect(host='csr-dyn-24.mit.edu', user='mwa', password='BowTie')
curs = db.cursor()
curs.execute('select tile_id,tile_pos_east,tile_pos_north from tile_position')
c = curs.fetchall()
#--------- 2D graphic code ----------
img = Image.new('RGB', (800, 800), (255, 255, 255))
draw = ImageDraw.Draw(img)
for tile in c:
    # Renamed from 'id' to avoid shadowing the builtin.
    tile_id, east, north = tile
    # Placeholder status: ~90% of tiles drawn green (ok), ~10% red (bad).
    if random.random() > 0.1:
        tcol = (0, 255, 0)
    else:
        tcol = (255, 0, 0)
    # Scale tile positions to pixels and centre them in the 800x800 image;
    # the -4 shifts so the 8-px square is centred on the tile position.
    xp = east * 2 + 400 - 4
    yp = north * 2 + 400 - 4
    draw.rectangle((xp, yp, xp + 8, yp + 8), fill=tcol, outline=(0, 0, 0))
    # str() replaces the Python-2-only backquote repr syntax.
    draw.text((xp + 8, yp + 8), str(tile_id), fill=(0, 0, 0))
img.save('mwa.gif')
#----------- 3D graphic creation --------------
scene = display(title='MWA status', x=0, y=0, uniform=1, width=1500, height=1000)
ground = box(pos=(0.0, 0.0, -0.1), size=(360.0, 360.0, 0.1), color=color.blue)
tiles = {}
for tile in c:
    tile_id, east, north = tile
    tiles[tile_id] = box(pos=(east, north, 0.5), size=(4.0, 4.0, 1.0), color=color.white)
#------------ 3D graphic animation -------------
time.sleep(1)
# BUG FIX: the loop condition was 'true', which is not a Python name and
# raises NameError (unless the old VPython star-import happened to supply it).
while True:
    for t in tiles.keys():
        if random.random() > 0.1:
            tiles[t].color = color.green
        else:
            tiles[t].color = color.red
    rate(1)
| ryandougherty/mwa-capstone | MWA_Tools/mwapy/obssched/extras/tilestat.py | Python | gpl-2.0 | 1,330 | [
"Bowtie"
] | 64744cdf98efc7a16652e33a24038dc1ecab34590a83f3614b1034fb98cfe9c1 |
import numpy as np
import logging
import random
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams['font.family'] = 'DejaVu Sans'
import seaborn as sns
import datetime
sns.set_style("whitegrid", {'axes.grid' : False})
def gaussian(filter_size=[16,16], offset=[0,0]):
    """Build a 2D Gaussian filter whose peak value is 0.6.

    Parameters
    ----------
    filter_size : array-like of 2 ints
        (width, height) of the filter in pixels. sigma is derived from the
        width as (width - 1) / 8.
    offset : array-like of 2 ints
        (x, y) displacement of the peak from the filter centre.

    Returns
    -------
    numpy.ndarray, shape (filter_size[0], filter_size[1])
        Gaussian window, rescaled so its maximum equals 0.6.
    """
    width, height = filter_size
    off_x, off_y = offset
    sigma = (width - 1) / 8
    grid_x, grid_y = np.meshgrid(np.arange(0, width), np.arange(0, height))
    centre = [(width + off_x * 2 - 1) / 2, (height + off_y * 2 - 1) / 2]
    window = np.exp(-0.5 * (((grid_x - centre[0]) / sigma)**2 + ((grid_y - centre[1]) / sigma)**2))
    # Scale to a (half) Gaussian density, then renormalize so the peak is 0.6.
    scaled = 0.5 * window / (np.pi * sigma ** 2)
    scaled = 0.6 * scaled / scaled.max()
    return scaled.T
def mexican_hat(filter_size=[16,16], offset=[0,0]):
    """Build a 2D Mexican-hat (Laplacian-of-Gaussian shaped) filter.

    Parameters
    ----------
    filter_size : array-like of 2 ints
        (width, height) of the filter; sigma is derived from the width as
        (width - 1) / 8.
    offset : array-like of 2 ints
        (x, y) displacement of the peak from the filter centre.

    Returns
    -------
    numpy.ndarray, shape (filter_size[1], filter_size[0])
        Mexican-hat filter (positive centre, negative surround).
    """
    width, height = filter_size
    off_x, off_y = offset
    sigma = (width - 1) / 8
    grid_x, grid_y = np.meshgrid(np.arange(0, width), np.arange(0, height))
    centre = [(width + off_x * 2 - 1) / 2, (height + off_y * 2 - 1) / 2]
    # Squared distance from the centre in units of sigma.
    window = np.exp(-0.5 * (((grid_x - centre[0]) / sigma)**2 + ((grid_y - centre[1]) / sigma)**2))
    hat = 1 / ( np.pi * sigma ** 2) * (1 - (((grid_x - centre[0]) / sigma)**2 + ((grid_y - centre[1]) / sigma)**2)) * window
    return hat
def gabor(filter_size=[16,16], offset=[0,0], theta=np.pi/4, phi=np.pi/2, sigma=[0.1, 0.1]):
    """Build a 2D Gabor filter (Gaussian window times a sinusoidal carrier).

    Parameters
    ----------
    filter_size : array-like of 2 ints
        (width, height) of the filter in pixels.
    offset : array-like of 2 ints
        (x, y) displacement of the envelope centre from the filter centre.
    theta : float
        Orientation of the elongated major axis, in radians.
    phi : float
        Phase offset of the carrier (0 = cosine wave; pi/2 = sine wave).
    sigma : array-like of 2 floats
        Envelope size as fractions of the filter dimensions.

    Returns
    -------
    numpy.ndarray, shape (filter_size[0], filter_size[1])
        The Gabor filter.
    """
    width, height = filter_size
    off_x, off_y = offset
    spatial_freq = 1 / 6  # spatial frequency of the carrier
    grid_x, grid_y = np.meshgrid(np.arange(0, width), np.arange(0, height))
    centre = [(width + off_x * 2 - 1) / 2, (height + off_y * 2 - 1) / 2]
    stdevs = [width * sigma[0], height * sigma[1]]
    # Rotate coordinates into the filter's own frame.
    rot_x = (grid_x - centre[0]) * np.cos(theta) + (grid_y - centre[1]) * np.sin(theta)
    rot_y = - (grid_x - centre[0]) * np.sin(theta) + (grid_y - centre[1]) * np.cos(theta)
    envelope = np.exp(-0.5 * ((rot_x / stdevs[0]) ** 2 + (rot_y / stdevs[1]) ** 2))
    carrier = np.sin(2 * np.pi * spatial_freq * rot_y - phi)
    return (envelope * carrier).T
def square_patch(filter_size=[6,6], offset=[0,0]):
    """Return a binary mask with a small square of ones around the (offset)
    centre: a single cell when the centre falls on a grid point, otherwise
    the 2x2 cells straddling it."""
    patch = np.zeros(filter_size)
    width, height = filter_size
    off_x, off_y = offset
    centre = [(width + off_x * 2 - 1) / 2, (height + off_y * 2 - 1) / 2]
    row_lo, col_lo = np.floor(centre).astype(int)
    row_hi, col_hi = np.ceil(centre).astype(int) + 1
    patch[row_lo:row_hi, col_lo:col_hi] = 1
    return patch
class get_subunits():
def __init__(self, num_units=4, filter_size=[6,6], filter='square', T=10000, dt=0.01, R=10, weight='uniform', locations_fixed=True):
"""
Parameters
==========
filter_size: array
size of filter / receptive field
filter: string
the type of filter:
* 'square' : square_patch()
* 'gaussian': gaussian()
* 'gabor' : gabor()
* 'mexicanhat' :mexican_hat()
T: int
number of time steps
dt: float
size of time step in s
R: int
firing rate in Hz
"""
self.num_units = num_units
self.ndims = filter_size
offset_xlim = filter_size[0] / 2 - 1
offset_ylim = filter_size[1] / 2 - 1
self.subunits = {}
if locations_fixed:
def get_offset(circles=50):
# circles = 3
dx = np.array([2, 0 , -2, 0])
dy = np.roll(dx, 1)
all_coords = np.array([0, 0])
circle_s = all_coords - np.ones(2)
for i in range(circles):
circle_coords = circle_s.copy()
moves = np.array([dx.repeat(i+1), dy.repeat(i+1)]).T
for move in moves:
circle_c = circle_s + move
circle_coords = np.vstack([circle_coords, circle_c])
circle_s = circle_c
circle_s += -np.ones(2)
all_coords = np.vstack([all_coords, circle_coords[:-1]])
return all_coords.T
xoff_arr, yoff_arr = get_offset()
for i in range(num_units):
if locations_fixed:
xoff = xoff_arr[i]
yoff = yoff_arr[i]
else:
xoff = np.random.choice([-1, 1])* np.random.randint(offset_xlim+1)
yoff = np.random.choice([-1, 1])* np.random.randint(offset_ylim+1)
if filter is 'square':
self.subunits['S{}'.format(i)] = square_patch(filter_size, offset=[xoff, yoff])
elif filter is 'gaussian':
self.subunits['S{}'.format(i)] = gaussian(filter_size, offset=[xoff, yoff])
elif filter is 'gabor':
self.subunits['S{}'.format(i)] = gabor(filter_size, offset=[xoff, yoff], theta=np.pi/4, phi=np.pi/2, sigma=[0.1, 0.1])
elif filter is 'mexican':
self.subunits['S{}'.format(i)] = mexican_hat(filter_size, offset=[xoff, yoff])
if weight is 'random':
linear_weights = np.random.uniform(low=0.3, high=1, size=num_units)
elif weight is 'uniform':
linear_weights = np.ones(num_units) / num_units
self.subunit_weights = linear_weights
def generate_spikes(subunits_dict, weights):
ks = np.hstack([value.flatten()[:,np.newaxis] for value in subunits_dict.values()])
k = np.dot(ks, weights)
stimuli_train = np.random.normal(0,1,(filter_size[0] * filter_size[1], T))
stimuli_test = np.random.normal(0,1,(filter_size[0] * filter_size[1], T))
# rs_train = np.exp(np.dot(ks.T, stimuli_train)) * dt * R
rs_train = np.exp(np.maximum(0, np.dot(ks.T, stimuli_train))) * dt * R
r_train = np.dot(weights.T, rs_train)
spikes_train = np.random.poisson(r_train)
# spikes_train = np.random.poisson(np.maximum(1, r_train))
rs_test = np.exp(np.maximum(0, np.dot(ks.T, stimuli_test))) * dt * R
r_test = np.dot(weights.T, rs_test)
spikes_test = np.random.poisson(r_test)
return {"stimuli_train": stimuli_train,
"r_train": r_train,
"spikes_train": spikes_train,
"stimuli_test": stimuli_test,
"r_test": r_test,
"spikes_test": spikes_test,
}
spikes_dict = generate_spikes(self.subunits, linear_weights)
self.stimuli_train = spikes_dict["stimuli_train"]
self.r_train = spikes_dict["r_train"]
self.spikes_train = spikes_dict["spikes_train"]
self.stimuli_test = spikes_dict["stimuli_test"]
self.r_test = spikes_dict["r_test"]
self.spikes_test = spikes_dict["spikes_test"]
self.spikes_statistics = {'T':T,
'dt': dt,
'R': R}
ste = np.zeros((self.stimuli_train.shape[0],0))
for i,n in enumerate(self.spikes_train):
if n>0:
ss = self.stimuli_train[:,i]
tmp = np.kron(np.ones((1,n)),ss[:,np.newaxis])
ste = np.concatenate((ste,tmp),axis=1)
self.ste = ste.T
self.sta = np.mean(self.ste,axis=0)
# self.ks = np.hstack([value.flatten()[:,np.newaxis] for value in self.subunits.values()])
# self.k = np.dot(self.ks, linear_weights)
# self.stimuli = np.random.normal(0,1,(filter_size[0] * filter_size[1], T))
# rs = np.exp(np.dot(self.ks.T, self.stimuli)) * dt * R
# r = np.dot(linear_weights.T, rs)
# self.r = r
# self.spikes = np.random.poisson(r)
# self.spikes_statistics = {'T':T,
# 'dt': dt,
# 'R': R}
# ste = np.zeros((self.stimuli.shape[0],0))
# for i,n in enumerate(self.spikes):
# if n>0:
# ss = self.stimuli[:,i]
# tmp = np.kron(np.ones((1,n)),ss[:,np.newaxis])
# ste = np.concatenate((ste,tmp),axis=1)
# self.ste = ste.T
# self.sta = np.mean(self.ste,axis=0)
def plot_subunits(self, num_per_row=8, cmap=plt.cm.gray_r, savefig=None):
num_subplots = self.num_units
M = [value.flatten()[:,np.newaxis] for value in self.subunits.values()]
if num_subplots is 1:
plt.figure()
plt.imshow(M[0].reshape(self.ndims), cmap=cmap)
elif num_subplots/num_per_row <= 1:
fig, ax = plt.subplots(1, num_per_row, figsize=(num_per_row * 2,4))
for i in range(num_per_row):
if i < num_subplots:
ax[i].imshow(M[i].reshape(self.ndims), cmap=cmap)
ax[i].axes.get_xaxis().set_visible(False)
ax[i].axes.get_yaxis().set_visible(False)
ax[i].set_title('Subunit {}'.format(i))
else:
ax[i].axes.get_xaxis().set_visible(False)
ax[i].axes.get_yaxis().set_visible(False)
ax[i].axis('off')
else:
fig, ax = plt.subplots(np.ceil(num_subplots / num_per_row).astype(int), num_per_row, figsize=(num_per_row * 2, 4 * np.ceil(num_subplots / num_per_row).astype(int)))
for i in range(np.ceil(num_subplots / num_per_row).astype(int)):
for j in range(num_per_row):
idx = int(i*num_per_row+j)
if idx < num_subplots:
ax[i,j].imshow(M[idx].reshape(self.ndims), cmap=cmap)
ax[i,j].axes.get_xaxis().set_visible(False)
ax[i,j].axes.get_yaxis().set_visible(False)
ax[i,j].set_title('Subunit {}'.format(idx))
else:
ax[i,j].axes.get_xaxis().set_visible(False)
ax[i,j].axes.get_yaxis().set_visible(False)
ax[i,j].axis('off')
if savefig:
timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
if savefig is 'png':
plt.savefig('subunits_{}.png'.format(timestamp))
elif savefig is 'pdf':
plt.savefig('subunits_{}.pdf'.format(timestamp))
elif savefig is 'eps':
plt.savefig('subunits_{}.eps'.format(timestamp))
def plot_spikes(self):
plt.figure()
t = np.arange(0,self.spikes_statistics['T']) * self.spikes_statistics['dt'] # time
plt.plot(t, self.spikes_train, )
plt.xlabel('Time (s)')
plt.ylabel('Number of spikes')
plt.title('Spikes')
    def plot_rf(self, cmap=plt.cm.gray_r):
        """Plot the spike-triggered average (self.sta) as a receptive-field image."""
        plt.figure()
        plt.imshow(self.sta.reshape(self.ndims), cmap=cmap)
        plt.title('STA') | huangziwei/pyMF3 | pymf3/datasets/_subunits.py | Python | mit | 12,559 | [
"Gaussian"
] | fa5f14d1e08540ed757b3df42dfa9c689231420b43e1c476fdf17a0a80f6028e |
#! /usr/bin/env python
#
# update_po - a gramps tool to update translations
#
# Copyright (C) 2006-2006 Kees Bakker
# Copyright (C) 2006 Brian Matherly
# Copyright (C) 2008 Stephen George
# Copyright (C) 2012
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
update_man.py for command line documentation.
Examples:
python update_man.py -t
Tests if 'sphinx' and 'python' are well configured.
"""
from __future__ import print_function
import os
import sys
from argparse import ArgumentParser
# docutils is optional: the -m/--man and -o/--odt code paths need it,
# the sphinx-driven builds do not.
DOCUTILS = True
try:
    import docutils.core, docutils.writers
except:
    # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
    # `except ImportError:` would be safer here.
    DOCUTILS = False
# Language codes with translated manuals to build.
LANGUAGES = ['sv', 'nl', 'pl', 'cs', 'pt_BR', 'fr']
VERSION = '4.0.0'
DATE = ''
# You can set these variables from the command line.
SPHINXBUILD = 'sphinx-build'
# Resolve the python/sphinx executables for the current platform.
if sys.platform == 'win32':
    pythonCmd = os.path.join(sys.prefix, 'bin', 'python.exe')
    sphinxCmd = os.path.join(sys.prefix, 'bin', 'sphinx-build.exe')
elif sys.platform in ['linux2', 'darwin', 'cygwin']:
    pythonCmd = os.path.join(sys.prefix, 'bin', 'python')
    sphinxCmd = SPHINXBUILD
else:
    # Unsupported platform: bail out before any build command is attempted.
    print ("Update Man ERROR: unknown system, don't know sphinx, ... commands")
    sys.exit(0)
def tests():
    """
    Testing installed programs.

    We made tests (-t flag) by displaying versions of tools if properly
    installed. Cannot run all commands without 'sphinx' and 'python'.
    """
    try:
        print("\n=================='python'=============================\n")
        os.system('''%(program)s -V''' % {'program': pythonCmd})
    except OSError:
        # Narrowed from a bare `except:`, which also hid KeyboardInterrupt.
        print ('Please, install python')
    try:
        # Fixed banner typo: 'Shpinx-build' -> 'Sphinx-build'.
        print("\n=================='Sphinx-build'=============================\n")
        os.system('''%(program)s''' % {'program': sphinxCmd})
    except OSError:
        print ('Please, install sphinx')
    if not DOCUTILS:
        print('\nNo docutils support, cannot use -m/--man and -o/--odt arguments.')
def main():
    """
    The utility for handling documentation stuff.

    What is needed by Gramps, nothing more. Parses the command line and
    dispatches to tests()/build()/man()/odt().
    """
    parser = ArgumentParser(
                         # Fixed help-text typo: 'realted' -> 'related'.
                         description='This program aims to handle documentation'
                                     ' and related translated versions.',
                         )
    # NOTE: default=True means the environment tests always run,
    # regardless of whether -t was passed (kept for compatibility).
    parser.add_argument("-t", "--test",
          action="store_true", dest="test", default=True,
          help="test if 'python' and 'sphinx' are properly installed")
    parser.add_argument("-b", "--build",
          action="store_true", dest="build", default=False,
          help="build man documentation (via sphinx-build)")
    parser.add_argument("-m", "--man",
          action="store_true", dest="man", default=False,
          help="build man documentation (via docutils)")
    parser.add_argument("-o", "--odt",
          action="store_true", dest="odt", default=False,
          help="build odt documentation (via docutils)")
    args = parser.parse_args()

    if args.test:
        tests()
    if args.build:
        build()
    # The docutils-backed targets are skipped when docutils is missing.
    if args.man and DOCUTILS:
        man()
    if args.odt and DOCUTILS:
        odt()
def build():
    """Run every sphinx build: first for the default language, then one
    pass per translated language in LANGUAGES."""
    prog = {'program': sphinxCmd}
    # testing stage
    os.system('''%(program)s -b html . _build/html''' % prog)
    os.system('''%(program)s -b htmlhelp . _build/htmlhelp''' % prog)
    if DOCUTILS:
        os.system('''%(program)s -b man . .''' % prog)
        os.system('''%(program)s -b text . _build/text''' % prog)
    os.system('''%(program)s -b changes . _build/changes''' % prog)
    #os.system('''%(program)s -b linkcheck . _build/linkcheck''' % prog)
    os.system('''%(program)s -b gettext . _build/gettext''' % prog)

    for lang in LANGUAGES:
        fmt = {'lang': lang, 'program': sphinxCmd}
        os.system('''%(program)s -b html -D language="%(lang)s" master_doc="%(lang)s" %(lang)s %(lang)s''' % fmt)
        os.system('''%(program)s -b htmlhelp -D language="%(lang)s" master_doc="%(lang)s" %(lang)s %(lang)s''' % fmt)
        if DOCUTILS:
            os.system('''%(program)s -b man %(lang)s %(lang)s''' % fmt)
            os.system('''%(program)s -b text -D language="%(lang)s" master_doc="%(lang)s" %(lang)s %(lang)s''' % fmt)
        # for update/migration
        os.system('''%(program)s -b gettext -D language="%(lang)s" master_doc="%(lang)s" . _build/gettext/%(lang)s''' % fmt)
def man():
    """Generate man pages with docutils' rst2man: English first, then
    one page per translated language in LANGUAGES.

    (Equivalent to: from docutils.core import publish_cmdline;
    from docutils.writers import manpage.)
    """
    os.system('rst2man en.rst gramps.1')
    for code in LANGUAGES:
        os.system('rst2man {0}/{0}.rst -l {0} {0}/gramps.1'.format(code))
def odt():
    """Generate ODT documents with docutils' rst2odt: English first,
    then one document per translated language in LANGUAGES.

    (Equivalent to: from docutils.core import publish_cmdline_to_binary;
    from docutils.writers.odf_odt import Writer, Reader.)
    """
    os.system('rst2odt en.rst gramps.odt')
    for code in LANGUAGES:
        os.system('rst2odt {0}/{0}.rst -l {0} {0}/gramps.odt'.format(code))
# Script entry point.
if __name__ == "__main__":
    main()
| Forage/Gramps | data/man/update_man.py | Python | gpl-2.0 | 6,354 | [
"Brian"
] | d1be9a98ed21cd4a7f51eb3f4b000350caf07797b07e4cc46afb85e58d9a5e02 |
#!/usr/bin/env python
"""
This is a simple installation script for casual users of pymatgen who simply
plan to use pymatgen as a basic analysis library and is not planning to
develop on it. This script should work on most Linux and Mac systems that
have Python 2.7+ installed and setuptools installed. These are the only
required pre-requisites. Once those are installed, the script should take
care of the remainder of the installation process.
There are only a few options in this script. Please note that you probably
have to *run all commands with sudo* for the installation to proceed correctly.
Simply running:
./pmg_install
will install pymatgen with the basic dependencies.
Running:
./pmg_install -f
will install pymatgen with a few more optional packages and also start an
initial setup process that guides you through basic configuration
for POTCAR and Materials API support.
Report any issues or suggestions for this script to shyuep@gmail.com.
"""
__author__ = "Shyue Ping Ong"
__version__ = "1.0"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 28, 2013"
import sys
import subprocess
import urllib
import os
import shutil
def build_enum(fortran_command="gfortran"):
    """Download, unpack and compile the 'enumlib' Fortran library.

    Builds celib first (enumlib depends on it), then enumlib itself,
    copies the multienum.x / makestr.x executables back to the starting
    directory, and removes the whole build tree. Returns True on
    success, False if any step raised.
    """
    enumlib_url = "http://downloads.sourceforge.net/project/enum/enum/enum.tar.gz"
    currdir = os.getcwd()
    state = True
    try:
        os.makedirs("enumlib")
        os.chdir("enumlib")
        # NOTE: urllib.urlretrieve is the Python 2 API (urllib.request in py3).
        urllib.urlretrieve(enumlib_url, "enum.tar.gz")
        subprocess.call(["tar", "-zxf", "enum.tar.gz"])
        # celib must be built before enumlib.
        os.chdir("celib")
        os.chdir("trunk")
        os.environ["F90"] = fortran_command
        subprocess.call(["make"])
        os.chdir(os.path.join("..", ".."))
        enumpath = os.path.join("enumlib", "trunk")
        os.chdir(enumpath)
        subprocess.call(["make"])
        for f in ["multienum.x", "makestr.x"]:
            subprocess.call(["make", f])
            # Three levels up from enumlib/enumlib/trunk is the original
            # working directory.
            shutil.copy(f, os.path.join("..", "..", ".."))
    except Exception as ex:
        print(str(ex))
        state = False
    finally:
        # Always restore the working directory and remove the build tree.
        os.chdir(currdir)
        shutil.rmtree("enumlib")
    return state
def build_bader(fortran_command="gfortran"):
    """Download, compile and install the 'bader' charge-analysis binary.

    The archive is fetched into the current directory, built inside the
    extracted 'bader' folder, and the finished executable ends up as a
    file named 'bader' in the starting directory. Returns True on
    success, False if any step raised.
    """
    archive = "bader.tar.gz"
    start_dir = os.getcwd()
    succeeded = True
    try:
        urllib.urlretrieve("http://theory.cm.utexas.edu/bader/download/bader.tar.gz", archive)
        subprocess.call(["tar", "-zxf", archive])
        os.chdir("bader")
        # Pick the makefile matching the requested fortran compiler.
        subprocess.call(["cp", "makefile.osx_" + fortran_command, "makefile"])
        subprocess.call(["make"])
        # Stash the freshly built binary one level up before cleanup.
        shutil.copy("bader", os.path.join("..", "bader_exe"))
        os.chdir("..")
        shutil.rmtree("bader")
        os.remove(archive)
        shutil.move("bader_exe", "bader")
    except Exception as ex:
        print(str(ex))
        succeeded = False
    finally:
        os.chdir(start_dir)
    return succeeded
# ---- environment checks: Python 2.7.x, setuptools, gcc are mandatory ----
py_ver = sys.version_info
print("Detected Python version {}".format(".".join(map(str, py_ver))))
if py_ver < (2, 7) or py_ver >= (2, 8):
    print("Python version 2.7+ required. Download and install the necessary "
          "python version from http://www.python.org/download/.")
    sys.exit(-1)
try:
    import setuptools
    print("Detected setuptools version {}".format(setuptools.__version__))
except ImportError:
    print("setuptools not detected. Get it from https://pypi.python"
          ".org/pypi/setuptools and follow the instructions to install first.")
    sys.exit(-1)
try:
    # Only probing for gcc's presence; the captured output is unused.
    gcc_ver = subprocess.Popen(["gcc", "--version"], stdout=subprocess.PIPE)\
        .communicate()[0]
except:
    # NOTE(review): bare except; OSError is what Popen raises when the
    # binary is missing.
    print("gcc not found in PATH. gcc is needed for installation of numpy "
          "and C extensions. For Mac users, please install Xcode and its "
          "corresponding command-line tools first.")
    sys.exit(-1)
# ---- bootstrap pip and numpy if absent ----
try:
    import pip
    print("Detected pip version {}".format(pip.__version__))
except ImportError:
    print("pip not detected. Installing...")
    subprocess.call(["easy_install", "pip"])
try:
    import numpy
    from numpy.distutils.misc_util import get_numpy_include_dirs
    print("Detected numpy version {}".format(numpy.__version__))
except ImportError:
    print("numpy.distutils.misc_util cannot be imported. Installing...")
    subprocess.call(["pip", "install", "-q", "numpy>=1.6.0"])
    from numpy.distutils.misc_util import get_numpy_include_dirs
# ---- required dependencies: try pip first, fall back to easy_install ----
for pk in ["pyhull>=1.3.6", "pyyaml", "PyCifRW>=3.3", "requests>=1.0",
           "pybtex>=0.16"]:
    print("Installing {}".format(pk))
    ret = subprocess.call(["pip", "install", "-q", pk])
    if ret != 0:
        ret = subprocess.call(["easy_install", pk])
        if ret != 0:
            print("Error installing required dependency {}".format(pk))
            sys.exit(-1)
# Bare `print` is the Python 2 statement form; emits a blank line.
print
if subprocess.call(["pip", "install", "pymatgen"]) != 0:
    print("Error installing pymatgen")
    sys.exit(-1)
print
enum = False
bader = False
# ---- optional extras (-f flag): scipy/matplotlib/ASE plus the fortran
# builds of enumlib and bader when a compiler is available ----
if "-f" in sys.argv:
    for pk in ["scipy", "matplotlib>1.1"]:
        if subprocess.call(["pip", "install", pk]) != 0:
            print("Unable to install {}. Skipping...".format(pk))
    if subprocess.call([
            "pip", "install", "-Ivq",
            "https://wiki.fysik.dtu.dk/ase-files/python-ase-3.6.0.2515.tar.gz"]
    ) != 0:
        print("Unable to install ASE. Skipping...")
    print
    # Prefer ifort over gfortran when both are present.
    fortran_command = None
    if subprocess.call(["ifort", "--version"]) == 0:
        print("Found ifort")
        fortran_command = "ifort"
    elif subprocess.call(["gfortran", "--version"]) == 0:
        print("Found gfortran")
        fortran_command = "gfortran"
    if fortran_command is not None:
        print("Building enumlib")
        enum = build_enum(fortran_command)
        print
        print("Building bader")
        bader = build_bader(fortran_command)
        print
    else:
        print("No fortran compiler found. Skipping enumlib and bader build.")
# ---- interactive POTCAR setup (Ctrl-C skips) and final instructions ----
print("Performing POTCAR setup. Press Ctrl-C at any prompt to skip this "
      "step.")
try:
    subprocess.call(["potcar_setup"])
except:
    print("Skipping POTCAR setup.")
print
print("------------ Setup complete --------------")
print("You still need to perform a few manual changes.")
print
if enum or bader:
    print("Please add {} to your PATH or move the executables multinum.x, "
          "makestr.x and bader to a location in your PATH."
          .format(os.path.abspath(".")))
    print
print("To use the Materials API, get your Materials API key at "
      "https://www.materialsproject.org/profile and add it to your "
      "environment")
print("export MAPI_KEY=YOUR_API_KEY")
print | yanikou19/pymatgen | docs/_static/pmg_install.py | Python | mit | 6,589 | [
"ASE",
"pymatgen"
] | fba908d7253988de821258792ef205af956012566bc5169f726755b01c670bc4 |
import sys
from cStringIO import StringIO
from time import time,sleep, gmtime
from django.conf import settings
from django import http
from django.core.mail import mail_admins
from django.core.exceptions import SuspiciousOperation
from django.dispatch import Signal
from userdata import UserData
from theapps.supervisor.sites import Site
from theapps.supervisor.cookie import CookieSigner
try:
# Use affinity classes from the configured module
from django.importlib import import_module
mod = import_module(settings.AFFINITY_MODULE)
Affinity, AffinityAccess, get_secret = mod.Affinity, mod.AffinityAccess, mod.get_secret
except ImportError:
from theapps.supervisor.affinity import Affinity, AffinityAccess, get_secret
def to_cookie_domain(http_host):
    """Return the cookie domain for *http_host*: the host with a single
    leading 'media' or 'www' label stripped, unchanged otherwise."""
    labels = http_host.split(".")
    if labels[0] in ('media', 'www'):  #TODO improve test to work off configuration
        labels = labels[1:]
    return ".".join(labels)
# Signals fired by DeviceMiddleware when affinity / affinity-access
# cookies are first generated or replaced after a rejected signature.
affinity_generated_signal = Signal(providing_args=["request","response"])
affinity_replaced_signal = Signal(providing_args=["request","response"])
affinity_access_generated_signal = Signal(providing_args=["request","response"])
affinity_access_replaced_signal = Signal(providing_args=["request","response"])
class DeviceMiddleware(object):
    """
    Ensures that the devices has an affinity cookie
    affinity.generated First visit
    affinity.replaced Cookie rejected, affinity replaced
    affinity.changed Cookie changed, and will be set in response
    """
    # Signer for the device affinity cookie.
    affinity_signer = CookieSigner(settings.AFFINITY_COOKIE_NAME,constructor=Affinity,get_secret=get_secret)
    # Signer for the affinity-access cookie; its envelope also binds the
    # device identity.
    access_signer = CookieSigner(settings.AFFINITY_ACCESS_COOKIE_NAME,constructor=AffinityAccess,get_secret=get_secret,message_envelope="%(key)s:%(value)s:%(identity)s")
    def process_request(self, request):
        try:
            request.affinity = self.affinity_signer.input(request.COOKIES)
        except SuspiciousOperation:
            # Bad/forged cookie: mint a fresh affinity and mark it replaced.
            request.affinity = Affinity(meta=request.META)
            request.affinity.replaced = True
            request.affinity.generated = False
        else:
            # NOTE(review): this branch runs on a *successful* cookie read,
            # yet it discards the value just assigned from input() and
            # creates a brand-new Affinity -- confirm whether the intent
            # was to keep the decoded cookie here.
            request.affinity = Affinity(meta=request.META)
            request.affinity.replaced = False
        try:
            # NOTE(review): reads the access cookie but calls
            # affinity_signer.input, while access_signer (defined above and
            # used in process_response) looks intended -- verify.
            request.affinity_access = self.affinity_signer.input(request.COOKIES,additional={'identity':request.affinity})
        except SuspiciousOperation:
            request.affinity_access = AffinityAccess(meta=request.META,identity=request.affinity)
            request.affinity_access.replaced = True
            request.affinity_access.generated = False
        else:
            # NOTE(review): same overwrite pattern as above; also note the
            # identity= argument passed in the except branch is omitted here.
            request.affinity_access = AffinityAccess(meta=request.META)
            request.affinity_access.replaced = False
        return None
    def process_response(self, request, response):
        # Cookies are scoped to the host with any 'www'/'media' label removed.
        cookie_domain = to_cookie_domain(request.META.get('HTTP_HOST',settings.DOMAINS[0]))
        if request.affinity.changed:
            if request.affinity.generated:
                affinity_generated_signal.send(self,request=request,response=response)
            if request.affinity.replaced:
                affinity_replaced_signal.send(self,request=request,response=response)
            self.affinity_signer.output(response,request.affinity,expires=settings.AFFINITY_EXPIRY,domain=cookie_domain)
        if request.affinity_access.changed:
            if request.affinity_access.generated:
                affinity_access_generated_signal.send(self,request=request,response=response)
            if request.affinity_access.replaced:
                affinity_access_replaced_signal.send(self,request=request,response=response)
            #TODO HttpOnly flag
            self.access_signer.output(response,request.affinity_access,expires=settings.AFFINITY_EXPIRY,domain=cookie_domain,additional={'identity':request.affinity})
        return response
# Tracks whether RequestSite has already been monkey-patched.
site_patched = False

def patch_site():
    """Monkey-patch django's RequestSite so its constructor takes the
    site from the request when one is attached, falling back to the
    default site otherwise."""
    from django.contrib.sites import models

    def _request_site_init(self, request):
        if hasattr(request, 'site'):
            site = request.site
        else:
            site = Site.objects.get_default_site()
        self.domain = site.domain
        self.name = site.name

    models.RequestSite.__init__ = _request_site_init
    global site_patched
    site_patched = True
class SiteMiddleware(object):
    """Attach the matching Site to each request, optionally pick a
    per-subdomain urlconf, and report the request duration in an
    X-TimeSpent response header."""

    def process_request(self, request):
        request.is_ajax = request.META.get('HTTP_X_REQUESTED_WITH', None) == 'XMLHttpRequest'
        http_host = request.META.get('HTTP_HOST', settings.DOMAINS[0])
        request.site = Site.objects.get_site(http_host)
        # Patch RequestSite lazily, once per process.
        if not site_patched:
            patch_site()
        #if not hasattr(request.site,"robot_rules"):
        #    request.site.robot_rules = get_rules_for_site(request.site)
        if hasattr(settings, 'URLCONFS'):
            #TODO proper get subdomain defaulting to www
            leading_label = request.META['HTTP_HOST'].split('.')[0]
            if leading_label in settings.URLCONFS:
                request.urlconf = settings.URLCONFS[leading_label]
        request.start_time = time()
        return None

    def process_response(self, request, response):
        if hasattr(request, 'start_time'):
            response['X-TimeSpent'] = str(time() - request.start_time)
        return response
class UserDataMiddleware(object):
    """Serialize per-user data into a cookie on every response."""
    def process_response(self,request,response):
        # NOTE(review): old_userdata is decoded but never compared against
        # new_userdata, and the `if True:` below means the cookie is re-set
        # on every response. The note string at the end of this class says
        # "set cookie if changes or old is None" -- confirm the intended
        # condition before tightening.
        old_userdata = UserData(affinity=request.affinity,encoded=request.COOKIES.get(settings.USERDATA_COOKIE_NAME, None))
        new_userdata = UserData(affinity=request.affinity,user=request.user)
        if True:
            # max_age/expires of None make this a browser-session cookie.
            max_age = None
            expires = None
            response.set_cookie(settings.USERDATA_COOKIE_NAME,
                new_userdata.encoded, max_age=max_age,
                expires=expires, domain=None,
                path=settings.USERDATA_COOKIE_PATH,
                secure=settings.USERDATA_COOKIE_SECURE or None)
        return response
    """
    encode userdata, set cookie if changes or old is None
    """
class ProfilerMiddleware(object):
    """Profile a view with cProfile when the request originates from an
    internal IP and carries a 'prof' query parameter; the profiler
    report replaces the response body as preformatted HTML."""

    def _should_profile(self, request):
        # Only internal callers that explicitly opt in via ?prof.
        return (request.META['REMOTE_ADDR'] in settings.INTERNAL_IPS
                and 'prof' in request.GET)

    def process_view(self, request, callback, callback_args, callback_kwargs):
        if self._should_profile(request):
            import cProfile
            self.profiler = cProfile.Profile()
            return self.profiler.runcall(callback, request, *callback_args, **callback_kwargs)

    def process_response(self, request, response):
        if self._should_profile(request):
            self.profiler.create_stats()
            captured = StringIO()
            # Temporarily redirect stdout to capture print_stats output.
            previous_stdout, sys.stdout = sys.stdout, captured
            self.profiler.print_stats(1)
            sys.stdout = previous_stdout
            response.content = '<pre>%s</pre>' % captured.getvalue()
        return response
# Temporary, from http://code.djangoproject.com/attachment/ticket/6094/6094.2008-02-01.diff
from django.core.urlresolvers import RegexURLResolver
def resolver(request):
    """Build a RegexURLResolver for the request's urlconf, falling back
    to settings.ROOT_URLCONF when the request carries none."""
    from django.conf import settings
    if hasattr(request, "urlconf"):
        chosen_urlconf = request.urlconf
    else:
        chosen_urlconf = settings.ROOT_URLCONF
    return RegexURLResolver(r'^/', chosen_urlconf)
| thepian/theapps | theapps/supervisor/middleware.py | Python | gpl-3.0 | 7,573 | [
"VisIt"
] | 638e102d758ae84f75e23ed53af4333aeb8f8456b94e88c6107ed8c3eb379f7f |
from __future__ import print_function
import sys
import random
import os
import numpy as np
import scipy
import math
from scipy import stats
from builtins import range
import time
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.grid.grid_search import H2OGridSearch
class TestGLMGaussian:
"""
This class is created to test the GLM algo with Gaussian family. In this case, the relationship
between the response Y and the predictor vector X is assumed to be Y = W^T * X + E where E is
unknown Gaussian noise. We generate random data set using the exact formula. Since we know
what the W is and there are theoretical solutions to calculating W, p-values, we know the solution
to W/p-values/MSE for test and training data set for each randomly generated
data set. Hence, we are able to evaluate the H2O GLM Model generated using the same random data
sets. When regularization and other parameters are enabled, theoretical solutions are no longer
available. However, we can still evaluate H2O GLM model performance by comparing the MSE from H2O model
and to the theoretical limits since they all are using the same data sets. As long as they do not
deviate too much, we consider the H2O model performance satisfactory. In particular, I have
written 8 tests in the hope to exercise as many parameters settings of the GLM algo with Gaussian
distribution as possible. Tomas has requested 2 tests to be added to test his new feature of
missing_values_handling for predictors with both categorical/real columns. Here is a list of
all tests:
test1_glm_and_theory(): theoretical values for weights, p-values and MSE are calculated.
H2O GLM model is built Gaussian family with the same random data set. We compare
the weights, p-values, MSEs generated from H2O with theory.
test2_glm_lambda_search(): test lambda search with alpha set to 0.5 per Tomas's
suggestion. Make sure MSEs generated here is comparable in value to H2O
GLM model with no regularization.
test3_glm_grid_search_over_params(): test grid search with over
various alpha values while lambda is set to be the best value obtained
from test 2. The best model performance hopefully will generate MSEs
close to H2O with no regularization in test 1.
test4_glm_remove_collinear_columns(): test parameter remove_collinear_columns=True
with lambda set to best lambda from test 2, alpha set to best alpha from Gridsearch
and solver set to the one which generate the smallest test MSEs. The same data set
is used here except that we randomly choose predictor columns to repeat and scale.
Make sure MSEs generated here is comparable in value to H2O GLM with no
regularization.
test5_missing_values(): Test parameter missing_values_handling="MeanImputation" with
only real value predictors. The same data sets as before is used. However, we
go into the predictor matrix and randomly decide to change a value to be
nan and create missing values. Since no regularization is enabled in this case,
we are able to calculate a theoretical weight/p-values/MSEs where we can
compare our H2O models with.
test6_enum_missing_values(): Test parameter missing_values_handling="MeanImputation" with
mixed predictors (categorical/real value columns). We first generate a data set that
contains a random number of columns of categorical and real value columns. Next, we
encode the categorical columns. Then, we generate the random data set using the formula
Y = W^T * X+ E as before. Next, we go into the predictor matrix (before encoding) and randomly
decide to change a value to be nan and create missing values. Since no regularization
is enabled in this case, we are able to calculate a theoretical weight/p-values/MSEs
where we can compare our H2O models with.
test7_missing_enum_values_lambda_search(): Test parameter
missing_values_handling="MeanImputation" with mixed predictors (categorical/real value columns).
We first generate a data set that contains a random number of columns of categorical and real
value columns. Next, we encode the categorical columns using true one hot encoding. Then,
we generate the random data set using the formula Y = W^T * X+ E as before. Next, we go into
the predictor matrix (before encoding) and randomly decide to change a value to be nan and
create missing values. Lambda-search will be enabled with alpha set to 0.5. Since the
encoding is different in this case than in test6, we will compute a theoretical weights/MSEs
and compare the best H2O model MSEs with theoretical calculations and hope that they are close.
"""
# parameters set by users, change with care
max_col_count = 100 # set maximum values of train/test row and column counts
max_col_count_ratio = 200 # set max row count to be multiples of col_count to avoid over fitting
min_col_count_ratio = 50 # set min row count to be multiples of col_count to avoid over fitting
###### for debugging
# max_col_count = 5 # set maximum values of train/test row and column counts
# max_col_count_ratio = 50 # set max row count to be multiples of col_count to avoid overfitting
# min_col_count_ratio = 10
max_p_value = 50 # set maximum predictor value
min_p_value = -50 # set minimum predictor value
max_w_value = 50 # set maximum weight value
min_w_value = -50 # set minimum weight value
enum_levels = 5 # maximum number of levels for categorical variables not counting NAs
family = 'gaussian' # this test is for Gaussian GLM
curr_time = str(round(time.time()))
# parameters denoting filenames of interested that store training/validation/test data sets in csv format
training_filename = family+"_"+curr_time+"_training_set.csv"
training_filename_duplicate = family+"_"+curr_time+"_training_set_duplicate.csv"
training_filename_nans = family+"_"+curr_time+"_training_set_NA.csv"
training_filename_enum = family+"_"+curr_time+"_training_set_enum.csv"
training_filename_enum_true_one_hot = family+"_"+curr_time+"_training_set_enum_trueOneHot.csv"
training_filename_enum_nans = family+"_"+curr_time+"_training_set_enum_NAs.csv"
training_filename_enum_nans_true_one_hot = family+"_"+curr_time+"_training_set_enum_NAs_trueOneHot.csv"
validation_filename = family+"_"+curr_time+"_validation_set.csv"
validation_filename_enum = family+"_"+curr_time+"_validation_set_enum.csv"
validation_filename_enum_true_one_hot = family+"_"+curr_time+"_validation_set_enum_trueOneHot.csv"
validation_filename_enum_nans = family+"_"+curr_time+"_validation_set_enum_NAs.csv"
validation_filename_enum_nans_true_one_hot = family+"_"+curr_time+"_validation_set_enum_NAs_trueOneHot.csv"
test_filename = family+"_"+curr_time+"_test_set.csv"
test_filename_duplicate = family+"_"+curr_time+"_test_set_duplicate.csv"
test_filename_nans = family+"_"+curr_time+"_test_set_NA.csv"
test_filename_enum = family+"_"+curr_time+"_test_set_enum.csv"
test_filename_enum_true_one_hot = family+"_"+curr_time+"_test_set_enum_trueOneHot.csv"
test_filename_enum_nans = family+"_"+curr_time+"_test_set_enum_NAs.csv"
test_filename_enum_nans_true_one_hot = family+"_"+curr_time+"_test_set_enum_NAs_trueOneHot.csv"
weight_filename = family+"_"+curr_time+"_weight.csv"
weight_filename_enum = family+"_"+curr_time+"_weight_enum.csv"
total_test_number = 7 # total number of tests being run for GLM Gaussian family
ignored_eps = 1e-15 # if p-values < than this value, no comparison is performed
allowed_diff = 1e-5 # value of p-values difference allowed between theoretical and h2o p-values
duplicate_col_counts = 5 # maximum number of times to duplicate a column
duplicate_threshold = 0.8 # for each column, a coin is tossed to see if we duplicate that column or not
duplicate_max_scale = 2 # maximum scale factor for duplicated columns
nan_fraction = 0.2 # denote maximum fraction of NA's to be inserted into a column
# System parameters, do not change. Dire consequences may follow if you do
current_dir = os.path.dirname(os.path.realpath(sys.argv[1])) # directory of this test file
enum_col = 0 # set maximum number of categorical columns in predictor
enum_level_vec = [] # vector containing number of levels for each categorical column
noise_std = 0.01 # noise variance in Gaussian noise generation added to response
noise_var = noise_std*noise_std # Gaussian noise variance
train_row_count = 0 # training data row count, randomly generated later
train_col_count = 0 # training data column count, randomly generated later
data_type = 2 # determine data type of data set and weight, 1: integers, 2: real
# parameters denoting filenames with absolute paths
training_data_file = os.path.join(current_dir, training_filename)
training_data_file_duplicate = os.path.join(current_dir, training_filename_duplicate)
training_data_file_nans = os.path.join(current_dir, training_filename_nans)
training_data_file_enum = os.path.join(current_dir, training_filename_enum)
training_data_file_enum_true_one_hot = os.path.join(current_dir, training_filename_enum_true_one_hot)
training_data_file_enum_nans = os.path.join(current_dir, training_filename_enum_nans)
training_data_file_enum_nans_true_one_hot = os.path.join(current_dir, training_filename_enum_nans_true_one_hot)
validation_data_file = os.path.join(current_dir, validation_filename)
validation_data_file_enum = os.path.join(current_dir, validation_filename_enum)
validation_data_file_enum_true_one_hot = os.path.join(current_dir, validation_filename_enum_true_one_hot)
validation_data_file_enum_nans = os.path.join(current_dir, validation_filename_enum_nans)
validation_data_file_enum_nans_true_one_hot = os.path.join(current_dir, validation_filename_enum_nans_true_one_hot)
test_data_file = os.path.join(current_dir, test_filename)
test_data_file_duplicate = os.path.join(current_dir, test_filename_duplicate)
test_data_file_nans = os.path.join(current_dir, test_filename_nans)
test_data_file_enum = os.path.join(current_dir, test_filename_enum)
test_data_file_enum_true_one_hot = os.path.join(current_dir, test_filename_enum_true_one_hot)
test_data_file_enum_nans = os.path.join(current_dir, test_filename_enum_nans)
test_data_file_enum_nans_true_one_hot = os.path.join(current_dir, test_filename_enum_nans_true_one_hot)
weight_data_file = os.path.join(current_dir, weight_filename)
weight_data_file_enum = os.path.join(current_dir, weight_filename_enum)
test_failed = 0 # count total number of tests that have failed
test_failed_array = [0]*total_test_number # denote test results for all tests run. 1 error, 0 pass
test_num = 0 # index representing which test is being run
duplicate_col_indices = [] # denote column indices when column duplication is applied
duplicate_col_scales = [] # store scaling factor for all columns when duplication is applied
# store some model performance values for later comparison for test1
test1_r2_train = 0
test1_mse_train = 0
test1_weight = []
test1_p_values = []
test1_r2_test = 0
test1_mse_test = 0
test1_mse_train_theory = 0
test1_weight_theory = []
test1_p_values_theory = []
test1_mse_test_theory = 0
best_lambda = 0.0 # store best lambda obtained from lambda search
test_name = "pyunit_glm_gaussian.py" # name of this test
sandbox_dir = "" # sandbox directory where we are going to save our failed test data sets
# store information about training, validation and test data sets that are used
# by many tests. We do not want to keep loading them for each set in the hope of
# saving time. Trading off memory and speed here.
x_indices = [] # store predictor indices in the data set
y_index = [] # store response index in the data set
training_data = [] # store training data set
test_data = [] # store test data set
valid_data = [] # store validation data set
training_data_grid = [] # store combined training and validation data set for cross validation for grid search
best_alpha = -1 # store best alpha value found
best_grid_mse = -1 # store lowest MSE found from grid search
    def __init__(self):
        # All heavy lifting (random data generation, data-set file
        # creation) happens in setup().
        self.setup()
def setup(self):
    """
    Perform all initializations necessary to test the GLM algo for the Gaussian family:

    1. generate all the random values for our dynamic tests like the Gaussian
       noise std, column count and row count for training/validation/test data sets;
    2. generate the training/validation/test data sets with only real values;
    3. insert missing values into training/valid/test data sets;
    4. taking the training/valid/test data sets, duplicate certain random columns
       a random number of times and randomly scale each duplicated column;
    5. generate the training/validation/test data sets with predictors containing
       enum as well as real values***;
    6. insert missing values into the training/validation/test data sets with
       predictors containing enum and real values as well.

    *** according to Tomas, when working with mixed predictors (contains both
    enum/real value columns), the encoding used is different when regularization is
    enabled or disabled.  When regularization is enabled, true one hot encoding is
    used to encode the enum values to binary bits.  When regularization is disabled,
    a reference level plus one hot encoding is used instead.  Hence, two data sets
    are generated when we work with mixed predictors: one with true-one-hot set to
    False for no regularization and one with true-one-hot set to True when
    regularization is enabled.
    """
    # clean out the sandbox directory first
    self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
    # randomly set Gaussian noise standard deviation as a fraction of actual predictor standard
    # deviation; sqrt((max-min)^2/12) is the std of a uniform distribution over [min_p, max_p]
    self.noise_std = random.uniform(0, math.sqrt(pow((self.max_p_value - self.min_p_value), 2) / 12))
    self.noise_var = self.noise_std*self.noise_std
    # randomly determine data set size in terms of column and row counts
    self.train_col_count = random.randint(3, self.max_col_count)  # accounts for enum columns later
    self.train_row_count = int(round(self.train_col_count * random.uniform(self.min_col_count_ratio,
                                                                           self.max_col_count_ratio)))
    # DEBUGGING setup_data, remember to comment them out once done.
    # self.train_col_count = 3
    # self.train_row_count = 200
    # end DEBUGGING
    # randomly set number of enum and real columns in the data set, we will have at least one real column
    self.enum_col = random.randint(1, self.train_col_count-1)
    # randomly set maximum enum value for each categorical column
    self.enum_level_vec = np.random.random_integers(2, self.enum_levels-1, [self.enum_col, 1])
    # generate real value weight vector and training/validation/test data set for GLM
    pyunit_utils.write_syn_floating_point_dataset_glm(self.training_data_file, self.validation_data_file,
                                                      self.test_data_file, self.weight_data_file,
                                                      self.train_row_count, self.train_col_count, self.data_type,
                                                      self.max_p_value, self.min_p_value, self.max_w_value,
                                                      self.min_w_value, self.noise_std, self.family,
                                                      self.train_row_count, self.train_row_count)
    # randomly generate the duplicated and scaled columns
    (self.duplicate_col_indices, self.duplicate_col_scales) = \
        pyunit_utils.random_col_duplication(self.train_col_count, self.duplicate_threshold,
                                            self.duplicate_col_counts, True, self.duplicate_max_scale)
    # apply the duplication and scaling to training and test set.
    # Need to add the response column to the end of duplicated column indices and scale.
    # BUGFIX: work on copies — the original code aliased the attributes, so the
    # appends below also mutated self.duplicate_col_indices/self.duplicate_col_scales,
    # leaking the response column into the saved duplication bookkeeping.
    dup_col_indices = list(self.duplicate_col_indices)
    dup_col_indices.append(self.train_col_count)
    dup_col_scale = list(self.duplicate_col_scales)
    dup_col_scale.append(1.0)
    # print out duplication information for easy debugging
    print("duplication column and duplication scales are: ")
    print(dup_col_indices)
    print(dup_col_scale)
    pyunit_utils.duplicate_scale_cols(dup_col_indices, dup_col_scale, self.training_data_file,
                                      self.training_data_file_duplicate)
    pyunit_utils.duplicate_scale_cols(dup_col_indices, dup_col_scale, self.test_data_file,
                                      self.test_data_file_duplicate)
    # insert NAs into training/test data sets
    pyunit_utils.insert_nan_in_data(self.training_data_file, self.training_data_file_nans, self.nan_fraction)
    pyunit_utils.insert_nan_in_data(self.test_data_file, self.test_data_file_nans, self.nan_fraction)
    # generate data sets with enum as well as real values
    pyunit_utils.write_syn_mixed_dataset_glm(self.training_data_file_enum,
                                             self.training_data_file_enum_true_one_hot,
                                             self.validation_data_file_enum,
                                             self.validation_data_file_enum_true_one_hot, self.test_data_file_enum,
                                             self.test_data_file_enum_true_one_hot, self.weight_data_file_enum,
                                             self.train_row_count, self.train_col_count, self.max_p_value,
                                             self.min_p_value, self.max_w_value, self.min_w_value, self.noise_std,
                                             self.family, self.train_row_count, self.train_row_count,
                                             self.enum_col, self.enum_level_vec)
    # insert NAs into data set with categorical columns
    pyunit_utils.insert_nan_in_data(self.training_data_file_enum, self.training_data_file_enum_nans,
                                    self.nan_fraction)
    pyunit_utils.insert_nan_in_data(self.validation_data_file_enum, self.validation_data_file_enum_nans,
                                    self.nan_fraction)
    pyunit_utils.insert_nan_in_data(self.test_data_file_enum, self.test_data_file_enum_nans, self.nan_fraction)
    pyunit_utils.insert_nan_in_data(self.training_data_file_enum_true_one_hot,
                                    self.training_data_file_enum_nans_true_one_hot, self.nan_fraction)
    pyunit_utils.insert_nan_in_data(self.validation_data_file_enum_true_one_hot,
                                    self.validation_data_file_enum_nans_true_one_hot, self.nan_fraction)
    pyunit_utils.insert_nan_in_data(self.test_data_file_enum_true_one_hot,
                                    self.test_data_file_enum_nans_true_one_hot, self.nan_fraction)
    # only preload data sets that will be used for multiple tests
    self.training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file))
    # set indices for response and predictor columns in data set for H2O GLM model to use
    self.y_index = self.training_data.ncol-1
    self.x_indices = list(range(self.y_index))
    self.valid_data = h2o.import_file(pyunit_utils.locate(self.validation_data_file))
    self.test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file))
    # make a bigger training set by combining data from validation data set
    self.training_data_grid = self.training_data.rbind(self.valid_data)
    # save the training data files just in case the code crashed.
    pyunit_utils.remove_csv_files(self.current_dir, ".csv", action='copy', new_dir_path=self.sandbox_dir)
def teardown(self):
    """
    Perform teardown after the dynamic test is completed.  If all tests passed,
    delete all generated data sets since they can be quite large.  Otherwise move
    the training/validation/test data sets needed by the failed tests into a
    Rsandbox directory so that the failed test can be re-run later.
    """
    remove_files = []   # files scheduled for deletion (only when their tests passed)
    # create Rsandbox directory to keep data sets and weight information
    self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
    # Do not want to save all data sets. Only save data sets that are needed for failed tests.
    # test_failed_array slices below map ranges of test indices to the data files they used.
    if sum(self.test_failed_array[0:4]):
        pyunit_utils.move_files(self.sandbox_dir, self.training_data_file, self.training_filename)
        pyunit_utils.move_files(self.sandbox_dir, self.validation_data_file, self.validation_filename)
        pyunit_utils.move_files(self.sandbox_dir, self.test_data_file, self.test_filename)
    else:  # remove those files instead of moving them
        remove_files.append(self.training_data_file)
        remove_files.append(self.validation_data_file)
        remove_files.append(self.test_data_file)
    if sum(self.test_failed_array[0:6]):
        pyunit_utils.move_files(self.sandbox_dir, self.weight_data_file, self.weight_filename)
    else:
        remove_files.append(self.weight_data_file)
    if self.test_failed_array[3]:
        # test4 (remove_collinear_columns) also needs the duplicated-column variants
        pyunit_utils.move_files(self.sandbox_dir, self.training_data_file, self.training_filename)
        pyunit_utils.move_files(self.sandbox_dir, self.test_data_file, self.test_filename)
        pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_duplicate, self.test_filename_duplicate)
        pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_duplicate,
                                self.training_filename_duplicate)
    else:
        remove_files.append(self.training_data_file_duplicate)
        remove_files.append(self.test_data_file_duplicate)
    if self.test_failed_array[4]:
        # test5 (missing values) needs the NaN-injected real-valued sets
        pyunit_utils.move_files(self.sandbox_dir, self.training_data_file, self.training_filename)
        pyunit_utils.move_files(self.sandbox_dir, self.test_data_file, self.test_filename)
        pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_nans, self.training_filename_nans)
        pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_nans, self.test_filename_nans)
    else:
        remove_files.append(self.training_data_file_nans)
        remove_files.append(self.test_data_file_nans)
    if self.test_failed_array[5]:
        # test6 (enum + missing values, reference-level encoding)
        pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_enum_nans,
                                self.training_filename_enum_nans)
        pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_enum_nans, self.test_filename_enum_nans)
        pyunit_utils.move_files(self.sandbox_dir, self.weight_data_file_enum, self.weight_filename_enum)
    else:
        remove_files.append(self.training_data_file_enum_nans)
        remove_files.append(self.training_data_file_enum)
        remove_files.append(self.test_data_file_enum_nans)
        remove_files.append(self.test_data_file_enum)
        remove_files.append(self.validation_data_file_enum_nans)
        remove_files.append(self.validation_data_file_enum)
        remove_files.append(self.weight_data_file_enum)
    if self.test_failed_array[6]:
        # test7 (enum + missing values + lambda search, true one-hot encoding)
        pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_enum_nans_true_one_hot,
                                self.training_filename_enum_nans_true_one_hot)
        pyunit_utils.move_files(self.sandbox_dir, self.validation_data_file_enum_nans_true_one_hot,
                                self.validation_filename_enum_nans_true_one_hot)
        pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_enum_nans_true_one_hot,
                                self.test_filename_enum_nans_true_one_hot)
        pyunit_utils.move_files(self.sandbox_dir, self.weight_data_file_enum, self.weight_filename_enum)
    else:
        remove_files.append(self.training_data_file_enum_nans_true_one_hot)
        remove_files.append(self.training_data_file_enum_true_one_hot)
        remove_files.append(self.validation_data_file_enum_nans_true_one_hot)
        remove_files.append(self.validation_data_file_enum_true_one_hot)
        remove_files.append(self.test_data_file_enum_nans_true_one_hot)
        remove_files.append(self.test_data_file_enum_true_one_hot)
    if not(self.test_failed):  # all tests have passed. Delete sandbox if it was not wiped before
        pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, False)
    # delete only the csv files scheduled above; other files in the test directory
    # are left alone because the computing resources are shared with other tests
    if len(remove_files) > 0:
        for file in remove_files:
            pyunit_utils.remove_files(file)
def test1_glm_and_theory(self):
    """
    Test the p-value/linear intercept weight calculation of our GLM when family is
    set to Gaussian.  Since theoretical values are available, we compare our GLM
    output with the theoretical outputs.  This provides assurance that our GLM is
    implemented correctly.  The results also serve as the baseline that tests 2-4
    compare against.
    """
    print("*******************************************************************************************")
    print("Test1: compares the linear regression weights/p-values computed from theory and H2O GLM.")
    try:
        # get theoretical weights, p-values and mse
        (self.test1_weight_theory, self.test1_p_values_theory, self.test1_mse_train_theory,
         self.test1_mse_test_theory) = self.theoretical_glm(self.training_data_file, self.test_data_file,
                                                            False, False)
    except Exception:
        # BUGFIX: was a bare `except:`, which would also swallow KeyboardInterrupt
        # and SystemExit; a linear-algebra failure means the random data set is
        # degenerate, so skip the whole run (exit 0 = not a test failure).
        print("problems with lin-alg. Got bad data set.")
        sys.exit(0)
    # get H2O model; Lambda=0 disables regularization so p-values can be computed
    model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=0, compute_p_values=True,
                                              standardize=False)
    model_h2o.train(x=self.x_indices, y=self.y_index, training_frame=self.training_data)
    # calculate test metrics
    h2o_model_test_metrics = model_h2o.model_performance(test_data=self.test_data)
    num_test_failed = self.test_failed  # used to determine if current test has failed
    # print out comparison results for weight/p-values/training and test MSEs for theoretical and our H2O GLM
    (self.test1_weight, self.test1_p_values, self.test1_mse_train, self.test1_r2_train, self.test1_mse_test,
     self.test1_r2_test, self.test_failed) = \
        pyunit_utils.extract_comparison_attributes_and_print(model_h2o, h2o_model_test_metrics, "\nTest1 Done!",
                                                             True, True, True, self.test1_weight_theory,
                                                             self.test1_p_values_theory,
                                                             self.test1_mse_train_theory,
                                                             self.test1_mse_test_theory,
                                                             "Comparing intercept and weights ....",
                                                             "H2O intercept and weights: ",
                                                             "Theoretical intercept and weights: ",
                                                             "Intercept and weights are not equal!",
                                                             "Intercept and weights are close enough!",
                                                             "Comparing p-values ....", "H2O p-values: ",
                                                             "Theoretical p-values: ", "P-values are not equal!",
                                                             "P-values are close enough!",
                                                             "Comparing training MSEs ....", "H2O training MSE: ",
                                                             "Theoretical training MSE: ",
                                                             "Training MSEs are not equal!",
                                                             "Training MSEs are close enough!",
                                                             "Comparing test MSEs ....", "H2O test MSE: ",
                                                             "Theoretical test MSE: ", "Test MSEs are not equal!",
                                                             "Test MSEs are close enough!", self.test_failed,
                                                             self.ignored_eps, self.allowed_diff,
                                                             self.noise_var, False)
    # print out test results and update test_failed_array status to reflect if this test has failed
    self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test1_glm_and_theory",
                                                                            num_test_failed, self.test_failed)
    self.test_num += 1  # update test index
def test2_glm_lambda_search(self):
    """
    Test the lambda search.  Recall that lambda search enables efficient and
    automatic search for the optimal value of the lambda parameter.  When lambda
    search is enabled, GLM will first fit a model with maximum regularization and
    then keep decreasing it until over fitting occurs.  The resulting model is
    based on the best lambda value.  According to Tomas, set alpha = 0.5 and
    enable validation but not cross-validation.  Results are compared against the
    test1 baselines; the best lambda found is saved for test3/test4.
    """
    print("*******************************************************************************************")
    print("Test2: tests the lambda search.")
    # generate H2O model with lambda search enabled
    model_h2o_0p5 = H2OGeneralizedLinearEstimator(family=self.family, lambda_search=True, alpha=0.5,
                                                  lambda_min_ratio=1e-20)
    model_h2o_0p5.train(x=self.x_indices, y=self.y_index, training_frame=self.training_data,
                        validation_frame=self.valid_data)
    # get best lambda here (saved on self for test3/test4)
    self.best_lambda = pyunit_utils.get_train_glm_params(model_h2o_0p5, 'best_lambda')
    # get test performance here
    h2o_model_0p5_test_metrics = model_h2o_0p5.model_performance(test_data=self.test_data)
    num_test_failed = self.test_failed  # snapshot to detect whether this test adds failures
    # compare weights/MSEs against the test1 baselines (p-value comparison disabled:
    # the empty-string labels correspond to the skipped p-value comparison slots)
    (_, _, _, _, _, _, self.test_failed) =\
        pyunit_utils.extract_comparison_attributes_and_print(model_h2o_0p5, h2o_model_0p5_test_metrics,
                                                             "\nTest2 Done!", False, False, False,
                                                             self.test1_weight, None, self.test1_mse_train,
                                                             self.test1_mse_test,
                                                             "Comparing intercept and weights ....",
                                                             "H2O lambda search intercept and weights: ",
                                                             "H2O test1 template intercept and weights: ",
                                                             "Intercept and weights are not equal!",
                                                             "Intercept and weights are close enough!", "", "", "",
                                                             "", "", "Comparing training MSEs ....",
                                                             "H2O lambda search training MSE: ",
                                                             "H2O Test1 template training MSE: ",
                                                             "Training MSEs are not equal!",
                                                             "Training MSEs are close enough!",
                                                             "Comparing test MSEs ....",
                                                             "H2O lambda search test MSE: ",
                                                             "H2O Test1 template test MSE: ",
                                                             "Test MSEs are not equal!",
                                                             "Test MSEs are close enough!", self.test_failed,
                                                             self.ignored_eps, self.allowed_diff, self.noise_var,
                                                             True)
    # print out test results and update test_failed_array status to reflect if this test has failed
    self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test2_glm_lambda_search",
                                                                            num_test_failed, self.test_failed)
    self.test_num += 1
def test3_glm_grid_search(self):
    """
    Test GridSearch with the following parameters:

    1. Lambda = best_lambda value from test2
    2. alpha = [0 0.5 0.99]
    3. cross-validation with nfolds = 5, fold_assignment = "Random"

    We look at the best result from the grid search and compare it with the test1
    baselines.  The best alpha found is saved for test4.

    :return: None
    """
    print("*******************************************************************************************")
    print("Test3: explores various parameter settings in training the GLM using GridSearch using solver ")
    hyper_parameters = {'alpha': [0, 0.5, 0.99]}  # set hyper_parameters for grid search
    # train H2O GLM model with grid search
    model_h2o_grid_search = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, Lambda=self.best_lambda,
                                                                        nfolds=5, fold_assignment='Random'),
                                          hyper_parameters)
    model_h2o_grid_search.train(x=self.x_indices, y=self.y_index, training_frame=self.training_data_grid)
    # print out the model sequence ordered by the best MSE values, thanks Ludi!
    # NOTE(review): sort_by/'Model Id' is an older H2O grid API — verify it still
    # exists in the H2O version being tested against.
    temp_model = model_h2o_grid_search.sort_by("mse(xval=True)")
    # obtain the model ID of best model (with smallest MSE) and use that for our evaluation
    best_model_id = temp_model['Model Id'][0]
    self.best_grid_mse = temp_model['mse(xval=True)'][0]
    self.best_alpha = model_h2o_grid_search.get_hyperparams(best_model_id)
    best_model = h2o.get_model(best_model_id)
    best_model_test_metrics = best_model.model_performance(test_data=self.test_data)
    num_test_failed = self.test_failed  # snapshot to detect whether this test adds failures
    # compare best grid model against the test1 baselines (p-value comparison disabled)
    (_, _, _, _, _, _, self.test_failed) =\
        pyunit_utils.extract_comparison_attributes_and_print(best_model, best_model_test_metrics, "\nTest3 Done!",
                                                             False, False, False, self.test1_weight, None,
                                                             self.test1_mse_train, self.test1_mse_test,
                                                             "Comparing intercept and weights ....",
                                                             "H2O best model from gridsearch intercept "
                                                             "and weights: ",
                                                             "H2O test1 template intercept and weights: ",
                                                             "Intercept and weights are not equal!",
                                                             "Intercept and weights are close enough!", "", "", "",
                                                             "", "", "Comparing training MSEs ....",
                                                             "H2O best model from gridsearch training MSE: ",
                                                             "H2O Test1 template training MSE: ",
                                                             "Training MSEs are not equal!",
                                                             "Training MSEs are close enough!",
                                                             "Comparing test MSEs ....",
                                                             "H2O best model from gridsearch test MSE: ",
                                                             "H2O Test1 template test MSE: ",
                                                             "Test MSEs are not equal!",
                                                             "Test MSEs are close enough!", self.test_failed,
                                                             self.ignored_eps, self.allowed_diff,
                                                             self.noise_var, False)
    # print out test results and update test_failed_array status to reflect if this test has failed
    self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test_glm_grid_search_over_params",
                                                                            num_test_failed, self.test_failed)
    self.test_num += 1
def test4_glm_remove_collinear_columns(self):
    """
    With the best parameters obtained from test3 grid search, train GLM on the
    data sets with duplicated (scaled) columns and enable remove_collinear_columns
    to see if the algorithm catches the duplicated columns.  Results are compared
    with the test1 baselines.
    """
    print("*******************************************************************************************")
    print("Test4: test the GLM remove_collinear_columns.")
    # read in training data sets with duplicated columns (generated in setup())
    training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_duplicate))
    test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_duplicate))
    # these frames have more columns than self.training_data, so recompute indices
    y_index = training_data.ncol-1
    x_indices = list(range(y_index))
    print("Best lambda is {0}, best alpha is {1}".format(self.best_lambda, self.best_alpha))
    # train H2O model with remove_collinear_columns=True
    model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=self.best_lambda, alpha=self.best_alpha,
                                              remove_collinear_columns=True)
    model_h2o.train(x=x_indices, y=y_index, training_frame=training_data)
    # evaluate model over test data set
    model_h2o_metrics = model_h2o.model_performance(test_data=test_data)
    num_test_failed = self.test_failed  # snapshot to detect whether this test adds failures
    # compare against the test1 baselines (p-value comparison disabled)
    (_, _, _, _, _, _, self.test_failed) = \
        pyunit_utils.extract_comparison_attributes_and_print(model_h2o, model_h2o_metrics, "\nTest4 Done!", False,
                                                             False, False, self.test1_weight, None,
                                                             self.test1_mse_train, self.test1_mse_test,
                                                             "Comparing intercept and weights ....",
                                                             "H2O remove_collinear_columns intercept and "
                                                             "weights: ",
                                                             "H2O test1 template intercept and weights: ",
                                                             "Intercept and weights are not equal!",
                                                             "Intercept and weights are close enough!", "", "", "",
                                                             "", "", "Comparing training MSEs ....",
                                                             "H2O remove_collinear_columns training MSE: ",
                                                             "H2O Test1 template training MSE: ",
                                                             "Training MSEs are not equal!",
                                                             "Training MSEs are close enough!",
                                                             "Comparing test MSEs ....",
                                                             "H2O remove_collinear_columns test MSE: ",
                                                             "H2O Test1 template test MSE: ",
                                                             "Test MSEs are not equal!",
                                                             "Test MSEs are close enough!", self.test_failed,
                                                             self.ignored_eps, self.allowed_diff, self.noise_var,
                                                             False)
    # print out test results and update test_failed_array status to reflect if this test has failed
    self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test4_glm_remove_collinear_columns",
                                                                            num_test_failed, self.test_failed)
    self.test_num += 1
def test5_missing_values(self):
    """
    Test parameter missing_values_handling="MeanImputation" with only real value
    predictors.  The same data sets as before are used; however, we go into the
    predictor matrix and randomly change values to nan to create missing values.
    Since no regularization is enabled in this case, we are able to calculate a
    theoretical weight/p-values/MSEs to compare our H2O models with.
    """
    print("*******************************************************************************************")
    print("Test5: test the GLM with imputation of missing values with column averages.")
    # get theoretical weights, p-values and mse
    try:
        (weight_theory, p_values_theory, mse_train_theory, mse_test_theory) = \
            self.theoretical_glm(self.training_data_file_nans, self.test_data_file_nans, False, False)
    except Exception:
        # BUGFIX: was a bare `except:` (also caught KeyboardInterrupt/SystemExit);
        # a lin-alg failure means a degenerate random data set — skip run, exit 0.
        print("Bad dataset, lin-alg package problem.")
        sys.exit(0)
    # import training set and test set
    training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_nans))
    test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_nans))
    # train H2O models with missing_values_handling="MeanImputation"
    model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=0, compute_p_values=True,
                                              missing_values_handling="MeanImputation", standardize=False)
    model_h2o.train(x=self.x_indices, y=self.y_index, training_frame=training_data)
    # calculate H2O model performance with test data set
    h2o_model_test_metrics = model_h2o.model_performance(test_data=test_data)
    num_test_failed = self.test_failed  # snapshot to detect whether this test adds failures
    (_, _, _, _, _, _, self.test_failed) =\
        pyunit_utils.extract_comparison_attributes_and_print(model_h2o, h2o_model_test_metrics, "\nTest5 Done!",
                                                             True, True, True, weight_theory, p_values_theory,
                                                             mse_train_theory, mse_test_theory,
                                                             "Comparing intercept and weights ....",
                                                             "H2O missing values intercept and weights: ",
                                                             "Theoretical intercept and weights: ",
                                                             "Intercept and weights are not equal!",
                                                             "Intercept and weights are close enough!",
                                                             "Comparing p-values ....",
                                                             "H2O missing values p-values: ",
                                                             "Theoretical p-values: ", "P-values are not equal!",
                                                             "P-values are close enough!",
                                                             "Comparing training MSEs ....",
                                                             "H2O missing values training MSE: ",
                                                             "Theoretical training MSE: ",
                                                             "Training MSEs are not equal!",
                                                             "Training MSEs are close enough!",
                                                             "Comparing test MSEs ....",
                                                             "H2O missing values test MSE: ",
                                                             "Theoretical test MSE: ", "Test MSEs are not equal!",
                                                             "Test MSEs are close enough!", self.test_failed,
                                                             self.ignored_eps, self.allowed_diff, self.noise_var,
                                                             False)
    # print out test results and update test_failed_array status to reflect if this test has failed
    self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test5_missing_values",
                                                                            num_test_failed, self.test_failed)
    self.test_num += 1
def test6_enum_missing_values(self):
    """
    Test parameter missing_values_handling="MeanImputation" with mixed predictors
    (categorical/real value columns).  We first generate a data set that contains
    a random number of columns of categorical and real value columns.  Next, we
    encode the categorical columns.  Then, we generate the random data set using
    the formula Y = W^T * X + E as before.  Next, we go into the predictor matrix
    and randomly change values to nan to create missing values.  Since no
    regularization is enabled in this case, we are able to calculate a theoretical
    weight/p-values/MSEs to compare our H2O models with.
    """
    # no regularization in this case, use reference level plus one-hot-encoding
    print("*******************************************************************************************")
    print("Test6: test the GLM with enum/real values.")
    try:
        # get theoretical weights, p-values and mse
        (weight_theory, p_values_theory, mse_train_theory, mse_test_theory) =\
            self.theoretical_glm(self.training_data_file_enum_nans, self.test_data_file_enum_nans, True, False)
    except Exception:
        # BUGFIX: was a bare `except:` (also caught KeyboardInterrupt/SystemExit);
        # a lin-alg failure means a degenerate random data set — skip run, exit 0.
        print("Bad data set. Problem with lin-alg.")
        sys.exit(0)
    # import training set and test set with missing values
    training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_enum_nans))
    test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_enum_nans))
    # change the categorical data using .asfactor() (round first: NaN insertion may
    # have produced non-integer means in those columns)
    for ind in range(self.enum_col):
        training_data[ind] = training_data[ind].round().asfactor()
        test_data[ind] = test_data[ind].round().asfactor()
    num_col = training_data.ncol
    y_index = num_col-1
    x_indices = list(range(y_index))
    # generate H2O model; Lambda=0 disables regularization so p-values can be computed
    model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=0, compute_p_values=True,
                                              missing_values_handling="MeanImputation")
    model_h2o.train(x=x_indices, y=y_index, training_frame=training_data)
    h2o_model_test_metrics = model_h2o.model_performance(test_data=test_data)
    num_test_failed = self.test_failed  # snapshot to detect whether this test adds failures
    (_, _, _, _, _, _, self.test_failed) =\
        pyunit_utils.extract_comparison_attributes_and_print(model_h2o, h2o_model_test_metrics, "\nTest6 Done!",
                                                             True, False, False, weight_theory, p_values_theory,
                                                             mse_train_theory, mse_test_theory,
                                                             "Comparing intercept and weights with enum and "
                                                             "missing values....",
                                                             "H2O enum missing values no regularization "
                                                             "intercept and weights: ",
                                                             "Theoretical intercept and weights: ",
                                                             "Intercept and weights are not equal!",
                                                             "Intercept and weights are close enough!",
                                                             "Comparing p-values ....",
                                                             "H2O enum missing values no regularization p-values: ",
                                                             "Theoretical p-values: ", "P-values are not equal!",
                                                             "P-values are close enough!",
                                                             "Comparing training MSEs ....",
                                                             "H2O enum missing values no regularization "
                                                             "training MSE: ",
                                                             "Theoretical training MSE: ",
                                                             "Training MSEs are not equal!",
                                                             "Training MSEs are close enough!",
                                                             "Comparing test MSEs ....",
                                                             "H2O enum missing values no regularization test MSE: ",
                                                             "Theoretical test MSE: ", "Test MSEs are not equal!",
                                                             "Test MSEs are close enough!", self.test_failed,
                                                             self.ignored_eps, self.allowed_diff, self.noise_var,
                                                             False, attr3_bool=False)
    # print out test results and update test_failed_array status to reflect if this test has failed
    self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test6_enum_missing_values",
                                                                            num_test_failed, self.test_failed)
    self.test_num += 1
def test7_missing_enum_values_lambda_search(self):
    """
    Test parameter missing_values_handling="MeanImputation" with mixed predictors
    (categorical/real value columns) under lambda search.  We first generate a
    data set that contains a random number of columns of categorical and real
    value columns.  Next, we encode the categorical columns.  Then, we generate
    the random data set using the formula Y = W^T * X + E as before.  Next, we go
    into the predictor matrix and randomly change values to nan to create missing
    values.  Lambda-search is enabled with alpha set to 0.5.  Since the encoding
    is different in this case than in test6, we compute theoretical weights/MSEs
    and compare the best H2O model MSEs with the theoretical calculations, hoping
    they are close.
    """
    # perform lambda_search, regularization and one hot encoding.
    print("*******************************************************************************************")
    print("Test7: test the GLM with imputation of missing enum/real values under lambda search.")
    try:
        # get theoretical weights, p-values and mse
        (weight_theory, p_values_theory, mse_train_theory, mse_test_theory) =\
            self.theoretical_glm(self.training_data_file_enum_nans_true_one_hot,
                                 self.test_data_file_enum_nans_true_one_hot, True, True,
                                 validation_data_file=self.validation_data_file_enum_nans_true_one_hot)
    except Exception:
        # BUGFIX: was a bare `except:` (also caught KeyboardInterrupt/SystemExit);
        # a lin-alg failure means a degenerate random data set — skip run, exit 0.
        print("Bad data set. Problem with lin-alg.")
        sys.exit(0)
    # import training set and test set with missing values and true one hot encoding
    training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_enum_nans_true_one_hot))
    validation_data = h2o.import_file(pyunit_utils.locate(self.validation_data_file_enum_nans_true_one_hot))
    test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_enum_nans_true_one_hot))
    # change the categorical data using .asfactor()
    for ind in range(self.enum_col):
        training_data[ind] = training_data[ind].round().asfactor()
        validation_data[ind] = validation_data[ind].round().asfactor()
        test_data[ind] = test_data[ind].round().asfactor()
    num_col = training_data.ncol
    y_index = num_col-1
    x_indices = list(range(y_index))
    # train H2O model with lambda search over a wide lambda range
    model_h2o_0p5 = H2OGeneralizedLinearEstimator(family=self.family, lambda_search=True, alpha=0.5,
                                                  lambda_min_ratio=1e-20, missing_values_handling="MeanImputation")
    model_h2o_0p5.train(x=x_indices, y=y_index, training_frame=training_data,
                        validation_frame=validation_data)
    h2o_model_0p5_test_metrics = model_h2o_0p5.model_performance(test_data=test_data)
    num_test_failed = self.test_failed  # snapshot to detect whether this test adds failures
    (_, _, _, _, _, _, self.test_failed) =\
        pyunit_utils.extract_comparison_attributes_and_print(model_h2o_0p5, h2o_model_0p5_test_metrics,
                                                             "\nTest7 Done!", False, False, True, weight_theory,
                                                             None, mse_train_theory, mse_test_theory,
                                                             "Comparing intercept and weights with categorical "
                                                             "columns, missing values and lambda search....",
                                                             "H2O enum missing values and lambda search "
                                                             "intercept and weights: ",
                                                             "Theoretical intercept and weights: ",
                                                             "Intercept and weights are not equal!",
                                                             "Intercept and weights are close enough!",
                                                             "Comparing p-values ....",
                                                             # FIX: label typo "valuesand" -> "values and"
                                                             "H2O enum missing values and lambda search "
                                                             "p-values: ",
                                                             "Theoretical p-values: ", "P-values are not equal!",
                                                             "P-values are close enough!",
                                                             "Comparing training MSEs ....",
                                                             "H2O enum missing values and lambda search "
                                                             "training MSE: ",
                                                             "Theoretical training MSE: ",
                                                             "Training MSEs are not equal!",
                                                             "Training MSEs are close enough!",
                                                             "Comparing test MSEs ....",
                                                             "H2O enum missing values and lambda search test MSE: ",
                                                             "Theoretical test MSE: ", "Test MSEs are not equal!",
                                                             "Test MSEs are close enough!", self.test_failed,
                                                             self.ignored_eps, self.allowed_diff, self.noise_var,
                                                             False, attr3_bool=False)
    # print out test results and update test_failed_array status to reflect if this test has failed
    self.test_failed_array[self.test_num] += \
        pyunit_utils.show_test_results("test7_missing_enum_values_lambda_search", num_test_failed, self.test_failed)
    self.test_num += 1
def theoretical_glm(self, training_data_file, test_data_file, has_categorical, true_one_hot,
validation_data_file=""):
"""
This function is written to load in a training/test data sets with predictors followed by the response
as the last column. We then calculate the weights/bias and the p-values using derived formulae
off the web.
:param training_data_file: string representing the training data set filename
:param test_data_file: string representing the test data set filename
:param has_categorical: bool indicating if the data set contains mixed predictors (both enum and real)
:param true_one_hot: bool True: true one hot encoding is used. False: reference level plus one hot
encoding is used
:param validation_data_file: optional string, denoting validation file so that we can concatenate
training and validation data sets into a big training set since H2O model is using a training
and a validation data set.
:return: a tuple containing weights, p-values, training data set MSE and test data set MSE
"""
# read in the training data
training_data_xy = np.asmatrix(np.genfromtxt(training_data_file, delimiter=',', dtype=None))
test_data_xy = np.asmatrix(np.genfromtxt(test_data_file, delimiter=',', dtype=None))
if len(validation_data_file) > 0: # validation data set exist and add it to training_data
temp_data_xy = np.asmatrix(np.genfromtxt(validation_data_file, delimiter=',', dtype=None))
training_data_xy = np.concatenate((training_data_xy, temp_data_xy), axis=0)
# if predictor contains categorical data, perform imputation during encoding of enums to binary bits
if has_categorical:
training_data_xy = pyunit_utils.encode_enum_dataset(training_data_xy, self.enum_level_vec,
self.enum_col, true_one_hot, np.any(training_data_xy))
test_data_xy = pyunit_utils.encode_enum_dataset(test_data_xy, self.enum_level_vec, self.enum_col,
true_one_hot, np.any(training_data_xy))
# replace missing values with column mean before proceeding for training/test data sets
if np.isnan(training_data_xy).any():
inds = np.where(np.isnan(training_data_xy))
col_means = np.asarray(np.nanmean(training_data_xy, axis=0))[0]
training_data_xy[inds] = np.take(col_means, inds[1])
if np.isnan(test_data_xy).any():
# replace the actual means with column means from training
inds = np.where(np.isnan(test_data_xy))
test_data_xy = pyunit_utils.replace_nan_with_mean(test_data_xy, inds, col_means)
(num_row, num_col) = training_data_xy.shape
dof = num_row - num_col # degree of freedom in t-distribution
response_y = training_data_xy[:, num_col-1]
training_data = training_data_xy[:, range(0, num_col-1)]
# generate weight vector W = (X^T*X)^(-1)*X^T*Y
# form the X matrix here
temp_ones = np.asmatrix(np.ones(num_row)).transpose()
x_mat = np.concatenate((temp_ones, training_data), axis=1)
mat_inv = np.linalg.pinv(x_mat.transpose()*x_mat)
t_weights = mat_inv*x_mat.transpose()*response_y
# calculate training data MSE here
t_predict_y = x_mat*t_weights
delta = t_predict_y-response_y
mse_train = delta.transpose()*delta
# calculate 2-sided p-values here
mysd = mse_train/dof
se = np.sqrt(mysd*np.diag(mat_inv))
tval = abs(t_weights.transpose()/se) # ensure floating point division here
p_values = scipy.stats.t.sf(tval, dof)*2
# calculate test data MSE
test_response_y = test_data_xy[:, num_col-1]
test_data = test_data_xy[:, range(0, num_col-1)]
t_predict = pyunit_utils.generate_response_glm(t_weights, test_data, 0, self.family)
(num_row_t, num_col_t) = test_data.shape
temp = t_predict-test_response_y
mse_test = temp.transpose()*temp/num_row_t # test data MSE
return np.array(t_weights.transpose())[0].tolist(), np.array(p_values)[0].tolist(), mse_train[0, 0]/num_row, \
mse_test[0, 0]
def test_glm_gaussian():
"""
Create and instantiate TestGLMGaussian class and perform tests specified for GLM
Gaussian family.
:return: None
"""
test_glm_gaussian = TestGLMGaussian()
test_glm_gaussian.test1_glm_and_theory()
test_glm_gaussian.test2_glm_lambda_search()
test_glm_gaussian.test3_glm_grid_search()
test_glm_gaussian.test4_glm_remove_collinear_columns()
test_glm_gaussian.test5_missing_values()
test_glm_gaussian.test6_enum_missing_values()
test_glm_gaussian.test7_missing_enum_values_lambda_search()
test_glm_gaussian.teardown()
sys.stdout.flush()
if test_glm_gaussian.test_failed: # exit with error if any tests have failed
sys.exit(1)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_glm_gaussian)
else:
test_glm_gaussian()
| mathemage/h2o-3 | h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gaussian_large.py | Python | apache-2.0 | 64,811 | [
"Gaussian"
] | b3da5e479bb174f5bd96966fa9450e6c4f6da6be8b255b1d6ddee70f0ed766fd |
# Copyright 2007 by Tiago Antao. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to work with GenePop.
See http://wbiomed.curtin.edu.au/genepop/ , the format is documented
here: http://wbiomed.curtin.edu.au/genepop/help_input.html .
Classes:
Record Holds GenePop data.
Functions:
read Parses a GenePop record (file) into a Record object.
Partially inspired on MedLine Code.
"""
from copy import deepcopy
def get_indiv(line):
def int_no_zero(val):
v = int(val)
if v == 0:
return None
return v
indiv_name, marker_line = line.split(',')
markers = marker_line.replace('\t', ' ').split(' ')
markers = [marker for marker in markers if marker!='']
if len(markers[0]) in [2, 4]: #2 digits per allele
marker_len = 2
else:
marker_len = 3
try:
allele_list = [(int_no_zero(marker[0:marker_len]),
int_no_zero(marker[marker_len:]))
for marker in markers]
except ValueError: #Haploid
allele_list = [(int_no_zero(marker[0:marker_len]),)
for marker in markers]
return indiv_name, allele_list, marker_len
def read(handle):
"""Parses a handle containing a GenePop file.
handle is a file-like object that contains a GenePop record.
"""
record = Record()
record.comment_line = str(handle.next()).rstrip()
#We can now have one loci per line or all loci in a single line
#separated by either space or comma+space...
#We will remove all commas on loci... that should not be a problem
sample_loci_line = str(handle.next()).rstrip().replace(',', '')
all_loci = sample_loci_line.split(' ')
record.loci_list.extend(all_loci)
for line in handle:
line = line.rstrip()
if line.upper()=='POP':
break
record.loci_list.append(line)
else:
raise ValueError('No population data found, file probably not GenePop related')
record.populations.append([])
for line in handle:
line = line.rstrip()
if line.upper()=='POP':
record.populations.append([])
else:
indiv_name, allele_list, record.marker_len = get_indiv(line)
record.populations[-1].append((indiv_name, allele_list))
loci = record.loci_list
for pop in record.populations:
record.pop_list.append(pop[-1][0])
for indiv in pop:
for mk_i in range(len(loci)):
mk_orig = indiv[1][mk_i]
mk_real = []
for al in mk_orig:
if al == 0:
mk_real.append(None)
else:
mk_real.append(al)
indiv[1][mk_i] = tuple(mk_real)
return record
class Record(object):
"""Holds information from a GenePop record.
Members:
marker_len The marker length (2 or 3 digit code per allele).
comment_line Comment line.
loci_list List of loci names.
pop_list List of population names.
populations List of population data.
In most genepop files, the population name is not trustable.
It is strongly recommended that populations are referred by index.
populations has one element per population. Each element is itself
a list of individuals, each individual is a pair composed by individual
name and a list of alleles (2 per marker or 1 for haploids): Example
[
[
('Ind1', [(1,2), (3,3), (200,201)],
('Ind2', [(2,None), (3,3), (None,None)],
],
[
('Other1', [(1,1), (4,3), (200,200)],
]
]
"""
def __init__(self):
self.marker_len = 0
self.comment_line = ""
self.loci_list = []
self.pop_list = []
self.populations = []
def __str__(self):
"""Returns (reconstructs) a GenePop textual representation.
"""
rep = [self.comment_line + '\n']
rep.append('\n'.join(self.loci_list) + '\n')
for pop in self.populations:
rep.append('Pop\n')
for indiv in pop:
name, markers = indiv
rep.append(name)
rep.append(',')
for marker in markers:
rep.append(' ')
for al in marker:
if al == None:
al = '0'
aStr = str(al)
while len(aStr)<self.marker_len:
aStr = "".join(['0', aStr])
rep.append(aStr)
rep.append('\n')
return "".join(rep)
def split_in_pops(self, pop_names):
"""Splits a GP record in a dictionary with 1 pop per entry.
Given a record with n pops and m loci returns a dictionary
of records (key pop_name) where each item is a record
with a single pop and m loci.
Parameters:
pop_names - Population names
"""
gp_pops = {}
for i in range(len(self.populations)):
gp_pop = Record()
gp_pop.marker_len = self.marker_len
gp_pop.comment_line = self.comment_line
gp_pop.loci_list = deepcopy(self.loci_list)
gp_pop.populations = [deepcopy(self.populations[i])]
gp_pops[pop_names[i]] = gp_pop
return gp_pops
def split_in_loci(self, gp):
"""Splits a GP record in a dictionary with 1 locus per entry.
Given a record with n pops and m loci returns a dictionary
of records (key locus name) where each item is a record
with a single locus and n pops.
"""
gp_loci = {}
for i in range(len(self.loci_list)):
gp_pop = Record()
gp_pop.marker_len = self.marker_len
gp_pop.comment_line = self.comment_line
gp_pop.loci_list = [self.loci_list[i]]
gp_pop.populations = []
for pop in self.populations:
my_pop = []
for indiv in pop:
my_pop.append((indiv[0], [indiv[1][i]]))
gp_pop.populations.append(my_pop)
gp_loci[gp_pop.loci_list[0]] = gp_pop
return gp_loci
def remove_population(self, pos):
"""Removes a population (by position).
"""
del self.populations[pos]
def remove_locus_by_position(self, pos):
"""Removes a locus by position.
"""
del self.loci_list[pos]
for pop in self.populations:
for indiv in pop:
name, loci = indiv
del loci[pos]
def remove_locus_by_name(self, name):
"""Removes a locus by name.
"""
for i in range(len(self.loci_list)):
if self.loci_list[i] == name:
self.remove_locus_by_position(i)
return
#If here than locus not existent... Maybe raise exception?
# Although it should be Ok... Just a boolean return, maybe?
| bryback/quickseq | genescript/Bio/PopGen/GenePop/__init__.py | Python | mit | 7,348 | [
"Biopython"
] | 5f9edfbbe7330c4ad17db731e3b15b0109e6dd80c005dec01754a9cda755c3d9 |
import numpy as np
from ..FileIO import FileIO as psopen
from ...common import pandas
#import pysal_examples
from ... import examples as pysal_examples
import unittest as ut
PANDAS_EXTINCT = pandas is None
class Test_Table(ut.TestCase):
def setUp(self):
self.filehandler = psopen(pysal_examples.get_path('columbus.dbf'))
self.df = self.filehandler.to_df()
self.filehandler.seek(0)
self.shapefile = psopen(pysal_examples.get_path('columbus.shp'))
self.csvhandler = psopen(pysal_examples.get_path('usjoin.csv'))
self.csv_df = self.csvhandler.to_df()
self.csvhandler.seek(0)
@ut.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_to_df(self):
for column in self.csv_df.columns:
if column.lower() == 'name':
continue
np.testing.assert_allclose(self.csvhandler.by_col(column),
self.csv_df[column].values)
for column in self.df.columns:
if column == 'geometry':
continue
np.testing.assert_allclose(self.filehandler.by_col(column),
self.df[column])
| sjsrey/pysal_core | pysal_core/io/tests/test_Tables.py | Python | bsd-3-clause | 1,188 | [
"COLUMBUS"
] | 1b51d7fe05ab0b203cd04aac287bfef827f121d3d76be907b933c59eacedd067 |
# -*- coding: utf-8 -*-
#
# connections.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# create connectivity figures for topology manual
import nest
import nest.topology as tp
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
import numpy as np
# seed NumPy RNG to ensure identical results for runs with random placement
np.random.seed(7654321)
def beautify_layer(l, fig=plt.gcf(), xlabel=None, ylabel=None,
xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
"""Assume either x and ylims/ticks given or none"""
top = nest.GetStatus(l)[0]['topology']
ctr = top['center']
ext = top['extent']
if xticks is None:
if 'rows' in top:
dx = float(ext[0]) / top['columns']
dy = float(ext[1]) / top['rows']
xticks = ctr[0] - ext[0] / 2. + dx / 2. + dx * np.arange(
top['columns'])
yticks = ctr[1] - ext[1] / 2. + dy / 2. + dy * np.arange(
top['rows'])
if xlim is None:
xlim = [ctr[0] - ext[0] / 2. - dx / 2., ctr[0] + ext[
0] / 2. + dx / 2.] # extra space so extent is visible
ylim = [ctr[1] - ext[1] / 2. - dy / 2., ctr[1] + ext[1] / 2. + dy / 2.]
else:
ext = [xlim[1] - xlim[0], ylim[1] - ylim[0]]
ax = fig.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_aspect('equal', 'box')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.grid(True)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return
def conn_figure(fig, layer, connd, targets=None, showmask=True, showkern=False,
xticks=range(-5, 6), yticks=range(-5, 6),
xlim=[-5.5, 5.5], ylim=[-5.5, 5.5]):
if targets is None:
targets = ((tp.FindCenterElement(layer), 'red'),)
tp.PlotLayer(layer, fig=fig, nodesize=60)
for src, clr in targets:
if showmask:
mask = connd['mask']
else:
mask = None
if showkern:
kern = connd['kernel']
else:
kern = None
tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
src_size=250, tgt_color=clr, tgt_size=20,
kernel_color='green')
beautify_layer(layer, fig,
xlim=xlim, ylim=ylim, xticks=xticks, yticks=yticks,
xlabel='', ylabel='')
fig.gca().grid(False)
# -----------------------------------------------
# Simple connection
#{ conn1 #}
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
'elements': 'iaf_psc_alpha'})
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-2., -1.],
'upper_right': [2., 1.]}}}
tp.ConnectLayers(l, l, conndict)
#{ end #}
fig = plt.figure()
fig.add_subplot(121)
conn_figure(fig, l, conndict,
targets=((tp.FindCenterElement(l), 'red'),
(tp.FindNearestElement(l, [4., 5.]), 'yellow')))
# same another time, with periodic bcs
lpbc = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
'elements': 'iaf_psc_alpha', 'edge_wrap': True})
tp.ConnectLayers(lpbc, lpbc, conndict)
fig.add_subplot(122)
conn_figure(fig, lpbc, conndict, showmask=False,
targets=((tp.FindCenterElement(lpbc), 'red'),
(tp.FindNearestElement(lpbc, [4., 5.]), 'yellow')))
plt.savefig('../user_manual_figures/conn1.png', bbox_inches='tight')
# -----------------------------------------------
# free masks
def free_mask_fig(fig, loc, cdict):
nest.ResetKernel()
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
'elements': 'iaf_psc_alpha'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc)
conn_figure(fig, l, cdict, xticks=range(-5, 6, 2), yticks=range(-5, 6, 2))
fig = plt.figure()
#{ conn2r #}
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-2., -1.],
'upper_right': [2., 1.]}}}
#{ end #}
free_mask_fig(fig, 231, conndict)
#{ conn2ro #}
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-2., -1.],
'upper_right': [2., 1.]},
'anchor': [-1.5, -1.5]}}
#{ end #}
free_mask_fig(fig, 234, conndict)
#{ conn2c #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 2.0}}}
#{ end #}
free_mask_fig(fig, 232, conndict)
#{ conn2co #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 2.0},
'anchor': [-2.0, 0.0]}}
#{ end #}
free_mask_fig(fig, 235, conndict)
#{ conn2d #}
conndict = {'connection_type': 'divergent',
'mask': {'doughnut': {'inner_radius': 1.5,
'outer_radius': 3.}}}
#{ end #}
free_mask_fig(fig, 233, conndict)
#{ conn2do #}
conndict = {'connection_type': 'divergent',
'mask': {'doughnut': {'inner_radius': 1.5,
'outer_radius': 3.},
'anchor': [1.5, 1.5]}}
#{ end #}
free_mask_fig(fig, 236, conndict)
plt.savefig('../user_manual_figures/conn2.png', bbox_inches='tight')
# -----------------------------------------------
# 3d masks
def conn_figure_3d(fig, layer, connd, targets=None, showmask=True,
showkern=False,
xticks=range(-5, 6), yticks=range(-5, 6),
xlim=[-5.5, 5.5], ylim=[-5.5, 5.5]):
if targets is None:
targets = ((tp.FindCenterElement(layer), 'red'),)
tp.PlotLayer(layer, fig=fig, nodesize=20, nodecolor=(.5, .5, 1.))
for src, clr in targets:
if showmask:
mask = connd['mask']
else:
mask = None
if showkern:
kern = connd['kernel']
else:
kern = None
tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
src_size=250, tgt_color=clr, tgt_size=60,
kernel_color='green')
ax = fig.gca()
ax.set_aspect('equal', 'box')
plt.draw()
def free_mask_3d_fig(fig, loc, cdict):
nest.ResetKernel()
l = tp.CreateLayer(
{'rows': 11, 'columns': 11, 'layers': 11, 'extent': [11., 11., 11.],
'elements': 'iaf_psc_alpha'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc, projection='3d')
conn_figure_3d(fig, l, cdict, xticks=range(-5, 6, 2),
yticks=range(-5, 6, 2))
fig = plt.figure()
#{ conn_3d_a #}
conndict = {'connection_type': 'divergent',
'mask': {'box': {'lower_left': [-2., -1., -1.],
'upper_right': [2., 1., 1.]}}}
#{ end #}
free_mask_3d_fig(fig, 121, conndict)
#{ conn_3d_b #}
conndict = {'connection_type': 'divergent',
'mask': {'spherical': {'radius': 2.5}}}
#{ end #}
free_mask_3d_fig(fig, 122, conndict)
plt.savefig('../user_manual_figures/conn_3d.png', bbox_inches='tight')
# -----------------------------------------------
# grid masks
def grid_mask_fig(fig, loc, cdict):
nest.ResetKernel()
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
'elements': 'iaf_psc_alpha'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc)
conn_figure(fig, l, cdict, xticks=range(-5, 6, 2), yticks=range(-5, 6, 2),
showmask=False)
fig = plt.figure()
#{ conn3 #}
conndict = {'connection_type': 'divergent',
'mask': {'grid': {'rows': 3, 'columns': 5}}}
#{ end #}
grid_mask_fig(fig, 131, conndict)
#{ conn3c #}
conndict = {'connection_type': 'divergent',
'mask': {'grid': {'rows': 3, 'columns': 5},
'anchor': {'row': 1, 'column': 2}}}
#{ end #}
grid_mask_fig(fig, 132, conndict)
#{ conn3x #}
conndict = {'connection_type': 'divergent',
'mask': {'grid': {'rows': 3, 'columns': 5},
'anchor': {'row': -1, 'column': 2}}}
#{ end #}
grid_mask_fig(fig, 133, conndict)
plt.savefig('../user_manual_figures/conn3.png', bbox_inches='tight')
# -----------------------------------------------
# free masks
def kernel_fig(fig, loc, cdict, showkern=True):
nest.ResetKernel()
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
'elements': 'iaf_psc_alpha'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc)
conn_figure(fig, l, cdict, xticks=range(-5, 6, 2), yticks=range(-5, 6, 2),
showkern=showkern)
fig = plt.figure()
#{ conn4cp #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': 0.5}
#{ end #}
kernel_fig(fig, 231, conndict)
#{ conn4g #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.}}}
#{ end #}
kernel_fig(fig, 232, conndict)
#{ conn4gx #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}, 'anchor': [1.5, 1.5]},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
'anchor': [1.5, 1.5]}}}
#{ end #}
kernel_fig(fig, 233, conndict)
plt.draw()
#{ conn4cut #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
'cutoff': 0.5}}}
#{ end #}
kernel_fig(fig, 234, conndict)
#{ conn42d #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': {'gaussian2D': {'p_center': 1.0,
'sigma_x': 1., 'sigma_y': 3.}}}
#{ end #}
kernel_fig(fig, 235, conndict, showkern=False)
plt.savefig('../user_manual_figures/conn4.png', bbox_inches='tight')
# -----------------------------------------------
def wd_fig(fig, loc, ldict, cdict, what, rpos=None,
xlim=[-1, 51], ylim=[0, 1], xticks=range(0, 51, 5),
yticks=np.arange(0., 1.1, 0.2), clr='blue',
label=''):
nest.ResetKernel()
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict)
ax = fig.add_subplot(loc)
if rpos is None:
rn = nest.GetLeaves(l)[0][:1] # first node
else:
rn = tp.FindNearestElement(l, rpos)
conns = nest.GetConnections(rn)
cstat = nest.GetStatus(conns)
vals = np.array([sd[what] for sd in cstat])
tgts = [sd['target'] for sd in cstat]
locs = np.array(tp.GetPosition(tgts))
ax.plot(locs[:, 0], vals, 'o', mec='none', mfc=clr, label=label)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
fig = plt.figure()
#{ conn5lin #}
ldict = {'rows': 1, 'columns': 51,
'extent': [51., 1.], 'center': [25., 0.],
'elements': 'iaf_psc_alpha'}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
#{ end #}
wd_fig(fig, 311, ldict, cdict, 'weight', label='Weight')
wd_fig(fig, 311, ldict, cdict, 'delay', label='Delay', clr='red')
fig.gca().legend()
lpdict = {'rows': 1, 'columns': 51, 'extent': [51., 1.], 'center': [25., 0.],
'elements': 'iaf_psc_alpha', 'edge_wrap': True}
#{ conn5linpbc #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
#{ end #}
wd_fig(fig, 312, lpdict, cdict, 'weight', label='Weight')
wd_fig(fig, 312, lpdict, cdict, 'delay', label='Delay', clr='red')
fig.gca().legend()
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}}}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Linear',
rpos=[25., 0.], clr='orange')
#{ conn5exp #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'exponential': {'a': 1., 'tau': 5.}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Exponential',
rpos=[25., 0.])
#{ conn5gauss #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'gaussian': {'p_center': 1., 'sigma': 5.}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Gaussian', clr='green',
rpos=[25., 0.])
#{ conn5uniform #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'uniform': {'min': 0.2, 'max': 0.8}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Uniform', clr='red',
rpos=[25., 0.])
fig.gca().legend()
plt.savefig('../user_manual_figures/conn5.png', bbox_inches='tight')
# --------------------------------
def pn_fig(fig, loc, ldict, cdict,
xlim=[0., .5], ylim=[0, 3.5], xticks=range(0, 51, 5),
yticks=np.arange(0., 1.1, 0.2), clr='blue',
label=''):
nest.ResetKernel()
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict)
ax = fig.add_subplot(loc)
rn = nest.GetLeaves(l)[0]
conns = nest.GetConnections(rn)
cstat = nest.GetStatus(conns)
srcs = [sd['source'] for sd in cstat]
tgts = [sd['target'] for sd in cstat]
dist = np.array(tp.Distance(srcs, tgts))
ax.hist(dist, bins=50, histtype='stepfilled', normed=True)
r = np.arange(0., 0.51, 0.01)
plt.plot(r, 2 * np.pi * r * (1 - 2 * r) * 12 / np.pi, 'r-', lw=3,
zorder=-10)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
"""ax.set_xticks(xticks)
ax.set_yticks(yticks)"""
# ax.set_aspect(100, 'box')
ax.set_xlabel('Source-target distance d')
ax.set_ylabel('Connection probability pconn(d)')
fig = plt.figure()
#{ conn6 #}
pos = [[np.random.uniform(-1., 1.), np.random.uniform(-1., 1.)]
for j in range(1000)]
ldict = {'positions': pos, 'extent': [2., 2.],
'elements': 'iaf_psc_alpha', 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 1.0}},
'kernel': {'linear': {'c': 1., 'a': -2., 'cutoff': 0.0}},
'number_of_connections': 50,
'allow_multapses': True, 'allow_autapses': False}
#{ end #}
pn_fig(fig, 111, ldict, cdict)
plt.savefig('../user_manual_figures/conn6.png', bbox_inches='tight')
# -----------------------------
#{ conn7 #}
nest.ResetKernel()
nest.CopyModel('iaf_psc_alpha', 'pyr')
nest.CopyModel('iaf_psc_alpha', 'in')
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 0.5}},
'kernel': 0.8,
'sources': {'model': 'pyr'},
'targets': {'model': 'in'}}
cdict_i2p = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-0.2, -0.2],
'upper_right': [0.2, 0.2]}},
'sources': {'model': 'in'},
'targets': {'model': 'pyr'}}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
#{ end #}
# ----------------------------
#{ conn8 #}
nest.ResetKernel()
nest.CopyModel('iaf_psc_alpha', 'pyr')
nest.CopyModel('iaf_psc_alpha', 'in')
nest.CopyModel('static_synapse', 'exc', {'weight': 2.0})
nest.CopyModel('static_synapse', 'inh', {'weight': -8.0})
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 0.5}},
'kernel': 0.8,
'sources': {'model': 'pyr'},
'targets': {'model': 'in'},
'synapse_model': 'exc'}
cdict_i2p = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-0.2, -0.2],
'upper_right': [0.2, 0.2]}},
'sources': {'model': 'in'},
'targets': {'model': 'pyr'},
'synapse_model': 'inh'}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
#{ end #}
# ----------------------------
#{ conn9 #}
nrn_layer = tp.CreateLayer({'rows': 20,
'columns': 20,
'elements': 'iaf_psc_alpha'})
stim = tp.CreateLayer({'rows': 1,
'columns': 1,
'elements': 'poisson_generator'})
cdict_stim = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 0.1},
'anchor': [0.2, 0.2]}}
tp.ConnectLayers(stim, nrn_layer, cdict_stim)
#{ end #}
# ----------------------------
#{ conn10 #}
rec = tp.CreateLayer({'rows': 1,
'columns': 1,
'elements': 'spike_detector'})
cdict_rec = {'connection_type': 'convergent',
'mask': {'circular': {'radius': 0.1},
'anchor': [-0.2, 0.2]}}
tp.ConnectLayers(nrn_layer, rec, cdict_rec)
#{ end #}
# ----------------------------
#{ conn11 #}
rec = nest.Create('spike_detector')
nrns = nest.GetLeaves(nrn_layer, local_only=True)[0]
nest.Connect(nrns, rec)
#{ end #}
| hesam-setareh/nest-simulator | topology/doc/user_manual_scripts/connections.py | Python | gpl-2.0 | 18,562 | [
"Gaussian"
] | 021635a1ae20c557b80bba0a5784aaabe592046d98acad0c4f57579aef976384 |
""" Tornado-based HTTPs JobManager service.
"""
from DIRAC import gLogger
from DIRAC.Core.Tornado.Server.TornadoService import TornadoService
from DIRAC.WorkloadManagementSystem.Service.JobManagerHandler import JobManagerHandlerMixin
sLog = gLogger.getSubLogger(__name__)
class TornadoJobManagerHandler(JobManagerHandlerMixin, TornadoService):
log = sLog
def initializeRequest(self):
self.diracSetup = self.get_argument("clientSetup")
return JobManagerHandlerMixin.initializeRequest(self)
| DIRACGrid/DIRAC | src/DIRAC/WorkloadManagementSystem/Service/TornadoJobManagerHandler.py | Python | gpl-3.0 | 518 | [
"DIRAC"
] | e3e181345604e679dd4ef08d0ac15bd1f0947b18b0a932bdb49048d1c217e392 |
#!/usr/bin/env python
"""Script to run Selenium2Library acceptance tests.
Tests are executed using Robot Framework and results verified automatically
afterwards using `robotstatuschecker` tool. The tool can be installed using
`pip install robotstatuschecker` and more information about it can be found
from: https://github.com/robotframework/statuschecker/. Notice that initially
some tests may fail.
When running test by using browser from Sauce labs, it is required that the
Sauce Connect is used. The Sauce Connect allows the browser from Sauce Labs
reach the acceptance test web server. The acceptance test uses tunnel with
name `localtunnel` and therefore when establishing the Sauce Connect tunnel
use the following command:
`bin/sc -u YOUR_USERNAME -k YOUR_ACCESS_KEY -i localtunnel`
More details and to downlaod Sauce Connect visit:
https://wiki.saucelabs.com/display/DOCS/High+Availability+Sauce+Connect+Setup
It is possible to pass Robot Framework command line arguments to the test
execution as last arguments to the `run_tests.py` command. It is
recommended to use arguments to select required suite or test for the
execution when developing new functionality for the library. Example like
--test, --suite, --include and --exclude.
Examples:
run_tests.py chrome
run_tests.py --interpreter jython firefox --suite javascript
run_tests.py chrome --sauceusername your_username --saucekey account_key --suite javascript
"""
from __future__ import print_function
from contextlib import contextmanager
import os
import sys
import argparse
import textwrap
import subprocess
from robot import rebot_cli
try:
import robotstatuschecker
except ImportError:
sys.exit('Required `robotstatuschecker` not installed.\n'
'Install it with `pip install robotstatuschecker`.')
from run_unit_tests import run_unit_tests
# Folder settings
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
ACCEPTANCE_TEST_DIR = os.path.join(ROOT_DIR, "acceptance")
RESOURCES_DIR = os.path.join(ROOT_DIR, "resources")
RESULTS_DIR = os.path.join(ROOT_DIR, "results")
SRC_DIR = os.path.normpath(os.path.join(ROOT_DIR, "..", "src"))
TEST_LIBS_DIR = os.path.join(RESOURCES_DIR, "testlibs")
HTTP_SERVER_FILE = os.path.join(RESOURCES_DIR, "testserver", "testserver.py")
# Travis settins for pull request
TRAVIS = os.environ.get("TRAVIS", False)
TRAVIS_EVENT_TYPE = os.environ.get("TRAVIS_EVENT_TYPE", None)
TRAVIS_JOB_NUMBER = os.environ.get("TRAVIS_JOB_NUMBER", "localtunnel")
SAUCE_USERNAME = os.environ.get("SAUCE_USERNAME", None)
SAUCE_ACCESS_KEY = os.environ.get("SAUCE_ACCESS_KEY", None)
ROBOT_OPTIONS = [
'--doc', 'Selenium2Library acceptance tests with {browser}',
'--outputdir', RESULTS_DIR,
'--variable', 'BROWSER:{browser}',
'--report', 'NONE',
'--log', 'NONE',
'--loglevel', 'DEBUG',
'--pythonpath', SRC_DIR,
'--pythonpath', TEST_LIBS_DIR
]
REBOT_OPTIONS = [
'--outputdir', RESULTS_DIR,
'--critical', 'regression',
'--noncritical', 'inprogress',
'--noncritical', 'known issue {browser}',
]
def unit_tests():
print('Running unit tests')
failures = run_unit_tests()
if failures:
print('\nUnit tests failed! Not running acceptance tests.')
sys.exit(failures)
def acceptance_tests(interpreter, browser, rf_options=[],
sauce_username=None, sauce_key=None):
if not os.path.exists(RESULTS_DIR):
os.mkdir(RESULTS_DIR)
with http_server():
execute_tests(interpreter, browser, rf_options,
sauce_username, sauce_key)
failures = process_output(browser, rf_options)
if failures:
print('\n{} critical test{} failed.'
.format(failures, 's' if failures != 1 else ''))
else:
print('\nAll critical tests passed.')
sys.exit(failures)
@contextmanager
def http_server():
serverlog = open(os.path.join(RESULTS_DIR, 'serverlog.txt'), 'w')
process = subprocess.Popen(['python', HTTP_SERVER_FILE, 'start'],
stdout=serverlog, stderr=subprocess.STDOUT)
try:
yield
finally:
subprocess.call(['python', HTTP_SERVER_FILE, 'stop'])
process.wait()
serverlog.close()
def execute_tests(interpreter, browser, rf_options, sauce_username, sauce_key):
    """Build the robot.run command line and execute the acceptance tests."""
    runner = interpreter.split() + ['-m', 'robot.run']
    options = [opt.format(browser=browser) for opt in ROBOT_OPTIONS]
    options.extend(rf_options)
    if sauce_username and sauce_key:
        options += get_sauce_conf(browser, sauce_username, sauce_key)
    command = runner + options + [ACCEPTANCE_TEST_DIR]
    log_start(command, sauce_username, sauce_key)
    syslog = os.path.join(RESULTS_DIR, 'syslog.txt')
    subprocess.call(
        command, env=dict(os.environ, ROBOT_SYSLOG_FILE=syslog)
    )
def log_start(command_list, *hiddens):
    """Print the command about to run, masking any secret values.

    Every truthy value in *hiddens (e.g. SauceLabs credentials) is
    replaced by an equal-length run of '*' characters.
    """
    command = subprocess.list2cmdline(command_list)
    for secret in filter(None, hiddens):
        command = command.replace(secret, '*' * len(secret))
    print()
    print('Starting test execution with command:')
    print(command)
def get_sauce_conf(browser, sauce_username, sauce_key):
    """Return robot variables that configure a SauceLabs remote browser.

    Chrome on Travis runs locally, so no SauceLabs variables are needed
    in that case.
    """
    if TRAVIS and browser == 'chrome':
        return []
    remote_url = (
        'REMOTE_URL:http://{}:{}@ondemand.saucelabs.com:80/wd/hub'.format(
            sauce_username, sauce_key
        )
    )
    capabilities = (
        'DESIRED_CAPABILITIES:build:{0}-{1},tunnel-identifier:{0}'.format(
            TRAVIS_JOB_NUMBER, browser
        )
    )
    return [
        '--variable', 'SAUCE_USERNAME:{}'.format(sauce_username),
        '--variable', 'SAUCE_ACCESS_KEY:{}'.format(sauce_key),
        '--variable', remote_url,
        '--variable', capabilities,
    ]
def process_output(browser, rf_options):
    """Post-process output.xml with statuschecker and rebot.

    Returns rebot's exit code, i.e. the number of critical test
    failures (None if rebot_cli unexpectedly returns instead of
    raising SystemExit).
    """
    print('Verifying results...')
    options = []
    output = os.path.join(RESULTS_DIR, 'output.xml')
    robotstatuschecker.process_output(output, verbose=False)
    options.extend([opt.format(browser=browser) for opt in REBOT_OPTIONS])
    options += rf_options
    try:
        rebot_cli(options + [output])
    except SystemExit as exc:  # renamed from 'exit': don't shadow the builtin
        return exc.code
def sauce_credentials(sauce_username, sauce_key):
    """Pick SauceLabs credentials: explicit arguments win over Travis env."""
    if TRAVIS and not sauce_username and not sauce_key:
        return SAUCE_USERNAME, SAUCE_ACCESS_KEY
    return sauce_username, sauce_key
if __name__ == '__main__':
    # Build the CLI from the module docstring: first line becomes the
    # description, the remainder (after one blank line) the epilog.
    parser = argparse.ArgumentParser(
        description=__doc__.splitlines()[0],
        formatter_class=argparse.RawTextHelpFormatter,
        epilog='\n'.join(__doc__.splitlines()[2:])
    )
    parser.add_argument(
        '--interpreter',
        '-I',
        default='python',
        help=textwrap.dedent("""\
            Any Python interpreter supported by the library.
            E.g. `python`, `jython` or `c:\\Python27\\python.exe`.
            By default set to `python`.
        """)
    )
    parser.add_argument(
        'browser',
        # typo fix: '`chrome`or' -> '`chrome` or'
        help='Any browser supported by the library (e.g. `chrome` or `firefox`)'
    )
    parser.add_argument(
        '--sauceusername',
        '-U',
        # typo fix: SaucuLabs -> SauceLabs
        help='Username to order browser from SauceLabs'
    )
    parser.add_argument(
        '--saucekey',
        '-K',
        help='Access key to order browser from SauceLabs'
    )
    # Any arguments argparse does not recognise are forwarded to Robot.
    args, rf_options = parser.parse_known_args()
    browser = args.browser.lower().strip()
    if TRAVIS and browser != 'chrome' and TRAVIS_EVENT_TYPE != 'cron':
        print(
            'Can not run test with browser "{}" from SauceLabs with PR.\n'
            'SauceLabs can be used only when running with cron and from '
            'Selenium2Library master branch, but your event type '
            'was "{}". Only Chrome is supported with PR and when using '
            'Travis'.format(browser, TRAVIS_EVENT_TYPE)
        )
        sys.exit(0)
    sauce_username, sauce_key = sauce_credentials(
        args.sauceusername, args.saucekey)
    unit_tests()
    acceptance_tests(args.interpreter, browser, rf_options,
                     sauce_username, sauce_key)
| SergiuTudos/Selenium2Library | test/run_tests.py | Python | apache-2.0 | 8,075 | [
"VisIt"
] | e373870e98a1dc0e5ff9ce074cfaf2009e460936675da9e3905cacec9d1d9f55 |
############################################################################
# add field index shelves to flat-file database mechanism;
# to optimize "index only" displays, use classes at end of this file;
# change browse, index, submit to use new loaders for "Index only" mode;
# minor nit: uses single lock file for all index shelve read/write ops;
# storing record copies instead of filenames in index shelves would be
# slightly faster (avoids opening flat files), but would take more space;
# falls back on original brute-force load logic for fields not indexed;
# shelve.open creates empty file if doesn't yet exist, so never fails;
# to start, create DbaseFilesIndex/{commentDB,errataDB}/indexes.lck;
############################################################################
import sys; sys.path.insert(0, '..') # check admin parent dir first
from Mutex import mutexcntl # fcntl path okay: not 'nobody'
import dbfiles, shelve, pickle, string, sys
class Dbase(mutexcntl.MutexCntl, dbfiles.Dbase):
    """Flat-file database extended with per-field shelve indexes.

    Indexed fields each get a shelve mapping field-value -> list of
    record filenames; a single mutex lock file guards all index
    read/write operations.  Fields without an index fall back on the
    brute-force load logic inherited from dbfiles.Dbase.
    """
    def makeKey(self):
        # return the filename computed by the preceding cacheKey() call
        return self.cachedKey
    def cacheKey(self):                                  # save filename
        # writeItem needs the generated key again to update the indexes
        self.cachedKey = dbfiles.Dbase.makeKey(self)
        return self.cachedKey
    def indexName(self, fieldname):
        # one shelve file per indexed field; spaces aren't filename-safe
        # (str.replace instead of the deprecated string-module function)
        return self.dirname + fieldname.replace(' ', '-')
    def safeWriteIndex(self, fieldname, newdata, recfilename):
        # record recfilename under the record's field value;
        # runs with the exclusive lock held (see writeItem)
        index = shelve.open(self.indexName(fieldname))
        try:
            try:
                keyval = newdata[fieldname]      # recs have all fields
                reclist = index[keyval]          # fetch, mod, rewrite
                reclist.append(recfilename)      # add to current list
                index[keyval] = reclist
            except KeyError:
                index[keyval] = [recfilename]    # add to new list
        finally:
            index.close()                        # flush index to disk
    def safeLoadKeysList(self, fieldname):
        # sorted list of distinct values for fieldname; shared lock held
        if fieldname in self.indexfields:
            index = shelve.open(self.indexName(fieldname))
            try:
                keys = index.keys()
            finally:
                index.close()                    # was left open before
            keys.sort()
        else:
            keys, index = self.loadIndexedTable(fieldname)
        return keys
    def safeLoadByKey(self, fieldname, fieldvalue):
        # all records whose fieldname equals fieldvalue; shared lock held
        if fieldname in self.indexfields:
            dbase = shelve.open(self.indexName(fieldname))
            try:
                try:
                    index = dbase[fieldvalue]
                    reports = []
                    for filename in index:
                        pathname = self.dirname + filename + '.data'
                        reports.append(pickle.load(open(pathname, 'r')))
                    return reports
                except KeyError:
                    return []
            finally:
                dbase.close()                    # was left open before
        else:
            key, index = self.loadIndexedTable(fieldname)
            try:
                return index[fieldvalue]
            except KeyError:
                return []
    # top-level interfaces (plus dbcommon and dbfiles)
    def writeItem(self, newdata):
        # extend dbfiles.Dbase.writeItem to also update the field indexes
        filename = self.cacheKey()
        dbfiles.Dbase.writeItem(self, newdata)
        for fieldname in self.indexfields:
            self.exclusiveAction(self.safeWriteIndex,
                                 fieldname, newdata, filename)
    def loadKeysList(self, fieldname):
        # load field's keys list only
        return self.sharedAction(self.safeLoadKeysList, fieldname)
    def loadByKey(self, fieldname, fieldvalue):
        # load matching recs list only
        return self.sharedAction(self.safeLoadByKey, fieldname, fieldvalue)
class DbaseErrata(Dbase):
    # errata-report database: storage paths plus the fields to index
    dirname = 'DbaseFilesIndexed/errataDB/'
    filename = dirname + 'indexes'
    indexfields = ['Submitter name', 'Submit date', 'Report state']
class DbaseComment(Dbase):
    # reader-comment database: storage paths plus the fields to index
    dirname = 'DbaseFilesIndexed/commentDB/'
    filename = dirname + 'indexes'
    indexfields = ['Submitter name', 'Report state']  # index just these
#
# self-test
#
if __name__ == '__main__':
    # Self-test (Python 2): rebuild the comment database from scratch,
    # then exercise both the inherited brute-force loaders and the new
    # index-based loaders, plus a few boundary cases.
    import os
    dbase = DbaseComment()
    os.system('rm %s*' % dbase.dirname)          # empty dbase dir
    os.system('echo > %s.lck' % dbase.filename)  # init lock file
    # 3 recs; normally have submitter-email and description, not page
    # submit-date and report-state are added auto by rec store method
    records = [{'Submitter name': 'Bob',   'Page': 38, 'Submit mode': ''},
               {'Submitter name': 'Brian', 'Page': 40, 'Submit mode': ''},
               {'Submitter name': 'Bob',   'Page': 42, 'Submit mode': 'email'}]
    for rec in records: dbase.storeItem(rec)
    dashes = '-'*80
    # small print helpers for the test output
    def one(item):
        print dashes; print item
    def all(list):
        print dashes
        for x in list: print x
    one('old stuff')
    all(dbase.loadSortedTable('Submitter name'))    # load flat list
    all(dbase.loadIndexedTable('Submitter name'))   # load, grouped
    #one(dbase.loadIndexedTable('Submitter name')[0])
    #all(dbase.loadIndexedTable('Submitter name')[1]['Bob'])
    #all(dbase.loadIndexedTable('Submitter name')[1]['Brian'])
    one('new stuff')
    one(dbase.loadKeysList('Submitter name'))       # bob, brian
    all(dbase.loadByKey('Submitter name', 'Bob'))   # two recs match
    all(dbase.loadByKey('Submitter name', 'Brian')) # one rec matches
    one(dbase.loadKeysList('Report state'))         # all match
    all(dbase.loadByKey('Report state', 'Not yet verified'))
    one('boundary cases')
    all(dbase.loadByKey('Submit mode', ''))         # not indexed: load
    one(dbase.loadByKey('Report state', 'Nonesuch')) # unknown value: []
    try: dbase.loadByKey('Nonesuch', 'Nonesuch')    # bad fields: exc
    except: print 'Nonesuch failed'
| simontakite/sysadmin | pythonscripts/programmingpython/Internet/Web/PyErrata/AdminTools/dbaseindexed.py | Python | gpl-2.0 | 5,932 | [
"Brian"
] | 0bb55e91f84a1097e626d6115bc80d9516d5bbdb9be6c30f81f41d50edd0b36d |
# -*- coding: utf-8 -*-
'''
Smoothtest
Copyright (c) 2014 Juju. Inc
Code Licensed under MIT License. See LICENSE file.
'''
import rel_imp
rel_imp.init()
import os
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from .base import AutoTestBase
from .Main import Main
from .TestSearcher import TestSearcher
import sys
from smoothtest.base import CommandBase, is_valid_file, is_file_or_dir
class Command(AutoTestBase, CommandBase):
    """Command-line interface for the smoothtest autotest tool.

    Builds the argument parsers, resolves test specs into the test
    configuration dict, and launches the autotest main loop.
    """
    def get_epilog(self, cmd_name='autotest'):
        """Return the parser epilog text with *cmd_name* substituted in."""
        # typo fixes in user-facing text: "an detailed" -> "a detailed",
        # "plase" -> "please", "commmands" -> "commands"
        return '''
== Autotest Guide
For a detailed introduction in the usage of the autotest command, please visit:
https://github.com/joaduo/smoothtest/blob/master/smoothtest/autotest/AutotestGuide.md
Or the bundled file smoothtest/autotest/AutotestGuide.md with this installation
== Smoothtest configuration
For the options that smoothtest accepts you can consult the file at:
https://github.com/joaduo/smoothtest/blob/master/smoothtest/settings/default.py
Or file smoothtest/settings/default.py bundled in this installation
== Example commands
{cmd_name} # starts autotest CLI
{cmd_name} -t <path to test_file.py> # autotest CLI with that test selected
{cmd_name} -t <python.like.module.path.to.test> # same as above but using dot paths
'''.format(**locals())
    def get_parser(self):
        """Build the main autotest argument parser."""
        parser = ArgumentParser(description='Automatically runs (unit) '
                                'tests upon code/file changes.',
                                formatter_class=RawDescriptionHelpFormatter,
                                epilog=self.get_epilog())
        parser.add_argument(
            '-t',
            '--tests',
            type=is_valid_file,
            # typo fix: "path to to be monitored" -> "path to be monitored"
            help='Tests\' files or modules path to be monitored.'
            ' Changes on these files trigger a reload of those same modules '
            'and rerunning the tests they contain.',
            default=[],
            nargs='*')
        parser.add_argument(
            '-r',
            '--methods-regex',
            type=str,
            help='Specify regex for Methods matching',
            default='')
        parser.add_argument(
            '-n',
            '--no-ipython',
            help='Do not embed ipython'
            ' interactive shell as UI.',
            default=False,
            action='store_true')
        parser.add_argument(
            '--smoke',
            help='Do not run tests. Simply test'
            ' the whole monitoring system',
            default=None,
            action='store_true')
        parser.add_argument(
            '-F',
            '--full-reloads',
            type=is_file_or_dir,
            help='Files or directories to be monitored. They will trigger '
            'reloading all files involved and rerunning tests.',
            default=[],
            nargs='+')
        parser.add_argument(
            '-m',
            '--fnmatch',
            type=str,
            help='Fnmatch '
            'pattern to filter files in full reloads directories.'
            ' (default=*.py)',
            default='*.py')
        self._add_smoothtest_common_args(parser)
        return parser
    def get_extension_parser(self):
        """Build the parser used inside the autotest CLI (extra options)."""
        parser = self.get_parser()
        parser.add_argument(
            '-f',
            '--force',
            help='force reloading tests '
            '(also restarting webdriver)',
            default=False,
            action='store_true')
        parser.add_argument('-u', '--update', help='update test config',
                            default=False, action='store_true')
        parser.add_argument('--nosmoke', help='force non-smoke mode for'
                            ' updating', default=None, action='store_true')
        return parser
    def get_test_config(self, args, argv):
        """Resolve parsed args into the test-config dict consumed by Main.run."""
        searcher = TestSearcher()
        test_paths = set()
        partial_reloads = set()
        for tst in args.tests:
            tst = self._path_to_modstr(tst)
            # expand each test spec into concrete test paths plus the
            # files whose changes should trigger a partial reload
            paths, partial = searcher.solve_paths((tst, args.methods_regex))
            if paths:
                test_paths.update(paths)
                partial_reloads.update(partial)
        test_config = dict(test_paths=test_paths,
                           partial_reloads=partial_reloads,
                           full_reloads=args.full_reloads,
                           full_filter=args.fnmatch,
                           smoke=args.smoke,
                           argv=argv,
                           )
        return test_config
    def main(self, argv=None):
        """Parse arguments and launch the autotest main loop."""
        curdir = os.path.abspath(os.curdir)
        filedir = os.path.abspath(os.path.dirname(__file__))
        # Remove the dir of this file if we are not in this directory
        if curdir != filedir and filedir in sys.path:
            sys.path.remove(filedir)
        # renamed 'unkonwn' -> 'unknown'
        args, unknown = self.get_parser().parse_known_args(argv)
        self._process_common_args(args)
        # Run autotest Main loop (ipython UI + subprocesses)
        main = Main()
        test_config = self.get_test_config(args, unknown)
        main.run(embed_ipython=not args.no_ipython, test_config=test_config)
def smoke_test_module():
    """Quick sanity check of the Command class and the path validators."""
    cmd = Command()
    cmd.get_parser()
    ext_parser = cmd.get_extension_parser()
    args, unknown = ext_parser.parse_known_args([])
    cmd.get_test_config(args, unknown)
    for candidate in ('fulcrum.views.tests.home',
                      'fulcrum/views/tests/home.py'):
        is_valid_file(candidate)
        is_file_or_dir(candidate)
def main(argv=None):
    """Module entry point: delegate to Command.main()."""
    command = Command()
    command.main(argv)
if __name__ == "__main__":
    main()
| joaduo/smoothtest | smoothtest/autotest/Command.py | Python | mit | 5,599 | [
"VisIt"
] | ef55971453bfe99d1f321206988d953a8921845b058c8c40445d19d02d07436a |
# Copyright 2009, Mark Fassler
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
import logging
import vtk
from jv.jvPaths import *
import xml.etree.ElementTree as ET
import xml #for error handling
import os
# Map each abstract VTK role to the concrete XML element tags that can
# fill that role in a pipeline.
vtkTypes = {
    'Mapper': ['DataSetMapper', 'PolyDataMapper'],
    'Algorithm': ['CylinderSource', 'SphereSource', 'CubeSource', 'DiskSource',
                  'ConeSource', 'UnstructuredGridReader', 'PLYReader', 'PolyDataReader',
                  'TextureMapToPlane', 'TextureMapToSphere', 'ContourFilter',
                  'TransformTextureCoords', 'TransformPolyDataFilter',
                  'TransformFilter', 'ImplicitModeller',
                  'Glyph3D', 'VertexGlyphFilter', 'GlyphSource2D', 'ImplicitSum',
                  'SampleFunction', 'PolyDataNormals'],
    'ImageReader': ['BMPReader'],
    'LinearTransform': ['Transform'],
    'Prop3D': ['Actor'],
    'ImplicitFunction': ['Plane', 'PerlinNoise'],
}
def coordsFromString(string):
    """Parse an "x,y,z" attribute value into a tuple of three floats."""
    parts = string.split(',')
    return float(parts[0]), float(parts[1]), float(parts[2])
def str2floats(myString):
    """Map a comma-separated string to floats."""
    # map(float, ...) directly -- the identity lambda wrapper added nothing
    return map(float, myString.split(","))
def str2ints(myString):
    """Map a comma-separated string to ints."""
    # map(int, ...) directly -- the identity lambda wrapper added nothing
    return map(int, myString.split(","))
def webColorToVtkColor(string):
    """Convert an HTML color like '#rrggbb' to a VTK (r, g, b) float tuple."""
    channels = [int(string[i:i + 2], 16) / 255. for i in (1, 3, 5)]
    return channels[0], channels[1], channels[2]
class XML2VTK:
    """Build VTK pipeline objects (actors, assemblies, lights, ...) from
    an XML description rooted at a <VTKpipelines> element.

    Element tags are dispatched through self.namesToFunctions; the
    resulting top-level objects are collected into the dictionaries
    self.actors, self.assemblies, self.glyphsources, self.lights and
    self.textures, keyed by each element's 'name' attribute.
    """
    def __init__ (self, topElement, basedir='', bonelengths=''):
        # topElement   -- parsed XML root (expected tag: VTKpipelines)
        # basedir      -- prefix for relative file paths in reader elements
        # bonelengths  -- mapping of bone names to lengths; presumably a
        #                 dict (only indexed by key) -- TODO confirm caller
        self.logger = logging.getLogger(name='XML2VTK')
        self.logger.debug('__init__()')
        self.actors = {}
        self.assemblies = {}
        self.glyphsources = {}
        self.lights = {}
        self.textures = {}
        self.bonelengths = bonelengths
        self.basedir = basedir
        # dispatch table: XML tag name -> builder method
        self.namesToFunctions = {}
        self.namesToFunctions['Actor'] = self.Actor
        self.namesToFunctions['Assembly'] = self.Assembly
        self.namesToFunctions['BMPReader'] = self.BMPReader
        self.namesToFunctions['ConeSource'] = self.ConeSource
        self.namesToFunctions['ContourFilter'] = self.ContourFilter
        self.namesToFunctions['CubeSource'] = self.CubeSource
        self.namesToFunctions['CylinderSource'] = self.CylinderSource
        self.namesToFunctions['DiskSource'] = self.DiskSource
        self.namesToFunctions['DataSetMapper'] = self.DataSetMapper
        self.namesToFunctions['glyph'] = self.glyph # wrapper
        self.namesToFunctions['Glyph3D'] = self.Glyph3D
        self.namesToFunctions['GlyphSource2D'] = self.GlyphSource2D
        self.namesToFunctions['ImplicitModeller'] = self.ImplicitModeller
        self.namesToFunctions['ImplicitSum'] = self.ImplicitSum
        self.namesToFunctions['Light'] = self.Light
        self.namesToFunctions['PerlinNoise'] = self.PerlinNoise
        self.namesToFunctions['Plane'] = self.Plane
        self.namesToFunctions['PLYReader'] = self.PLYReader
        self.namesToFunctions['PolyDataMapper'] = self.PolyDataMapper
        self.namesToFunctions['PolyDataNormals'] = self.PolyDataNormals
        self.namesToFunctions['PolyDataReader'] = self.PolyDataReader
        self.namesToFunctions['SampleFunction'] = self.SampleFunction
        self.namesToFunctions['SphereSource'] = self.SphereSource
        self.namesToFunctions['Texture'] = self.Texture
        self.namesToFunctions['TextureMapToPlane'] = self.TextureMapToPlane
        self.namesToFunctions['TextureMapToSphere'] = self.TextureMapToSphere
        self.namesToFunctions['Transform'] = self.Transform
        self.namesToFunctions['TransformPolyDataFilter'] = self.TransformPolyDataFilter
        self.namesToFunctions['TransformFilter'] = self.TransformFilter
        self.namesToFunctions['UnstructuredGridReader'] = self.UnstructuredGridReader
        self.namesToFunctions['VertexGlyphFilter'] = self.VertexGlyphFilter
        if topElement.tag == "VTKpipelines":
            self.logger.debug('inside a <VTKpipelines> element')
            if 'bgcolor' in topElement.keys():
                self.bgcolor = webColorToVtkColor(topElement.get('bgcolor'))
            # All of these first-level elements get named and placed into
            # a python dictionary:
            for (elemType, elemDict) in [('Texture', self.textures),
                                         ('glyph', self.glyphsources),
                                         ('Actor', self.actors),
                                         ('Assembly', self.assemblies),
                                         ('Light', self.lights)]:
                for subElement in topElement.findall(elemType):
                    if 'name' in subElement.keys():
                        name = subElement.get('name')
                        try:
                            elemDict[name] = self.namesToFunctions[subElement.tag](subElement)
                        except:
                            self.logger.error('Failed to create <%s> %s' % (elemType, name))
                    else:
                        self.logger.error('First-level <%s> must have a name attribute.' % elemType)
    # <glyph> is a wrapper for any kind of Algorithm-type data
    def glyph(self, currentElement):
        """Return the output of the single Algorithm-type child of <glyph>.

        Returns '' (empty string) if no Algorithm child is present.
        """
        self.logger.debug(' inside a <glyph> element: "%s"' % currentElement.get('name'))
        algoData = ''
        # Datatype(s) I need for input: Algorithm
        AlgorithmElement = ''
        for childElement in currentElement.getchildren():
            if childElement.tag in vtkTypes['Algorithm']:
                AlgorithmElement = childElement
        if AlgorithmElement != '':
            algoData = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement)
        else:
            self.logger.error(' .. <glyph> needs an Algorithm-type childElement')
        return algoData
    def Texture(self, currentElement):
        """Build a vtkTexture from a <Texture> element.

        Requires one ImageReader-type child; honors the optional
        SetRepeat and SetInterpolate integer attributes.
        """
        self.logger.debug(' inside a <Texture> element: "%s"' % currentElement.get('name'))
        texture = vtk.vtkTexture()
        # Datatype(s) I need for input: ImageReader
        ImageReaderNode = ''
        for childElement in currentElement.getchildren():
            if childElement.tag in vtkTypes['ImageReader']:
                ImageReaderNode = childElement
        if ImageReaderNode != '':
            imageReader = self.namesToFunctions[ImageReaderNode.tag](ImageReaderNode)
            try:
                texture.SetInputConnection(imageReader.GetOutputPort())
            except:
                self.logger.error(' .. <Texture> failed to SetInputConnection')
        else:
            self.logger.error(' .. <Texture> needs an ImageReader-type childElement.')
        if 'SetRepeat' in currentElement.keys():
            try:
                texture.SetRepeat(int( currentElement.get('SetRepeat')))
            except:
                self.logger.error(' .. <Texture> failed to SetRepeat')
        if 'SetInterpolate' in currentElement.keys():
            try:
                texture.SetInterpolate(int( currentElement.get('SetInterpolate')))
            except:
                self.logger.error(' .. <Texture> failed to SetInterpolate')
        return texture
    def Assembly(self, currentElement):
        """Build a vtkAssembly from an <Assembly> element.

        All Prop3D-type children (e.g. <Actor>) are added as parts;
        SetPosition/SetOrientation take "x,y,z" attribute strings.
        """
        self.logger.debug(' inside an <Assembly> element: "%s"' % currentElement.get('name'))
        assembly = vtk.vtkAssembly()
        if 'SetPosition' in currentElement.keys():
            try:
                assembly.SetPosition(coordsFromString(currentElement.get('SetPosition')))
            except:
                self.logger.error(' .. <Assembly> failed to SetPosition')
        if 'SetOrientation' in currentElement.keys():
            try:
                assembly.SetOrientation(coordsFromString(currentElement.get('SetOrientation')))
            except:
                self.logger.error(' .. <Assembly> failed to SetOrientation')
        for childElement in currentElement.getchildren():
            if childElement.tag in vtkTypes['Prop3D']:
                actor = self.namesToFunctions[childElement.tag](childElement)
                try:
                    assembly.AddPart(actor)
                except:
                    self.logger.error(' .. <Assembly> failed to AddPart (ie, probably failed to add a childElement <Actor>)')
        return assembly
def BMPReader(self, currentElement):
reader = vtk.vtkBMPReader()
try:
reader.SetFileName( os.path.join(self.basedir, currentElement.get('SetFileName')) )
except:
self.logger.error(' .. <BMPReader> failed to SetFileName')
return reader
    def Actor(self, currentElement):
        """Build a vtkActor from an <Actor> element.

        Requires one Mapper-type child.  Optional attributes: href
        (makes the actor pickable and stores the link on it),
        SetPosition/SetOrientation/SetScale/SetColor ("x,y,z" strings),
        SetOpacity (float), color/hovercolor (HTML '#rrggbb'), and
        SetTexture (name of a previously created <Texture>).
        """
        self.logger.debug(' inside an <Actor> element: "%s"' % currentElement.get('name'))
        actor = vtk.vtkActor()
        # Datatype(s) I need for input: Mapper
        MapperElement = ''
        for childElement in currentElement.getchildren():
            if childElement.tag in vtkTypes['Mapper']:
                MapperElement = childElement
        if MapperElement != '':
            #self.logger.debug('  .. <Actor> setting mapper...')
            mapper = self.namesToFunctions[MapperElement.tag](MapperElement)
            try:
                actor.SetMapper(mapper)
            except:
                self.logger.error(' .. <Actor> failed to SetMapper')
        else:
            self.logger.error(' .. <Actor> needs a Mapper-type childElement')
        self.logger.debug('  .. <Actor> setting optional attributes...')
        # actors are unpickable by default; an href attribute re-enables picking
        actor.SetPickable(0)
        #if 'SetPickable' in currentElement.keys():
        #    actor.SetPickable( int(currentElement.get('SetPickable')) )
        if 'href' in currentElement.keys():
            actor.SetPickable(1)
            actor.href = currentElement.get('href')
        if 'SetPosition' in currentElement.keys():
            try:
                actor.SetPosition( coordsFromString(currentElement.get('SetPosition')) )
            except:
                self.logger.error("  .. <Actor> failed to SetPosition")
        if 'SetOrientation' in currentElement.keys():
            try:
                actor.SetOrientation( coordsFromString(currentElement.get('SetOrientation')) )
            except:
                self.logger.error("  .. <Actor> failed to SetOrientation")
        if 'SetScale' in currentElement.keys():
            try:
                actor.SetScale( coordsFromString(currentElement.get('SetScale')) )
            except:
                # NOTE(review): message says SetOrientation but this is SetScale
                self.logger.error("  .. <Actor> failed to SetOrientation")
        if 'SetOpacity' in currentElement.keys():
            try:
                actor.GetProperty().SetOpacity( float(currentElement.get('SetOpacity')) )
            except:
                self.logger.error("  .. <Actor> failed to SetOpacity")
        if 'SetColor' in currentElement.keys():
            try:
                actor.GetProperty().SetColor( coordsFromString(currentElement.get('SetColor')) )
            except:
                self.logger.error("  .. <Actor> failed to SetColor")
        if 'color' in currentElement.keys(): # allow for Web colors
            try:
                actor.color = webColorToVtkColor(currentElement.get('color'))
                actor.GetProperty().SetColor(actor.color)
            except:
                self.logger.error("  .. <Actor> failed to set HTML-style color")
        if 'hovercolor' in currentElement.keys(): # allow for Web colors
            actor.hovercolor = webColorToVtkColor(currentElement.get('hovercolor'))
        if 'SetTexture' in currentElement.keys():
            textureName = currentElement.get('SetTexture')
            if textureName in self.textures:
                actor.SetTexture( self.textures[textureName] )
            else:
                self.logger.error("  .. <Actor> unknown texture: %s" % textureName)
        self.logger.debug('  .. <Actor> done setting optional attributes.')
        return actor
    def Light(self, currentElement):
        """Build a vtkLight from a <Light> element.

        SetPosition and SetFocalPoint are effectively required (errors
        are logged if missing); SetPositional, SetColor, color (HTML),
        SetConeAngle and SetIntensity are optional.
        """
        self.logger.debug(' inside a <Light> element: "%s"' % currentElement.get('name'))
        light = vtk.vtkLight()
        try:
            light.SetPosition( coordsFromString(currentElement.get('SetPosition')) )
        except:
            self.logger.error("  .. <Light> failed to SetPosition")
        try:
            light.SetFocalPoint( coordsFromString(currentElement.get('SetFocalPoint')) )
        except:
            self.logger.error("  .. <Light> failed to SetFocalPoint")
        if 'SetPositional' in currentElement.keys():
            try:
                light.SetPositional( int(currentElement.get('SetPositional')) )
            except:
                self.logger.error("  .. <Light> failed to SetPositional")
        if 'SetColor' in currentElement.keys():
            try:
                light.SetColor( coordsFromString(currentElement.get('SetColor')) )
            except:
                self.logger.error("  .. <Light> failed to SetColor")
        if 'color' in currentElement.keys(): # give people the option of using HTML-style color:
            try:
                light.SetColor( webColorToVtkColor(currentElement.get('color')) )
            except:
                self.logger.error("  .. <Light> failed to set HTML-style color")
        if 'SetConeAngle' in currentElement.keys():
            try:
                light.SetConeAngle( float(currentElement.get('SetConeAngle')) )
            except:
                self.logger.error("  .. <Light> failed to SetConeAngle")
        if 'SetIntensity' in currentElement.keys():
            try:
                light.SetIntensity( float(currentElement.get('SetIntensity')) )
            except:
                self.logger.error("  .. <Light> failed to SetIntensity")
        return light
    def DataSetMapper(self, currentElement):
        """Build a vtkDataSetMapper connected to the output of the
        single Algorithm-type child element."""
        #self.logger.debug('  .. inside a <DataSetMapper>')
        mapper = vtk.vtkDataSetMapper()
        # Datatype(s) I need for input: Algorithm
        AlgorithmElement = ''
        for childElement in currentElement.getchildren():
            if childElement.tag in vtkTypes['Algorithm']:
                AlgorithmElement = childElement
        if AlgorithmElement != '':
            #self.logger.debug('  .. <DataSetMapper> trying to get a dataset from a %s' % AlgorithmElement.tag)
            dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement)
            try:
                mapper.SetInputConnection(dataset.GetOutputPort())
            except:
                self.logger.error("  .. <DataSetMapper> failed to SetInputConnection")
        else:
            self.logger.error('  .. <DataSetMapper> needs an Algorithm-type childElement')
        return mapper
    def VertexGlyphFilter(self, currentElement):
        """Build a vtkVertexGlyphFilter connected to the output of the
        single Algorithm-type child element."""
        gFilter = vtk.vtkVertexGlyphFilter()
        AlgorithmElement = ''
        for childElement in currentElement.getchildren():
            if childElement.tag in vtkTypes['Algorithm']:
                self.logger.debug('VertexGlyphFilter trying to add: %s' % (childElement.tag))
                AlgorithmElement = childElement
        if AlgorithmElement != '':
            dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement)
            try:
                gFilter.SetInputConnection(dataset.GetOutputPort())
            except Exception as err:
                self.logger.error("  .. <VertexGlyphFilter> failed to SetInputConnection")
                self.logger.error(err)
        else:
            self.logger.error('  .. <VertexGlyphFilter> needs an Algorithm-type childElement')
        return gFilter
    def GlyphSource2D(self, currentElement):
        """Build a vtkGlyphSource2D from a <GlyphSource2D> element.

        NOTE(review): the glyph type is hard-coded to Arrow; the
        commented-out SetGlyphType check below suggests a configurable
        type was planned but never implemented.
        """
        gsource = vtk.vtkGlyphSource2D()
        #if 'SetGlyphType' in currentElement.keys():
        gsource.SetGlyphTypeToArrow ()
        if 'SetFilled' in currentElement.keys():
            try:
                gsource.SetFilled( int(currentElement.get('SetFilled')) )
            except:
                self.logger.error('  .. <GlyphSource2D> failed to SetFilled')
        if 'SetScale' in currentElement.keys():
            try:
                gsource.SetScale( float(currentElement.get('SetScale')) )
            except:
                self.logger.error('  .. <GlyphSource2D> failed to SetScale')
        return gsource
    def PolyDataMapper(self, currentElement):
        """Build a vtkPolyDataMapper connected to the output of the
        single Algorithm-type child element."""
        mapper = vtk.vtkPolyDataMapper()
        # Datatype(s) I need for input: Algorithm
        AlgorithmElement = ''
        for childElement in currentElement.getchildren():
            if childElement.tag in vtkTypes['Algorithm']:
                AlgorithmElement = childElement
        if AlgorithmElement != '':
            dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement)
            try:
                mapper.SetInputConnection(dataset.GetOutputPort())
            except:
                self.logger.error("  .. <PolyDataMapper> failed to SetInputConnection")
        else:
            self.logger.error('  .. <PolyDataMapper> needs an Algorithm-type childElement')
        return mapper
    def TransformPolyDataFilter(self, currentElement):
        """Build a vtkTransformPolyDataFilter from one Algorithm-type
        child (the input) and one LinearTransform-type child."""
        transFilter = vtk.vtkTransformPolyDataFilter()
        # Datatype(s) I need for input: Algorithm, LinearTransform
        AlgorithmElement = ''
        TransformElement = ''
        for childElement in currentElement.getchildren():
            if childElement.tag in vtkTypes['Algorithm']:
                AlgorithmElement = childElement
            if childElement.tag in vtkTypes['LinearTransform']:
                TransformElement = childElement
        if AlgorithmElement != '':
            dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement)
            try:
                transFilter.SetInputConnection(dataset.GetOutputPort())
            except:
                self.logger.error('  .. <TransformPolyDataFilter> failed to SetInputConnection')
        else:
            self.logger.error('  .. <TransformPolyDataFilter> needs an Algorithm-type childElement')
        if TransformElement != '':
            transform = self.namesToFunctions[TransformElement.tag](TransformElement)
            try:
                transFilter.SetTransform(transform)
            except:
                self.logger.error('  .. <TransformPolyDataFilter> failed to SetTransform')
        else:
            self.logger.error('<TransformPolyDataFilter> needs an Transform-type childElement')
        return transFilter
    def TransformFilter(self, currentElement):
        """Build a vtkTransformFilter from one Algorithm-type child
        (the input) and one LinearTransform-type child."""
        transFilter = vtk.vtkTransformFilter()
        # Datatype(s) I need for input: Algorithm, LinearTransform
        AlgorithmElement = ''
        TransformElement = ''
        for childElement in currentElement.getchildren():
            if childElement.tag in vtkTypes['Algorithm']:
                AlgorithmElement = childElement
            if childElement.tag in vtkTypes['LinearTransform']:
                TransformElement = childElement
        if AlgorithmElement != '':
            dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement)
            try:
                transFilter.SetInputConnection(dataset.GetOutputPort())
            except:
                self.logger.error('  .. <TransformFilter> failed to SetInputConnection')
        else:
            self.logger.error('  .. <TransformFilter> needs an Algorithm-type childElement')
        if TransformElement != '':
            transform = self.namesToFunctions[TransformElement.tag](TransformElement)
            try:
                transFilter.SetTransform(transform)
            except:
                self.logger.error('  .. <TransformFilter> failed to SetTransform')
        else:
            self.logger.error('<TransformFilter> needs an Transform-type childElement')
        return transFilter
    def Transform(self, currentElement):
        """Build a vtkTransform from a <Transform> element.

        Rotations are always applied in Z, X, Y order regardless of the
        attribute order in the XML (see TODO below); Translate/Scale
        take "x,y,z" strings; boneBuild translates along Y by half the
        named bone's length.
        """
        transform = vtk.vtkTransform()
        # TODO: preserve the order of rotations...
        if 'RotateZ' in currentElement.keys():
            try:
                transform.RotateZ( float(currentElement.get('RotateZ')) )
            except:
                self.logger.error("  .. <Transform> failed to RotateZ")
        if 'RotateX' in currentElement.keys():
            try:
                transform.RotateX( float(currentElement.get('RotateX')) )
            except:
                self.logger.error("  .. <Transform> failed to RotateX")
        if 'RotateY' in currentElement.keys():
            try:
                transform.RotateY( float(currentElement.get('RotateY')) )
            except:
                self.logger.error("  .. <Transform> failed to RotateY")
        if 'Translate' in currentElement.keys():
            try:
                transform.Translate( coordsFromString(currentElement.get('Translate')) )
            except:
                self.logger.error('  .. <Transform> failed to Translate')
        if 'boneBuild' in currentElement.keys():
            try:
                transform.Translate(0.0, self.bonelengths[currentElement.get('boneBuild')] / 2., 0.0 )
            except:
                self.logger.error('  .. <Transform> failed to Translate from boneBuild')
        if 'Scale' in currentElement.keys():
            try:
                transform.Scale( coordsFromString(currentElement.get('Scale')))
            except:
                self.logger.error('  .. <Transform> failed to Scale')
        return transform
    def CylinderSource(self, currentElement):
        """Build a vtkCylinderSource from a <CylinderSource> element.

        SetRadius is effectively required; SetHeight, boneLength (height
        from the named bone's length), SetResolution and SetCapping are
        optional.
        """
        source = vtk.vtkCylinderSource()
        try:
            source.SetRadius( float(currentElement.get('SetRadius')) )
        except:
            self.logger.error('  .. <CylinderSource> failed to SetRadius')
        if 'SetHeight' in currentElement.keys():
            try:
                source.SetHeight( float(currentElement.get('SetHeight')) )
            except:
                self.logger.error('  .. <CylinderSource> failed to SetHeight')
        if 'boneLength' in currentElement.keys():
            try:
                source.SetHeight( self.bonelengths[currentElement.get('boneLength')] )
            except:
                self.logger.error('  .. <CylinderSource> failed to SetHeight from boneLength')
        if 'SetResolution' in currentElement.keys():
            try:
                source.SetResolution( int(currentElement.get('SetResolution')) )
            except:
                self.logger.error('  .. <CylinderSource> failed to SetResolution')
        if 'SetCapping' in currentElement.keys():
            try:
                source.SetCapping( int(currentElement.get('SetCapping')) )
            except:
                self.logger.error('  .. <CylinderSource> failed to SetCapping')
        return source
def DiskSource(self, currentElement):
source = vtk.vtkDiskSource()
try:
source.SetInnerRadius( float(currentElement.get('SetInnerRadius')) )
except:
self.logger.error(' .. <DiskSource> failed to SetInnerRadius')
try:
source.SetOuterRadius( float(currentElement.get('SetOuterRadius')) )
except:
self.logger.error(' .. <DiskSource> failed to SetOuterRadius')
if 'SetRadialResolution' in currentElement.keys():
try:
source.SetRadialResolution( int(currentElement.get('SetRadialResolution')) )
except:
self.logger.error(' .. <CylinderSource> failed to SetRadialResolution')
if 'SetCircumferentialResolution' in currentElement.keys():
try:
source.SetCircumferentialResolution( int(currentElement.get('SetCircumferentialResolution')) )
except:
self.logger.error(' .. <CylinderSource> failed to SetCircumferentialResolution')
return source
    def ConeSource(self, currentElement):
        """Build a vtkConeSource from a <ConeSource> element.

        SetHeight/SetRadius are effectively required; SetResolution,
        SetCenter and SetDirection ("x,y,z" strings) are optional.
        """
        source = vtk.vtkConeSource()
        try:
            source.SetHeight( float(currentElement.get('SetHeight')) )
        except:
            self.logger.error('  .. <ConeSource> failed to SetHeight')
        try:
            source.SetRadius( float(currentElement.get('SetRadius')) )
        except:
            self.logger.error('  .. <ConeSource> failed to SetRadius')
        if 'SetResolution' in currentElement.keys():
            try:
                source.SetResolution( int(currentElement.get('SetResolution')) )
            except:
                self.logger.error('  .. <ConeSource> failed to SetResolution')
        if 'SetCenter' in currentElement.keys():
            try:
                source.SetCenter( coordsFromString(currentElement.get('SetCenter')) )
            except:
                self.logger.error('  .. <ConeSource> failed to SetCenter')
        if 'SetDirection' in currentElement.keys():
            try:
                source.SetDirection( coordsFromString(currentElement.get('SetDirection')) )
            except:
                self.logger.error('  .. <ConeSource> failed to SetDirection')
        return source
def CubeSource(self, currentElement):
    """Build a vtkCubeSource; expects SetXLength, SetYLength, SetZLength."""
    source = vtk.vtkCubeSource()
    for attr in ('SetXLength', 'SetYLength', 'SetZLength'):
        try:
            getattr(source, attr)( float(currentElement.get(attr)) )
        except:
            self.logger.error(' .. <CubeSource> failed to %s' % attr)
    return source
def SphereSource(self, currentElement):
    """Build a vtkSphereSource from XML attributes.

    Mandatory: SetRadius.  Optional: SetThetaResolution, SetPhiResolution
    (ints) and SetStartTheta, SetEndTheta (floats).
    """
    source = vtk.vtkSphereSource()
    try:
        source.SetRadius( float(currentElement.get('SetRadius')) )
    except:
        self.logger.error(' .. <SphereSource> failed to SetRadius')
    # Optional attributes paired with their string-to-value converters.
    optional = (('SetThetaResolution', int),
                ('SetPhiResolution', int),
                ('SetStartTheta', float),
                ('SetEndTheta', float))
    for attr, convert in optional:
        if attr in currentElement.keys():
            try:
                getattr(source, attr)( convert(currentElement.get(attr)) )
            except:
                self.logger.error(' .. <SphereSource> failed to %s' % attr)
    return source
def UnstructuredGridReader(self, currentElement):
    """Build a vtkUnstructuredGridReader; SetFileName is resolved relative
    to self.basedir.  Optional: SetVectorsName."""
    reader = vtk.vtkUnstructuredGridReader()
    try:
        fname = os.path.join(self.basedir, currentElement.get('SetFileName'))
        reader.SetFileName(fname)
    except:
        self.logger.error(' .. <UnstructuredGridReader> failed to SetFileName')
    if 'SetVectorsName' in currentElement.keys():
        try:
            reader.SetVectorsName( currentElement.get('SetVectorsName') )
        except:
            self.logger.error(' .. <UnstructuredGridReader> failed to SetVectorsName')
    return reader
def PolyDataReader(self, currentElement):
    """Build a vtkPolyDataReader for a file located under self.basedir."""
    reader = vtk.vtkPolyDataReader()
    try:
        path = os.path.join(self.basedir, currentElement.get('SetFileName'))
        reader.SetFileName(path)
    except:
        self.logger.error(' .. <PolyDataReader> failed to SetFileName')
    return reader
def PLYReader(self, currentElement):
    """Build a vtkPLYReader for a file located under self.basedir."""
    reader = vtk.vtkPLYReader()
    try:
        path = os.path.join(self.basedir, currentElement.get('SetFileName'))
        reader.SetFileName(path)
    except:
        self.logger.error(' .. <PLYReader> failed to SetFileName')
    return reader
def ImplicitModeller(self, currentElement):
    """Build a vtkImplicitModeller fed by an Algorithm-type child element.

    Optional: SetSampleDimensions, SetModelBounds, SetMaximumDistance.
    """
    impModeller = vtk.vtkImplicitModeller()
    # Select the (last) Algorithm-type child as the input pipeline stage.
    algElem = ''
    for child in currentElement.getchildren():
        if child.tag in vtkTypes['Algorithm']:
            algElem = child
    if algElem != '':
        dataset = self.namesToFunctions[algElem.tag](algElem)
        self.logger.debug(" .. <ImplicitModeller> trying to SetInputConnection")
        try:
            impModeller.SetInputConnection(dataset.GetOutputPort())
        except:
            self.logger.error(" .. <ImplicitModeller> failed to SetInputConnection")
    else:
        self.logger.error(' .. <ImplicitModeller> needs an Algorithm-type childElement')
    # Optional attributes paired with their string-to-value converters.
    optional = (('SetSampleDimensions', str2ints),
                ('SetModelBounds', str2floats),
                ('SetMaximumDistance', float))
    for attr, convert in optional:
        if attr in currentElement.keys():
            self.logger.debug(' .. <ImplicitModeller> trying to %s' % attr)
            try:
                getattr(impModeller, attr)( convert(currentElement.get(attr)) )
            except:
                self.logger.error(' .. <ImplicitModeller> failed to %s' % attr)
    return impModeller
def ContourFilter(self, currentElement):
    """Build a vtkContourFilter fed by an Algorithm-type child element.

    The contour value for isosurface 0 may be given through the optional
    'SetValue' attribute (a single float).  It defaults to 0.25, which
    was previously hard-coded; existing maps are therefore unaffected.
    """
    contFilt = vtk.vtkContourFilter()
    # Datatype(s) I need for input: Algorithm
    AlgorithmElement = ''
    for childElement in currentElement.getchildren():
        if childElement.tag in vtkTypes['Algorithm']:
            AlgorithmElement = childElement
    if AlgorithmElement != '':
        dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement)
        try:
            contFilt.SetInputConnection(dataset.GetOutputPort())
        except:
            self.logger.error(" .. <ContourFilter> failed to SetInputConnection")
    else:
        self.logger.error(' .. <ContourFilter> needs an Algorithm-type childElement')
    # Honour an explicit contour value when given; fall back to the old
    # hard-coded default otherwise.
    value = 0.25
    if 'SetValue' in currentElement.keys():
        try:
            value = float(currentElement.get('SetValue'))
        except:
            self.logger.error(' .. <ContourFilter> failed to parse SetValue')
    contFilt.SetValue(0, value)
    return contFilt
def Glyph3D(self, currentElement):
    """Build a vtkGlyph3D: input comes from an Algorithm-type child, glyph
    geometry from a previously registered glyph source named by the
    'SetSource' attribute.  Scale/colour settings are fixed."""
    glyph = vtk.vtkGlyph3D()
    # Select the (last) Algorithm-type child as the input pipeline stage.
    algElem = ''
    for child in currentElement.getchildren():
        if child.tag in vtkTypes['Algorithm']:
            algElem = child
    if algElem != '':
        dataset = self.namesToFunctions[algElem.tag](algElem)
        try:
            glyph.SetInputConnection(dataset.GetOutputPort())
        except:
            self.logger.error(' .. <Glyph3D> failed to SetInputConnection')
    else:
        self.logger.error(' .. <Glyph3D> needs an Algorithm-type childElement')
    if 'SetSource' in currentElement.keys():
        gsourceName = currentElement.get('SetSource')
        try:
            self.logger.debug(' .. <Glyph3D> SetSource(%s)' % gsourceName)
            glyph.SetSource( self.glyphsources[gsourceName].GetOutput() )
        except:
            self.logger.error(' .. <Glyph3D> failed to SetSource')
    # Fixed scale/colour configuration (unchanged from the original).
    glyph.SetScaleModeToScaleByVector()
    glyph.SetColorModeToColorByVector()
    glyph.SetRange(0.0, 0.11445075055913652)
    glyph.SetScaleFactor(3.0)
    return glyph
def TextureMapToPlane(self, currentElement):
    """Build a vtkTextureMapToPlane from an Algorithm-type child plus
    optional SetOrigin / SetPoint1 / SetPoint2 coordinate attributes."""
    tmapper = vtk.vtkTextureMapToPlane()
    # Select the (last) Algorithm-type child as the input pipeline stage.
    algElem = ''
    for child in currentElement.getchildren():
        if child.tag in vtkTypes['Algorithm']:
            algElem = child
    if algElem != '':
        dataset = self.namesToFunctions[algElem.tag](algElem)
        try:
            tmapper.SetInputConnection(dataset.GetOutputPort())
        except:
            self.logger.error(' .. <TextureMapToPlane> failed to SetInputConnection')
    else:
        self.logger.error(' .. <TextureMapToPlane> needs an Algorithm-type childElement')
    # Optional plane-definition points, parsed from coordinate strings.
    for attr in ('SetOrigin', 'SetPoint1', 'SetPoint2'):
        if attr in currentElement.keys():
            try:
                getattr(tmapper, attr)( coordsFromString(currentElement.get(attr)) )
            except:
                self.logger.error(' .. <TextureMapToPlane> failed to %s' % attr)
    return tmapper
def TextureMapToSphere(self, currentElement):
    """Build a vtkTextureMapToSphere from an Algorithm-type child, with an
    optional SetPreventSeam attribute.

    Fixes: the input dataset is now built from the selected Algorithm-type
    child (previously it used the loop's last child element, which is only
    correct by accident), and SetPreventSeam is applied independently of
    whether an input child was found, matching the other filter builders.
    """
    tmapper = vtk.vtkTextureMapToSphere()
    # Datatype(s) I need for input: Algorithm
    AlgorithmElement = ''
    for childElement in currentElement.getchildren():
        if childElement.tag in vtkTypes['Algorithm']:
            AlgorithmElement = childElement
    if AlgorithmElement != '':
        dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement)
        try:
            tmapper.SetInputConnection(dataset.GetOutputPort())
        except:
            self.logger.error(' .. <TextureMapToSphere> failed to SetInputConnection')
    else:
        self.logger.error(' .. <TextureMapToSphere> needs an Algorithm-type childnode')
    if 'SetPreventSeam' in currentElement.keys():
        try:
            tmapper.SetPreventSeam( int(currentElement.get('SetPreventSeam')) )
        except:
            self.logger.error(' .. <TextureMapToSphere> failed to SetPreventSeam')
    return tmapper
def PerlinNoise(self, currentElement):
    """Build a vtkPerlinNoise implicit function.

    Mandatory: SetFrequency (coordinate string).  Optional: SetPhase.

    Fixes: the optional phase attribute is now gated on 'SetPhase'
    (previously it checked 'SetThetaResolution') and applied through
    SetPhase (previously it called SetThetaResolution, which does not
    exist on vtkPerlinNoise); log messages no longer misspell the tag
    as 'PelinNoise'.
    """
    pNoise = vtk.vtkPerlinNoise()
    try:
        pNoise.SetFrequency( coordsFromString(currentElement.get('SetFrequency')) )
    except:
        self.logger.error(' .. <PerlinNoise> failed to SetFrequency')
    if 'SetPhase' in currentElement.keys():
        try:
            pNoise.SetPhase( coordsFromString(currentElement.get('SetPhase')) )
        except:
            self.logger.error(' .. <PerlinNoise> failed to SetPhase')
    return pNoise
def ImplicitSum(self, currentElement):
    """Build a vtkImplicitSum over all ImplicitFunction-type children.

    Each child may carry a 'weight' attribute (default 1.0); the sum is
    normalized by the total weight.

    Fix: the "trying to AddFunction" progress message is now logged at
    debug level; it was wrongly emitted via logger.error.
    """
    impSum = vtk.vtkImplicitSum()
    impSum.SetNormalizeByWeight(1)
    for childElement in currentElement.getchildren():
        if childElement.tag in vtkTypes['ImplicitFunction']:
            childFunc = self.namesToFunctions[childElement.tag](childElement)
            if 'weight' in childElement.keys():
                childWeight = float(childElement.get('weight'))
            else:
                childWeight = 1.
            self.logger.debug(' .. <ImplicitSum> trying to AddFunction')
            try:
                impSum.AddFunction(childFunc, childWeight)
            except:
                self.logger.error(' .. <ImplicitSum> failed to AddFunction')
    return impSum
def SampleFunction(self, currentElement):
    """Build a vtkSampleFunction sampling the implicit function produced
    by an Algorithm-type child.  Optional: SetSampleDimensions,
    SetModelBounds.  Normal computation is always switched off."""
    sampFunc = vtk.vtkSampleFunction()
    # Select the (last) Algorithm-type child as the implicit function.
    algElem = ''
    for child in currentElement.getchildren():
        if child.tag in vtkTypes['Algorithm']:
            algElem = child
    if algElem != '':
        dataset = self.namesToFunctions[algElem.tag](algElem)
        self.logger.debug(' .. <SampleFunction> trying to SetImplicitFunction.')
        try:
            sampFunc.SetImplicitFunction(dataset)
        except:
            self.logger.error(' .. <SampleFunction> failed to SetImplicitFunction.')
    if 'SetSampleDimensions' in currentElement.keys():
        self.logger.debug(' .. <SampleFunction> trying to SetSampleDimensions')
        try:
            sampFunc.SetSampleDimensions( str2ints(currentElement.get('SetSampleDimensions')) )
        except:
            self.logger.error(' .. <SampleFunction> failed to SetSampleDimensions')
    if 'SetModelBounds' in currentElement.keys():
        self.logger.debug(' .. <SampleFunction> trying to SetModelBounds')
        try:
            sampFunc.SetModelBounds( str2floats(currentElement.get('SetModelBounds')) )
        except:
            self.logger.error(' .. <SampleFunction> failed to SetModelBounds')
    sampFunc.ComputeNormalsOff()
    return sampFunc
def PolyDataNormals(self, currentElement):
    """Build a vtkPolyDataNormals filter fed by an Algorithm-type child,
    with an optional SetFeatureAngle attribute.

    Fixes: the input dataset is now built from the selected
    Algorithm-type child (previously it used the loop's last child
    element), and the "trying to" progress message is logged at debug
    level instead of error.
    """
    pDatNorms = vtk.vtkPolyDataNormals()
    # Datatype(s) I need for input: Algorithm
    AlgorithmElement = ''
    for childElement in currentElement.getchildren():
        if childElement.tag in vtkTypes['Algorithm']:
            AlgorithmElement = childElement
    if AlgorithmElement != '':
        dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement)
        self.logger.debug(' .. <PolyDataNormals> trying to SetInputConnection.')
        try:
            pDatNorms.SetInputConnection(dataset.GetOutputPort())
        except:
            self.logger.error(' .. <PolyDataNormals> failed to SetInputConnection.')
    if 'SetFeatureAngle' in currentElement.keys():
        self.logger.debug(' .. <PolyDataNormals> trying to SetFeatureAngle')
        try:
            pDatNorms.SetFeatureAngle( float(currentElement.get('SetFeatureAngle')) )
        except:
            self.logger.error(' .. <PolyDataNormals> failed to SetFeatureAngle')
    return pDatNorms
def Plane(self, currentElement):
    """Build a plain vtkPlane; no XML attributes are honoured."""
    return vtk.vtkPlane()
#class AvatarReader:
# def __init__ (self, basedir, bonelengths = ''):
# self.basedir = basedir + "/"
# self.bonelengths = bonelengths
# fd = open(self.basedir + "index.xml", 'r')
# XMLstring = fd.read()
# fd.close()
# xmlConverter = XML2VTK(XMLstring, bonelengths = self.bonelengths)
#
# self.actors = xmlConverter.actors
# self.assemblies = xmlConverter.assemblies
#
# # Bind everything into a single object for the viewer:
# self.assembly = vtk.vtkAssembly()
# for i in self.actors:
# self.assembly.AddPart(self.actors[i])
# for i in self.assemblies:
# self.assembly.AddPart(self.assemblies[i])
class MapReader:
    """Load a map's ``index.xml`` from ``jvDataDir/Maps/<mapname>`` and
    convert it to VTK objects via XML2VTK.

    Exposes the converter's textures, actors, assemblies, lights and
    glyphsources attributes, plus bgcolor when the map defines one.
    """
    def __init__ (self, mapname):
        self.logger = logging.getLogger(name = "MapReader")
        self.logger.debug('Attempting to load map: %s' % mapname + "/index.xml")
        self.mapname = mapname
        self.basedir = os.path.join(jvDataDir, 'Maps', mapname)
        filename = os.path.join(jvDataDir, "Maps", mapname, "index.xml")
        # 'with' guarantees the descriptor is closed even if read() fails.
        with open(filename, 'r') as fd:
            XMLstring = fd.read()
        self.logger.debug("Attempting to parse XML...")
        # NOTE(review): xml.etree raises ET.ParseError rather than a bare
        # ExpatError with some ET bindings -- verify which module ET is.
        try:
            topElement = ET.fromstring(XMLstring)
        except xml.parsers.expat.ExpatError as err:
            self.logger.error("Failed to parse file: %s/index.xml:" % (mapname))
            self.logger.error("  ExpatError: %s" % (err))
            # Re-raise: continuing would only hit a NameError on topElement.
            raise
        xmlConverter = XML2VTK(topElement, basedir=self.basedir)
        self.textures = xmlConverter.textures
        self.actors = xmlConverter.actors
        self.assemblies = xmlConverter.assemblies
        self.lights = xmlConverter.lights
        self.glyphsources = xmlConverter.glyphsources
        # bgcolor is optional in the map definition.
        if hasattr(xmlConverter, 'bgcolor'):
            self.bgcolor = xmlConverter.bgcolor
| mfassler/jaivis | jv/xmlReader.py | Python | gpl-2.0 | 41,125 | [
"VTK"
] | c514e115f0edeee10fcb67451e5d6e6e8337a00722b8e12c907de59beafbc02d |
import gym
from gym import spaces
from gym.utils import seeding
def cmp(a, b):
    """Three-way comparison: -1 if a < b, 0 if equal, +1 if a > b."""
    if a == b:
        return 0
    return 1 if a > b else -1
# Infinite deck drawn with replacement: 1 = ace, 2-9 at face value, and
# 10 appears four times (the ten plus Jack/Queen/King, all worth 10).
deck = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
def draw_card(np_random):
    """Draw one card uniformly at random from `deck` using the env's RNG.

    Left byte-identical: any rewrite would change how many RNG values are
    consumed and break reproducibility of seeded episodes.
    """
    return np_random.choice(deck)
def draw_hand(np_random):
    """Deal a starting hand of two cards."""
    return [draw_card(np_random) for _ in range(2)]
def usable_ace(hand):
    """True when the hand holds an ace that can count as 11 without busting."""
    if 1 not in hand:
        return False
    return sum(hand) + 10 <= 21
def sum_hand(hand):
    """Best total for the hand, counting one ace as 11 when that does
    not bust."""
    total = sum(hand)
    # An ace promotes to 11 (adds 10) only if the hand stays at or below 21.
    if 1 in hand and total + 10 <= 21:
        return total + 10
    return total
def is_bust(hand):
    """True when the best hand total exceeds 21 (a bust)."""
    return 21 < sum_hand(hand)
def score(hand):
    """Hand total used for comparison; a busted hand scores 0."""
    total = sum_hand(hand)
    return total if total <= 21 else 0
def is_natural(hand):
    """True for a two-card 21: exactly an ace plus a ten-valued card."""
    return len(hand) == 2 and min(hand) == 1 and max(hand) == 10
class BlackjackEnv(gym.Env):
    """Simple blackjack environment
    Blackjack is a card game where the goal is to obtain cards that sum to as
    near as possible to 21 without going over. They're playing against a fixed
    dealer.
    Face cards (Jack, Queen, King) have point value 10.
    Aces can either count as 11 or 1, and it's called 'usable' at 11.
    This game is placed with an infinite deck (or with replacement).
    The game starts with each (player and dealer) having one face up and one
    face down card.
    The player can request additional cards (hit=1) until they decide to stop
    (stick=0) or exceed 21 (bust).
    After the player sticks, the dealer reveals their facedown card, and draws
    until their sum is 17 or greater. If the dealer goes bust the player wins.
    If neither player nor dealer busts, the outcome (win, lose, draw) is
    decided by whose sum is closer to 21. The reward for winning is +1,
    drawing is 0, and losing is -1.
    The observation of a 3-tuple of: the players current sum,
    the dealer's one showing card (1-10 where 1 is ace),
    and whether or not the player holds a usable ace (0 or 1).
    This environment corresponds to the version of the blackjack problem
    described in Example 5.1 in Reinforcement Learning: An Introduction
    by Sutton and Barto (1998).
    https://webdocs.cs.ualberta.ca/~sutton/book/the-book.html
    """
    def __init__(self, natural=False):
        # Actions: 0 = stick, 1 = hit.
        self.action_space = spaces.Discrete(2)
        # Observation: (player sum, dealer's showing card, usable-ace flag).
        self.observation_space = spaces.Tuple((
            spaces.Discrete(32),
            spaces.Discrete(11),
            spaces.Discrete(2)))
        self._seed()
        # Flag to payout 1.5 on a "natural" blackjack win, like casino rules
        # Ref: http://www.bicyclecards.com/how-to-play/blackjack/
        self.natural = natural
        # Start the first game
        self._reset()
        # Number of available actions (convenience for tabular RL code).
        self.nA = 2
    def reset(self):
        # Public gym API entry point; delegates to the internal helper.
        return self._reset()
    def step(self, action):
        # Public gym API entry point; delegates to the internal helper.
        return self._step(action)
    def _seed(self, seed=None):
        # Create and store the RNG used for all card draws; returns the
        # seed list per the gym seeding convention.
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
    def _step(self, action):
        assert self.action_space.contains(action)
        if action:  # hit: add a card to players hand and return
            self.player.append(draw_card(self.np_random))
            if is_bust(self.player):
                done = True
                reward = -1
            else:
                done = False
                reward = 0
        else:  # stick: play out the dealers hand, and score
            done = True
            # Fixed dealer policy: draw until reaching 17 or more.
            while sum_hand(self.dealer) < 17:
                self.dealer.append(draw_card(self.np_random))
            reward = cmp(score(self.player), score(self.dealer))
            # Optional casino rule: a natural two-card win pays 1.5.
            if self.natural and is_natural(self.player) and reward == 1:
                reward = 1.5
        return self._get_obs(), reward, done, {}
    def _get_obs(self):
        # Observation tuple: (player sum, dealer's showing card, usable ace).
        return (sum_hand(self.player), self.dealer[0], usable_ace(self.player))
    def _reset(self):
        self.dealer = draw_hand(self.np_random)
        self.player = draw_hand(self.np_random)
        # Auto-draw another card if the score is less than 12
        # (with a sum below 12, hitting can never bust, so the choice
        # is forced and not interesting for learning).
        while sum_hand(self.player) < 12:
            self.player.append(draw_card(self.np_random))
        return self._get_obs()
| dennybritz/reinforcement-learning | lib/envs/blackjack.py | Python | mit | 4,251 | [
"CASINO"
] | f1a35d2db407d9034d3fe396622025fba467415b05bec4560735ee8c12ba93a3 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2015 Brian Ccoa, <bcoca@ansible.com>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This module adds shared support for generic api modules
In order to use this module, include it as part of a custom
module as shown below.
The 'api' module provides the following common argument specs:
* rate limit spec
- rate: number of requests per time unit (int)
- rate_limit: time window in which the limit is applied in seconds
* retry spec
- retries: number of attempts
- retry_pause: delay between attempts in seconds
"""
import time
def rate_limit_argument_spec(spec=None):
    """Creates an argument spec for working with rate limiting"""
    arg_spec = {
        'rate': {'type': 'int'},
        'rate_limit': {'type': 'int'},
    }
    if spec:
        arg_spec.update(spec)
    return arg_spec
def retry_argument_spec(spec=None):
    """Creates an argument spec for working with retrying"""
    arg_spec = {
        'retries': {'type': 'int'},
        'retry_pause': {'type': 'float', 'default': 1},
    }
    if spec:
        arg_spec.update(spec)
    return arg_spec
def basic_auth_argument_spec(spec=None):
    """Creates an argument spec for HTTP basic-auth style modules."""
    arg_spec = {
        'api_username': {'type': 'str'},
        'api_password': {'type': 'str', 'no_log': True},
        'api_url': {'type': 'str'},
        'validate_certs': {'type': 'bool', 'default': True},
    }
    if spec:
        arg_spec.update(spec)
    return arg_spec
def rate_limit(rate=None, rate_limit=None):
    """Rate limiting decorator.

    Allows at most `rate` calls per `rate_limit` seconds by sleeping
    before the wrapped call when necessary.  If either argument is None,
    no limiting is applied.

    Fix: uses time.monotonic() (falling back to time.clock() on Python 2)
    instead of time.clock() unconditionally -- time.clock() was removed
    in Python 3.8, and measured CPU time rather than wall time on Linux.
    """
    minrate = None
    if rate is not None and rate_limit is not None:
        # Minimum interval (seconds) between two consecutive calls.
        minrate = float(rate_limit) / float(rate)

    # Wall-clock source; time.monotonic is unaffected by system clock jumps.
    _now = getattr(time, 'monotonic', None) or time.clock

    def wrapper(f):
        last = [0.0]  # mutable cell holding the previous call's timestamp

        def ratelimited(*args, **kwargs):
            if minrate is not None:
                elapsed = _now() - last[0]
                left = minrate - elapsed
                if left > 0:
                    time.sleep(left)
                last[0] = _now()
            return f(*args, **kwargs)
        return ratelimited
    return wrapper
def retry(retries=None, retry_pause=1):
    """Retry decorator.

    Calls the wrapped function until it returns a truthy value, sleeping
    `retry_pause` seconds between attempts and raising Exception once the
    `retries` budget is reached (same bound as the original: the limit
    check happens before each attempt).  With retries=None the wrapped
    function is called once, unmodified.

    Fixes: the attempt counter is kept as a local variable inside the
    call (the previous `retry_count += 1` rebound a closed-over integer
    and raised UnboundLocalError at runtime), and the retries=None path
    no longer returns an unbound `ret` variable.
    """
    def wrapper(f):
        def retried(*args, **kwargs):
            if retries is None:
                # No retry budget configured: plain passthrough call.
                return f(*args, **kwargs)
            attempt = 0
            ret = None
            while True:
                attempt += 1
                if attempt >= retries:
                    raise Exception("Retry limit exceeded: %d" % retries)
                try:
                    ret = f(*args, **kwargs)
                except Exception:
                    # Best effort: swallow and retry until the budget runs out.
                    pass
                if ret:
                    break
                time.sleep(retry_pause)
            return ret
        return retried
    return wrapper
| resmo/ansible | lib/ansible/module_utils/api.py | Python | gpl-3.0 | 4,456 | [
"Brian"
] | 14855dd0feae42b56312a44445c4e84fdb593313ed2229cc9a73acfc9e953de4 |
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""defines few unrelated algorithms, that works on declarations"""
import types
def declaration_path( decl, with_defaults=True ):
    """
    returns a list of parent declarations names
    @param decl: declaration for which declaration path should be calculated
    @type decl: L{declaration_t}
    @return: [names], where first item contains top parent name and last item
    contains decl name
    """
    # NOTE(review): with_defaults is accepted but never used here -- the
    # defaults distinction is handled by partial_declaration_path/full_name.
    if not decl:
        return []
    if not decl.cache.declaration_path:
        # Walk up the parent chain, collecting names leaf-first.
        result = [ decl.name ]
        parent = decl.parent
        while parent:
            if parent.cache.declaration_path:
                # A parent already memoized its own path: reuse it as the
                # prefix instead of walking the rest of the chain.
                result.reverse()
                decl.cache.declaration_path = parent.cache.declaration_path + result
                return decl.cache.declaration_path
            else:
                result.append( parent.name )
                parent = parent.parent
        # Reached the root without a cached prefix: reverse to get
        # top-parent-first order and memoize on the declaration's cache.
        result.reverse()
        decl.cache.declaration_path = result
        return result
    else:
        return decl.cache.declaration_path
def partial_declaration_path( decl ):
    """
    returns a list of parent declarations names without template arguments that
    have default value
    @param decl: declaration for which declaration path should be calculated
    @type decl: L{declaration_t}
    @return: [names], where first item contains top parent name and last item
    contains decl name
    """
    #TODO:
    #If parent declaration cache already has declaration_path, reuse it for
    #calculation.
    if not decl:
        return []
    if not decl.cache.partial_declaration_path:
        # Walk up the parent chain, collecting partial names leaf-first.
        result = [ decl.partial_name ]
        parent = decl.parent
        while parent:
            if parent.cache.partial_declaration_path:
                # Parent already memoized its partial path: reuse as prefix.
                result.reverse()
                decl.cache.partial_declaration_path \
                    = parent.cache.partial_declaration_path + result
                return decl.cache.partial_declaration_path
            else:
                result.append( parent.partial_name )
                parent = parent.parent
        # Reached the root: reverse to top-parent-first order and memoize.
        result.reverse()
        decl.cache.partial_declaration_path = result
        return result
    else:
        return decl.cache.partial_declaration_path
def get_named_parent( decl ):
    """
    returns a reference to a named parent declaration
    @param decl: the child declaration
    @type decl: L{declaration_t}
    @return: reference to L{declaration_t} or None if not found
    """
    if not decl:
        return None
    candidate = decl.parent
    # Skip anonymous scopes and the global '::' namespace.
    while candidate and (not candidate.name or candidate.name == '::'):
        candidate = candidate.parent
    return candidate
def full_name_from_declaration_path( dpath ):
    """Join a declaration path into a full C++ name.

    Empty components (e.g. anonymous namespaces) are dropped; the first
    remaining component (usually '::') is prepended as-is and the rest
    are joined with '::'.

    Fix: a list comprehension replaces filter(), whose return value is
    an iterator (not subscriptable) under Python 3.
    """
    ##Here I have lack of knowledge:
    ##TODO: "What is the full name of declaration declared in unnamed namespace?"
    parts = [ name for name in dpath if name ]
    return parts[0] + '::'.join( parts[1:] )
def full_name( decl, with_defaults=True ):
    """
    returns full name of the declaration
    @param decl: declaration for which full name should be calculated. If decl
    belongs to unnamed namespace, then L{full_name} is not valid C++ full name.
    @type decl: L{declaration_t}
    @return: full name of declarations.
    """
    if decl is None:
        raise RuntimeError( "Unable to generate full name for None object!" )
    if with_defaults:
        # Name including template arguments with default values; memoized.
        if not decl.cache.full_name:
            decl.cache.full_name = full_name_from_declaration_path( declaration_path( decl ) )
        return decl.cache.full_name
    # Partial name: default-valued template arguments stripped; memoized.
    if not decl.cache.full_partial_name:
        decl.cache.full_partial_name \
            = full_name_from_declaration_path( partial_declaration_path( decl ) )
    return decl.cache.full_partial_name
def make_flatten( decl_or_decls ):
    """
    converts tree representation of declarations to flatten one.
    @param decl_or_decls: reference to list of declaration's or single declaration
    @type decl_or_decls: L{declaration_t} or [ L{declaration_t} ]
    @return: [ all internal declarations ]

    Fix: isinstance(..., list) replaces types.ListType, which only exists
    under Python 2 (and is simply an alias for list there).
    """
    import pygccxml.declarations #prevent cyclic import
    def proceed_single( decl ):
        # Depth-first expansion of one declaration and all of its scopes.
        answer = [ decl ]
        if not isinstance( decl, pygccxml.declarations.scopedef_t ):
            return answer
        for elem in decl.declarations:
            if isinstance( elem, pygccxml.declarations.scopedef_t ):
                answer.extend( proceed_single( elem ) )
            else:
                answer.append( elem )
        return answer
    decls = []
    if isinstance( decl_or_decls, list ):
        decls.extend( decl_or_decls )
    else:
        decls.append( decl_or_decls )
    answer = []
    for decl in decls:
        answer.extend( proceed_single( decl ) )
    return answer
def __make_flatten_generator( decl_or_decls ):
    """
    converts tree representation of declarations to flatten one.
    @param decl_or_decls: reference to list of declaration's or single declaration
    @type decl_or_decls: L{declaration_t} or [ L{declaration_t} ]
    @return: [ all internal declarations ]

    Fix: isinstance(..., list) replaces Python-2-only types.ListType.
    """
    import pygccxml.declarations
    def proceed_single( decl ):
        # Lazily yield the declaration itself, then everything below it.
        yield decl
        if not isinstance( decl, pygccxml.declarations.scopedef_t):
            return
        for internal in decl.declarations:
            if isinstance( internal, pygccxml.declarations.scopedef_t):
                for internal_internal in proceed_single( internal ):
                    yield internal_internal
            else:
                yield internal
    if isinstance( decl_or_decls, list ):
        for creator in decl_or_decls:
            for internal in proceed_single( creator ):
                yield internal
    else:
        for internal in proceed_single( decl_or_decls ):
            yield internal
def get_global_namespace(decls):
    """Return the single namespace_t named '::' found (recursively) in decls.

    @raise RuntimeError: when the global namespace cannot be found
    unambiguously.

    Fix: a list comprehension replaces filter(), whose result has no
    len() under Python 3.
    """
    import pygccxml.declarations
    found = [ decl for decl in make_flatten( decls )
              if decl.name == '::'
              and isinstance( decl, pygccxml.declarations.namespace_t ) ]
    if len( found ) == 1:
        return found[0]
    raise RuntimeError( "Unable to find global namespace." )
class match_declaration_t:
    """
    Callable predicate used by the search helpers.

    Matches a declaration by any combination of: declaration type
    (isinstance check, e.g. L{class_t} or L{operator_t}), name, full
    name, and parent reference.  Criteria left as None are ignored.
    """
    def __init__( self, type=None, name=None, fullname=None, parent=None ):
        self.type = type
        self.name = name
        self.fullname = fullname
        self.parent = parent

    def does_match_exist(self, inst):
        """
        returns True if inst matches every configured criterion
        @param inst: declaration instance
        @type inst: L{declaration_t}
        @return: bool
        """
        if self.type is not None and not isinstance( inst, self.type ):
            return False
        if self.name is not None and inst.name != self.name:
            return False
        if self.parent is not None and self.parent is not inst.parent:
            return False
        if self.fullname is not None:
            # Unnamed declarations can never match a full-name criterion.
            if not inst.name:
                return False
            return self.fullname == full_name( inst )
        return True

    def __call__(self, inst):
        """C{return self.does_match_exist(inst)}"""
        return self.does_match_exist(inst)
def find_all_declarations( declarations
                           , type=None
                           , name=None
                           , parent=None
                           , recursive=True
                           , fullname=None ):
    """
    returns a list of all declarations that match criteria, defined by developer
    For more information about arguments see L{match_declaration_t} class.
    @return: [ matched declarations ]

    Fix: a list comprehension replaces filter(), so callers relying on a
    real list (len(), indexing -- e.g. find_declaration) keep working
    under Python 3.
    """
    if recursive:
        decls = make_flatten( declarations )
    else:
        decls = declarations
    matcher = match_declaration_t( type, name, fullname, parent )
    return [ decl for decl in decls if matcher( decl ) ]
def find_declaration( declarations
                      , type=None
                      , name=None
                      , parent=None
                      , recursive=True
                      , fullname=None ):
    """
    returns single declaration that match criteria, defined by developer.
    If more the one declaration was found None will be returned.
    For more information about arguments see L{match_declaration_t} class.
    @return: matched declaration L{declaration_t} or None
    """
    found = find_all_declarations( declarations, type=type, name=name
                                   , parent=parent, recursive=recursive
                                   , fullname=fullname )
    # Anything other than exactly one match is treated as "not found".
    if len( found ) == 1:
        return found[0]
    return None
def find_first_declaration( declarations, type=None, name=None, parent=None, recursive=True, fullname=None ):
    """
    returns first declaration that match criteria, defined by developer
    For more information about arguments see L{match_declaration_t} class.
    @return: matched declaration L{declaration_t} or None
    """
    matcher = match_declaration_t(type, name, fullname, parent)
    candidates = make_flatten( declarations ) if recursive else declarations
    for candidate in candidates:
        if matcher( candidate ):
            return candidate
    return None
def declaration_files(decl_or_decls):
    """
    returns set of files
    Every declaration is declared in some file. This function returns set, that
    contains all file names of declarations.
    @param decl_or_decls: reference to list of declaration's or single declaration
    @type decl_or_decls: L{declaration_t} or [ L{declaration_t} ]
    @return: set( declaration file names )
    """
    # Declarations without location info (e.g. compiler builtins) are skipped.
    return set( decl.location.file_name
                for decl in make_flatten( decl_or_decls )
                if decl.location )
class visit_function_has_not_been_found_t( RuntimeError ):
    """
    exception raised by L{apply_visitor} when the visitor does not
    implement a visit method for the given declaration instance.
    """
    def __init__( self, visitor, decl_inst ):
        RuntimeError.__init__( self )
        self.__msg = (
            "Unable to find visit function. Visitor class: %s. "
            "Declaration instance class: %s'"
            % ( visitor.__class__.__name__, decl_inst.__class__.__name__ ) )

    def __str__(self):
        return self.__msg
def apply_visitor( visitor, decl_inst):
    """
    applies a visitor on declaration instance
    @param visitor: instance
    @type visitor: L{type_visitor_t} or L{decl_visitor_t}
    """
    # Declaration classes are named '<something>_t'; the matching visitor
    # method is 'visit_<something>'.
    method_name = 'visit_' + decl_inst.__class__.__name__[:-2]
    if not hasattr( visitor, method_name ):
        raise visit_function_has_not_been_found_t( visitor, decl_inst )
    getattr( visitor, method_name )()
| avaitla/Haskell-to-C---Bridge | pygccxml-1.0.0/pygccxml/declarations/algorithm.py | Python | bsd-3-clause | 11,478 | [
"VisIt"
] | 0194c93850eaae0a2b54c841181878c244a1177528333e30c3af26174a3098d1 |
# -*- coding: utf-8 -*-
"""
Multiple operations on data or modeling results (e.g. aligning, convolution, ...)
Author: R. Lombaert
"""
import collections
from scipy import mean, std, sqrt, log, isfinite
from scipy import array, zeros, arange
from scipy.stats import tmean, tstd
from scipy.optimize import leastsq
from scipy.integrate import trapz
from scipy.special import erf
import numpy as np
from cc.tools.numerical import Interpol
def alignY(datalists,xmin,xmax,zeropoint=0,p0=[1,0,1.5,-2.5],func='power'):
    """
    *** WILL BE REWRITTEN ***
    Align two datasets by shifting Y coordinate.
    Works on multiple data lists at a time. Each dataset is shifted to match
    with the previous dataset AFTER the previous one has already been shifted.
    e.g. for the third dataset y_new = zeropoint + shifts[1]*shifts[2]*y
    The shifts between datasets are multiplicative, the first dataset is
    shifted additively by the keyword zeropoint.
    At least 3 points of overlap are required!
    @param datalists: two or more 2d lists of data giving (x,y)
    @type datalists: list[list[(.,.)]]
    @param xmin: the lower boundar(y)(ies) of the overlapping region, used for
                 alignment. Must be 1 value for 2 datasets, n-1 values for more
    @type xmin: float or list
    @param xmax: the upper boundar(y)(ies) of the overlapping region, used for
                 alignment. Must be 1 value for 2 datasets, n-1 values for more
    @type xmax: float or list
    @keyword zeropoint: The first dataset is shifted additively with this value
                        (default: 0)
    @type zeropoint: float
    @keyword p0: initial parameters for fitting function definition
                 (default: [1,0,1.5,-2.5])
    @type p0: list
    @return: The datasets are returned as given, but with shifted Y-values, and
             the shifts used (first value additively, rest multiplicatively)
    @rtype: (list[list[(.,.)]], list)
    """
    # NOTE(review): the mutable default p0=[...] is shared across calls;
    # callers must not mutate it.
    #- zeropoint correction
    shifts = [zeropoint]
    current_data = array(datalists[0])
    corrected = [[coord + array([0,zeropoint]) for coord in current_data]]
    #- Power law is fitted to overlapping xrange for both datasets with leastsq
    #- x of second list is evaluated with both functions
    #- second list's y values are corrected by the mean of the ratios of the
    #- two function evaluations
    for i in xrange(len(datalists)-1):
        #- Fit `func` to the overlap region of the already-corrected dataset i.
        p_lsqlist1 = leastsq(Interpol.getResiduals,p0,\
                             args=([x
                                    for x in array(corrected[i])[:,0]
                                    if x >= xmin[i] and x <= xmax[i]],\
                                   [coord[1]
                                    for coord in array(corrected[i])
                                    if coord[0] >= xmin[i] and coord[0] <= xmax[i]],\
                                   func),\
                             maxfev=2000)[0]
        #- Fit the same model to the overlap region of the next, not yet
        #- corrected dataset i+1.
        p_lsqlist2 = leastsq(Interpol.getResiduals,p0,\
                             args=([x
                                    for x in array(datalists[i+1])[:,0]
                                    if x >= xmin[i] and x <= xmax[i]],\
                                   [coord[1]
                                    for coord in array(datalists[i+1])
                                    if coord[0] >= xmin[i] and coord[0] <= xmax[i]],\
                                   func),\
                             maxfev=2000)[0]
        #- Evaluate both fitted models on dataset i+1's x values inside the
        #- overlap region.
        f1x2 = Interpol.pEval([x
                               for x in array(datalists[i+1])[:,0]
                               if x >= xmin[i] and x <= xmax[i]],
                              p_lsqlist1,func)
        f2x2 = Interpol.pEval([x
                               for x in array(datalists[i+1])[:,0]
                               if x >= xmin[i] and x <= xmax[i]],\
                              p_lsqlist2,func)
        #- The multiplicative shift is the mean ratio of the two model
        #- evaluations; apply it to dataset i+1's y values only.
        shifts.append(mean(f1x2/f2x2))
        corrected.append([coord*array([1,shifts[i+1]])
                          for coord in array(datalists[i+1])])
    return corrected,shifts
def doConvolution(x_in,y_in,x_out,widths,factor=5,oversampling=1):
    '''
    Perform convolution on lists with a Gaussian filter.

    Reduce the input grid to the target grid by integration.

    @param x_in: The input x-values
    @type x_in: array
    @param y_in: The input y-values
    @type y_in: array
    @param x_out: The target x-grid
    @type x_out: array
    @param widths: The full width/half maximum spectral resolution as a
                   function of wavelength, i.e. the fwhm of the gaussian
    @type widths: array
    @keyword factor: the sigma factor for determining the window pushed through
                     the gaussian filter. This avoids having to convolve the
                     whole input grid, which takes a lot of time. Beyond
                     sigma*factor the contribution of the y values is assumed
                     to be negligible.
                     (default: 5)
    @type factor: int
    @keyword oversampling: oversampling factor of the target x-grid with
                           respect to the given spectral resolution.
                           (default: 1)
    @type oversampling: int

    @return: The resulting y-values
    @rtype: list
    '''
    x_in,y_in,x_out,widths = array(x_in),array(y_in),array(x_out),array(widths)
    y_out = []
    print 'Convolving for x_out between %.2f micron and %.2f micron with oversampling %i.' \
          %(x_out[0],x_out[-1],int(oversampling))
    #- Convert FWHM's to sigma for the gaussians
    sigma = [fwhm/(2.*sqrt(2.*log(2.))) for fwhm in widths]
    #- Define the binsizes of the bins that will be integrated, i.e. the
    #- apparent resolution of x_out
    binsize = [w/oversampling for w in widths]
    #- One output sample per target grid point; each combines a local gaussian
    #- convolution with an integration over the target bin.
    for delta_bin,sigi,xi_out in zip(binsize,sigma,x_out):
        yi_in = y_in[abs(x_in-xi_out)<=factor*sigi]
        #- if not empty: continue, else add 0
        if list(yi_in) and set(yi_in) != set([0.0]):
            #- all relevant xi's for the bin around xi_out, ie in this bin the
            #- y-values will be integrated
            xi_in = x_in[abs(x_in-xi_out)<=delta_bin]
            #- The window for the convolution itself, outside this window the
            #- data are assumed to be negligible, ie for a gaussian
            window = x_in[abs(x_in-xi_out)<=factor*sigi]
            convolution = convolveArray(window,yi_in,sigi)
            #- if one value in the bin, out of the window selection: add value
            if len(list(convolution[abs(window-xi_out)<=delta_bin])) == 1:
                y_out.append(convolution[abs(window-xi_out)<=delta_bin][0])
                print 'Convolution has a window of only one element at xi_out %f.'%xi_out
            #- If more than one value: integrate
            elif list(convolution[abs(window-xi_out)<=delta_bin]):
                y_out.append(trapz(y=convolution[abs(window-xi_out)<=delta_bin],x=xi_in)/(xi_in[-1]-xi_in[0]))
            #- If no values in the bin from the window: add average of the window
            #- This should not occur ideally!
            else:
                print 'Convolution has a window of no elements at x_out ' + \
                      '%f. Careful! Average is taken of '%(xi_out) + \
                      'sigma*factor window! This should not be happening...'
                y_out.append(sum(convolution)/float(len(convolution)))
        else:
            #- No (non-zero) input inside sigma*factor of this target point.
            y_out.append(0.0)
    return y_out
def convolveArray(xx, yy=None, sigma=3):
    """
    Convolves an intensity-versus-velocity profile with
    an instrumental Gaussian profile of width 'sigma'

    by Kristof Smolders

    @param xx: x values, or a 2d array holding x in row 0 and y in row 1
    @type xx: array
    @keyword yy: y values, if None, the y values are assumed to be included in
                 xx, in its second dimension.
                 (default: None)
    @type yy: array
    @keyword sigma: width of the gaussian profile
                    (default: 3)
    @type sigma: float

    @return: The new y values after convolution
    @rtype: array
    """
    #-- Unpack a combined (2,n) input array when no yy is given.
    if yy is None and xx.shape[0] > 1:
        yy = xx[1,:]
        xx = xx[0,:]
    #-- Bug fix: the original test 'xx == array([])' compares element-wise
    #--   and is never True, so empty input crashed further down. Test the
    #--   length explicitly instead.
    if len(xx) == 0:
        out = array([0.0])
    elif len(xx) == 1:
        #-- A single sample cannot be convolved: return it unchanged.
        out = xx
    else:
        nn = len(xx)
        #-- Each sample contributes an erf-shaped response integrated over
        #-- its own pixel; begin and end pixels only span half a pixel.
        # Begin: half a pixel
        C = yy[0]
        xe = 0.5*(xx[1]+xx[0])
        # NOTE(review): adding the full first sample yy[0] on top of its
        # half-pixel erf term looks deliberate but is unusual -- confirm
        # against the original algorithm by K. Smolders.
        out = yy[0] + C/2 * (erf((xe-xx)/(sqrt(2)*sigma)) - 1)
        # End: half a pixel
        C = yy[nn-1]
        xb = 0.5*(xx[nn-1]+xx[nn-2])
        out = out + C/2 * (1 - erf((xb-xx)/(sqrt(2)*sigma)))
        # Middle: full pixels, bounded by the midpoints to both neighbours.
        for jj in arange(1,nn-1):
            C = yy[jj]
            xb = 0.5*(xx[jj]+xx[jj-1])
            xe = 0.5*(xx[jj]+xx[jj+1])
            out = out + C/2 * (erf((xe-xx)/(sqrt(2)*sigma)) -
                               erf((xb-xx)/(sqrt(2)*sigma)))
    return out
def reduceArray(arr,stepsize,cutoff=None,mode='average'):
    '''
    Reduce the size of a 1d-array.

    Two modes are available:
      - average: subsequent n=stepsize elements are averaged
      - remove: keep one element every n=stepsize elements

    The average mode can be used when, e.g., reading MCMax output, where the
    density/temperature/... grids are given for radial and angular coordinates.
    In case only the radial points are needed, the stepsize would then be the
    number of angular grid cells.

    @param arr: The array to be reduced
    @type arr: np.array
    @param stepsize: The number of subsequent elements to average/frequency of
                     element removal.
    @type stepsize: int
    @keyword cutoff: A cutoff value below which no reduction is done. Default
                     if the full array is to be reduced. Only relevant for
                     mode == 'remove'.
                     (default: None)
    @type cutoff: float
    @keyword mode: The reduction mode, either 'average' or 'remove'. If the
                   mode is not recognized, it is assumed to be 'average'.
                   (default: 'average')
    @type mode: string

    @return: The reduced array
    @rtype: np.array
    '''
    arr = array(arr)
    stepsize = int(stepsize)
    if str(mode).lower() == 'remove':
        if cutoff is None:
            #-- No cutoff: thin the entire array.
            untouched, thinned = np.empty(0), arr
        else:
            #-- Values up to the cutoff are kept as-is; only the rest is thinned.
            threshold = float(cutoff)
            untouched = arr[arr<=threshold]
            thinned = arr[arr>threshold]
        return np.concatenate((untouched, thinned[::stepsize]))
    #-- Any other mode (including unrecognized ones) averages groups of
    #-- stepsize consecutive elements.
    return np.mean(arr.reshape(-1,stepsize),axis=1)
def getRMS(flux,limits=(None,None),wave=None,wmin=None,wmax=None,minsize=20):
    '''
    Get the RMS of a flux array in a given wavelength range. If no wavelengths
    are given, the RMS of the whole array is given.

    If the array used for RMS calculation is too small, None is returned.

    The mean should already be subtracted!

    A 1-sigma clipping of the flux array can be done by providing limits.

    @param flux: The wavelength array
    @type flux: array
    @keyword limits: Flux limits if flux clipping (1 sigma!) is needed before
                     RMS calculation. None for both limits implies no clipping.
                     None for one of the limits implies a half-open interval.
                     (lower limit,upper limit)
                     (default: (None,None))
    @type limits: (float,float)
    @keyword wave: The wavelength array. If default, the RMS is calculated of
                   the whole flux array
    @type wave: array
    @keyword wmin: The minimum wavelength. If not given, the minimum wavelength
                   is the first entry in the wave array
                   (default: None)
    @type wmin: float
    @keyword wmax: The maximum wavelength. If not given, the maximum wavelength
                   is the last entry in the wave array
                   (default: None)
    @type wmax: float
    @keyword minsize: The minimum size of the selected array before proceeding
                      with the noise calculation. 0 if no min size is needed.
                      (default: 20)
    @type minsize: int

    @return: The flux RMS between given wavelengths
    @rtype: float
    '''
    fsel = selectArray(flux,wave,wmin,wmax)
    if fsel.size <= minsize:
        return None
    if limits == (None,None):
        return sqrt((fsel**2).sum()/float(len(fsel)))
    else:
        # NOTE(review): a half-open interval (one limit None) relies on
        # Python 2 None-comparison semantics here -- verify before porting.
        fsel2 = fsel[(fsel>limits[0])*(fsel<limits[1])]
        #-- Bug fix: the empty check must inspect the clipped selection
        #--   fsel2, not fsel (which was already verified non-empty above).
        if fsel2.size == 0:
            return None
        else:
            return sqrt((fsel2**2).sum()/float(len(fsel2)))
def getMean(flux,limits=(None,None),wave=None,wmin=None,wmax=None,minsize=20):
    '''
    Get the mean of a flux array in a given wavelength range. If no wavelengths
    are given, the mean of the whole array is given.

    If the array used for mean calculation is too small, None is returned.

    A 1-sigma clipping of the flux array can be done by providing limits.

    @param flux: The wavelength array
    @type flux: array
    @keyword limits: Flux limits if flux clipping (1 sigma!) is needed before
                     the mean calculation. None for both limits implies no
                     clipping. None for one of the limits implies a half-open
                     interval.
                     (default: (None,None))
    @type limits: (float,float)
    @keyword wave: The wavelength array. If default, the mean is calculated of
                   the whole flux array
    @type wave: array
    @keyword wmin: The minimum wavelength. If not given, the minimum wavelength
                   is the first entry in the wave array
                   (default: None)
    @type wmin: float
    @keyword wmax: The maximum wavelength. If not given, the maximum wavelength
                   is the last entry in the wave array
                   (default: None)
    @type wmax: float
    @keyword minsize: The minimum size of the selected array before proceeding
                      with the noise calculation. 0 if no min size is needed.
                      (default: 20)
    @type minsize: int

    @return: The flux mean between given wavelengths
    @rtype: float
    '''
    #-- Restrict to the requested wavelength range (and finite values).
    fsel = selectArray(flux,wave,wmin,wmax)
    #-- Too few points for a meaningful statistic.
    if fsel.size <= minsize:
        return None
    #-- Without clipping limits a plain mean suffices; otherwise use the
    #-- trimmed mean from scipy.stats.
    if limits == (None,None):
        return mean(fsel)
    return tmean(fsel,limits=limits)
def getStd(flux,limits=(None,None),wave=None,wmin=None,wmax=None,minsize=20):
    '''
    Get the std of a flux array in a given wavelength range. If no min/max
    wavelengths are given, the std of the whole array is given.

    If the array used for std calculation is too small, None is returned.

    A 1-sigma clipping of the flux array can be done by providing limits.

    @param flux: The wavelength array
    @type flux: array
    @keyword limits: Flux limits if flux clipping (1 sigma!) is needed before
                     STD calculation. None for both limits implies no clipping.
                     None for one of the limits implies a half-open interval.
                     (default: (None,None))
    @type limits: (float,float)
    @keyword wave: The wavelength array. If default, the STD is calculated of
                   the whole flux array
    @type wave: array
    @keyword wmin: The minimum wavelength. If not given, the minimum wavelength
                   is the first entry in the wave array
                   (default: None)
    @type wmin: float
    @keyword wmax: The maximum wavelength. If not given, the maximum wavelength
                   is the last entry in the wave array
                   (default: None)
    @type wmax: float
    @keyword minsize: The minimum size of the selected array before proceeding
                      with the noise calculation. 0 if no min size is needed.
                      (default: 20)
    @type minsize: int

    @return: The flux std between given wavelengths
    @rtype: float
    '''
    #-- Restrict to the requested wavelength range (and finite values).
    fsel = selectArray(flux,wave,wmin,wmax)
    #-- Too few points for a meaningful statistic.
    if fsel.size <= minsize:
        return None
    #-- Plain std without limits; trimmed std from scipy.stats otherwise.
    if limits == (None,None):
        return std(fsel)
    return tstd(fsel,limits=limits)
def selectArray(flux,wave=None,wmin=None,wmax=None):
    """
    Select the sub array of a flux, given a wavelength range. If no range is
    given, return the full array.

    Non-finite flux entries (NaN, inf) are always filtered out.

    @param flux: The wavelength array
    @type flux: array
    @keyword wave: The wavelength array. If default, no wavelength selection is
                   done.
    @type wave: array
    @keyword wmin: The minimum wavelength. If not given, the minimum wavelength
                   is the first entry in the wave array
                   (default: None)
    @type wmin: float
    @keyword wmax: The maximum wavelength. If not given, the maximum wavelength
                   is the last entry in the wave array
                   (default: None)
    @type wmax: float

    @return: The flux in given wavelengths
    @rtype: array
    """
    flux = array(flux)
    #-- Drop NaN/inf values; the same mask is applied to the wave grid so the
    #-- two stay aligned.
    finite = isfinite(flux)
    fsel = flux[finite]
    if wave is None:
        return fsel
    wsel = array(wave)[finite]
    #-- Unset boundaries default to the (finite) grid edges.
    wmin = wsel[0] if wmin is None else wmin
    wmax = wsel[-1] if wmax is None else wmax
    wmin, wmax = float(wmin), float(wmax)
    return fsel[(wsel>=wmin)*(wsel<=wmax)]
def arrayify(x):
    '''
    Check whether the input value is a proper array.

    @param x: The input candidate array
    @type x: anything

    @return: an array of the input value
    @rtype: array
    '''
    #-- Compatibility fix: collections.Iterable was removed from the
    #--   collections namespace in Python 3.10; it lives in collections.abc
    #--   (with a fallback for Python 2, which has no collections.abc).
    try:
        from collections.abc import Iterable
    except ImportError:
        from collections import Iterable
    #-- Already an array, that is not just an number
    if isinstance(x,np.ndarray) and x.shape:
        return x
    #-- An array of size 1, but just a number. Make it an array with a shape
    if isinstance(x,np.ndarray):
        return np.array([x])
    #-- Not an array, but iterable and not a string
    if isinstance(x,Iterable) and not isinstance(x,str):
        return np.array(x)
    #-- A string: make it a float first
    if isinstance(x,str):
        return np.array([float(x)])
    #-- Just a number, make it an array but through a one-element list
    return np.array([x])
| robinlombaert/ComboCode | cc/data/Data.py | Python | gpl-3.0 | 18,291 | [
"Gaussian"
] | 14b7984d26e22e6a8c760127ee834e9491a9fac3396c720c631e7610fdd7b84a |
"""
@name: Modules/House/Entertainment/entertainment_util.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2019-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Dec 23, 2019
@summary:
"""
# Import system type stuff
from ruamel.yaml.comments import CommentedSeq, CommentedMap
# Import PyMh files and modules.
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.EntertainConfig')
def _extract_list(p_config):
    """ Return the entries of a config sequence as a plain Python list. """
    return [l_entry for l_entry in p_config]
def extract_device_config_file(p_config):
    """ Recursively convert a ruamel.yaml config mapping into plain dicts.

    CommentedSeq values become plain lists, CommentedMap values are converted
    recursively, and everything else (int, str, ...) is copied as-is.

    @param p_config: a mapping as loaded by ruamel.yaml
    @return: a plain dict mirroring p_config
    """
    l_dict = {}
    for l_key, l_value in p_config.items():
        # LOG.debug('Key: "{}"\tType: {}'.format(l_key, type(l_value)))
        if isinstance(l_value, CommentedSeq):
            l_dict[l_key] = _extract_list(l_value)
        elif isinstance(l_value, CommentedMap):
            l_dict[l_key] = extract_device_config_file(l_value)
        else:
            #-- Scalars and any other value pass through unchanged. (The
            #--   original also had a separate int/str branch plus dead
            #--   placeholder assignments; both produced this same result.)
            l_dict[l_key] = l_value
    # LOG.debug(PrettyFormatAny.form(l_dict, 'extract key/Val'))
    return l_dict
def extract_zone(p_config):
    """ Zone is an output area.
    Each zone has a set of speakers that are driven by an A/V device.
    Zones:
        0: Main
        1: Lanai

    @param p_config: mapping of zone config entries
    @return: a dict with the zone's config entries
    """
    # NOTE(review): the required-key list is not yet enforced; kept for the
    # intended validation step -- TODO implement or remove.
    l_required = ['Name', 'Type', 'Host']
    l_obj = {}
    for l_key, l_value in p_config.items():
        #-- Bug fix: the original called setattr() on a dict, which raises
        #--   AttributeError for any non-empty config; store items instead.
        l_obj[l_key] = l_value
    return l_obj  # For testing.
# ## END DBK
| DBrianKimmel/PyHouse | Project/src/Modules/House/Entertainment/entertainment_utility.py | Python | mit | 1,912 | [
"Brian"
] | 2ead3c3373521cf3944526e7d517954acdd171e25cc1ff9c64e19845dd6eed98 |
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import sys
# Add path to Bio
sys.path.append('../..')
"""Code to work with the prosite.doc file from Prosite.
See http://www.expasy.ch/prosite/
Tested with:
Release 15.0, July 1998
Release 16.0, July 1999
Release 20.22, 13 November 2007
Release 20.43, 10 February 2009
Functions:
- read Read a Prodoc file containing exactly one Prodoc entry.
- parse Iterates over entries in a Prodoc file.
Classes:
- Record Holds Prodoc data.
- Reference Holds data from a Prodoc reference.
"""
__docformat__ = "restructuredtext en"
def read(handle):
    """Read in a record from a file with exactly one Prodoc record."""
    record = __read(handle)
    # Anything left in the stream means the file held more than one record.
    if handle.readline():
        raise ValueError("More than one Prodoc record found")
    return record
def parse(handle):
    """Iterate over the records in a Prodoc file."""
    # __read returns None once the stream is exhausted.
    record = __read(handle)
    while record:
        yield record
        record = __read(handle)
class Record(object):
    """Holds information from a Prodoc record.

    Members:
     - accession      Accession number of the record.
     - prosite_refs   List of tuples (prosite accession, prosite name).
     - text           Free format text.
     - references     List of reference objects.
    """

    def __init__(self):
        # All fields start empty and are filled in by the parser.
        self.accession = ''
        self.text = ''
        self.prosite_refs = []
        self.references = []
class Reference(object):
    """Holds information from a Prodoc citation.

    Members:
     - number     Number of the reference. (string)
     - authors    Names of the authors.
     - citation   Describes the citation.
    """

    def __init__(self):
        # Parser fills these in; empty strings mean "not seen yet".
        self.number = ''
        self.authors = ''
        self.citation = ''
# Below are private functions
def __read_prosite_reference_line(record, line):
    # A Prosite cross-reference line looks like '{PSxxxxx; NAME}'.
    stripped = line.rstrip()
    if stripped[-1] != '}':
        raise ValueError("I don't understand the Prosite reference on line\n%s" % stripped)
    accession, name = stripped[1:-1].split('; ')
    record.prosite_refs.append((accession, name))
def __read_text_line(record, line):
    # Accumulate free-format text; returning True keeps this handler active.
    record.text = record.text + line
    return True
def __read_reference_start(record, line):
    # Start a new reference; its number sits between the brackets '[NN]'.
    ref = Reference()
    ref.number = line[1:3].strip()
    content = line[4:].strip()
    if line[1] == 'E':
        # Electronic reference: the URL takes the citation slot, there is
        # no author list.
        ref.citation = content
    else:
        ref.authors = content
    record.references.append(ref)
def __read_reference_line(record, line):
    # A blank line ends the reference block (handler deactivates itself).
    if not line.strip():
        return False
    reference = record.references[-1]
    if line.startswith(' '):
        # Continuation line: extend the author list while it still ends in a
        # comma; otherwise the authors are complete and the citation follows.
        if reference.authors[-1] == ',':
            reference.authors += line[4:].rstrip()
        else:
            reference.citation += line[5:]
        return True
    #-- Consistency fix: raise ValueError like every other parse error in
    #--   this module (was a bare Exception; ValueError is a subclass of
    #--   Exception so existing handlers still catch it).
    raise ValueError("I don't understand the reference line\n%s" % line)
def __read_copyright_line(record, line):
    # Skip the boxed copyright statement; its bottom border ('+----')
    # deactivates this handler.
    return not line.startswith('+----')
def __read(handle):
    """Parse and return one Prodoc record from *handle*, or None at EOF."""
    # Skip blank lines between records
    for line in handle:
        line = line.rstrip()
        if line and not line.startswith("//"):
            break
    else:
        # Stream exhausted without finding the start of a record.
        return None
    record = Record()

    # Read the accession number
    if not line.startswith("{PDOC"):
        raise ValueError("Line does not start with '{PDOC':\n%s" % line)
    if line[-1] != '}':
        raise ValueError("I don't understand accession line\n%s" % line)
    record.accession = line[1:-1]

    # Read the Prosite references
    for line in handle:
        if line.startswith('{PS'):
            __read_prosite_reference_line(record, line)
        else:
            break
    else:
        raise ValueError("Unexpected end of stream.")

    # Read the actual text
    if not line.startswith('{BEGIN'):
        raise ValueError("Line does not start with '{BEGIN':\n%s" % line)
    # `read_line` acts as a small state machine: it points at the handler for
    # the current section (free text, reference block, copyright box), and a
    # handler returning False disables reading until the next section marker.
    read_line = __read_text_line
    for line in handle:
        if line.startswith('{END}'):
            # Clean up the record and return
            for reference in record.references:
                reference.citation = reference.citation.rstrip()
                reference.authors = reference.authors.rstrip()
            return record
        elif line[0] == '[' and line[3] == ']' and line[4] == ' ':
            # A '[NN] ' marker starts a new reference.
            __read_reference_start(record, line)
            read_line = __read_reference_line
        elif line.startswith('+----'):
            # Top border of the boxed copyright statement.
            read_line = __read_copyright_line
        elif read_line:
            if not read_line(record, line):
                read_line = None
    raise ValueError("Unexpected end of stream.")
| Ambuj-UF/ConCat-1.0 | src/Utils/Bio/ExPASy/Prodoc.py | Python | gpl-2.0 | 5,018 | [
"Biopython"
] | 82ca81e79b40908c6ae78277e7b5adb69c1b8f79bf175c838fceb55dba114a23 |
import numpy as np
import theano
import theano.tensor as T
from numpy.testing import assert_array_almost_equal
from smartlearner import views, stopping_criteria, Trainer, tasks
from smartlearner.optimizers import SGD, AdaGrad, Adam, RMSProp, Adadelta
from smartlearner.testing import DummyLoss, DummyBatchScheduler
from smartlearner.utils import sharedX
floatX = theano.config.floatX
class DummyLossWithGradient(DummyLoss):
    """Dummy loss that differentiates a given symbolic cost w.r.t. one parameter."""

    def __init__(self, cost, param):
        super().__init__()
        # Keep the symbolic cost and the parameter it will be derived by.
        self.param = param
        self.cost = cost

    def _get_gradients(self):
        # Map the parameter to its symbolic gradient.
        return {self.param: T.grad(cost=self.cost, wrt=self.param)}
def test_sgd():
    """SGD converges on well- and ill-conditioned Nd quadratic costs."""
    # Create simple Nd gaussian functions to optimize. These functions are
    # (perfectly) well-conditioned so it should take only one gradient step
    # to converge using 1/L, where L is the largest eigenvalue of the hessian.
    max_epoch = 2
    for N in range(1, 5):
        center = np.arange(1, N+1)[None, :].astype(floatX)
        param = sharedX(np.zeros((1, N)))
        cost = T.sum(0.5*T.dot(T.dot((param-center), T.eye(N)), (param-center).T))
        loss = DummyLossWithGradient(cost, param)

        trainer = Trainer(SGD(loss), DummyBatchScheduler())

        # Monitor the gradient of `loss` w.r.t. to `param`.
        tracker = tasks.Tracker(loss.gradients[param])
        trainer.append_task(tracker)
        trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))
        trainer.train()

        # Since the problem is well-conditionned and we use an optimal gradient step 1/L,
        # two epochs should be enough for `param` to be around `center` and the gradients near 0.
        assert_array_almost_equal(param.get_value(), center)
        assert_array_almost_equal(tracker[0], 0.)

    # Create an Nd gaussian function to optimize. This function is not
    # well-conditioned and there exists no perfect gradient step to converge in
    # only one iteration.
    # cost = T.sum(N*0.5*T.dot(T.dot((param-center), np.diag(1./np.arange(1, N+1))), ((param-center).T)))
    max_epoch = 80
    N = 4
    center = 5*np.ones((1, N)).astype(floatX)
    param = sharedX(np.zeros((1, N)))
    cost = T.sum(0.5*T.dot(T.dot((param-center), np.diag(1./np.arange(1, N+1))), (param-center).T))
    loss = DummyLossWithGradient(cost, param)

    trainer = Trainer(SGD(loss), DummyBatchScheduler())
    trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))

    # Monitor the gradient of `loss` w.r.t. to `param`.
    tracker = tasks.Tracker(loss.gradients[param])
    trainer.append_task(tracker)
    trainer.train()

    # NOTE(review): this second phase actually runs for 80 epochs (the comment
    # below was copied from the well-conditioned case above) -- verify.
    # Since the problem is well-conditionned and we use an optimal gradient step 1/L,
    # two epochs should be enough for `param` to be around `center` and the gradients near 0.
    assert_array_almost_equal(param.get_value(), center, decimal=6)
    assert_array_almost_equal(tracker[0], 0.)
def test_adagrad():
    """AdaGrad converges on ill-conditioned Nd quadratic costs."""
    max_epoch = 15

    #-- These Nd quadratic bowls are badly conditioned, so no single optimal
    #-- gradient step exists.
    for N in range(1, 5):
        center = 5*np.ones((1, N)).astype(floatX)
        param = sharedX(np.zeros((1, N)))
        scaling = np.diag(1./np.arange(1, N+1))
        diff = param - center
        cost = T.sum(0.5*T.dot(T.dot(diff, scaling), diff.T))
        loss = DummyLossWithGradient(cost, param)

        #-- AdaGrad copes with a very large base step and still converges --
        #-- faster, in fact, than SGD with its optimal step.
        optimizer = AdaGrad(loss, lr=100, eps=1e-1)
        trainer = Trainer(optimizer, DummyBatchScheduler())
        trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))

        #-- Track the gradient of `loss` w.r.t. `param`.
        tracker = tasks.Tracker(loss.gradients[param])
        trainer.append_task(tracker)
        trainer.train()

        #-- After 15 epochs, param should sit at the center with vanishing
        #-- gradients.
        assert_array_almost_equal(param.get_value(), center)
        assert_array_almost_equal(tracker[0], 0.)
def test_adam():
    """Adam converges on ill-conditioned Nd quadratic costs."""
    max_epoch = 300

    #-- These Nd quadratic bowls are badly conditioned, so no single optimal
    #-- gradient step exists.
    for N in range(1, 5):
        center = 5*np.ones((1, N)).astype(floatX)
        param = sharedX(np.zeros((1, N)))
        scaling = np.diag(1./np.arange(1, N+1))
        diff = param - center
        cost = T.sum(0.5*T.dot(T.dot(diff, scaling), diff.T))
        loss = DummyLossWithGradient(cost, param)

        #-- Even with a large step size Adam still converges.
        optimizer = Adam(loss, lr=1)
        trainer = Trainer(optimizer, DummyBatchScheduler())
        trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))

        #-- Track the gradient of `loss` w.r.t. `param`.
        tracker = tasks.Tracker(loss.gradients[param])
        trainer.append_task(tracker)
        trainer.train()

        #-- After 300 epochs, param should sit at the center with vanishing
        #-- gradients.
        assert_array_almost_equal(param.get_value(), center)
        assert_array_almost_equal(tracker[0], 0.)
def test_rmsprop():
    """RMSProp converges on ill-conditioned Nd quadratic costs."""
    max_epoch = 10

    #-- These Nd quadratic bowls are badly conditioned, so no single optimal
    #-- gradient step exists.
    for N in range(1, 5):
        center = 5*np.ones((1, N)).astype(floatX)
        param = sharedX(np.zeros((1, N)))
        scaling = np.diag(1./np.arange(1, N+1))
        diff = param - center
        cost = T.sum(0.5*T.dot(T.dot(diff, scaling), diff.T))
        loss = DummyLossWithGradient(cost, param)

        #-- Even with a large step size RMSProp still converges.
        optimizer = RMSProp(loss, lr=1)
        trainer = Trainer(optimizer, DummyBatchScheduler())
        trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))

        #-- Track the gradient of `loss` w.r.t. `param`.
        tracker = tasks.Tracker(loss.gradients[param])
        trainer.append_task(tracker)
        trainer.train()

        #-- After 10 epochs, param should sit at the center with vanishing
        #-- gradients.
        assert_array_almost_equal(param.get_value(), center)
        assert_array_almost_equal(tracker[0], 0.)
def test_adadelta():
    """Adadelta (learning-rate free) converges on ill-conditioned quadratics."""
    max_epoch = 1500

    #-- These Nd quadratic bowls are badly conditioned, so no single optimal
    #-- gradient step exists.
    for N in range(1, 5):
        center = 5*np.ones((1, N)).astype(floatX)
        param = sharedX(np.zeros((1, N)))
        scaling = np.diag(1./np.arange(1, N+1))
        diff = param - center
        cost = T.sum(0.5*T.dot(T.dot(diff, scaling), diff.T))
        loss = DummyLossWithGradient(cost, param)

        #-- Adadelta requires no learning rate.
        optimizer = Adadelta(loss)
        trainer = Trainer(optimizer, DummyBatchScheduler())
        trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))

        #-- Track the gradient of `loss` w.r.t. `param`.
        tracker = tasks.Tracker(loss.gradients[param])
        trainer.append_task(tracker)
        trainer.train()

        #-- After 1500 epochs, param should sit at the center with vanishing
        #-- gradients.
        assert_array_almost_equal(param.get_value(), center)
        assert_array_almost_equal(tracker[0], 0.)
| SMART-Lab/smartlearner | tests/optimizers/tests_optimizers.py | Python | bsd-3-clause | 7,477 | [
"Gaussian"
] | a1a08760956f33e84dd14383abe04d757548b6ebfb124dfefec59575a64d53b9 |
from __future__ import print_function, division
from sympy.core import S, C, sympify
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.logic import fuzzy_and
from sympy.ntheory import sieve
from math import sqrt as _sqrt
from sympy.core.compatibility import reduce, as_int, xrange
from sympy.core.cache import cacheit
class CombinatorialFunction(Function):
    """Base class for combinatorial functions. """

    def _eval_simplify(self, ratio, measure):
        from sympy.simplify.simplify import combsimp
        # Only keep the combsimp'ed form when it is simple enough according
        # to the requested ratio of the given complexity measure.
        simplified = combsimp(self)
        if measure(simplified) <= ratio*measure(self):
            return simplified
        return self
###############################################################################
######################## FACTORIAL and MULTI-FACTORIAL ########################
###############################################################################
class factorial(CombinatorialFunction):
    """Implementation of factorial function over nonnegative integers.
       By convention (consistent with the gamma function and the binomial
       coefficients), factorial of a negative integer is complex infinity.

       The factorial is very important in combinatorics where it gives
       the number of ways in which `n` objects can be permuted. It also
       arises in calculus, probability, number theory, etc.

       There is strict relation of factorial with gamma function. In
       fact n! = gamma(n+1) for nonnegative integers. Rewrite of this
       kind is very useful in case of combinatorial simplification.

       Computation of the factorial is done using two algorithms. For
       small arguments naive product is evaluated. However for bigger
       input algorithm Prime-Swing is used. It is the fastest algorithm
       known and computes n! via prime factorization of special class
       of numbers, called here the 'Swing Numbers'.

       Examples
       ========

       >>> from sympy import Symbol, factorial, S
       >>> n = Symbol('n', integer=True)

       >>> factorial(0)
       1

       >>> factorial(7)
       5040

       >>> factorial(-2)
       zoo

       >>> factorial(n)
       factorial(n)

       >>> factorial(2*n)
       factorial(2*n)

       >>> factorial(S(1)/2)
       factorial(1/2)

       See Also
       ========

       factorial2, RisingFactorial, FallingFactorial
    """

    def fdiff(self, argindex=1):
        # d/dn n! = gamma(n+1)*psi(n+1), via the gamma representation.
        if argindex == 1:
            return C.gamma(self.args[0] + 1)*C.polygamma(0, self.args[0] + 1)
        else:
            raise ArgumentIndexError(self, argindex)

    # Precomputed swing numbers for n < 33, the base case of Prime-Swing.
    _small_swing = [
        1, 1, 1, 3, 3, 15, 5, 35, 35, 315, 63, 693, 231, 3003, 429, 6435, 6435, 109395,
        12155, 230945, 46189, 969969, 88179, 2028117, 676039, 16900975, 1300075,
        35102025, 5014575, 145422675, 9694845, 300540195, 300540195
    ]

    @classmethod
    def _swing(cls, n):
        # Swing number of n, computed from its prime factorization.
        if n < 33:
            return cls._small_swing[n]
        else:
            N, primes = int(_sqrt(n)), []

            # Primes up to sqrt(n): their exponent is the number of odd
            # quotients n // prime^k over all k.
            for prime in sieve.primerange(3, N + 1):
                p, q = 1, n

                while True:
                    q //= prime

                    if q > 0:
                        if q & 1 == 1:
                            p *= prime
                    else:
                        break

                if p > 1:
                    primes.append(p)

            # Primes in (sqrt(n), n/3]: appear once iff n // prime is odd.
            for prime in sieve.primerange(N + 1, n//3 + 1):
                if (n // prime) & 1 == 1:
                    primes.append(prime)

            # Primes in (n/2, n] always divide the swing number exactly once.
            L_product = R_product = 1

            for prime in sieve.primerange(n//2 + 1, n + 1):
                L_product *= prime

            for prime in primes:
                R_product *= prime

            return L_product*R_product

    @classmethod
    def _recursive(cls, n):
        # Odd part of n!: (n//2)!_odd squared times swing(n), recursively.
        if n < 2:
            return 1
        else:
            return (cls._recursive(n//2)**2)*cls._swing(n)

    @classmethod
    def eval(cls, n):
        n = sympify(n)

        if n.is_Number:
            if n is S.Zero:
                return S.One
            elif n is S.Infinity:
                return S.Infinity
            elif n.is_Integer:
                if n.is_negative:
                    # Consistent with gamma(n+1): poles at negative integers.
                    return S.ComplexInfinity
                else:
                    n, result = n.p, 1

                    if n < 20:
                        # Small n: a naive product is fastest.
                        for i in range(2, n + 1):
                            result *= i
                    else:
                        # Large n (Prime-Swing): the odd part comes from
                        # _recursive; the power of two is n minus the
                        # number of set bits in n (Legendre's formula).
                        N, bits = n, 0

                        while N != 0:
                            if N & 1 == 1:
                                bits += 1

                            N = N >> 1

                        result = cls._recursive(n)*2**(n - bits)

                    return C.Integer(result)

    def _eval_rewrite_as_gamma(self, n):
        # n! = gamma(n + 1).
        return C.gamma(n + 1)

    def _eval_is_integer(self):
        return self.args[0].is_integer

    def _eval_is_positive(self):
        # Known positive for nonnegative integer arguments; None otherwise.
        if self.args[0].is_integer and self.args[0].is_positive:
            return True
class MultiFactorial(CombinatorialFunction):
    # Placeholder for the general multifactorial; not implemented yet.
    pass
class subfactorial(CombinatorialFunction):
    """The subfactorial counts the derangements of n items and is
    defined for non-negative integers as::

              ,
             |  1                             for n = 0
        !n = {  0                             for n = 1
             |  (n - 1)*(!(n - 1) + !(n - 2)) for n > 1
              `

    It can also be written as int(round(n!/exp(1))) but the recursive
    definition with caching is implemented for this function.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Subfactorial

    Examples
    ========

    >>> from sympy import subfactorial
    >>> from sympy.abc import n
    >>> subfactorial(n + 1)
    subfactorial(n + 1)
    >>> subfactorial(5)
    44

    See Also
    ========

    factorial, sympy.utilities.iterables.generate_derangements
    """

    @classmethod
    @cacheit
    def _eval(self, n):
        # Recurrence for the derangement numbers; cacheit memoizes results
        # across calls, so evaluation is effectively linear in n.
        if not n:
            return 1
        elif n == 1:
            return 0
        return (n - 1)*(self._eval(n - 1) + self._eval(n - 2))

    @classmethod
    def eval(cls, arg):
        try:
            arg = as_int(arg)
            if arg < 0:
                raise ValueError
            return C.Integer(cls._eval(arg))
        except ValueError:
            # Explicit non-integer numbers are rejected; symbolic arguments
            # fall through and stay unevaluated.
            if sympify(arg).is_Number:
                raise ValueError("argument must be a nonnegative integer")

    def _eval_is_integer(self):
        # Integer whenever the argument is a nonnegative integer.
        return fuzzy_and((self.args[0].is_integer,
                          self.args[0].is_nonnegative))
class factorial2(CombinatorialFunction):
    """The double factorial n!!, not to be confused with (n!)!

    The double factorial is defined for integers >= -1 as::

               ,
              |  n*(n - 2)*(n - 4)* ... * 1    for n odd
        n!! = {  n*(n - 2)*(n - 4)* ... * 2    for n even
              |  1                             for n = 0, -1
               `

    Examples
    ========

    >>> from sympy import factorial2, var
    >>> var('n')
    n
    >>> factorial2(n + 1)
    factorial2(n + 1)
    >>> factorial2(5)
    15
    >>> factorial2(-1)
    1

    See Also
    ========

    factorial, RisingFactorial, FallingFactorial
    """

    @classmethod
    def eval(cls, arg):
        # Only explicit numbers are evaluated; symbolic arguments return
        # None and stay unevaluated.
        if arg.is_Number:
            if arg == S.Zero or arg == S.NegativeOne:
                return S.One
            # NOTE(review): integers below -1 never reach the base cases
            # above, so this recursion would not terminate for them --
            # confirm the expected domain is n >= -1 as documented.
            return factorial2(arg - 2)*arg

    def _eval_is_integer(self):
        # Integer iff the argument is an integer >= -1.
        return fuzzy_and((self.args[0].is_integer,
                          (self.args[0] + 1).is_nonnegative))
###############################################################################
######################## RISING and FALLING FACTORIALS ########################
###############################################################################
class RisingFactorial(CombinatorialFunction):
    """Rising factorial (also called Pochhammer symbol) is a double valued
       function arising in concrete mathematics, hypergeometric functions
       and series expansions. It is defined by:

                   rf(x, k) = x * (x+1) * ... * (x + k-1)

       where 'x' can be arbitrary expression and 'k' is an integer. For
       more information check "Concrete mathematics" by Graham, pp. 66
       or visit http://mathworld.wolfram.com/RisingFactorial.html page.

       Examples
       ========

       >>> from sympy import rf
       >>> from sympy.abc import x

       >>> rf(x, 0)
       1

       >>> rf(1, 5)
       120

       >>> rf(x, 5) == x*(1 + x)*(2 + x)*(3 + x)*(4 + x)
       True

       See Also
       ========

       factorial, factorial2, FallingFactorial
    """

    @classmethod
    def eval(cls, x, k):
        x = sympify(x)
        k = sympify(k)

        if x is S.NaN:
            return S.NaN
        elif x is S.One:
            # rf(1, k) = k!
            return factorial(k)
        elif k.is_Integer:
            if k is S.NaN:
                return S.NaN
            elif k is S.Zero:
                # Empty product.
                return S.One
            else:
                if k.is_positive:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        # Sign follows the parity of k.
                        if k.is_odd:
                            return S.NegativeInfinity
                        else:
                            return S.Infinity
                    else:
                        # Direct product x*(x+1)*...*(x+k-1).
                        return reduce(lambda r, i: r*(x + i), xrange(0, int(k)), 1)
                else:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        return S.Infinity
                    else:
                        # Negative k: rf(x, -|k|) = 1/((x-1)*(x-2)*...*(x-|k|)).
                        return 1/reduce(lambda r, i: r*(x - i), xrange(1, abs(int(k)) + 1), 1)

    def _eval_rewrite_as_gamma(self, x, k):
        # rf(x, k) = gamma(x + k)/gamma(x).
        return C.gamma(x + k) / C.gamma(x)

    def _eval_is_integer(self):
        return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
                          self.args[1].is_nonnegative))
class FallingFactorial(CombinatorialFunction):
    """Falling factorial (related to rising factorial) is a double valued
    function arising in concrete mathematics, hypergeometric functions
    and series expansions. It is defined by

        ff(x, k) = x * (x-1) * ... * (x - k+1)

    where 'x' can be arbitrary expression and 'k' is an integer. For
    more information check "Concrete mathematics" by Graham, pp. 66
    or visit http://mathworld.wolfram.com/FallingFactorial.html page.

    Examples
    ========

    >>> from sympy import ff
    >>> from sympy.abc import x

    >>> ff(x, 0)
    1

    >>> ff(5, 5)
    120

    >>> ff(x, 5) == x*(x-1)*(x-2)*(x-3)*(x-4)
    True

    See Also
    ========

    factorial, factorial2, RisingFactorial
    """
    @classmethod
    def eval(cls, x, k):
        x = sympify(x)
        k = sympify(k)
        if x is S.NaN:
            return S.NaN
        elif k.is_Integer:
            # NOTE: `k is S.NaN` appears unreachable here (NaN is not an
            # Integer); kept for symmetry with RisingFactorial.eval.
            if k is S.NaN:
                return S.NaN
            elif k is S.Zero:
                # empty product
                return S.One
            else:
                if k.is_positive:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        # sign alternates with the parity of k
                        if k.is_odd:
                            return S.NegativeInfinity
                        else:
                            return S.Infinity
                    else:
                        # finite x: expand the product x*(x-1)*...*(x-k+1)
                        return reduce(lambda r, i: r*(x - i), xrange(0, int(k)), 1)
                else:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        return S.Infinity
                    else:
                        # negative k: ff(x, k) == 1/((x+1)*(x+2)*...*(x+|k|))
                        return 1/reduce(lambda r, i: r*(x + i), xrange(1, abs(int(k)) + 1), 1)

    def _eval_rewrite_as_gamma(self, x, k):
        # ff(x, k) == (-1)**k * gamma(k - x)/gamma(-x)
        return (-1)**k * C.gamma(-x + k) / C.gamma(-x)

    def _eval_is_integer(self):
        # ff(x, k) is an integer for integer x and nonnegative integer k.
        return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
                          self.args[1].is_nonnegative))
# Conventional short aliases ("Concrete Mathematics" notation).
rf = RisingFactorial
ff = FallingFactorial
###############################################################################
########################### BINOMIAL COEFFICIENTS #############################
###############################################################################
class binomial(CombinatorialFunction):
    """Implementation of the binomial coefficient. It can be defined
    in two ways depending on its desired interpretation:

        C(n,k) = n!/(k!(n-k)!)   or   C(n, k) = ff(n, k)/k!

    First, in a strict combinatorial sense it defines the
    number of ways we can choose 'k' elements from a set of
    'n' elements. In this case both arguments are nonnegative
    integers and binomial is computed using an efficient
    algorithm based on prime factorization.

    The other definition is generalization for arbitrary 'n',
    however 'k' must also be nonnegative. This case is very
    useful when evaluating summations.

    For the sake of convenience for negative 'k' this function
    will return zero no matter what valued is the other argument.

    To expand the binomial when n is a symbol, use either
    expand_func() or expand(func=True). The former will keep the
    polynomial in factored form while the latter will expand the
    polynomial itself. See examples for details.

    Examples
    ========

    >>> from sympy import Symbol, Rational, binomial, expand_func
    >>> n = Symbol('n', integer=True)

    >>> binomial(15, 8)
    6435

    >>> binomial(n, -1)
    0

    >>> [ binomial(0, i) for i in range(1)]
    [1]
    >>> [ binomial(1, i) for i in range(2)]
    [1, 1]
    >>> [ binomial(2, i) for i in range(3)]
    [1, 2, 1]
    >>> [ binomial(3, i) for i in range(4)]
    [1, 3, 3, 1]
    >>> [ binomial(4, i) for i in range(5)]
    [1, 4, 6, 4, 1]

    >>> binomial(Rational(5,4), 3)
    -5/128

    >>> binomial(n, 3)
    binomial(n, 3)

    >>> binomial(n, 3).expand(func=True)
    n**3/6 - n**2/2 + n/3

    >>> expand_func(binomial(n, 3))
    n*(n - 2)*(n - 1)/6
    """
    def fdiff(self, argindex=1):
        # Derivative with respect to the first (argindex=1) or second
        # (argindex=2) argument, expressed through polygamma functions.
        if argindex == 1:
            # http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/01/
            n, k = self.args
            return binomial(n, k)*(C.polygamma(0, n + 1) - C.polygamma(0, n - k + 1))
        elif argindex == 2:
            # http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/02/
            n, k = self.args
            return binomial(n, k)*(C.polygamma(0, n - k + 1) - C.polygamma(0, k + 1))
        else:
            raise ArgumentIndexError(self, argindex)

    @classmethod
    def eval(cls, n, k):
        # Automatic evaluation.  Returning None leaves binomial(n, k)
        # unevaluated.
        n, k = map(sympify, (n, k))
        if k.is_Number:
            if k.is_Integer:
                if k < 0:
                    # C(n, k) == 0 for negative integer k, for any n
                    return S.Zero
                elif k == 0 or n == k:
                    return S.One
                elif n.is_Integer and n >= 0:
                    n, k = int(n), int(k)
                    if k > n:
                        return S.Zero
                    elif k > n // 2:
                        # symmetry: C(n, k) == C(n, n - k)
                        k = n - k
                    # Build C(n, k) from its prime factorization: for each
                    # prime p <= n, find the exponent of p in the result.
                    # The while loop below appears to count carries when
                    # adding k and n - k in base p (Kummer's theorem) --
                    # each carry contributes one factor of p.
                    M, result = int(_sqrt(n)), 1
                    for prime in sieve.primerange(2, n + 1):
                        if prime > n - k:
                            # primes in (n - k, n] divide the result exactly once
                            result *= prime
                        elif prime > n // 2:
                            # primes in (n/2, n - k] cannot divide the result
                            continue
                        elif prime > M:
                            # primes above sqrt(n) appear at most once;
                            # they do iff adding k and n - k carries in base p
                            if n % prime < k % prime:
                                result *= prime
                        else:
                            # small primes: count the exact exponent
                            N, K = n, k
                            exp = a = 0
                            while N > 0:
                                a = int((N % prime) < (K % prime + a))
                                N, K = N // prime, K // prime
                                exp = a + exp
                            if exp > 0:
                                result *= prime**exp
                    return C.Integer(result)
                elif n.is_Number:
                    # numeric n that is not a nonnegative integer:
                    # evaluate ff(n, k)/k! term by term
                    result = n - k + 1
                    for i in xrange(2, k + 1):
                        result *= n - k + i
                        result /= i
                    return result
            elif k.is_negative:
                return S.Zero
            elif (n - k).simplify().is_negative:
                return S.Zero
        else:
            # symbolic k: if n - k reduces to an integer, evaluate via the
            # symmetry C(n, k) == C(n, n - k)
            d = n - k
            if d.is_Integer:
                return cls.eval(n, d)

    def _eval_expand_func(self, **hints):
        """
        Function to expand binomial(n,k) when k is a positive integer.

        n is self.args[0] and k is self.args[1] while using binomial(n, k)
        """
        n = self.args[0]
        if n.is_Number:
            return binomial(*self.args)
        k = self.args[1]
        if k.is_Add and n in k.args:
            # k contains n (e.g. binomial(n, n + c)): use symmetry so that
            # the effective k becomes the integer n - k
            k = n - k
        if k.is_Integer:
            if k == S.Zero:
                return S.One
            elif k < 0:
                return S.Zero
            else:
                # expand as a product of k linear factors divided by k!
                n = self.args[0]
                result = n - k + 1
                for i in xrange(2, k + 1):
                    result *= n - k + i
                    result /= i
                return result
        else:
            return binomial(*self.args)

    def _eval_rewrite_as_factorial(self, n, k):
        # C(n, k) == n!/(k! (n-k)!)
        return C.factorial(n)/(C.factorial(k)*C.factorial(n - k))

    def _eval_rewrite_as_gamma(self, n, k):
        # C(n, k) == gamma(n+1)/(gamma(k+1) gamma(n-k+1))
        return C.gamma(n + 1)/(C.gamma(k + 1)*C.gamma(n - k + 1))

    def _eval_is_integer(self):
        # integer whenever both arguments are integers
        return self.args[0].is_integer and self.args[1].is_integer
| shipci/sympy | sympy/functions/combinatorial/factorials.py | Python | bsd-3-clause | 17,896 | [
"VisIt"
] | df820383bf077cd397b119bce197ab2fbbe82c24cad592cade89d91055c28185 |
from __future__ import absolute_import, division, print_function, unicode_literals
from textwrap import dedent
import collections
import hashlib
from functools import wraps
import os
import numpy as np
import netCDF4 as nc4
from gridded.utilities import (get_dataset,
_reorganize_spatial_data,
_align_results_to_spatial_data,
asarraylike)
from gridded import VALID_LOCATIONS
from gridded.grids import Grid, Grid_U, Grid_S, Grid_R
from gridded.depth import Depth
from gridded.time import Time
import logging
log = logging.getLogger(__name__)
class Variable(object):
"""
Variable object: represents a field of values associated with the grid.
Abstractly, it is usually a scalar physical property such a temperature,
salinity that varies over a the domain of the model.
This more or less maps to a variable in a netcdf file, but does not have
to come form a netcdf file, and this provides and abstraction where the
user can access the value in world coordinates, interpolated from the grid.
It holds a reference to its own grid object, and its data.
"""
default_names = []
cf_names = []
_def_count = 0
_default_component_types = {'time': Time,
'grid': Grid,
'depth': Depth}
    def __init__(self,
                 name=None,
                 units=None,
                 time=None,
                 data=None,
                 grid=None,
                 depth=None,
                 data_file=None,
                 grid_file=None,
                 dataset=None,
                 varname=None,
                 fill_value=0,
                 location=None,
                 attributes=None,
                 **kwargs):
        '''
        This class represents a phenomenon using gridded data

        :param name: Name
        :type name: string

        :param units: Units
        :type units: string

        :param time: Time axis of the data
        :type time: list of datetime.datetime, netCDF4 Variable, or Time object

        :param data: Underlying data source
        :type data: array-like object such as netCDF4.Variable or numpy.ndarray

        :param grid: Grid that the data corresponds with
        :type grid: Grid object (pysgrid or pyugrid or )

        :param depth: Depth axis of the data, if any
        :type depth: Depth object

        :param data_file: Name of data source file
        :type data_file: string

        :param grid_file: Name of grid source file
        :type grid_file: string

        :param dataset: open netCDF4 Dataset.
                        NOTE(review): currently unused in this constructor.

        :param varname: Name of the variable in the data source file
        :type varname: string

        :param fill_value: the fill value used for undefined data

        :param location: location on the grid -- possible values
                         depend on the grid type
        :type location: str

        :param attributes: attributes associated with the Variable
                           (analogous to netcdf variable attributes)
        :type attributes: dict of key:value pairs
        '''
        # if any([grid is None, data is None]):
        #     raise ValueError("Grid and Data must be defined")
        # if not hasattr(data, 'shape'):
        #     if grid.infer_location is None:
        #         raise ValueError('Data must be able to fit to the grid')
        self.grid = grid
        self.depth = depth
        # seed the private attributes so the property setters below can run
        self.name = self._units = self._time = self._data = None
        self.name = name
        self.units = units
        # location is set before data: the data setter may infer/verify it
        self.location = location
        self.data = data
        # default to a single constant time axis when none is given
        self.time = time if time is not None else self._default_component_types['time'].constant_time()
        self.data_file = data_file
        # the "main" filename for a Varibale should be the grid data.
        self.filename = data_file
        self.grid_file = grid_file
        self.varname = varname
        self._result_memo = collections.OrderedDict()
        self.fill_value = fill_value
        self.attributes = {} if attributes is None else attributes
        # if the data is a netcdf variable, pull the attributes from there
        try:
            for attr in self.data.ncattrs():
                self.attributes[attr] = data.getncattr(attr)
        except AttributeError:  # must not be a netcdf variable
            pass  # so just use what was passed in.
        # NOTE(review): extra **kwargs are silently ignored.
        # for k in kwargs:
        #     setattr(self, k, kwargs[k])
    @classmethod
    def from_netCDF(cls,
                    filename=None,
                    varname=None,
                    grid_topology=None,
                    name=None,
                    units=None,
                    time=None,
                    time_origin=None,
                    grid=None,
                    depth=None,
                    dataset=None,
                    data_file=None,
                    grid_file=None,
                    location=None,
                    load_all=False,  # Do we need this? I think not --- maybe a method to fully load later if wanted.
                    fill_value=0,
                    **kwargs
                    ):
        '''
        Allows one-function creation of a Variable from a file.

        :param filename: Default data source. Has lowest priority.
                         If dataset, grid_file, or data_file are provided,
                         this function uses them first
        :type filename: string

        :param varname: Explicit name of the data in the data source file.
                        Equivalent to the key used to look the item up
                        directly eg 'ds["lon_u"]' for a netCDF4 Dataset.
        :type varname: string

        :param grid_topology: Description of the relationship between grid
                              attributes and variable names.
        :type grid_topology: {string : string, ...}

        :param name: Name of this object
        :type name: string

        :param units: string such as 'm/s'
        :type units: string

        :param time: Time axis of the data. May be a constructed ``gridded.Time``
                     object, or collection of datetime.datetime objects
        :type time: [] of datetime.datetime, netCDF4 Variable, or Time object

        :param time_origin: if given, the time axis is rebuilt with this origin

        :param data: Underlying data object. May be any array-like,
                     including netCDF4 Variable, etc
        :type data: netCDF4.Variable or numpy.array

        :param grid: Grid that the data corresponds to
        :type grid: pysgrid or pyugrid

        :param location: The feature where the data aligns with the grid.
        :type location: string

        :param depth: Depth axis object from ``gridded.depth``
        :type depth: Depth, S_Depth or L_Depth

        :param dataset: Instance of open netCDF4.Dataset
        :type dataset: netCDF4.Dataset

        :param data_file: Name of data source file, if data and grid files are separate
        :type data_file: string

        :param grid_file: Name of grid source file, if data and grid files are separate
        :type grid_file: string
        '''
        # component classes are overridable by subclasses
        Grid = cls._default_component_types['grid']
        Time = cls._default_component_types['time']
        Depth = cls._default_component_types['depth']
        if filename is not None:
            data_file = filename
            grid_file = filename
        ds = None
        dg = None
        # resolve the data (ds) and grid (dg) datasets, sharing one handle
        # when they are the same file
        if dataset is None:
            if grid_file == data_file:
                ds = dg = get_dataset(grid_file)
            else:
                ds = get_dataset(data_file)
                dg = get_dataset(grid_file)
        else:
            if grid_file is not None:
                dg = get_dataset(grid_file)
            else:
                dg = dataset
            ds = dataset
            if data_file is None:
                data_file = os.path.split(ds.filepath())[-1]
        if grid is None:
            grid = Grid.from_netCDF(grid_file,
                                    dataset=dg,
                                    grid_topology=grid_topology)
        if varname is None:
            # fall back to the class's default/CF variable names
            varname = cls._gen_varname(data_file,
                                       dataset=ds)
            if varname is None:
                raise NameError('Default current names are not in the data file, '
                                'must supply variable name')
        data = ds[varname]
        if name is None:
            # auto-generate a unique-ish name per class
            name = cls.__name__ + str(cls._def_count)
            cls._def_count += 1
        if units is None:
            try:
                units = data.units
            except AttributeError:
                units = None
        if time is None:
            time = Time.from_netCDF(filename=data_file,
                                    dataset=ds,
                                    datavar=data)
            if time_origin is not None:
                # rebuild the time axis shifted to the requested origin
                time = Time(data=time.data,
                            filename=time.filename,
                            varname=time.varname,
                            origin=time_origin)
        if depth is None:
            # a 4D structured (or 3D unstructured) variable implies a depth axis
            if (isinstance(grid, (Grid_S, Grid_R)) and len(data.shape) == 4 or
                    isinstance(grid, Grid_U) and len(data.shape) == 3):
                depth = Depth.from_netCDF(grid_file,
                                          dataset=dg,
                                          )
        if location is None:
            if hasattr(data, 'location'):
                location = data.location
        # if len(data.shape) == 4 or (len(data.shape) == 3 and time is None):
        #     from gnome.environment.environment_objects import S_Depth
        #     depth = S_Depth.from_netCDF(grid=grid,
        #                                 depth=1,
        #                                 data_file=data_file,
        #                                 grid_file=grid_file,
        #                                 **kwargs)
        if load_all:
            # pull the whole array into memory instead of lazy netCDF access
            data = data[:]
        return cls(name=name,
                   units=units,
                   time=time,
                   data=data,
                   grid=grid,
                   depth=depth,
                   grid_file=grid_file,
                   data_file=data_file,
                   fill_value=fill_value,
                   location=location,
                   varname=varname,
                   **kwargs)
def __str__(self):
return self.__repr__()
    def __repr__(self):
        # Debug representation showing the key identifying attributes.
        return ('{0.__class__.__module__}.{0.__class__.__name__}('
                'name="{0.name}", '
                'time="{0.time}", '
                'units="{0.units}", '
                'data="{0.data}", '
                ')').format(self)
    @property
    def location(self):
        # Prefer the explicitly-set location; fall back to the `location`
        # attribute of the underlying data object (e.g. a netCDF variable)
        # when none was set.
        if self._location is None and self.data is not None and hasattr(self.data, 'location'):
            return self.data.location
        else:
            return self._location

    @location.setter
    def location(self, location):
        # Fixme: perhaps we need Variable subclasses,
        # to distinguish between variable types.
        # NOTE(review): any value outside VALID_LOCATIONS is rejected;
        # __init__ defaults location to None, so presumably None is a
        # member of VALID_LOCATIONS -- confirm.
        if location not in VALID_LOCATIONS:
            raise ValueError("Invalid location: {}, must be one of: {}".format(location, VALID_LOCATIONS))
        self._location = location
    @property
    def info(self):
        """
        Human-readable summary of the variable object.

        This could be filled out more
        """
        try:
            std_name = self.attributes['standard_name']
        except KeyError:
            std_name = None
        msg = """
              Variable:
                filename: {0.filename}
                varname: {0.varname}
                standard name: {1}
                units: {0.units}
                grid: {0.grid}
                data shape: {0.data.shape}
              """.format(self, std_name)
        return dedent(msg)
    @property
    def time(self):
        # Time object describing the data's time axis (or None).
        return self._time

    @time.setter
    def time(self, t):
        Time = self.__class__._default_component_types['time']
        if t is None:
            self._time = None
            return
        # a multi-step time axis must match the data's leading dimension
        if self.data is not None and len(t) != self.data.shape[0] and len(t) > 1:
            raise ValueError("Data/time interval mismatch")
        if isinstance(t, Time):
            self._time = t
        elif isinstance(t, collections.Iterable) or isinstance(t, nc4.Variable):
            # NOTE(review): collections.Iterable is deprecated and removed
            # in Python 3.10 -- should be collections.abc.Iterable.
            self._time = Time(t)
        else:
            raise ValueError("Time must be set with an iterable container or netCDF variable")
    @property
    def data(self):
        # The underlying array-like data source.
        return self._data

    @data.setter
    def data(self, d):
        # normalize to an array-like the rest of the code can index
        d = asarraylike(d)
        # Fixme: maybe all this checking should be done when it gets added to the Dataset??
        if self.time is not None and len(d) != len(self.time):
            raise ValueError("Data/time interval mismatch")
        ## fixme: we should check Depth, too.
        # if self.grid is not None and self.grid.infer_location(d) is None:
        #     raise ValueError("Data/grid shape mismatch. Data shape is {0}, Grid shape is {1}".format(d.shape, self.grid.node_lon.shape))
        if self.grid is not None:  # if there is not a grid, we can't check this
            if self.location is None:  # not set, let's try to figure it out
                self.location = self.grid.infer_location(d)
            if self.location is None:
                raise ValueError("Data/grid shape mismatch: Data shape is {0}, "
                                 "Grid shape is {1}".format(d.shape, self.grid.node_lon.shape))
        self._data = d
    @property
    def units(self):
        '''
        Units of underlying data

        :rtype: string
        '''
        return self._units

    @units.setter
    def units(self, unit):
        # NOTE(review): no validation is performed -- any value is accepted.
        # if unit is not None:
        #     if not unit_conversion.is_supported(unit):
        #         raise ValueError('Units of {0} are not supported'.format(unit))
        self._units = unit
    @property
    def grid_shape(self):
        # Shape of the grid: prefer an explicit `shape` attribute, else
        # fall back to the node-longitude array's shape.
        if hasattr(self.grid, 'shape'):
            return self.grid.shape
        else:
            return self.grid.node_lon.shape

    @property
    def data_shape(self):
        # Shape of the underlying data array.
        return self.data.shape

    @property
    def is_data_on_nodes(self):
        # True when the grid infers the data to be located on the nodes.
        return self.grid.infer_location(self._data) == 'node'
def _get_hash(self, points, time):
"""
Returns a SHA1 hash of the array of points passed in
"""
return (hashlib.sha1(points.tobytes()).hexdigest(),
hashlib.sha1(str(time).encode('utf-8')).hexdigest())
    def _memoize_result(self, points, time, result, D, _copy=False, _hash=None):
        """
        Store an interpolation result in memo dict D, keyed by the
        (points, time) hash.  D acts as a small bounded cache: it is
        capped at 5 entries, evicting the oldest (OrderedDict FIFO).
        Stored arrays are marked read-only so cached values cannot be
        mutated in place by callers.
        """
        if _copy:
            result = result.copy()
        result.setflags(write=False)
        if _hash is None:
            _hash = self._get_hash(points, time)
        if D is not None and len(D) > 4:
            # cap cache size; popitem(last=False) drops the oldest entry
            D.popitem(last=False)
        D[_hash] = result
        D[_hash].setflags(write=False)

    def _get_memoed(self, points, time, D, _copy=False, _hash=None):
        """
        Return a previously memoized result for (points, time) from D,
        or None on a cache miss.  With _copy=True a copy of the cached
        array is returned instead of the read-only cached array itself.
        """
        if _hash is None:
            _hash = self._get_hash(points, time)
        if (D is not None and _hash in D):
            return D[_hash].copy() if _copy else D[_hash]
        else:
            return None
    def center_values(self, time, units=None, extrapolate=False):
        """
        interpolate data to the center of the cells

        :param time: the time to interpolate at

        **Warning:** NOT COMPLETE -- always raises NotImplementedError.

        NOTE: what if this data is already on the cell centers?
        """
        raise NotImplementedError("center_values is not finished")
        # NOTE(review): everything below is unreachable draft code, kept
        # for reference until the implementation is completed.
        if not extrapolate:
            self.time.valid_time(time)
        if len(self.time) == 1:
            if len(self.data.shape) == 2:
                if isinstance(self.grid, Grid_S):
                    # curv grid
                    value = self.data[0:1:-2, 1:-2]
                else:
                    value = self.data
            else:
                centers = self.grid.get_center_points()
                value = self.at(centers, time, units)
        return value
    @property
    def dimension_ordering(self):
        '''
        Returns a list that describes the dimensions of the property's data.
        If a dimension_ordering is assigned, it will continue to use that.

        If no dimension_ordering is set, then a default ordering will be generated
        based on the object properties and data shape.

        For example, if the data has 4 dimensions and is represented by a
        Grid_S (structured grid), and the Variable has a depth and time assigned,
        then the assumed ordering is ['time','depth','lon','lat']

        If the data has 3 dimensions, self.grid is a Grid_S, and self.time is None,
        then the ordering is ['depth','lon','lat']

        If the data has 3 dimensions, self.grid is a Grid_U, the ordering is
        ['time','depth','ele']
        '''
        if not hasattr(self, '_order'):
            # lazily create the backing attribute on first access
            self._order = None
        if self._order is not None:
            return self._order
        else:
            # build the full default ordering for this grid type ...
            if isinstance(self.grid, (Grid_S, Grid_R)):
                order = ['time', 'depth', 'lon', 'lat']
            else:
                order = ['time', 'depth', 'ele']
            # ... then trim it to match the actual number of data dimensions
            ndim = len(self.data.shape)
            diff = len(order) - ndim
            if diff == 0:
                return order
            elif diff == 1:
                if self.time is not None:
                    # time axis present -> the missing axis must be depth
                    del order[1]
                elif self.depth is not None:
                    # depth axis present -> the missing axis must be time
                    del order[0]
                else:
                    raise ValueError('Generated ordering too short to fit data. '
                                     'Time or depth must not be None')
            elif diff == 2:
                # purely spatial data
                order = order[2:]
            else:
                raise ValueError('Too many/too few dimensions ndim={0}'.format(ndim))
            return order

    @dimension_ordering.setter
    def dimension_ordering(self, order):
        self._order = order
    # @profile
    def at(self,
           points,
           time,
           units=None,
           extrapolate=False,
           _hash=None,
           _mem=True,
           _auto_align=True,
           unmask=False,
           **kwargs):
        """
        Find the value of the property at positions P at time T

        :param points: Coordinates to be queried (P)
        :type points: Nx2 array of double

        :param time: The time at which to query these points (T)
        :type time: datetime.datetime object

        :param units: units the values will be returned in (or converted to)
        :type units: string such as ('m/s', 'knots', etc)

        :param extrapolate: if True, extrapolation will be supported
        :type extrapolate: boolean (True or False)

        :param _hash: precomputed (points, time) hash, to avoid re-hashing
        :param _mem: enable memoization of the result
        :param _auto_align: reorganize the result to match the layout of
                            the input points
        :param unmask: if True, masked values are filled with fill_value

        :return: returns a Nx1 array of interpolated values
        :rtype: double
        """
        pts = _reorganize_spatial_data(points)
        if _hash is None:
            _hash = self._get_hash(pts, time)
        if _mem:
            # short-circuit: return a cached result for an identical query
            res = self._get_memoed(pts, time, self._result_memo, _hash=_hash)
            if res is not None:
                return res
        # dispatch on the outermost data dimension; each _*_interp method
        # recurses through the remaining dimensions
        order = self.dimension_ordering
        if order[0] == 'time':
            value = self._time_interp(pts, time, extrapolate, _mem=_mem, _hash=_hash, **kwargs)
        elif order[0] == 'depth':
            value = self._depth_interp(pts, time, extrapolate, _mem=_mem, _hash=_hash, **kwargs)
        else:
            value = self._xy_interp(pts, time, extrapolate, _mem=_mem, _hash=_hash, **kwargs)
        if _auto_align == True:
            value = _align_results_to_spatial_data(value.copy(), points)
        if isinstance(value, np.ma.MaskedArray):
            np.ma.set_fill_value(value, self.fill_value)
        if unmask:
            value = np.ma.filled(value)
        if _mem:
            self._memoize_result(pts, time, value, self._result_memo, _hash=_hash)
        return value

    interpolate = at  # common request
    def _xy_interp(self, points, time, extrapolate, slices=(), **kwargs):
        '''
        Uses the py(s/u)grid interpolation to determine the values at the points, and returns it

        :param points: Coordinates to be queried (3D)
        :param time: Time of the query
        :param extrapolate: Turns extrapolation on or off
        :param slices: describes how the data needs to be sliced to reach the appropriate dimension

        :type points: Nx3 array of double
        :type time: datetime.datetime object
        :type extrapolate: boolean
        :type slices: tuple of integers or slice objects

        NOTE(review): `time` is only used to build the memoization hash
        here; `extrapolate` and the extracted `units` value are unused.
        '''
        _hash = kwargs['_hash'] if '_hash' in kwargs else None
        units = kwargs['units'] if 'units' in kwargs else None  # currently unused
        # only lon/lat (first two columns) take part in horizontal interpolation
        value = self.grid.interpolate_var_to_points(points[:, 0:2],
                                                    self.data,
                                                    location=self.location,
                                                    _hash=self._get_hash(points[:, 0:2],
                                                                         time),
                                                    slices=slices, _memo=True)
        return value
    def _time_interp(self, points, time, extrapolate, slices=(), **kwargs):
        '''
        Uses the Time object to interpolate the result of the next level of interpolation, as specified
        by the dimension_ordering attribute.

        :param points: Coordinates to be queried (3D)
        :param time: Time of the query
        :param extrapolate: Turns extrapolation on or off
        :param slices: describes how the data needs to be sliced to reach the appropriate dimension

        :type points: Nx3 array of double
        :type time: datetime.datetime object
        :type extrapolate: boolean
        :type slices: tuple of integers or slice objects
        '''
        order = self.dimension_ordering
        idx = order.index('time')
        # pick the next interpolation stage after the time dimension
        if order[idx + 1] != 'depth':
            val_func = self._xy_interp
        else:
            val_func = self._depth_interp
        if time == self.time.min_time or (extrapolate and time < self.time.min_time):
            # min or before: clamp to the first time step
            return val_func(points, time, extrapolate, slices=(0,), **kwargs)
        elif time == self.time.max_time or (extrapolate and time > self.time.max_time):
            # max or after: clamp to the last time step
            return val_func(points, time, extrapolate, slices=(-1,), **kwargs)
        else:
            # linear blend between the two bracketing time steps
            ind = self.time.index_of(time)
            s1 = slices + (ind,)
            s0 = slices + (ind - 1,)
            v0 = val_func(points, time, extrapolate, slices=s0, **kwargs)
            v1 = val_func(points, time, extrapolate, slices=s1, **kwargs)
            alphas = self.time.interp_alpha(time, extrapolate)
            value = v0 + (v1 - v0) * alphas
            return value
    def _depth_interp(self, points, time, extrapolate, slices=(), **kwargs):
        '''
        Uses the Depth object to interpolate the result of the next level of interpolation, as specified
        by the dimension_ordering attribute.

        :param points: Coordinates to be queried (3D)
        :param time: Time of the query
        :param extrapolate: Turns extrapolation on or off
        :param slices: describes how the data needs to be sliced to reach the appropriate dimension

        :type points: Nx3 array of double
        :type time: datetime.datetime object
        :type extrapolate: boolean
        :type slices: tuple of integers or slice objects
        '''
        order = self.dimension_ordering
        idx = order.index('depth')
        # pick the next interpolation stage after the depth dimension
        if order[idx + 1] != 'time':
            val_func = self._xy_interp
        else:
            val_func = self._time_interp
        indices, alphas = self.depth.interpolation_alphas(points, time, self.data.shape[1:], _hash=kwargs.get('_hash', None), extrapolate=extrapolate)
        if indices is None and alphas is None:
            # all particles are on surface
            return val_func(points, time, extrapolate, slices=slices + (self.depth.surface_index,), **kwargs)
        else:
            min_idx = indices[indices != -1].min() - 1
            max_idx = indices.max()
            values = np.zeros(len(points), dtype=np.float64)
            # NOTE(review): the first layer queried is min_idx - 1, i.e. two
            # below the smallest valid index (min_idx already subtracts 1)
            # -- confirm this offset is intended.
            v0 = val_func(points, time, extrapolate, slices=slices + (min_idx - 1,), **kwargs)
            # sweep the layers from shallowest to deepest, blending each
            # adjacent pair; a point takes its value from the blend of the
            # pair its depth index selects
            for idx in range(min_idx + 1, max_idx + 1):
                v1 = val_func(points, time, extrapolate, slices=slices + (idx,), **kwargs)
                pos_idxs = np.where(indices == idx)[0]
                sub_vals = v0 + (v1 - v0) * alphas
                if len(pos_idxs) > 0:
                    values.put(pos_idxs, sub_vals.take(pos_idxs))
                v0 = v1
            # points at/below the bottom layer get the fill value
            underground = (indices == self.depth.bottom_index)
            values[underground] = self.fill_value
            return values
def transect(self, times, depths, points):
output_shape = (len(times), len(depths), len(points))
outarr = np.array(shape=output_shape)
for t in range(0,len(times)):
for d in range(0, len(depths)):
pts = np.array(shape=(len(points),3))
pts[:,0:2] = points
pts[:,2] = depths[d]
layer = d
    @classmethod
    def _gen_varname(cls,
                     filename=None,
                     dataset=None,
                     names_list=None,
                     std_names_list=None):
        """
        Function to find the default variable names if they are not provided.

        :param filename: Name of file that will be searched for variables
        :param dataset: Existing instance of a netCDF4.Dataset
        :type filename: string
        :type dataset: netCDF.Dataset

        :return: the first default name found among the dataset's
                 variables (or, failing that, the name of the first
                 variable whose standard_name/long_name matches a
                 standard name)
        :raises ValueError: when no default or standard name is found
        """
        df = None
        if dataset is not None:
            df = dataset
        else:
            df = get_dataset(filename)
        if names_list is None:
            names_list = cls.default_names
        if std_names_list is None:
            std_names_list = cls.cf_names
        # first pass: look for a variable keyed directly by a default name
        for n in names_list:
            if n in df.variables.keys():
                return n
        # second pass: match on CF standard_name / long_name attributes
        for n in std_names_list:
            for var in df.variables.values():
                if (hasattr(var, 'standard_name') and var.standard_name == n or
                        hasattr(var, 'long_name') and var.long_name == n):
                    return var.name
        raise ValueError("Default names not found.")
class VectorVariable(object):
default_names = {}
cf_names = {}
comp_order=[]
_def_count = 0
''''
These are the classes which are used when internal components are created
by default, such as automatically from a file or other python data structure.
Subclasses of this type may override this to use different classes for it's
components
'''
_default_component_types = {'time': Time,
'grid': Grid,
'depth': Depth,
'variable': Variable}
def __init__(self,
name=None,
units=None,
time=None,
variables=None,
grid=None,
depth=None,
grid_file=None,
data_file=None,
dataset=None,
varnames=None,
**kwargs):
super(VectorVariable, self).__init__()
self.name = self._units = self._time = self._variables = None
self.name = name
if all([isinstance(v, Variable) for v in variables]):
if time is not None and not isinstance(time, Time):
time = Time(time)
units = variables[0].units if units is None else units
time = variables[0].time if time is None else time
if units is None:
units = variables[0].units
self._units = units
if variables is None or len(variables) < 2:
raise ValueError('Variables must be an array-like of 2 or more Variable objects')
self.variables = variables
self._time = time
unused_args = kwargs.keys() if kwargs is not None else None
if len(unused_args) > 0:
kwargs = {}
if isinstance(self.variables[0], Variable):
self.grid = self.variables[0].grid if grid is None else grid
self.depth = self.variables[0].depth if depth is None else depth
self.grid_file = self.variables[0].grid_file if grid_file is None else grid_file
self.data_file = self.variables[0].data_file if data_file is None else data_file
self._result_memo = collections.OrderedDict()
for i, comp in enumerate(self.__class__.comp_order):
setattr(self, comp, self.variables[i])
    @classmethod
    def from_netCDF(cls,
                    filename=None,
                    varnames=None,
                    grid_topology=None,
                    name=None,
                    units=None,
                    time=None,
                    time_origin=None,
                    grid=None,
                    depth=None,
                    data_file=None,
                    grid_file=None,
                    dataset=None,
                    load_all=False,
                    variables=None,
                    **kwargs
                    ):
        '''
        Allows one-function creation of a VectorVariable from a file.

        :param filename: Default data source. Parameters below take precedence
        :param varnames: Names of the variables in the data source file
        :param grid_topology: Description of the relationship between grid attributes and variable names.
        :param name: Name of property
        :param units: Units
        :param time: Time axis of the data
        :param data: Underlying data source
        :param grid: Grid that the data corresponds with
        :param dataset: Instance of open Dataset
        :param data_file: Name of data source file
        :param grid_file: Name of grid source file
        :type filename: string
        :type varnames: [] of string
        :type grid_topology: {string : string, ...}
        :type name: string
        :type units: string
        :type time: [] of datetime.datetime, netCDF4 Variable, or Time object
        :type data: netCDF4.Variable or numpy.array
        :type grid: pysgrid or pyugrid
        :type dataset: netCDF4.Dataset
        :type data_file: string
        :type grid_file: string
        '''
        # component classes are overridable by subclasses
        Grid = cls._default_component_types['grid']
        Time = cls._default_component_types['time']
        Variable = cls._default_component_types['variable']
        Depth = cls._default_component_types['depth']
        if filename is not None:
            data_file = filename
            grid_file = filename
        ds = None
        dg = None
        # resolve the data (ds) and grid (dg) datasets, sharing one handle
        # when they are the same file
        if dataset is None:
            if grid_file == data_file:
                ds = dg = get_dataset(grid_file)
            else:
                ds = get_dataset(data_file)
                dg = get_dataset(grid_file)
        else:
            if grid_file is not None:
                dg = get_dataset(grid_file)
            else:
                dg = dataset
            ds = dataset
        if grid is None:
            grid = Grid.from_netCDF(grid_file,
                                    dataset=dg,
                                    grid_topology=grid_topology)
        if varnames is None:
            # fall back to the class's default/CF component names
            varnames = cls._gen_varnames(data_file,
                                         dataset=ds)
            if all([v is None for v in varnames]):
                raise ValueError('No compatible variable names found!')
        if name is None:
            # auto-generate a unique-ish name per class
            name = cls.__name__ + str(cls._def_count)
            cls._def_count += 1
        # the first component is used to derive the time and depth axes
        data = ds[varnames[0]]
        if time is None:
            time = Time.from_netCDF(filename=data_file,
                                    dataset=ds,
                                    datavar=data)
            if time_origin is not None:
                # rebuild the time axis shifted to the requested origin
                time = Time(data=time.data, filename=data_file, varname=time.varname, origin=time_origin)
        if depth is None:
            # a 4D structured (or 3D unstructured) variable implies a depth axis
            if (isinstance(grid, (Grid_S, Grid_R)) and len(data.shape) == 4 or
                    isinstance(grid, Grid_U) and len(data.shape) == 3):
                depth = Depth.from_netCDF(grid_file,
                                          dataset=dg,
                                          )
        # if depth is None:
        #     if (isinstance(grid, Grid_S) and len(data.shape) == 4 or
        #             (len(data.shape) == 3 and time is None) or
        #             (isinstance(grid, Grid_U) and len(data.shape) == 3 or
        #             (len(data.shape) == 2 and time is None))):
        #         from gnome.environment.environment_objects import S_Depth
        #         depth = S_Depth.from_netCDF(grid=grid,
        #                                     depth=1,
        #                                     data_file=data_file,
        #                                     grid_file=grid_file,
        #                                     **kwargs)
        if variables is None:
            # build one component Variable per resolved name, sharing the
            # grid/time/depth objects constructed above
            variables = []
            for vn in varnames:
                if vn is not None:
                    variables.append(Variable.from_netCDF(filename=filename,
                                                          varname=vn,
                                                          grid_topology=grid_topology,
                                                          units=units,
                                                          time=time,
                                                          grid=grid,
                                                          depth=depth,
                                                          data_file=data_file,
                                                          grid_file=grid_file,
                                                          dataset=ds,
                                                          load_all=load_all,
                                                          location=None,
                                                          **kwargs))
        if units is None:
            # collapse per-component units to a single value when they agree
            units = [v.units for v in variables]
            if all(u == units[0] for u in units):
                units = units[0]
        return cls(name=name,
                   filename=filename,
                   varnames=varnames,
                   grid_topology=grid_topology,
                   units=units,
                   time=time,
                   grid=grid,
                   depth=depth,
                   variables=variables,
                   data_file=data_file,
                   grid_file=grid_file,
                   dataset=ds,
                   load_all=load_all,
                   location=None,
                   **kwargs)
    @classmethod
    def _gen_varnames(cls,
                      filename=None,
                      dataset=None,
                      names_dict=None,
                      std_names_dict=None):
        """
        Function to find the default variable names if they are not provided.

        :param filename: Name of file that will be searched for variables
        :param dataset: Existing instance of a netCDF4.Dataset
        :type filename: string
        :type dataset: netCDF.Dataset
        :return: dict of component to name mapping (eg {'u': 'water_u', 'v': 'water_v', etc})
        """
        df = None
        if dataset is not None:
            df = dataset
        else:
            df = get_dataset(filename)
        if names_dict is None:
            names_dict = cls.default_names
        if std_names_dict is None:
            std_names_dict = cls.cf_names
        rd = {}
        # first pass: direct variable-name matches per component
        for k in cls.comp_order:
            v = names_dict[k] if k in names_dict else []
            for n in v:
                if n in df.variables.keys():
                    rd[k] = n
                    # NOTE(review): `continue` is a no-op at the end of the
                    # loop body, so when several candidate names are present
                    # the LAST one in the list wins; presumably `break`
                    # (first match) was intended -- confirm.
                    continue
            if k not in rd.keys():
                rd[k] = None
        # second pass: match on CF standard_name / long_name attributes for
        # components still unresolved
        for k in cls.comp_order:
            v = std_names_dict[k] if k in std_names_dict else []
            if rd[k] is None:
                for n in v:
                    for var in df.variables.values():
                        if (hasattr(var, 'standard_name') and var.standard_name == n or
                                hasattr(var, 'long_name') and var.long_name == n):
                            rd[k] = var.name
                            # NOTE(review): this break only exits the inner
                            # variables loop, so later standard names can
                            # still overwrite rd[k] -- confirm intent.
                            break
        return collections.namedtuple('varnames', cls.comp_order)(**rd)
def __str__(self):
return self.__repr__()
def __repr__(self):
return ('{0.__class__.__module__}.{0.__class__.__name__}('
'name="{0.name}", '
'time="{0.time}", '
'units="{0.units}", '
'variables="{0.variables}", '
'grid="{0.grid}", '
')').format(self)
@property
def location(self):
return [v.location for v in self.variables]
locations = location
@property
def is_data_on_nodes(self):
return self.grid.infer_location(self.variables[0].data) == 'node'
    @property
    def time(self):
        # Time object shared by all of this object's component variables.
        return self._time

    @time.setter
    def time(self, t):
        # The concrete Time class is configurable per subclass.
        Time = self.__class__._default_component_types['time']
        # Push the new time down to every wrapped variable first, so an
        # incompatible time is rejected with a combined error message.
        if self.variables is not None:
            for v in self.variables:
                try:
                    v.time = t
                except ValueError as e:
                    raise ValueError('''Time was not compatible with variables.
Set variables attribute to None to allow changing other attributes
Original error: {0}'''.format(str(e)))
        if isinstance(t, Time):
            self._time = t
        elif isinstance(t, collections.Iterable) or isinstance(t, nc4.Variable):
            # NOTE(review): collections.Iterable is deprecated since Python
            # 3.3 in favor of collections.abc.Iterable -- confirm the
            # supported Python versions before modernizing.
            self._time = Time(t)
        else:
            raise ValueError("Time must be set with an iterable container or netCDF variable")
@property
def units(self):
'''
Units of underlying data
:rtype: string
'''
if hasattr(self._units, '__iter__'):
if len(set(self._units)) > 1:
return self._units
else:
return self._units[0]
else:
return self._units
@units.setter
def units(self, unit):
self._units = unit
if self.variables is not None:
for v in self.variables:
v.units = unit
@property
def varnames(self):
'''
Names of underlying variables
:rtype: [] of strings
'''
return [v.varname if hasattr(v, 'varname') else v.name for v in self.variables]
@property
def data_shape(self):
if self.variables is not None:
return self.variables[0].data.shape
else:
return None
def _get_hash(self, points, time):
"""
Returns a SHA1 hash of the array of points passed in
"""
return (hashlib.sha1(points.tobytes()).hexdigest(),
hashlib.sha1(str(time).encode('utf-8')).hexdigest())
def _memoize_result(self, points, time, result, D, _copy=True, _hash=None):
if _copy:
result = result.copy()
result.setflags(write=False)
if _hash is None:
_hash = self._get_hash(points, time)
if D is not None and len(D) > 8:
D.popitem(last=False)
D[_hash] = result
def _get_memoed(self, points, time, D, _copy=True, _hash=None):
if _hash is None:
_hash = self._get_hash(points, time)
if (D is not None and _hash in D):
return D[_hash].copy() if _copy else D[_hash]
else:
return None
def at(self, points, time, units=None, extrapolate=False, memoize=True, _hash=None, _auto_align=True, **kwargs):
pts = _reorganize_spatial_data(points)
mem = memoize
if hash is None:
_hash = self._get_hash(points, time)
if mem:
res = self._get_memoed(points, time, self._result_memo, _hash=_hash)
if res is not None:
return res
value = np.column_stack([var.at(points=points,
time=time,
units=units,
extrapolate=extrapolate,
memoize=memoize,
_hash=_hash,
**kwargs) for var in self.variables])
if _auto_align == True:
value = _align_results_to_spatial_data(value.copy(), points)
if mem:
self._memoize_result(points, time, value, self._result_memo, _hash=_hash)
return value
    @classmethod
    def _get_shared_vars(cls, *sh_args):
        """
        Decorator factory: the returned decorator pre-populates shared
        keyword arguments (by default: dataset, data_file, grid_file, grid)
        before the wrapped constructor-style function runs.

        :param sh_args: names of the keywords to manage; defaults to
                        ('dataset', 'data_file', 'grid_file', 'grid')
        """
        default_shared = ['dataset', 'data_file', 'grid_file', 'grid']
        if len(sh_args) != 0:
            shared = sh_args
        else:
            shared = default_shared

        def getvars(func):
            @wraps(func)
            def wrapper(*args, **kws):
                def _mod(n):
                    # True when keyword `n` is managed here and the caller
                    # did not supply a (non-None) value for it.
                    k = kws
                    s = shared
                    return (n in s) and ((n not in k) or (n in k and k[n] is None))

                # A single `filename` doubles as both data file and grid file.
                if 'filename' in kws and kws['filename'] is not None:
                    kws['data_file'] = kws['grid_file'] = kws['filename']
                ds = dg = None
                if _mod('dataset'):
                    if 'grid_file' in kws and 'data_file' in kws:
                        if kws['grid_file'] == kws['data_file']:
                            # Same file: share a single open dataset.
                            ds = dg = get_dataset(kws['grid_file'])
                        else:
                            ds = get_dataset(kws['data_file'])
                            dg = get_dataset(kws['grid_file'])
                    kws['dataset'] = ds
                else:
                    if 'grid_file' in kws and kws['grid_file'] is not None:
                        dg = get_dataset(kws['grid_file'])
                    else:
                        dg = kws['dataset']
                    ds = kws['dataset']
                if _mod('grid'):
                    gt = kws.get('grid_topology', None)
                    kws['grid'] = Grid.from_netCDF(kws['filename'], dataset=dg, grid_topology=gt)
                if kws.get('varnames', None) is None:
                    # NOTE(review): this result is never stored back into kws --
                    # presumably `kws['varnames'] = varnames` was intended;
                    # confirm against the wrapped callers before changing.
                    varnames = cls._gen_varnames(kws['data_file'],
                                                 dataset=ds)
                # if _mod('time'):
                #     time = Time.from_netCDF(filename=kws['data_file'],
                #                             dataset=ds,
                #                             varname=data)
                #     kws['time'] = time
                return func(*args, **kws)
            return wrapper
        return getvars
    def save(self, filepath, format='netcdf4'):
        """
        Save the variable object to a netcdf file.

        :param filepath: path to the file you want to save to, or a writable
                         netCDF4 Dataset (an existing one).
                         If a path, an existing file will be clobbered.
        :param format: output format; one of 'netcdf3' or 'netcdf4'.
        :raises ValueError: if `format` is not a supported option.

        Follows the convention established by the netcdf UGRID working group:
        http://ugrid-conventions.github.io/ugrid-conventions
        """
        # `format` shadows the builtin of the same name; kept for API
        # compatibility with existing callers.
        format_options = ('netcdf3', 'netcdf4')
        if format not in format_options:
            raise ValueError("format: {} not supported. Options are: {}".format(format, format_options))
| NOAA-ORR-ERD/gridded | gridded/variable.py | Python | unlicense | 44,005 | [
"NetCDF"
] | 08f531710d3d6be3db6c7f229ab8f6f118f80185a90b99337d68c0b31d861920 |
# creates: nice.png
import numpy as np
from ase import Atoms
from ase.io import write

# 18 x 8 x 8 repeat of a single-atom Ag cell -> 1152 atoms in total.
atoms = Atoms('Ag', cell=(2.7, 2.7, 2.7), pbc=True) * (18, 8, 8)

# view with ag
#view(atoms)

rotation = '-70x, -20y, -2z'  # found using ag menu 'view -> rotate'

# Make colors: hue varies with each atom's x coordinate.
from ase.utils import hsv
colors = hsv(atoms.positions[:, 0])

# Textures: four consecutive bands of 288 atoms, one texture per band.
tex = ['jmol',] * 288 + ['glass',] * 288 + ['ase3',] * 288 + ['vmd',] * 288

# keywords
kwargs = {  # Keywords that exist for eps, png, and pov
    'rotation': rotation,
    'show_unit_cell': 2,
    'colors': colors,
    'radii': None,
    }

extra_kwargs = {  # For povray files only
    'display': False,  # Display while rendering
    'pause': False,  # Pause when done rendering (only if display)
    'transparent': False,  # Transparent background
    'canvas_width': None,  # Width of canvas in pixels
    'canvas_height': None,  # Height of canvas in pixels
    'camera_dist': 50.,  # Distance from camera to front atom
    'image_plane': None,  # Distance from front atom to image plane
                          # (focal depth for perspective)
    'camera_type': 'perspective',  # perspective, ultra_wide_angle
    'point_lights': [],  # [[loc1, color1], [loc2, color2],...]
    'area_light': [(2., 3., 40.),  # location
                   'White',  # color
                   .7, .7, 3, 3],  # width, height, Nlamps_x, Nlamps_y
    'background': 'White',  # color
    'textures': tex,  # Length of atoms list of texture names
    'celllinewidth': 0.05,  # Radius of the cylinders representing the cell
    }

# Make flat png file
#write('flat.png', atoms, **kwargs)

# Make the color of the glass beads semi-transparent:
# the 4th column is the transparency channel; rows 288-575 are the
# atoms that were given the 'glass' texture above.
colors2 = np.zeros((1152, 4))
colors2[:, :3] = colors
colors2[288: 576, 3] = 0.95
kwargs['colors'] = colors2
kwargs.update(extra_kwargs)

# Make the raytraced image (requires the external povray program).
write('nice.pov', atoms, run_povray=True, **kwargs)
| conwayje/ase-python | doc/tutorials/saving_graphics.py | Python | gpl-2.0 | 1,856 | [
"ASE",
"Jmol",
"VMD"
] | 9e5747cd1943bb0445e8357a28f93e26f204724ab35e1c7a05d69d20b6d2a9d1 |
from datetime import timedelta
import functools
import itertools
import warnings
import numpy as np
import pandas as pd
from . import common
from . import indexing
from . import ops
from . import utils
from .pycompat import basestring, OrderedDict, zip, dask_array_type
from .indexing import (PandasIndexAdapter, orthogonally_indexable)
import xray # only for Dataset and DataArray
def as_variable(obj, key=None, strict=True):
    """Convert an object into an Variable

    - If the object is already an `Variable`, return it.
    - If the object is a `DataArray`, return it if `strict=False` or return
      its variable if `strict=True`.
    - Otherwise, if the object has 'dims' and 'data' attributes, convert
      it into a new `Variable`.
    - If all else fails, attempt to convert the object into an `Variable` by
      unpacking it into the arguments for `Variable.__init__`.
    """
    # TODO: consider extending this method to automatically handle Iris and
    # pandas objects.
    if strict and hasattr(obj, 'variable'):
        # extract the primary Variable from DataArrays
        obj = obj.variable

    if isinstance(obj, (Variable, xray.DataArray)):
        return obj

    # Duck-typed array-like: has named dimensions plus data/values.
    if hasattr(obj, 'dims') and (hasattr(obj, 'data') or
                                 hasattr(obj, 'values')):
        return Variable(obj.dims, getattr(obj, 'data', obj.values),
                        getattr(obj, 'attrs', None),
                        getattr(obj, 'encoding', None))

    # A tuple is treated as positional arguments for Variable().
    if isinstance(obj, tuple):
        try:
            return Variable(*obj)
        except TypeError:
            raise TypeError('cannot convert argument into an Variable')

    if utils.is_scalar(obj):
        return Variable([], obj)

    # Named 1D data (e.g. a pandas Series) uses its own name as the dim.
    if getattr(obj, 'name', None) is not None:
        return Variable(obj.name, obj)

    if key is not None:
        return Variable(key, obj)

    raise TypeError('cannot infer Variable dimensions')
def _maybe_wrap_data(data):
    """
    Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure
    they can be indexed properly.

    NumpyArrayAdapter, PandasIndexAdapter and LazilyIndexedArray should
    all pass through unmodified.
    """
    # Only pandas indexes need wrapping; everything else passes through.
    needs_adapter = isinstance(data, pd.Index)
    return PandasIndexAdapter(data) if needs_adapter else data
def _as_compatible_data(data, fastpath=False):
    """Prepare and wrap data to put in a Variable.

    - If data does not have the necessary attributes, convert it to ndarray.
    - If data has dtype=datetime64, ensure that it has ns precision. If it's a
      pandas.Timestamp, convert it to datetime64.
    - If data is already a pandas or xray object (other than an Index), just
      use the values.

    Finally, wrap it up with an adapter if necessary.
    """
    if fastpath and getattr(data, 'ndim', 0) > 0:
        # can't use fastpath (yet) for scalars
        return _maybe_wrap_data(data)

    # add a custom fast-path for dask.array to avoid expensive checks for the
    # dtype attribute
    if isinstance(data, dask_array_type):
        return data

    if isinstance(data, pd.Index):
        if isinstance(data, pd.MultiIndex):
            raise NotImplementedError(
                'no support yet for using a pandas.MultiIndex in an '
                'xray.Coordinate')
        return _maybe_wrap_data(data)

    if isinstance(data, pd.Timestamp):
        # TODO: convert, handle datetime objects, too
        data = np.datetime64(data.value, 'ns')
    if isinstance(data, timedelta):
        data = np.timedelta64(getattr(data, 'value', data), 'ns')

    # Coerce anything that isn't already ndarray-like (or that is a numpy
    # scalar type) into a real ndarray.
    if (not hasattr(data, 'dtype') or not hasattr(data, 'shape') or
            isinstance(data, (np.string_, np.unicode_,
                              np.datetime64, np.timedelta64))):
        # data must be ndarray-like
        data = np.asarray(data)

    # we don't want nested self-described arrays
    data = getattr(data, 'values', data)

    if isinstance(data, np.ma.MaskedArray):
        mask = np.ma.getmaskarray(data)
        if mask.any():
            # Promote the dtype so masked entries can hold the fill value
            # (e.g. int -> float so NaN fits).
            dtype, fill_value = common._maybe_promote(data.dtype)
            data = np.asarray(data, dtype=dtype)
            data[mask] = fill_value
        else:
            data = np.asarray(data)

    if isinstance(data, np.ndarray):
        if data.dtype.kind == 'O':
            # attempt to coerce object arrays to better dtypes
            data = common._possibly_convert_objects(data)
        elif data.dtype.kind == 'M':
            # normalize datetimes to nanosecond precision
            data = np.asarray(data, 'datetime64[ns]')
        elif data.dtype.kind == 'm':
            # normalize timedeltas to nanosecond precision
            data = np.asarray(data, 'timedelta64[ns]')

    return _maybe_wrap_data(data)
def _as_array_or_item(data):
    """Return the given values as a numpy array, or as an individual item if
    it's a 0-dimensional object array or datetime64.

    Importantly, this function does not copy data if it is already an ndarray -
    otherwise, it will not be possible to update Variable values in place.
    """
    data = np.asarray(data)
    if data.ndim != 0:
        return data
    kind = data.dtype.kind
    if kind == 'O':
        # unpack 0d object arrays to be consistent with numpy
        return data.item()
    if kind == 'M':
        # convert to a np.datetime64 object, because 0-dimensional ndarrays
        # with dtype=datetime64 are broken :(
        return np.datetime64(data, 'ns')
    if kind == 'm':
        return np.timedelta64(data, 'ns')
    return data
class Variable(common.AbstractArray, utils.NdimSizeLenMixin):
    """A netcdf-like variable consisting of dimensions, data and attributes
    which describe a single Array. A single Variable object is not fully
    described outside the context of its parent Dataset (if you want such a
    fully described object, use a DataArray instead).

    The main functional difference between Variables and numpy arrays is that
    numerical operations on Variables implement array broadcasting by dimension
    name. For example, adding an Variable with dimensions `('time',)` to
    another Variable with dimensions `('space',)` results in a new Variable
    with dimensions `('time', 'space')`. Furthermore, numpy reduce operations
    like ``mean`` or ``sum`` are overwritten to take a "dimension" argument
    instead of an "axis".

    Variables are light-weight objects used as the building block for datasets.
    They are more primitive objects, so operations with them provide marginally
    higher performance than using DataArrays. However, manipulating data in the
    form of a Dataset or DataArray should almost always be preferred, because
    they can use more complete metadata in context of coordinate labels.
    """
    def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
        """
        Parameters
        ----------
        dims : str or sequence of str
            Name(s) of the the data dimension(s). Must be either a string (only
            for 1D data) or a sequence of strings with length equal to the
            number of dimensions.
        data : array_like
            Data array which supports numpy-like data access.
        attrs : dict_like or None, optional
            Attributes to assign to the new variable. If None (default), an
            empty attribute dictionary is initialized.
        encoding : dict_like or None, optional
            Dictionary specifying how to encode this array's data into a
            serialized format like netCDF4. Currently used keys (for netCDF)
            include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'.
            Well behaviored code to serialize a Variable should ignore
            unrecognized encoding items.
        """
        self._data = _as_compatible_data(data, fastpath=fastpath)
        self._dims = self._parse_dimensions(dims)
        self._attrs = None
        self._encoding = None
        if attrs is not None:
            self.attrs = attrs
        if encoding is not None:
            self.encoding = encoding

    @property
    def dtype(self):
        """numpy dtype of the underlying data."""
        return self._data.dtype

    @property
    def shape(self):
        """Tuple of dimension sizes."""
        return self._data.shape

    @property
    def nbytes(self):
        """Total bytes consumed by the data (size * itemsize)."""
        return self.size * self.dtype.itemsize

    @property
    def _in_memory(self):
        # True when the data is already a concrete in-memory array.
        return isinstance(self._data, (np.ndarray, PandasIndexAdapter))

    @property
    def data(self):
        """The variable's data: a dask array if lazily loaded, otherwise a
        numpy array (loading it if necessary)."""
        if isinstance(self._data, dask_array_type):
            return self._data
        else:
            return self.values

    @data.setter
    def data(self, data):
        data = _as_compatible_data(data)
        if data.shape != self.shape:
            raise ValueError(
                "replacement data must match the Variable's shape")
        self._data = data

    def _data_cached(self):
        # Materialize the data as an in-memory numpy array (once) and keep it.
        if not isinstance(self._data, np.ndarray):
            self._data = np.asarray(self._data)
        return self._data

    @property
    def _indexable_data(self):
        # Wrapped view of the data supporting orthogonal indexing.
        return orthogonally_indexable(self._data)

    def load(self):
        """Manually trigger loading of this variable's data from disk or a
        remote source into memory and return this variable.

        Normally, it should not be necessary to call this method in user code,
        because all xray functions should either work on deferred data or
        load data automatically.
        """
        self._data_cached()
        return self

    def load_data(self):  # pragma: no cover
        # Deprecated alias for load().
        warnings.warn('the Variable method `load_data` has been deprecated; '
                      'use `load` instead',
                      FutureWarning, stacklevel=2)
        return self.load()

    def __getstate__(self):
        """Always cache data as an in-memory array before pickling"""
        self._data_cached()
        # self.__dict__ is the default pickle object, we don't need to
        # implement our own __setstate__ method to make pickle work
        return self.__dict__

    @property
    def values(self):
        """The variable's data as a numpy.ndarray"""
        return _as_array_or_item(self._data_cached())

    @values.setter
    def values(self, values):
        self.data = values

    def to_variable(self):
        """Return this variable as a base xray.Variable"""
        return Variable(self.dims, self._data, self._attrs,
                        encoding=self._encoding, fastpath=True)

    def to_coord(self):
        """Return this variable as an xray.Coordinate"""
        return Coordinate(self.dims, self._data, self._attrs,
                          encoding=self._encoding, fastpath=True)

    def to_index(self):
        """Convert this variable to a pandas.Index"""
        return self.to_coord().to_index()

    @property
    def dims(self):
        """Tuple of dimension names with which this variable is associated.
        """
        return self._dims

    def _parse_dimensions(self, dims):
        # Normalize dims to a tuple and validate it against the data's ndim.
        if isinstance(dims, basestring):
            dims = (dims,)
        dims = tuple(dims)
        if len(dims) != self.ndim:
            raise ValueError('dimensions %s must have the same length as the '
                             'number of data dimensions, ndim=%s'
                             % (dims, self.ndim))
        return dims

    @dims.setter
    def dims(self, value):
        self._dims = self._parse_dimensions(value)

    def _item_key_to_tuple(self, key):
        # Allow dict-style keys ({dim: indexer}); missing dims get slice(None).
        if utils.is_dict_like(key):
            return tuple(key.get(dim, slice(None)) for dim in self.dims)
        else:
            return key

    def __getitem__(self, key):
        """Return a new Array object whose contents are consistent with
        getting the provided key from the underlying data.

        NB. __getitem__ and __setitem__ implement "orthogonal indexing" like
        netCDF4-python, where the key can only include integers, slices
        (including `Ellipsis`) and 1d arrays, each of which are applied
        orthogonally along their respective dimensions.

        The difference does not matter in most cases unless you are using
        numpy's "fancy indexing," which can otherwise result in data arrays
        whose shapes is inconsistent (or just uninterpretable with) with the
        variable's dimensions.

        If you really want to do indexing like `x[x > 0]`, manipulate the numpy
        array `x.values` directly.
        """
        key = self._item_key_to_tuple(key)
        key = indexing.expanded_indexer(key, self.ndim)
        # Integer indexers drop their dimension from the result.
        dims = tuple(dim for k, dim in zip(key, self.dims)
                     if not isinstance(k, (int, np.integer)))
        values = self._indexable_data[key]
        # orthogonal indexing should ensure the dimensionality is consistent
        if hasattr(values, 'ndim'):
            assert values.ndim == len(dims), (values.ndim, len(dims))
        else:
            assert len(dims) == 0, len(dims)
        return type(self)(dims, values, self._attrs, self._encoding,
                          fastpath=True)

    def __setitem__(self, key, value):
        """__setitem__ is overloaded to access the underlying numpy values with
        orthogonal indexing.

        See __getitem__ for more details.
        """
        key = self._item_key_to_tuple(key)
        if isinstance(self._data, dask_array_type):
            raise TypeError("this variable's data is stored in a dask array, "
                            'which does not support item assignment. To '
                            'assign to this variable, you must first load it '
                            'into memory explicitly using the .load_data() '
                            'method or accessing its .values attribute.')
        data = orthogonally_indexable(self._data_cached())
        data[key] = value

    @property
    def attrs(self):
        """Dictionary of local attributes on this variable.
        """
        if self._attrs is None:
            self._attrs = OrderedDict()
        return self._attrs

    @attrs.setter
    def attrs(self, value):
        self._attrs = OrderedDict(value)

    @property
    def encoding(self):
        """Dictionary of encodings on this variable.
        """
        if self._encoding is None:
            self._encoding = {}
        return self._encoding

    @encoding.setter
    def encoding(self, value):
        try:
            self._encoding = dict(value)
        except (TypeError, ValueError):
            # Bug fix: dict() raises TypeError (not ValueError) for
            # non-iterable input, so the friendly message below was never
            # produced in the most common failure mode.
            raise ValueError('encoding must be castable to a dictionary')

    def copy(self, deep=True):
        """Returns a copy of this object.

        If `deep=True`, the data array is loaded into memory and copied onto
        the new object. Dimensions, attributes and encodings are always copied.
        """
        data = self.values.copy() if deep else self._data
        # note:
        # dims is already an immutable tuple
        # attributes and encoding will be copied when the new Array is created
        return type(self)(self.dims, data, self._attrs, self._encoding,
                          fastpath=True)

    def __copy__(self):
        return self.copy(deep=False)

    def __deepcopy__(self, memo=None):
        # memo does nothing but is required for compatability with
        # copy.deepcopy
        return self.copy(deep=True)

    # mutable objects should not be hashable
    __hash__ = None

    @property
    def chunks(self):
        """Block dimensions for this array's data or None if it's not a dask
        array.
        """
        return getattr(self._data, 'chunks', None)

    _array_counter = itertools.count()

    def chunk(self, chunks=None, name=None, lock=False):
        """Coerce this array's data into a dask arrays with the given chunks.

        If this variable is a non-dask array, it will be converted to dask
        array. If it's a dask array, it will be rechunked to the given chunk
        sizes.

        If chunks are not provided for one or more dimensions, chunk sizes
        along that dimension will not be updated; non-dask arrays will be
        converted into dask arrays with a single block.

        Parameters
        ----------
        chunks : int, tuple or dict, optional
            Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
            ``{'x': 5, 'y': 5}``.
        name : str, optional
            Used to generate the name for this array in the internal dask
            graph. Does not need not be unique.
        lock : optional
            Passed on to :py:func:`dask.array.from_array`, if the array is not
            already as dask array.

        Returns
        -------
        chunked : xray.Variable
        """
        import dask.array as da

        if utils.is_dict_like(chunks):
            # translate dimension names to axis numbers
            chunks = dict((self.get_axis_num(dim), chunk)
                          for dim, chunk in chunks.items())

        if chunks is None:
            chunks = self.chunks or self.shape

        data = self._data
        if isinstance(data, da.Array):
            data = data.rechunk(chunks)
        else:
            if utils.is_dict_like(chunks):
                # fill in unspecified axes with their full size
                chunks = tuple(chunks.get(n, s)
                               for n, s in enumerate(self.shape))
            data = da.from_array(data, chunks, name=name, lock=lock)

        return type(self)(self.dims, data, self._attrs, self._encoding,
                          fastpath=True)

    def isel(self, **indexers):
        """Return a new array indexed along the specified dimension(s).

        Parameters
        ----------
        **indexers : {dim: indexer, ...}
            Keyword arguments with names matching dimensions and values given
            by integers, slice objects or arrays.

        Returns
        -------
        obj : Array object
            A new Array with the selected data and dimensions. In general,
            the new variable's data will be a view of this variable's data,
            unless numpy fancy indexing was triggered by using an array
            indexer, in which case the data will be a copy.
        """
        invalid = [k for k in indexers if k not in self.dims]
        if invalid:
            raise ValueError("dimensions %r do not exist" % invalid)
        key = [slice(None)] * self.ndim
        for i, dim in enumerate(self.dims):
            if dim in indexers:
                key[i] = indexers[dim]
        return self[tuple(key)]

    def transpose(self, *dims):
        """Return a new Variable object with transposed dimensions.

        Parameters
        ----------
        *dims : str, optional
            By default, reverse the dimensions. Otherwise, reorder the
            dimensions to this order.

        Returns
        -------
        transposed : Variable
            The returned object has transposed data and dimensions with the
            same attributes as the original.

        Notes
        -----
        Although this operation returns a view of this variable's data, it is
        not lazy -- the data will be fully loaded.

        See Also
        --------
        numpy.transpose
        """
        if len(dims) == 0:
            dims = self.dims[::-1]
        axes = self.get_axis_num(dims)
        data = ops.transpose(self.data, axes)
        return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)

    def squeeze(self, dim=None):
        """Return a new Variable object with squeezed data.

        Parameters
        ----------
        dim : None or str or tuple of str, optional
            Selects a subset of the length one dimensions. If a dimension is
            selected with length greater than one, an error is raised. If
            None, all length one dimensions are squeezed.

        Returns
        -------
        squeezed : Variable
            This array, but with with all or a subset of the dimensions of
            length 1 removed.

        Notes
        -----
        Although this operation returns a view of this variable's data, it is
        not lazy -- the data will be fully loaded.

        See Also
        --------
        numpy.squeeze
        """
        dims = dict(zip(self.dims, self.shape))
        return common.squeeze(self, dims, dim)

    def expand_dims(self, dims, shape=None):
        """Return a new variable with expanded dimensions.

        When possible, this operation does not copy this variable's data.

        Parameters
        ----------
        dims : str or sequence of str or dict
            Dimensions to include on the new variable. If a dict, values are
            used to provide the sizes of new dimensions; otherwise, new
            dimensions are inserted with length 1.

        Returns
        -------
        Variable
        """
        if isinstance(dims, basestring):
            dims = [dims]

        if shape is None and utils.is_dict_like(dims):
            shape = dims.values()

        missing_dims = set(self.dims) - set(dims)
        if missing_dims:
            raise ValueError('new dimensions must be a superset of existing '
                             'dimensions')

        # new dimensions are prepended, then the result is reordered
        self_dims = set(self.dims)
        expanded_dims = tuple(
            d for d in dims if d not in self_dims) + self.dims

        if shape is not None:
            dims_map = dict(zip(dims, shape))
            tmp_shape = [dims_map[d] for d in expanded_dims]
            expanded_data = ops.broadcast_to(self.data, tmp_shape)
        else:
            # insert length-1 axes for the new leading dimensions
            expanded_data = self.data[
                (None,) * (len(expanded_dims) - self.ndim)]

        expanded_var = Variable(expanded_dims, expanded_data, self._attrs,
                                self._encoding, fastpath=True)
        return expanded_var.transpose(*dims)

    def fillna(self, value):
        """Fill missing values in this variable with `value`."""
        return self._fillna(value)

    def where(self, cond):
        """Mask this variable where `cond` is False."""
        return self._where(cond)

    def reduce(self, func, dim=None, axis=None, keep_attrs=False,
               allow_lazy=False, **kwargs):
        """Reduce this array by applying `func` along some dimension(s).

        Parameters
        ----------
        func : function
            Function which can be called in the form
            `func(x, axis=axis, **kwargs)` to return the result of reducing an
            np.ndarray over an integer valued axis.
        dim : str or sequence of str, optional
            Dimension(s) over which to apply `func`.
        axis : int or sequence of int, optional
            Axis(es) over which to apply `func`. Only one of the 'dim'
            and 'axis' arguments can be supplied. If neither are supplied, then
            the reduction is calculated over the flattened array (by calling
            `func(x)` without an axis argument).
        keep_attrs : bool, optional
            If True, the variable's attributes (`attrs`) will be copied from
            the original object to the new one. If False (default), the new
            object will be returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to `func`.

        Returns
        -------
        reduced : Array
            Array with summarized data and the indicated dimension(s)
            removed.
        """
        if dim is not None and axis is not None:
            raise ValueError("cannot supply both 'axis' and 'dim' arguments")

        if dim is not None:
            axis = self.get_axis_num(dim)
        data = func(self.data if allow_lazy else self.values,
                    axis=axis, **kwargs)

        # drop the reduced axes from the result's dimensions
        removed_axes = (range(self.ndim) if axis is None
                        else np.atleast_1d(axis) % self.ndim)
        dims = [dim for n, dim in enumerate(self.dims)
                if n not in removed_axes]

        attrs = self._attrs if keep_attrs else None
        return Variable(dims, data, attrs=attrs)

    @classmethod
    def concat(cls, variables, dim='concat_dim', positions=None,
               shortcut=False):
        """Concatenate variables along a new or existing dimension.

        Parameters
        ----------
        variables : iterable of Array
            Arrays to stack together. Each variable is expected to have
            matching dimensions and shape except for along the stacked
            dimension.
        dim : str or DataArray, optional
            Name of the dimension to stack along. This can either be a new
            dimension name, in which case it is added along axis=0, or an
            existing dimension name, in which case the location of the
            dimension is unchanged. Where to insert the new dimension is
            determined by the first variable.
        positions : None or list of integer arrays, optional
            List of integer arrays which specifies the integer positions to which
            to assign each dataset along the concatenated dimension. If not
            supplied, objects are concatenated in the provided order.
        shortcut : bool, optional
            This option is used internally to speed-up groupby operations.
            If `shortcut` is True, some checks of internal consistency between
            arrays to concatenate are skipped.

        Returns
        -------
        stacked : Variable
            Concatenated Variable formed by stacking all the supplied variables
            along the given dimension.
        """
        if not isinstance(dim, basestring):
            dim, = dim.dims

        # can't do this lazily: we need to loop through variables at least
        # twice
        variables = list(variables)
        first_var = variables[0]

        arrays = [v.data for v in variables]

        # TODO: use our own type promotion rules to ensure that
        # [str, float] -> object, not str like numpy
        if dim in first_var.dims:
            # concatenate along an existing dimension
            axis = first_var.get_axis_num(dim)
            dims = first_var.dims
            if positions is None:
                data = ops.concatenate(arrays, axis=axis)
            else:
                data = ops.interleaved_concat(arrays, positions, axis=axis)
        else:
            # stack along a brand-new leading dimension
            axis = 0
            dims = (dim,) + first_var.dims
            data = ops.stack(arrays, axis=axis)

        attrs = OrderedDict(first_var.attrs)
        if not shortcut:
            for var in variables:
                if var.dims != first_var.dims:
                    raise ValueError('inconsistent dimensions')
                # keep only attributes shared by every input
                utils.remove_incompatible_items(attrs, var.attrs)

        return cls(dims, data, attrs)

    def _data_equals(self, other):
        # identity short-circuit avoids loading lazy data unnecessarily
        return (self._data is other._data or
                ops.array_equiv(self.data, other.data))

    def equals(self, other):
        """True if two Variables have the same dimensions and values;
        otherwise False.

        Variables can still be equal (like pandas objects) if they have NaN
        values in the same locations.

        This method is necessary because `v1 == v2` for Variables
        does element-wise comparisions (like numpy.ndarrays).
        """
        other = getattr(other, 'variable', other)
        try:
            return (self.dims == other.dims and self._data_equals(other))
        except (TypeError, AttributeError):
            return False

    def broadcast_equals(self, other):
        """True if two Variables have the values after being broadcast against
        each other; otherwise False.

        Variables can still be equal (like pandas objects) if they have NaN
        values in the same locations.
        """
        try:
            self, other = broadcast_variables(self, other)
        except (ValueError, AttributeError):
            return False
        return self.equals(other)

    def identical(self, other):
        """Like equals, but also checks attributes.
        """
        try:
            return (utils.dict_equiv(self.attrs, other.attrs) and
                    self.equals(other))
        except (TypeError, AttributeError):
            return False

    @property
    def real(self):
        # note: encoding is not propagated to the component view
        return type(self)(self.dims, self.data.real, self._attrs)

    @property
    def imag(self):
        # note: encoding is not propagated to the component view
        return type(self)(self.dims, self.data.imag, self._attrs)

    def __array_wrap__(self, obj, context=None):
        # lets numpy ufuncs return Variables instead of bare ndarrays
        return Variable(self.dims, obj)

    @staticmethod
    def _unary_op(f):
        # wrap a unary ufunc-style function so it returns a Variable
        @functools.wraps(f)
        def func(self, *args, **kwargs):
            return self.__array_wrap__(f(self.data, *args, **kwargs))
        return func

    @staticmethod
    def _binary_op(f, reflexive=False, **ignored_kwargs):
        # wrap a binary operation with dimension-name broadcasting;
        # defers to DataArray/Dataset implementations when appropriate
        @functools.wraps(f)
        def func(self, other):
            if isinstance(other, (xray.DataArray, xray.Dataset)):
                return NotImplemented
            self_data, other_data, dims = _broadcast_compat_data(self, other)
            new_data = (f(self_data, other_data)
                        if not reflexive
                        else f(other_data, self_data))
            result = Variable(dims, new_data)
            return result
        return func

    @staticmethod
    def _inplace_binary_op(f):
        # wrap an in-place binary operation; dimensions may not change
        @functools.wraps(f)
        def func(self, other):
            if isinstance(other, xray.Dataset):
                raise TypeError('cannot add a Dataset to a Variable in-place')
            self_data, other_data, dims = _broadcast_compat_data(self, other)
            if dims != self.dims:
                raise ValueError('dimensions cannot change for in-place '
                                 'operations')
            self.values = f(self_data, other_data)
            return self
        return func
# Attach the standard arithmetic, comparison and reduction methods to
# Variable via the shared ops machinery.
ops.inject_all_ops_and_reduce_methods(Variable)
class Coordinate(Variable):
    """Wrapper around pandas.Index that adds xray specific functionality.

    The most important difference is that Coordinate objects must always
    have a name, which is the dimension along which they index values.

    Coordinates must always be 1-dimensional.  In addition to Variable
    methods and properties (attributes, encoding, broadcasting), they
    support some pandas.Index methods directly (e.g., get_indexer), even
    though pandas does not (yet) support duck-typing for indexes.
    """

    def __init__(self, name, data, attrs=None, encoding=None, fastpath=False):
        super(Coordinate, self).__init__(name, data, attrs, encoding, fastpath)
        if self.ndim != 1:
            raise ValueError('%s objects must be 1-dimensional'
                             % type(self).__name__)

    def _data_cached(self):
        # lazily wrap the underlying data so it behaves like a
        # pandas.Index; the wrapped adapter is cached on the instance
        if not isinstance(self._data, PandasIndexAdapter):
            self._data = PandasIndexAdapter(self._data)
        return self._data

    def __getitem__(self, key):
        key = self._item_key_to_tuple(key)
        values = self._indexable_data[key]
        if hasattr(values, 'ndim') and values.ndim != 0:
            return type(self)(self.dims, values, self._attrs,
                              self._encoding, fastpath=True)
        # scalar result: a Coordinate must stay 1-dimensional, so fall
        # back to a 0d Variable
        return Variable((), values, self._attrs, self._encoding)

    def __setitem__(self, key, value):
        raise TypeError('%s values cannot be modified' % type(self).__name__)

    def copy(self, deep=True):
        """Returns a copy of this object.

        If `deep=True`, the values array is loaded into memory and copied
        onto the new object.  Dimensions, attributes and encodings are
        always copied.
        """
        # pandas.Index objects are immutable, so the index values can be
        # shared even when deep=True
        if deep:
            data = PandasIndexAdapter(self)
        else:
            data = self._data
        return type(self)(self.dims, data, self._attrs, self._encoding,
                          fastpath=True)

    def _data_equals(self, other):
        return self.to_index().equals(other.to_index())

    def to_coord(self):
        """Return this variable as an xray.Coordinate"""
        return self

    def to_index(self):
        """Convert this variable to a pandas.Index"""
        # creating a new pandas.Index from an old one is essentially free
        # since pandas.Index objects are immutable
        assert self.ndim == 1
        return pd.Index(self._data_cached().array, name=self.dims[0])

    # pandas.Index like properties:
    @property
    def name(self):
        # a Coordinate's name is its single dimension
        return self.dims[0]

    @name.setter
    def name(self, value):
        raise AttributeError('cannot modify name of Coordinate in-place')

    def get_indexer(self, label):
        return self.to_index().get_indexer(label)

    def slice_indexer(self, start=None, stop=None, step=None):
        return self.to_index().slice_indexer(start, stop, step)

    def slice_locs(self, start=None, stop=None):
        return self.to_index().slice_locs(start, stop)

    def get_loc(self, label):
        return self.to_index().get_loc(label)

    @property
    def is_monotonic(self):
        return self.to_index().is_monotonic

    def is_numeric(self):
        return self.to_index().is_numeric()
def _unified_dims(variables):
    """Map every dimension name appearing in *variables* to its size,
    in order of first appearance.

    Raises ValueError if a variable repeats a dimension name or if two
    variables disagree about a dimension's length.
    """
    all_dims = OrderedDict()
    for var in variables:
        var_dims = var.dims
        if len(set(var_dims)) < len(var_dims):
            raise ValueError('broadcasting cannot handle duplicate '
                             'dimensions: %r' % list(var_dims))
        for dim, size in zip(var_dims, var.shape):
            # first occurrence wins; later occurrences must agree
            seen = all_dims.setdefault(dim, size)
            if seen != size:
                raise ValueError('operands cannot be broadcast together '
                                 'with mismatched lengths for dimension %r: %s'
                                 % (dim, (seen, size)))
    return all_dims
def _broadcast_compat_variables(*variables):
    """Expand each variable to the union of all dimensions (ordered by
    first appearance) so that they are mutually broadcastable."""
    dims = tuple(_unified_dims(variables))
    return tuple(var if var.dims == dims else var.expand_dims(dims)
                 for var in variables)
def broadcast_variables(*variables):
    """Given any number of variables, return variables with matching
    dimensions and broadcast data.

    The data on the returned variables will be a view of the data on the
    corresponding original arrays, but dimensions will be reordered and
    inserted so that both broadcast arrays have the same dimensions.  The
    new dimensions are sorted in order of appearance in the first
    variable's dimensions followed by the second variable's dimensions.
    """
    dims_map = _unified_dims(variables)
    dims_tuple = tuple(dims_map)
    return tuple(var if var.dims == dims_tuple else var.expand_dims(dims_map)
                 for var in variables)
def _broadcast_compat_data(self, other):
    """Return ``(self_data, other_data, dims)`` ready for a binary op.

    If *other* exposes the necessary Variable API it is broadcast
    against *self*; otherwise plain numpy broadcasting rules apply.
    """
    variable_api = ('dims', 'data', 'shape', 'encoding')
    if all(hasattr(other, attr) for attr in variable_api):
        # `other` satisfies the Variable API required for broadcasting
        new_self, new_other = _broadcast_compat_variables(self, other)
        return new_self.data, new_other.data, new_self.dims
    # `other` is a scalar or bare array: rely on numpy broadcasting
    return self.data, other, self.dims
| petercable/xray | xray/core/variable.py | Python | apache-2.0 | 35,035 | [
"NetCDF"
] | fa28af0016bda1b3b4dd29cbd380b9f59e3653292d2e9181e0422fbd0f2c934a |
#!/usr/bin/env python3
"""
refguide_check.py [OPTIONS] [-- ARGS]
- Check for a NumPy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
- Check docstring examples
- Check example blocks in RST files
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings::
$ python tools/refguide_check.py --doctests ma
or in RST-based documentations::
$ python tools/refguide_check.py --rst doc/source
"""
import copy
import doctest
import inspect
import io
import os
import re
import shutil
import sys
import tempfile
import warnings
import docutils.core
from argparse import ArgumentParser
from contextlib import contextmanager, redirect_stderr
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from docutils.parsers.rst import directives
from pkg_resources import parse_version
import sphinx
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# custom doctest option flag: an example block carrying SKIPBLOCK is not run
SKIPBLOCK = doctest.register_optionflag('SKIPBLOCK')

if parse_version(sphinx.__version__) >= parse_version('1.5'):
    # Enable specific Sphinx directives
    from sphinx.directives.other import SeeAlso, Only
    directives.register_directive('seealso', SeeAlso)
    directives.register_directive('only', Only)
else:
    # Remove sphinx directives that don't run without Sphinx environment.
    # Sphinx < 1.5 installs all directives on import...
    directives._directives.pop('versionadded', None)
    directives._directives.pop('versionchanged', None)
    directives._directives.pop('moduleauthor', None)
    directives._directives.pop('sectionauthor', None)
    directives._directives.pop('codeauthor', None)
    directives._directives.pop('toctree', None)
# Root package against which all submodule names below are resolved
BASE_MODULE = "numpy"

# Submodules checked by default when no names are given on the command line
PUBLIC_SUBMODULES = [
    'core',
    'f2py',
    'linalg',
    'lib',
    'lib.recfunctions',
    'fft',
    'ma',
    'polynomial',
    'matrixlib',
    'random',
    'testing',
]

# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
    'fftpack.convolve': 'fftpack',
    'io.wavfile': 'io',
    'io.arff': 'io',
}

# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
#
# Optionally, a subset of methods can be skipped by setting dict-values
# to a container of method-names
DOCTEST_SKIPDICT = {
    # cases where NumPy docstrings import things from SciPy:
    'numpy.lib.vectorize': None,
    'numpy.random.standard_gamma': None,
    'numpy.random.gamma': None,
    'numpy.random.vonmises': None,
    'numpy.random.power': None,
    'numpy.random.zipf': None,
    # remote / local file IO with DataSource is problematic in doctest:
    'numpy.lib.DataSource': None,
    'numpy.lib.Repository': None,
}
# `__class_getitem__` (runtime generics) requires Python >= 3.9
if sys.version_info < (3, 9):
    DOCTEST_SKIPDICT.update({
        "numpy.core.ndarray": {"__class_getitem__"},
        "numpy.core.dtype": {"__class_getitem__"},
        "numpy.core.number": {"__class_getitem__"},
    })

# Skip non-numpy RST files, historical release notes
# Any single-directory exact match will skip the directory and all subdirs.
# Any exact match (like 'doc/release') will scan subdirs but skip files in
# the matched directory.
# Any filename will skip that file
RST_SKIPLIST = [
    'scipy-sphinx-theme',
    'sphinxext',
    'neps',
    'changelog',
    'doc/release',
    'doc/source/release',
    'doc/release/upcoming_changes',
    'c-info.ufunc-tutorial.rst',
    'c-info.python-as-glue.rst',
    'f2py.getting-started.rst',
    'arrays.nditer.cython.rst',
    # See PR 17222, these should be fixed
    'basics.byteswapping.rst',
    'basics.dispatch.rst',
    'basics.indexing.rst',
    'basics.subclassing.rst',
    'basics.types.rst',
    'misc.rst',
]

# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
    r'scipy\.sparse\.linalg',
    r'scipy\.spatial\.distance',
    r'scipy\.linalg\.blas\.[sdczi].*',
    r'scipy\.linalg\.lapack\.[sdczi].*',
]

# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
    # NOTE: should NumPy have a better match between autosummary
    # listings and __all__? For now, TR isn't convinced this is a
    # priority -- focus on just getting docstrings executed / correct
    r'numpy\.*',
]
# deprecated windows in scipy.signal namespace
for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
             'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
             'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
             'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'):
    REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)

# flipped to True by init_matplotlib() when a headless matplotlib is usable
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
    """
    Return relative or absolute path name, whichever is shortest.

    Parameters
    ----------
    path : str or None
    cwd : str or None
        Base directory for the relative spelling; defaults to the
        current working directory.

    Returns
    -------
    str
        Relative path or absolute path based on current working directory
    """
    if not isinstance(path, str):
        # e.g. None: pass through untouched
        return path
    cwd = os.getcwd() if cwd is None else cwd
    absolute = os.path.abspath(path)
    relative = os.path.relpath(path, cwd)
    return absolute if len(absolute) <= len(relative) else relative
def find_names(module, names_dict):
    """Collect function names and special directives from the docstring
    of `module` into `names_dict` (module name -> set of names).

    The following patterns are searched for:

    * 3 spaces followed by function name, and maybe some spaces, some
      dashes, and an explanation; only function names listed in
      refguide are formatted like this (mostly, there may be some false
      positives)
    * special directives, such as data and function
    * (scipy.constants only): quoted list

    `names_dict` is updated in place.
    """
    pattern_strings = [
        r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
        r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
    ]
    if module.__name__ == 'scipy.constants':
        pattern_strings += ["^``([a-z_0-9A-Z]+)``"]
    patterns = [re.compile(p) for p in pattern_strings]

    # `.. currentmodule::` / `.. module::` lines switch the module that
    # subsequent entries are attributed to
    module_switch = re.compile(
        r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$")

    module_name = module.__name__
    for line in module.__doc__.splitlines():
        switched = module_switch.search(line)
        if switched:
            module_name = switched.group(1)
            continue
        for pattern in patterns:
            found = pattern.match(line)
            if found is not None:
                names_dict.setdefault(module_name, set()).add(found.group(1))
                break
def get_all_dict(module):
    """Return the public names of `module`, split into three groups.

    Parameters
    ----------
    module : ModuleType
        The module whose __all__ list has to be processed

    Returns
    -------
    not_deprecated : list
        Public names that are not deprecated.
    deprecated : list
        Public callables whose invocation raises DeprecationWarning.
    others : set
        Remaining attributes of the module.
    """
    if hasattr(module, "__all__"):
        candidates = copy.deepcopy(module.__all__)
    else:
        candidates = copy.deepcopy(dir(module))
    candidates = [name for name in candidates if not name.startswith("_")]

    # drop __future__ re-exports some modules still carry
    for legacy in ('absolute_import', 'division', 'print_function'):
        try:
            candidates.remove(legacy)
        except ValueError:
            pass
    if not candidates:
        # Must be a pure documentation module
        candidates.append('__doc__')

    # Modules are almost always private; real submodules need a separate
    # run of refguide_check.
    candidates = [name for name in candidates
                  if not inspect.ismodule(getattr(module, name, None))]

    deprecated = []
    not_deprecated = []
    for name in candidates:
        obj = getattr(module, name, None)
        if callable(obj) and is_deprecated(obj):
            deprecated.append(name)
        else:
            not_deprecated.append(name)

    others = (set(dir(module))
              .difference(set(deprecated))
              .difference(set(not_deprecated)))

    return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
    """Partition names into refguide/``__all__`` discrepancy sets.

    Parameters
    ----------
    all_dict : list
        Non-deprecated public names of the module.
    others : list
        Remaining attributes of the module.
    names : set
        Names found in the refguide docstring listing.
    module_name : str

    Returns
    -------
    only_all : set
        In ``__all__`` but absent from the refguide.
    only_ref : set
        In the refguide but absent from ``__all__``.
    missing : set
        Skip-listed refguide names missing from the module entirely.
    """
    only_all = set()
    for name in all_dict:
        if name in names:
            continue
        qualified = module_name + '.' + name
        if not any(re.match(pat, qualified)
                   for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST):
            only_all.add(name)

    only_ref = set()
    missing = set()
    for name in names:
        if name in all_dict:
            continue
        qualified = module_name + '.' + name
        if any(re.match(pat, qualified) for pat in REFGUIDE_ALL_SKIPLIST):
            if name not in others:
                missing.add(name)
        else:
            only_ref.add(name)

    return only_all, only_ref, missing
def is_deprecated(f):
    """
    Check if callable `f` is deprecated, i.e. whether calling it raises
    a DeprecationWarning.

    Parameters
    ----------
    f : callable

    Returns
    -------
    bool
    """
    with warnings.catch_warnings(record=True):
        # escalate warnings to exceptions so a DeprecationWarning surfaces
        warnings.simplefilter("error")
        try:
            f(**{"not a kwarg": None})
        except DeprecationWarning:
            return True
        except Exception:
            # any other failure (usually TypeError from the bogus kwarg)
            pass
        return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
    """
    Check that `all_dict` is consistent with the `names` in `module_name`,
    i.e. that there are no deprecated or extra objects.

    Parameters
    ----------
    all_dict : list
    names : set
    deprecated : list
    others : list
    module_name : str
    dots : bool
        Whether to print a dot for each check

    Returns
    -------
    list
        List of [(name, success_flag, output)...]
    """
    output = "Non-deprecated objects in __all__: %i\n" % len(all_dict)
    output += "Objects in refguide: %i\n\n" % len(names)

    only_all, only_ref, missing = compare(all_dict, others, names, module_name)
    dep_in_ref = only_ref.intersection(deprecated)
    only_ref = only_ref.difference(deprecated)

    if dep_in_ref:
        output += "Deprecated objects in refguide::\n\n"
        for name in sorted(deprecated):
            output += " " + name + "\n"

    if not (only_all or only_ref or missing):
        # everything is consistent
        if dots:
            output_dot('.')
        return [(None, True, output)]

    if only_all:
        output += ("ERROR: objects in %s.__all__ but not in refguide::\n\n"
                   % module_name)
        for name in sorted(only_all):
            output += " " + name + "\n"
        output += "\nThis issue can be fixed by adding these objects to\n"
        output += "the function listing in __init__.py for this module\n"

    if only_ref:
        output += ("ERROR: objects in refguide but not in %s.__all__::\n\n"
                   % module_name)
        for name in sorted(only_ref):
            output += " " + name + "\n"
        output += "\nThis issue should likely be fixed by removing these objects\n"
        output += "from the function listing in __init__.py for this module\n"
        output += "or adding them to __all__.\n"

    if missing:
        output += "ERROR: missing objects::\n\n"
        for name in sorted(missing):
            output += " " + name + "\n"

    if dots:
        output_dot('F')
    return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
    """
    Validate the reStructuredText snippet `text` coming from file `name`.

    Parameters
    ----------
    text : str
        Docstring text
    name : str
        File name for which the doc string is to be validated
    dots : bool
        Whether to print a dot symbol for each check

    Returns
    -------
    (bool, str)
        Success flag and accumulated error output.
    """
    if text is None:
        if dots:
            output_dot('E')
        return False, "ERROR: %s: no documentation" % (name,)

    # roles/directives that docutils doesn't know but Sphinx would
    ok_unknown_items = {
        'mod', 'doc', 'currentmodule', 'autosummary', 'data', 'attr',
        'obj', 'versionadded', 'versionchanged', 'module', 'class',
        'ref', 'func', 'toctree', 'moduleauthor', 'term', 'c:member',
        'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
    }

    # Run through docutils
    error_stream = io.StringIO()

    def resolve(name, is_label=False):
        return ("http://foo", name)

    token = '<RST-VALIDATE-SYNTAX-CHECK>'

    docutils.core.publish_doctree(
        text, token,
        settings_overrides=dict(halt_level=5,
                                traceback=True,
                                default_reference_context='title-reference',
                                default_role='emphasis',
                                link_base='',
                                resolve_name=resolve,
                                stylesheet_path='',
                                raw_enabled=0,
                                file_insertion_enabled=0,
                                warning_stream=error_stream))

    # Print errors, disregarding unimportant ones
    success = True
    output = ""
    for error in error_stream.getvalue().split(token):
        lines = error.splitlines()
        if not lines:
            continue

        unknown = re.match(
            r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$',
            lines[0])
        if unknown and unknown.group(1) in ok_unknown_items:
            continue

        if re.match(r'.*Error in "math" directive:.*unknown option: "label"',
                    " ".join(lines), re.S):
            continue

        output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
        success = False

    if not success:
        # echo the offending text with line numbers for easier debugging
        output += " " + "-"*72 + "\n"
        for lineno, line in enumerate(text.splitlines()):
            output += " %-4d %s\n" % (lineno+1, line)
        output += " " + "-"*72 + "\n\n"

    if dots:
        output_dot('.' if success else 'F')
    return success, output
def output_dot(msg='.', stream=sys.stderr):
    # emit a single progress marker immediately (flush avoids buffering)
    stream.write(msg)
    stream.flush()
def check_rest(module, names, dots=True):
    """
    Check reStructuredText formatting of docstrings

    Parameters
    ----------
    module : ModuleType
    names : set

    Returns
    -------
    result : list
        List of [(module_name, success_flag, output),...]
    """
    # Plain data objects carry no docstring of their own, so skip them.
    # (This file is Python-3 only — see the `python3` shebang — so the old
    # ``try: ... unicode ... except NameError`` fallback was dead code and
    # has been removed.)
    skip_types = (dict, str, float, int)

    results = []

    # module.__name__[6:] strips the leading "numpy." prefix
    if module.__name__[6:] not in OTHER_MODULE_DOCS:
        results += [(module.__name__,) +
                    validate_rst_syntax(inspect.getdoc(module),
                                        module.__name__, dots=dots)]

    for name in names:
        full_name = module.__name__ + '.' + name
        obj = getattr(module, name, None)

        if obj is None:
            results.append((full_name, False, "%s has no docstring" % (full_name,)))
            continue
        elif isinstance(obj, skip_types):
            continue

        if inspect.ismodule(obj):
            text = inspect.getdoc(obj)
        else:
            try:
                text = str(get_doc_object(obj))
            except Exception:
                import traceback
                results.append((full_name, False,
                                "Error in docstring format!\n" +
                                traceback.format_exc()))
                continue

        # control characters usually mean a forgotten raw-string (r""")
        m = re.search("([\x00-\x09\x0b-\x1f])", text)
        if m:
            msg = ("Docstring contains a non-printable character %r! "
                   "Maybe forgot r\"\"\"?" % (m.group(1),))
            results.append((full_name, False, msg))
            continue

        try:
            src_file = short_path(inspect.getsourcefile(obj))
        except TypeError:
            # built-ins have no source file
            src_file = None

        if src_file:
            file_full_name = src_file + ':' + full_name
        else:
            file_full_name = full_name

        results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))

    return results
### Doctest helpers ####

# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}

# the namespace to do checks in
CHECK_NAMESPACE = {
    'np': np,
    'numpy': np,
    'assert_allclose': np.testing.assert_allclose,
    'assert_equal': np.testing.assert_equal,
    # recognize numpy repr's
    'array': np.array,
    'matrix': np.matrix,  # NOTE: np.matrix is deprecated upstream
    'int64': np.int64,
    'uint64': np.uint64,
    'int8': np.int8,
    'int32': np.int32,
    'float32': np.float32,
    'float64': np.float64,
    'dtype': np.dtype,
    'nan': np.nan,
    'NaN': np.nan,
    'inf': np.inf,
    'Inf': np.inf,
    'StringIO': io.StringIO,
}
class DTRunner(doctest.DocTestRunner):
    """
    Doctest runner that forwards each example's source to the attached
    checker and suppresses the default report divider.
    """
    DIVIDER = "\n"

    def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
        self._item_name = item_name
        super().__init__(checker=checker, verbose=verbose,
                         optionflags=optionflags)

    def _report_item_name(self, out, new_line=False):
        # emit at most once per item; afterwards the marker is cleared
        if self._item_name is None:
            return
        if new_line:
            out("\n")
        self._item_name = None

    def report_start(self, out, test, example):
        # stash the example source so the checker can inspect it
        self._checker._source = example.source
        return super().report_start(out, test, example)

    def report_success(self, out, test, example, got):
        if self._verbose:
            self._report_item_name(out, new_line=True)
        return super().report_success(out, test, example, got)

    def report_unexpected_exception(self, out, test, example, exc_info):
        self._report_item_name(out)
        return super().report_unexpected_exception(out, test, example,
                                                   exc_info)

    def report_failure(self, out, test, example, got):
        self._report_item_name(out)
        return super().report_failure(out, test, example, got)
class Checker(doctest.OutputChecker):
    """
    Output checker for the docstring examples: tolerates object
    addresses, "# random"-marked output, matplotlib chatter, and small
    floating point differences.
    """
    obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
    vanilla = doctest.OutputChecker()
    # markers in the expected output that flag it as non-deterministic
    rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary",
                    "# uninitialized", "#uninitialized"}
    # source fragments (mostly matplotlib calls) whose output is never checked
    stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
                 'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
                 '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
                 '# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',
                 '.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}

    def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
        # parse_namedtuples: try to compare "Name(a=..., b=...)" reprs as
        # plain tuples; atol/rtol are the np.allclose tolerances
        self.parse_namedtuples = parse_namedtuples
        self.atol, self.rtol = atol, rtol
        if ns is None:
            self.ns = CHECK_NAMESPACE
        else:
            self.ns = ns

    def check_output(self, want, got, optionflags):
        """Return True if actual output `got` acceptably matches the
        expected output `want` (several lenient fallbacks are tried)."""
        # cut it short if they are equal
        if want == got:
            return True

        # skip stopwords in source
        # (self._source is set by DTRunner.report_start before each example)
        if any(word in self._source for word in self.stopwords):
            return True

        # skip random stuff
        if any(word in want for word in self.rndm_markers):
            return True

        # skip function/object addresses
        if self.obj_pattern.search(got):
            return True

        # ignore comments (e.g. signal.freqresp)
        if want.lstrip().startswith("#"):
            return True

        # try the standard doctest
        try:
            if self.vanilla.check_output(want, got, optionflags):
                return True
        except Exception:
            pass

        # OK then, convert strings to objects
        try:
            a_want = eval(want, dict(self.ns))
            a_got = eval(got, dict(self.ns))
        except Exception:
            # Maybe we're printing a numpy array? This produces invalid python
            # code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
            # values. So, reinsert commas and retry.
            # TODO: handle (1) abbreviation (`print(np.arange(10000))`), and
            # (2) n-dim arrays with n > 1
            s_want = want.strip()
            s_got = got.strip()
            cond = (s_want.startswith("[") and s_want.endswith("]") and
                    s_got.startswith("[") and s_got.endswith("]"))
            if cond:
                s_want = ", ".join(s_want[1:-1].split())
                s_got = ", ".join(s_got[1:-1].split())
                return self.check_output(s_want, s_got, optionflags)

            if not self.parse_namedtuples:
                return False
            # suppose that "want" is a tuple, and "got" is smth like
            # MoodResult(statistic=10, pvalue=0.1).
            # Then convert the latter to the tuple (10, 0.1),
            # and then compare the tuples.
            # NOTE(review): `a_want` is only defined here when eval(want)
            # succeeded and eval(got) failed — the usual namedtuple case.
            try:
                num = len(a_want)
                regex = (r'[\w\d_]+\(' +
                         ', '.join([r'[\w\d_]+=(.+)']*num) +
                         r'\)')
                grp = re.findall(regex, got.replace('\n', ' '))
                if len(grp) > 1:  # no more than one for now
                    return False
                # fold it back to a tuple
                got_again = '(' + ', '.join(grp[0]) + ')'
                return self.check_output(want, got_again, optionflags)
            except Exception:
                return False

        # ... and defer to numpy
        try:
            return self._do_check(a_want, a_got)
        except Exception:
            # heterog tuple, eg (1, np.array([1., 2.]))
            try:
                return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
            except (TypeError, ValueError):
                return False

    def _do_check(self, want, got):
        """Loose comparison: exact equality if it works, else allclose."""
        # This should be done exactly as written to correctly handle all of
        # numpy-comparable objects, strings, and heterogeneous tuples
        try:
            if want == got:
                return True
        except Exception:
            pass
        return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
    """
    Run modified doctests for the set of `tests`.

    Parameters
    ----------
    tests : list
        doctest.DocTest instances to run.
    full_name : str
        Item name used in failure reports.
    verbose : bool
    doctest_warnings : bool
        If True, doctest warnings go to stdout instead of the captured
        output buffer.

    Returns
    -------
    tuple(bool, list)
        Tuple of (success, output)
    """
    flags = NORMALIZE_WHITESPACE | ELLIPSIS
    runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
                      verbose=verbose)

    output = io.StringIO(newline='')
    success = True

    # Redirect stderr to the stdout or output
    tmp_stderr = sys.stdout if doctest_warnings else output

    @contextmanager
    def temp_cwd():
        # run inside a throwaway working directory so examples that write
        # files do not pollute the source tree
        cwd = os.getcwd()
        tmpdir = tempfile.mkdtemp()
        try:
            os.chdir(tmpdir)
            yield tmpdir
        finally:
            os.chdir(cwd)
            shutil.rmtree(tmpdir)

    # Run tests, trying to restore global state afterward
    cwd = os.getcwd()
    with np.errstate(), np.printoptions(), temp_cwd() as tmpdir, \
            redirect_stderr(tmp_stderr):
        # try to ensure random seed is NOT reproducible
        np.random.seed(None)

        ns = {}
        for t in tests:
            # We broke the tests up into chunks to try to avoid PSEUDOCODE
            # This has the unfortunate side effect of restarting the global
            # namespace for each test chunk, so variables will be "lost" after
            # a chunk. Chain the globals to avoid this
            t.globs.update(ns)
            t.filename = short_path(t.filename, cwd)
            # Process our options
            if any([SKIPBLOCK in ex.options for ex in t.examples]):
                continue
            # NOTE(review): DocTestRunner.run returns TestResults(failed,
            # attempted), so `successes` is really the attempted count;
            # only `fails` is consulted below.
            fails, successes = runner.run(t, out=output.write, clear_globs=False)
            if fails > 0:
                success = False
            ns = t.globs

    output.seek(0)
    return success, output.read()
def check_doctests(module, verbose, ns=None,
                   dots=True, doctest_warnings=False):
    """
    Check code in docstrings of the module's public symbols.

    Parameters
    ----------
    module : ModuleType
        Module whose public symbols are checked
    verbose : bool
        Should the result be verbose
    ns : dict
        Namespace the examples run in (defaults to DEFAULT_NAMESPACE)
    dots : bool
    doctest_warnings : bool

    Returns
    -------
    results : list
        List of [(item_name, success_flag, output), ...]
    """
    if ns is None:
        ns = dict(DEFAULT_NAMESPACE)

    results = []
    # Loop over non-deprecated items
    for name in get_all_dict(module)[0]:
        full_name = module.__name__ + '.' + name

        # skip_methods semantics: a value of None in the skip dict means
        # "skip the whole item"; a container lists individual methods
        skip_methods = None
        if full_name in DOCTEST_SKIPDICT:
            skip_methods = DOCTEST_SKIPDICT[full_name]
            if skip_methods is None:
                continue

        try:
            obj = getattr(module, name)
        except AttributeError:
            import traceback
            results.append((full_name, False,
                            "Missing item!\n" + traceback.format_exc()))
            continue

        finder = doctest.DocTestFinder()
        try:
            tests = finder.find(obj, name, globs=dict(ns))
        except Exception:
            import traceback
            results.append((full_name, False,
                            "Failed to get doctests!\n" +
                            traceback.format_exc()))
            continue

        if skip_methods is not None:
            tests = [t for t in tests
                     if t.name.partition(".")[2] not in skip_methods]

        success, output = _run_doctests(tests, full_name, verbose,
                                        doctest_warnings)

        if dots:
            output_dot('.' if success else 'F')

        results.append((full_name, success, output))

    if HAVE_MATPLOTLIB:
        import matplotlib.pyplot as plt
        plt.close('all')

    return results
def check_doctests_testfile(fname, verbose, ns=None,
                            dots=True, doctest_warnings=False):
    """
    Check code in a text file.

    Mimic `check_doctests` above, differing mostly in test discovery.
    (which is borrowed from stdlib's doctest.testfile here,
    https://github.com/python-git/python/blob/master/Lib/doctest.py)

    Parameters
    ----------
    fname : str
        File name
    verbose : bool
    ns : dict
        Name space
    dots : bool
    doctest_warnings : bool

    Returns
    -------
    list
        List of [(item_name, success_flag, output), ...]

    Notes
    -----
    refguide can be signalled to skip testing code by adding
    ``#doctest: +SKIP`` to the end of the line. If the output varies or is
    random, add ``# may vary`` or ``# random`` to the comment. for example

    >>> plt.plot(...)  # doctest: +SKIP
    >>> random.randint(0,10)
    5 # random

    We also try to weed out pseudocode:
    * We maintain a list of exceptions which signal pseudocode,
    * We split the text file into "blocks" of code separated by empty lines
      and/or intervening text.
    * If a block contains a marker, the whole block is then assumed to be
      pseudocode. It is then not being doctested.

    The rationale is that typically, the text looks like this:

    blah
    <BLANKLINE>
    >>> from numpy import some_module   # pseudocode!
    >>> func = some_module.some_function
    >>> func(42)                  # still pseudocode
    146
    <BLANKLINE>
    blah
    <BLANKLINE>
    >>> 2 + 3        # real code, doctest it
    5
    """
    if ns is None:
        ns = CHECK_NAMESPACE
    results = []

    _, short_name = os.path.split(fname)
    if short_name in DOCTEST_SKIPDICT:
        return results

    full_name = fname
    with open(fname, encoding='utf-8') as f:
        text = f.read()

    # markers whose presence flags a whole block as pseudocode
    PSEUDOCODE = set(['some_function', 'some_module', 'import example',
                      'ctypes.CDLL',  # likely need compiling, skip it
                      'integrate.nquad(func,'  # ctypes integrate tutorial
                      ])

    # split the text into "blocks" and try to detect and omit pseudocode blocks.
    parser = doctest.DocTestParser()
    good_parts = []
    # running line offset of the current block within the file
    base_line_no = 0
    for part in text.split('\n\n'):
        try:
            tests = parser.get_doctest(part, ns, fname, fname, base_line_no)
        except ValueError as e:
            if e.args[0].startswith('line '):
                # fix line number since `parser.get_doctest` does not increment
                # the reported line number by base_line_no in the error message
                parts = e.args[0].split()
                parts[1] = str(int(parts[1]) + base_line_no)
                e.args = (' '.join(parts),) + e.args[1:]
            raise
        if any(word in ex.source for word in PSEUDOCODE
               for ex in tests.examples):
            # omit it
            pass
        else:
            # `part` looks like a good code, let's doctest it
            good_parts.append((part, base_line_no))
        # +2 accounts for the '\n\n' separator consumed by split()
        base_line_no += part.count('\n') + 2

    # Reassemble the good bits and doctest them:
    tests = []
    for good_text, line_no in good_parts:
        tests.append(parser.get_doctest(good_text, ns, fname, fname, line_no))
    success, output = _run_doctests(tests, full_name, verbose,
                                    doctest_warnings)

    if dots:
        output_dot('.' if success else 'F')
    results.append((full_name, success, output))

    if HAVE_MATPLOTLIB:
        import matplotlib.pyplot as plt
        plt.close('all')

    return results
def iter_included_files(base_path, verbose=0, suffixes=('.rst',)):
    """
    Generator that walks `base_path` and its subdirectories, skipping
    files or directories in RST_SKIPLIST, and yields each file whose
    suffix is in `suffixes`.

    Parameters
    ----------
    base_path : str
        Base path of the directory to be processed
    verbose : int
    suffixes : tuple

    Yields
    ------
    path
        Path of each matching file.
    """
    # a single file given directly is yielded as-is
    if os.path.exists(base_path) and os.path.isfile(base_path):
        yield base_path
    for dir_name, subdirs, files in os.walk(base_path, topdown=True):
        if dir_name in RST_SKIPLIST:
            if verbose > 0:
                sys.stderr.write('skipping files in %s' % dir_name)
            files = []
        for skipped in RST_SKIPLIST:
            if skipped in subdirs:
                if verbose > 0:
                    sys.stderr.write('skipping %s and subdirs' % skipped)
                # pruning subdirs in-place stops os.walk from descending
                subdirs.remove(skipped)
        for fname in files:
            _, ext = os.path.splitext(fname)
            if ext in suffixes and fname not in RST_SKIPLIST:
                yield os.path.join(dir_name, fname)
def check_documentation(base_path, results, args, dots):
    """
    Check examples in any *.rst located inside `base_path` and append
    each outcome to `results`.

    See Also
    --------
    check_doctests_testfile
    """
    for filename in iter_included_files(base_path, args.verbose):
        if dots:
            sys.stderr.write(filename + ' ')
            sys.stderr.flush()

        tut_results = check_doctests_testfile(
            filename,
            (args.verbose >= 2), dots=dots,
            doctest_warnings=args.doctest_warnings)

        # reporting downstream expects a module-like object carrying
        # __name__, so stub one out per file
        def scratch():
            pass
        scratch.__name__ = filename
        results.append((scratch, tut_results))

        if dots:
            sys.stderr.write('\n')
            sys.stderr.flush()
def init_matplotlib():
    """
    Probe for matplotlib and select the headless 'Agg' backend; records
    the outcome in the module-level HAVE_MATPLOTLIB flag.
    """
    global HAVE_MATPLOTLIB
    try:
        import matplotlib
        matplotlib.use('Agg')
    except ImportError:
        HAVE_MATPLOTLIB = False
    else:
        HAVE_MATPLOTLIB = True
def main(argv):
    """
    Validates the docstrings of all the pre decided set of
    modules for errors and docstring standards.

    argv -- command-line arguments (without the program name).
    Exits the process: status 0 when all checks pass, 1 otherwise.
    """
    parser = ArgumentParser(usage=__doc__.lstrip())
    parser.add_argument("module_names", metavar="SUBMODULES", default=[],
                        nargs='*', help="Submodules to check (default: all public)")
    # NOTE(review): this help string looks truncated ("on " trailing).
    parser.add_argument("--doctests", action="store_true",
                        help="Run also doctests on ")
    parser.add_argument("-v", "--verbose", action="count", default=0)
    parser.add_argument("--doctest-warnings", action="store_true",
                        help="Enforce warning checking for doctests")
    parser.add_argument("--rst", nargs='?', const='doc', default=None,
                        help=("Run also examples from *rst files "
                              "discovered walking the directory(s) specified, "
                              "defaults to 'doc'"))
    args = parser.parse_args(argv)
    modules = []
    names_dict = {}
    # Default to checking every public submodule.
    if not args.module_names:
        args.module_names = list(PUBLIC_SUBMODULES)
    os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
    module_names = list(args.module_names)
    # Pull in modules whose documentation lives on another module's page.
    for name in module_names:
        if name in OTHER_MODULE_DOCS:
            name = OTHER_MODULE_DOCS[name]
            if name not in module_names:
                module_names.append(name)
    dots = True
    success = True
    results = []
    errormsgs = []
    # matplotlib is only needed when examples/doctests will be executed.
    if args.doctests or args.rst:
        init_matplotlib()
    # Import every requested module; collect names for all of them, but
    # only run the per-module checks on those explicitly requested.
    for submodule_name in module_names:
        module_name = BASE_MODULE + '.' + submodule_name
        __import__(module_name)
        module = sys.modules[module_name]
        if submodule_name not in OTHER_MODULE_DOCS:
            find_names(module, names_dict)
        if submodule_name in args.module_names:
            modules.append(module)
    if args.doctests or not args.rst:
        print("Running checks for %d modules:" % (len(modules),))
        for module in modules:
            if dots:
                sys.stderr.write(module.__name__ + ' ')
                sys.stderr.flush()
            all_dict, deprecated, others = get_all_dict(module)
            names = names_dict.get(module.__name__, set())
            mod_results = []
            mod_results += check_items(all_dict, names, deprecated, others,
                                       module.__name__)
            mod_results += check_rest(module, set(names).difference(deprecated),
                                      dots=dots)
            if args.doctests:
                mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
                                              doctest_warnings=args.doctest_warnings)
            # Every result must be a (name, success, output) tuple.
            for v in mod_results:
                assert isinstance(v, tuple), v
            results.append((module, mod_results))
            if dots:
                sys.stderr.write('\n')
                sys.stderr.flush()
    if args.rst:
        base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
        rst_path = os.path.relpath(os.path.join(base_dir, args.rst))
        if os.path.exists(rst_path):
            print('\nChecking files in %s:' % rst_path)
            check_documentation(rst_path, results, args, dots)
        else:
            sys.stderr.write(f'\ninvalid --rst argument "{args.rst}"')
            errormsgs.append('invalid directory argument to --rst')
        if dots:
            sys.stderr.write("\n")
            sys.stderr.flush()
    # Report results
    for module, mod_results in results:
        success = all(x[1] for x in mod_results)
        if not success:
            errormsgs.append(f'failed checking {module.__name__}')
        # Quiet mode: skip per-module report for passing modules.
        if success and args.verbose == 0:
            continue
        print("")
        print("=" * len(module.__name__))
        print(module.__name__)
        print("=" * len(module.__name__))
        print("")
        for name, success, output in mod_results:
            if name is None:
                # Module-level (unnamed) check result.
                if not success or args.verbose >= 1:
                    print(output.strip())
                    print("")
            elif not success or (args.verbose >= 2 and output.strip()):
                print(name)
                print("-"*len(name))
                print("")
                print(output.strip())
                print("")
    if len(errormsgs) == 0:
        print("\nOK: all checks passed!")
        sys.exit(0)
    else:
        print('\nERROR: ', '\n '.join(errormsgs))
        sys.exit(1)
if __name__ == '__main__':
    main(argv=sys.argv[1:])
| charris/numpy | tools/refguide_check.py | Python | bsd-3-clause | 38,700 | [
"Gaussian"
] | da292c335fe807c55822627cd9d9b3694e1e8fbbbc457139e2587e325adc552e |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 03 09:25:36 2012
@author: Shreejoy
"""
from neuroelectro.models import Article, MeshTerm, Substance, Journal
from neuroelectro.models import Neuron, NeuronSyn, Unit
from neuroelectro.models import BrainRegion, InSituExpt, Protein, RegionExpr
from neuroelectro.models import DataTable, ArticleFullText, EphysConceptMap
from neuroelectro.models import NeuronArticleMap
from fuzzywuzzy import process
def assign_regions(neurons=None):
if neurons is None:
neurons = Neuron.objects.filter(regions__isnull = True)
matchThresh = 60
regionNames = [r.name for r in BrainRegion.objects.all()]
regionNames.remove(u'Nucleus x')
regionNames.remove(u'Nucleus y')
unmatchedNeurons = []
for n in neurons:
nName = n.name
#bestRegions = process.extract(nName,regionNames)
#print nName, bestRegions
bestRegion, matchVal = process.extractOne(nName,regionNames)
if matchVal > matchThresh:
nOb = Neuron.objects.get(name = n)
rOb = BrainRegion.objects.get(name = bestRegion)
print nName, bestRegion
nOb.regions.add(rOb)
nOb.save()
else:
unmatchedNeurons.append(nName)
return unmatchedNeurons
def assign_more_regions():
    """Run fuzzy region assignment for a hand-picked set of neuron types."""
    names = [
        'Hippocampus CA1 oriens lacunosum moleculare neuron',
        'Substantia nigra pars compacta dopaminergic cell',
        'Colliculus superior wide field vertical cell',
        'Substantia nigra pars reticulata interneuron GABA',
        'Trapezoid body principal cell',
    ]
    assign_regions([Neuron.objects.get(name = name) for name in names])
# Dead code removed: the following neuron names appeared here as bare
# string literals, i.e. no-op expression statements.  They duplicate the
# name list inside assign_cortex_neuron_regions() and are kept only as a
# comment for reference:
#   'Neocortex basket cell', 'Neocortex Martinotti cell',
#   'Neocortex bipolar neuron', 'Neocortex bipolar cell',
#   'Neocortex pyramidal cell layer 2-3', 'Neocortex layer 4 stellate cell',
#   'Neocortex stellate cell', 'Neocortex pyramidal cell layer 5-6',
#   'Neocortex Cajal-Retzius cell'
def assign_cortex_neuron_regions():
neuronNames = \
[
'Neocortex basket cell',
'Neocortex Martinotti cell',
'Neocortex bipolar neuron',
'Neocortex bipolar cell',
'Neocortex pyramidal cell layer 2-3',
'Neocortex layer 4 stellate cell',
'Neocortex stellate cell',
'Neocortex pyramidal cell layer 5-6',
'Neocortex Cajal-Retzius cell',
]
neuronRegionInds = \
[
[315],
[315],
[315],
[315],
[667,943,962,346,838,201,113,657,854,670,806,556,180,1106,600,251,643,755,905,1066,973,821,269,41,211,296,304,412,582,288,328,163,694,965,434,430,241,1127,888,427],
[865,654,1047,1094,950,577,1086,1035,148,1010,678,816,759,990,1114,401,573,721,869,501,635,234],
[865,654,1047,1094,950,577,1086,1035,148,1010,678,816,759,990,1114,401,573,721,869,501,635,234],
[648,767,921,702,1128,974,625,1111,1090,827,187,1058,252,847,791,1023,233,433,613,778,902,565,1015,772,363,630,620,1125,1101,344,800,774,610,687,683,289,692,988,844,1021,686,889,1038,478,1102,945,9,862,1054,638,857,156,954,249,520,601,1046,74,33,377,257,919,810,84,440,910,608,783,314,675,906,274,590,308,729,335],
[793],
]
#layer 5 only
#[648,767,921,702,1128,974,625,1111,1090,827,187,1058,252,847,791,1023,233,433,613,778,902,565,1015,772,363,630,620,1125,1101,344,800,774,610,687,683,289,692,988]
#layer 6a only
#[844,1021,686,889,1038,478,1102,945,9,862,1054,638,857,156,954,249,520,601,1046,74,33,377,257,919,810,84,440,910,608,783,314,675,906,274,590,308,729,335]
for ind in range(len(neuronNames)):
neuronOb = Neuron.objects.get(name = neuronNames[ind])
print neuronOb
for regionInd in neuronRegionInds[ind]:
regionOb = BrainRegion.objects.get(allenid = regionInd)
#print '\t' + regionOb.name
neuronOb.regions.add(regionOb)
neuronOb.save()
def assign_even_more_regions():
neuronNames = \
[
'Amygdaloid nucleus paracapsular intercalated cell',
'Hippocampus CA1 neurogliaform cell',
'Olfactory cortex semilunar cell',
'Olfactory cortex pyramidal cell',
'Hippocampus CA1 basket cell',
'Basalis nucleus cholinergic neuron',
'BNST common spiny neuron',
'BNST beaded neuron',
'Olfactory bulb (main) tufted cell (middle)',
'Olfactory bulb (main) periglomerular cell',
'Olfactory bulb main tufted cell external',
'Olfactory bulb (main) Blanes cell',
'Neocortex chandelier cell',
'Amygdala basolateral nucleus pyramidal neuron',
'Amygdala corticomedial nucleus pyramidal cell',
'Substantia nigra pars reticulata principal cell',
'Hypothalamus oxytocin neuroendocrine magnocellular cell'
]
neuronRegionNames = \
[
'Intercalated amygdalar nucleus',
'Field CA1, stratum lacunosum-moleculare',
'Piriform area, pyramidal layer',
'Piriform area, pyramidal layer',
'Field CA1, stratum radiatum',
'Substantia innominata',
'Bed nuclei of the stria terminalis',
'Bed nuclei of the stria terminalis',
'Main olfactory bulb, outer plexiform layer',
'Main olfactory bulb, glomerular layer',
'Main olfactory bulb, glomerular layer',
'Main olfactory bulb, granule layer',
'Isocortex',
'Basolateral amygdalar nucleus',
'Cortical amygdalar area, posterior part, medial zone',
'Substantia nigra, reticular part',
'Hypothalamus'
]
for ind in range(len(neuronNames)):
try:
neuronOb = Neuron.objects.get(name = neuronNames[ind])
except Exception:
continue
regionName = neuronRegionNames[ind]
regionOb = BrainRegion.objects.get(name = regionName)
print neuronOb
print '\t' + regionOb.name
neuronOb.regions = [regionOb]
neuronOb.save()
#r = BrainRegion.objects.get(name = "Caudoputamen")
#
#neurons = Neuron.objects.filter(name__icontains = "Neostriatum")
#
#for n in neurons:
# n.regions.clear()
# n.regions.add(r)
# n.save()
#
#regions = BrainRegion.objects.filter(name__icontains = "granular layer")
#regions = regions[1:]
#
#neuron = Neuron.objects.get(name = "Cerebellum granule cell")
#neuron.regions.clear()
#neuron.regions = regions
| lessc0de/neuroelectro_org | aba_functions/assign_neurons_allen_regions.py | Python | gpl-2.0 | 6,372 | [
"NEURON"
] | 0696ce99f891262dba4378a59f0a6fee662c1799330d1952ca29ece78a68f72f |
from math import sqrt
import numpy as np
__all__ = ['FixCartesian', 'FixBondLength', 'FixedMode', 'FixConstraintSingle',
'FixAtoms', 'UnitCellFilter', 'FixScaled', 'StrainFilter',
'FixedPlane', 'Filter', 'FixConstraint', 'FixedLine',
'FixBondLengths', 'FixInternals']
def slice2enlist(s):
    """Convert a slice object into a list of (new, old) tuples.

    Lists and tuples are enumerated directly; for a slice the missing
    start/step default to 0/1.  Comparisons with None use ``is`` per
    PEP 8 (the original used ``==``).
    """
    if isinstance(s, (list, tuple)):
        return enumerate(s)
    start = 0 if s.start is None else s.start
    step = 1 if s.step is None else s.step
    return enumerate(range(start, s.stop, step))
class FixConstraint:
    """Base class for classes that fix one or more atoms in some way."""
    def index_shuffle(self, ind):
        """Change the indices.
        When the ordering of the atoms in the Atoms object changes,
        this method can be called to shuffle the indices of the
        constraints.
        ind -- List or tuple of indices.
        """
        raise NotImplementedError
    def repeat(self, m, n):
        """Multiply the constraint for a repeated cell; n is the length
        of the underlying atoms object, needed so the multiplied
        constraints can be assigned correctly.  Subclasses that support
        repetition override this."""
        raise NotImplementedError(
            "Repeat is not compatible with your atoms' constraints."
            ' Use atoms.set_constraint() before calling repeat to '
            'remove your constraints.')
    def adjust_momenta(self, positions, momenta):
        """Adjusts momenta in identical manner to forces."""
        self.adjust_forces(positions, momenta)
class FixConstraintSingle(FixConstraint):
    """Base class for classes that fix a single atom."""
    def index_shuffle(self, ind):
        """The atom index must be stored as self.a."""
        # Find our atom's new position in the reordered index list.
        for new, old in slice2enlist(ind):
            if old == self.a:
                self.a = new
                return
        raise IndexError('Constraint not part of slice')
class FixAtoms(FixConstraint):
    """Constraint object for fixing some chosen atoms."""
    def __init__(self, indices=None, mask=None):
        """Constrain chosen atoms.
        Parameters
        ----------
        indices : list of int
            Indices for those atoms that should be constrained.
        mask : list of bool
            One boolean per atom indicating if the atom should be
            constrained or not.
        Examples
        --------
        Fix all Copper atoms:
        >>> mask = [s == 'Cu' for s in atoms.get_chemical_symbols()]
        >>> c = FixAtoms(mask=mask)
        >>> atoms.set_constraint(c)
        Fix all atoms with z-coordinate less than 1.0 Angstrom:
        >>> c = FixAtoms(mask=atoms.positions[:, 2] < 1.0)
        >>> atoms.set_constraint(c)
        """
        if indices is None and mask is None:
            raise ValueError('Use "indices" or "mask".')
        if indices is not None and mask is not None:
            raise ValueError('Use only one of "indices" and "mask".')
        if mask is not None:
            self.index = np.asarray(mask, bool)
        else:
            # Duplicate indices usually mean the caller passed a boolean
            # mask positionally; reject them with a helpful message.
            # (Vectorized adjacent-compare on the sorted array replaces
            # the old manual loop.)
            srt = np.sort(indices)
            if len(srt) > 1 and (srt[1:] == srt[:-1]).any():
                raise ValueError(
                    'FixAtoms: The indices array contained duplicates. '
                    'Perhaps you wanted to specify a mask instead, but '
                    'forgot the mask= keyword.')
            self.index = np.asarray(indices, int)
        if self.index.ndim != 1:
            raise ValueError('Wrong argument to FixAtoms class!')
    def adjust_positions(self, old, new):
        # Constrained atoms keep their old positions.
        new[self.index] = old[self.index]
    def adjust_forces(self, positions, forces):
        # Constrained atoms feel no force.
        forces[self.index] = 0.0
    def index_shuffle(self, ind):
        # See docstring of superclass
        if self.index.dtype == bool:
            self.index = self.index[ind]
        else:
            index = []
            for new, old in slice2enlist(ind):
                if old in self.index:
                    index.append(new)
            if len(index) == 0:
                raise IndexError('All indices in FixAtoms not part of slice')
            self.index = np.asarray(index, int)
    def copy(self):
        if self.index.dtype == bool:
            return FixAtoms(mask=self.index.copy())
        else:
            return FixAtoms(indices=self.index.copy())
    def __repr__(self):
        if self.index.dtype == bool:
            return 'FixAtoms(mask=%s)' % ints2string(self.index.astype(int))
        return 'FixAtoms(indices=%s)' % ints2string(self.index)
    def todict(self):
        dct = {'name': 'ase.constraints.FixAtoms'}
        if self.index.dtype == bool:
            dct['kwargs'] = {'mask': self.index}
        else:
            dct['kwargs'] = {'indices': self.index}
        return dct
    def repeat(self, m, n):
        """Replicate the constraint for a repeated cell.
        m -- repeat counts (int or 3-tuple); n -- number of atoms in the
        unrepeated atoms object.  Returns self.
        (Rewritten: the original carried unused locals i0/i1 and a local
        named ``l`` shadowing nothing useful.)
        """
        if isinstance(m, int):
            m = (m, m, m)
        nrep = m[0] * m[1] * m[2]
        if self.index.dtype == bool:
            # A boolean mask is simply tiled once per repeated image.
            self.index = np.tile(self.index, nrep)
        else:
            # Integer indices are shifted by n for each repeated image.
            index_new = []
            for rep in range(nrep):
                index_new += [i + rep * n for i in self.index]
            self.index = np.asarray(index_new, int)
        return self
    def delete_atom(self, ind):
        """ Removes atom number ind from the index array, if present.
        Required for removing atoms with existing FixAtoms constraints.
        """
        if self.index.dtype == bool:
            self.index = np.delete(self.index, ind)
        else:
            if ind in self.index:
                i = list(self.index).index(ind)
                self.index = np.delete(self.index, i)
            # Atoms after the removed one shift down by one index.
            for i in range(len(self.index)):
                if self.index[i] >= ind:
                    self.index[i] -= 1
def ints2string(x, threshold=10):
    """Convert ndarray of ints to string, abbreviating long arrays."""
    shown = x.tolist()
    if len(x) > threshold:
        # Keep the first `threshold` entries and mark the truncation.
        return str(shown[:threshold])[:-1] + ', ...]'
    return str(shown)
class FixBondLengths(FixConstraint):
    """Fix several bond lengths at once by sweeping the individual
    FixBondLength constraints a fixed number of times."""
    def __init__(self, pairs, iterations=10, atoms=None, mic=False):
        self.constraints = [FixBondLength(a1, a2, atoms=atoms, mic=mic)
                            for a1, a2 in pairs]
        self.iterations = iterations
    def adjust_positions(self, old, new):
        for _ in range(self.iterations):
            for c in self.constraints:
                c.adjust_positions(old, new)
    def adjust_forces(self, positions, forces):
        for _ in range(self.iterations):
            for c in self.constraints:
                c.adjust_forces(positions, forces)
    def copy(self):
        return FixBondLengths([c.indices for c in self.constraints])
class FixBondLength(FixConstraint):
    """Constraint object for fixing a bond length."""
    def __init__(self, a1, a2, atoms=None, mic=False):
        """Fix distance between atoms with indices a1 and a2. If mic is
        True, follows the minimum image convention to keep constant the
        shortest distance between a1 and a2 in any periodic direction.
        atoms only needs to be supplied if mic=True.
        """
        self.indices = [a1, a2]
        self.constraint_force = None
        self.mic = None
        if mic:
            if atoms is None:
                raise RuntimeError('Please provide an atoms '
                                   'object with mic=True.')
            # Note: self.mic stores the atoms object that
            # is required for the cell and pbc flags.
            self.mic = atoms
    def adjust_positions(self, old, new):
        # Bond vector and length in the old geometry ...
        p1, p2 = old[self.indices]
        d = p2 - p1
        if self.mic:
            # Wrap to the minimum image: work in scaled coordinates,
            # round off whole-cell offsets along periodic directions.
            Dr = np.linalg.solve(self.mic.get_cell().T, d)
            d = np.dot(Dr - np.round(Dr) * self.mic.get_pbc(),
                       self.mic.get_cell())
        p = sqrt(np.dot(d, d))
        # ... and in the proposed new geometry.
        q1, q2 = new[self.indices]
        d = q2 - q1
        if self.mic:
            Dr = np.linalg.solve(self.mic.get_cell().T, d)
            d = np.dot(Dr - np.round(Dr) * self.mic.get_pbc(),
                       self.mic.get_cell())
        q = sqrt(np.dot(d, d))
        # Move the two atoms symmetrically along the bond so the new
        # distance equals the old one.
        d *= 0.5 * (p - q) / q
        new[self.indices] = (q1 - d, q2 + d)
    def adjust_forces(self, positions, forces):
        # Project out the force component along the bond; keep the
        # removed component so get_constraint_force() can report it.
        d = np.subtract.reduce(positions[self.indices])
        if self.mic:
            Dr = np.linalg.solve(self.mic.get_cell().T, d)
            d = np.dot(Dr - np.round(Dr) * self.mic.get_pbc(),
                       self.mic.get_cell())
        d2 = np.dot(d, d)
        d *= 0.5 * np.dot(np.subtract.reduce(forces[self.indices]), d) / d2
        self.constraint_force = d
        forces[self.indices] += (-d, d)
    def index_shuffle(self, ind):
        """Shuffle the indices of the two atoms in this constraint"""
        newa = [-1, -1]  # Signal error: -1 means "not found in slice".
        for new, old in slice2enlist(ind):
            for i, a in enumerate(self.indices):
                if old == a:
                    newa[i] = new
        if newa[0] == -1 or newa[1] == -1:
            raise IndexError('Constraint not part of slice')
        self.indices = newa
    def copy(self):
        if self.mic:
            raise NotImplementedError('Not implemented for mic.')
        return FixBondLength(*self.indices)
    def get_constraint_force(self):
        """Return the force vector removed by the last adjust_forces()
        call (None before the first call)."""
        return self.constraint_force
    def __repr__(self):
        return 'FixBondLength(%d, %d)' % tuple(self.indices)
    def todict(self):
        if self.mic:
            raise NotImplementedError('Not implemented for mic.')
        return {'name': 'ase.constraints.FixBondLength',
                'kwargs': {'a1': self.indices[0], 'a2': self.indices[1]}}
class FixedMode(FixConstraint):
    """Constrain atoms to move along directions orthogonal to
    a given mode only."""
    def __init__(self, mode):
        # Store the mode as a flat, normalized vector of length 3N.
        self.mode = (np.asarray(mode) / np.sqrt((mode**2).sum())).reshape(-1)
    def adjust_positions(self, oldpositions, newpositions):
        # Remove the component of the step along the mode.
        # Fix: the original rebound `newpositions = newpositions.ravel()`
        # and relied on ravel() returning a writable view, silently doing
        # nothing for non-contiguous input.  Update the array in place.
        step = (newpositions - oldpositions).ravel()
        newpositions -= (self.mode * np.dot(step, self.mode)).reshape(
            newpositions.shape)
    def adjust_forces(self, positions, forces):
        # Remove the force component along the mode (same view-safety
        # fix as in adjust_positions).
        flat = forces.ravel()
        forces -= (self.mode * np.dot(flat, self.mode)).reshape(forces.shape)
    def index_shuffle(self, ind):
        # Slicing is only allowed when every atom with a nonzero mode
        # component survives the slice.
        eps = 1e-12
        mode = self.mode.reshape(-1, 3)
        excluded = np.ones(len(mode), dtype=bool)
        excluded[ind] = False
        if (abs(mode[excluded]) > eps).any():
            raise IndexError('All nonzero parts of mode not in slice')
        self.mode = mode[ind].ravel()
    def copy(self):
        return FixedMode(self.mode)
    def __repr__(self):
        return 'FixedMode(%s)' % self.mode.tolist()
class FixedPlane(FixConstraintSingle):
    """Constrain an atom *a* to move in a given plane only.
    The plane is defined by its normal: *direction*."""
    def __init__(self, a, direction):
        self.a = a
        norm = sqrt(np.dot(direction, direction))
        self.dir = np.asarray(direction) / norm
    def adjust_positions(self, oldpositions, newpositions):
        # Subtract the step component along the plane normal.
        delta = newpositions[self.a] - oldpositions[self.a]
        along_normal = np.dot(delta, self.dir)
        newpositions[self.a] -= self.dir * along_normal
    def adjust_forces(self, positions, forces):
        along_normal = np.dot(forces[self.a], self.dir)
        forces[self.a] -= self.dir * along_normal
    def copy(self):
        return FixedPlane(self.a, self.dir)
    def __repr__(self):
        return 'FixedPlane(%d, %s)' % (self.a, self.dir.tolist())
class FixedLine(FixConstraintSingle):
    """Constrain an atom *a* to move on a given line only.
    The line is defined by its *direction*."""
    def __init__(self, a, direction):
        self.a = a
        norm = sqrt(np.dot(direction, direction))
        self.dir = np.asarray(direction) / norm
    def adjust_positions(self, oldpositions, newpositions):
        # Keep only the step component along the line direction.
        delta = newpositions[self.a] - oldpositions[self.a]
        along = np.dot(delta, self.dir)
        newpositions[self.a] = oldpositions[self.a] + along * self.dir
    def adjust_forces(self, positions, forces):
        forces[self.a] = self.dir * np.dot(forces[self.a], self.dir)
    def copy(self):
        return FixedLine(self.a, self.dir)
    def __repr__(self):
        return 'FixedLine(%d, %s)' % (self.a, self.dir.tolist())
class FixCartesian(FixConstraintSingle):
    'Fix an atom in the directions of the cartesian coordinates.'
    def __init__(self, a, mask=(1, 1, 1)):
        self.a = a
        # Stored inverted: 1 marks a free direction, 0 a fixed one.
        self.mask = 1 - np.array(mask)
    def adjust_positions(self, old, new):
        delta = (new[self.a] - old[self.a]) * self.mask
        new[self.a] = old[self.a] + delta
    def adjust_forces(self, positions, forces):
        forces[self.a] *= self.mask
    def copy(self):
        # Re-invert the stored mask to recover the constructor argument.
        return FixCartesian(self.a, 1 - self.mask)
    def __repr__(self):
        return 'FixCartesian(indice=%s mask=%s)' % (self.a, self.mask)
class FixScaled(FixConstraintSingle):
    'Fix an atom in the directions of the unit vectors.'
    def __init__(self, cell, a, mask=(1, 1, 1)):
        self.cell = cell
        self.a = a
        self.mask = np.array(mask)
    def adjust_positions(self, old, new):
        # Work in fractional (scaled) coordinates of the cell and pin
        # the masked components to their old values.
        scaled_old = np.linalg.solve(self.cell.T, old.T).T
        scaled_new = np.linalg.solve(self.cell.T, new.T).T
        fixed = self.mask.astype(bool)
        scaled_new[self.a, fixed] = scaled_old[self.a, fixed]
        new[self.a] = np.dot(scaled_new, self.cell)[self.a]
    def adjust_forces(self, positions, forces):
        # Zero the scaled-force components along the fixed lattice
        # directions (1 - mask keeps only the free ones).
        scaled_forces = np.linalg.solve(self.cell.T, forces.T).T
        scaled_forces[self.a] *= (1 - self.mask)
        forces[self.a] = np.dot(scaled_forces, self.cell)[self.a]
    def copy(self):
        return FixScaled(self.cell, self.a, self.mask)
    def __repr__(self):
        return 'FixScaled(%s, %d, %s)' % (repr(self.cell),
                                          self.a,
                                          repr(self.mask))
# TODO: Better interface might be to use dictionaries in place of very
# nested lists/tuples
class FixInternals(FixConstraint):
    """Constraint object for fixing multiple internal coordinates.
    Allows fixing bonds, angles, and dihedrals.  Positions are corrected
    iteratively (SHAKE-style); forces are projected onto the subspace
    orthogonal to all constraints and to rigid translations/rotations."""
    def __init__(self, atoms=None, bonds=None, angles=None, dihedrals=None,
                 epsilon=1.e-7, _copy_init=None):
        # bonds/angles/dihedrals: sequences of (target_value, indices)
        # pairs.  Angle/dihedral targets are presumably in radians
        # (np.cos is applied in the subobjects) -- TODO confirm.
        if _copy_init is None:
            if atoms is None:
                raise ValueError('Atoms object has to be defined.')
            masses = atoms.get_masses()
            if bonds is None:
                bonds = []
            if angles is None:
                angles = []
            if dihedrals is None:
                dihedrals = []
            # Total number of internal-coordinate constraints.
            self.n = len(bonds) + len(angles) + len(dihedrals)
            self.constraints = []
            for bond in bonds:
                masses_bond = masses.take(bond[1])
                self.constraints.append(self.FixBondLengthAlt(bond[0], bond[1],
                                                              masses_bond))
            for angle in angles:
                masses_angle = masses.take(angle[1])
                self.constraints.append(self.FixAngle(angle[0], angle[1],
                                                      masses_angle))
            for dihedral in dihedrals:
                masses_dihedral = masses.take(dihedral[1])
                self.constraints.append(self.FixDihedral(dihedral[0],
                                                         dihedral[1],
                                                         masses_dihedral))
            self.epsilon = epsilon
        # Copy case for __init__ (used by copy()): reuse the
        # already-built constraint subobjects.
        else:
            self.constraints = _copy_init
            self.n = len(self.constraints)
            self.epsilon = epsilon
    def adjust_positions(self, old, new):
        # SHAKE-style sweeps: repeatedly correct each constraint until
        # every residual |sigma| drops below epsilon (max 50 sweeps).
        for constraint in self.constraints:
            constraint.set_h_vectors(old)
        for j in range(50):
            maxerr = 0.0
            for constraint in self.constraints:
                constraint.adjust_positions(old, new)
                maxerr = max(abs(constraint.sigma), maxerr)
            if maxerr < self.epsilon:
                return
        raise ValueError('Shake did not converge.')
    def adjust_forces(self, positions, forces):
        #Project out translations and rotations and all other constraints
        N = len(forces)
        list2_constraints = list(np.zeros((6, N, 3)))
        tx, ty, tz, rx, ry, rz = list2_constraints
        list_constraints = [r.ravel() for r in list2_constraints]
        # Basis vectors for the three rigid translations.
        tx[:, 0] = 1.0
        ty[:, 1] = 1.0
        tz[:, 2] = 1.0
        ff = forces.ravel()
        #Calculate the center of mass
        center = positions.sum(axis=0) / N
        # Basis vectors for the three rigid rotations about the center.
        rx[:, 1] = -(positions[:, 2] - center[2])
        rx[:, 2] = positions[:, 1] - center[1]
        ry[:, 0] = positions[:, 2] - center[2]
        ry[:, 2] = -(positions[:, 0] - center[0])
        rz[:, 0] = -(positions[:, 1] - center[1])
        rz[:, 1] = positions[:, 0] - center[0]
        #Normalizing transl., rotat. constraints
        for r in list2_constraints:
            r /= np.linalg.norm(r.ravel())
        #Add all angle, etc. constraint vectors
        for constraint in self.constraints:
            constraint.adjust_forces(positions, forces)
            list_constraints.insert(0, constraint.h)
        #QR DECOMPOSITION - GRAM SCHMIDT
        list_constraints = [r.ravel() for r in list_constraints]
        aa = np.column_stack(list_constraints)
        (aa, bb) = np.linalg.qr(aa)
        # Projection: build the projector T onto the span of all
        # constraint vectors as a sum of outer products of the
        # orthonormal columns of aa.
        hh = []
        for i, constraint in enumerate(self.constraints):
            hh.append(aa[:, i] * np.row_stack(aa[:, i]))
        txx = aa[:, self.n] * np.row_stack(aa[:, self.n])
        tyy = aa[:, self.n + 1] * np.row_stack(aa[:, self.n + 1])
        tzz = aa[:, self.n + 2] * np.row_stack(aa[:, self.n + 2])
        rxx = aa[:, self.n + 3] * np.row_stack(aa[:, self.n + 3])
        ryy = aa[:, self.n + 4] * np.row_stack(aa[:, self.n + 4])
        rzz = aa[:, self.n + 5] * np.row_stack(aa[:, self.n + 5])
        T = txx + tyy + tzz + rxx + ryy + rzz
        for vec in hh:
            T += vec
        # NOTE(review): T is applied twice below (ff is projected, then
        # the projected ff is projected again before subtraction).  T is
        # built from orthonormal vectors and should be idempotent, but
        # the double application looks accidental -- verify against
        # upstream ASE.
        ff = np.dot(T, np.row_stack(ff))
        forces[:, :] -= np.dot(T, np.row_stack(ff)).reshape(-1, 3)
    def copy(self):
        return FixInternals(epsilon=self.epsilon, _copy_init=self.constraints)
    def __repr__(self):
        constraints = repr(self.constraints)
        return 'FixInternals(_copy_init=%s, epsilon=%s)' % (constraints,
                                                            repr(self.epsilon))
    def __str__(self):
        return '\n'.join([repr(c) for c in self.constraints])
    #Classes for internal use in FixInternals
    class FixBondLengthAlt:
        """Constraint subobject for fixing bond length within FixInternals."""
        def __init__(self, bond, indices, masses, maxstep=0.01):
            """Fix distance between atoms with indices a1, a2."""
            self.indices = indices
            self.bond = bond  # target bond length
            self.h1 = None
            self.h2 = None
            self.masses = masses
            self.h = []  # flattened constraint gradient, set in adjust_forces
            self.sigma = 1.  # current constraint residual
        def set_h_vectors(self, pos):
            # Gradient of the squared-distance constraint w.r.t. the
            # two atom positions, evaluated at the reference geometry.
            dist1 = pos[self.indices[0]] - pos[self.indices[1]]
            self.h1 = 2 * dist1
            self.h2 = -self.h1
        def adjust_positions(self, old, new):
            # Mass-weighted correction along the reference gradient.
            h1 = self.h1 / self.masses[0]
            h2 = self.h2 / self.masses[1]
            dist1 = new[self.indices[0]] - new[self.indices[1]]
            dist = np.dot(dist1, dist1)
            # Residual: squared distance minus squared target length.
            self.sigma = dist - self.bond**2
            lamda = -self.sigma / (2 * np.dot(dist1, (h1 - h2)))
            new[self.indices[0]] += lamda * h1
            new[self.indices[1]] += lamda * h2
        def adjust_forces(self, positions, forces):
            # Store the normalized flattened gradient for the QR
            # projection in FixInternals.adjust_forces.
            self.h1 = 2 * (positions[self.indices[0]] -
                           positions[self.indices[1]])
            self.h2 = -self.h1
            self.h = np.zeros([len(forces) * 3])
            self.h[(self.indices[0]) * 3] = self.h1[0]
            self.h[(self.indices[0]) * 3 + 1] = self.h1[1]
            self.h[(self.indices[0]) * 3 + 2] = self.h1[2]
            self.h[(self.indices[1]) * 3] = self.h2[0]
            self.h[(self.indices[1]) * 3 + 1] = self.h2[1]
            self.h[(self.indices[1]) * 3 + 2] = self.h2[2]
            self.h /= np.linalg.norm(self.h)
        def __repr__(self):
            return 'FixBondLengthAlt(%s, %d, %d)' % \
                (repr(self.bond), self.indices[0], self.indices[1])
    class FixAngle:
        """Constraint object for fixing an angle within
        FixInternals."""
        def __init__(self, angle, indices, masses):
            """Fix atom movement to construct a constant angle."""
            self.indices = indices
            self.a1m, self.a2m, self.a3m = masses
            self.angle = np.cos(angle)  # target stored as cos(angle)
            self.h1 = self.h2 = self.h3 = None
            self.h = []  # flattened constraint gradient, set in adjust_forces
            self.sigma = 1.  # current constraint residual
        def set_h_vectors(self, pos):
            # Gradients of cos^2(angle) w.r.t. the three atom positions
            # at the reference geometry (atom 1 is the vertex).
            r21 = pos[self.indices[0]] - pos[self.indices[1]]
            r21_len = np.linalg.norm(r21)
            e21 = r21 / r21_len
            r23 = pos[self.indices[2]] - pos[self.indices[1]]
            r23_len = np.linalg.norm(r23)
            e23 = r23 / r23_len
            angle = np.dot(e21, e23)
            self.h1 = -2 * angle * ((angle * e21 - e23) / (r21_len))
            self.h3 = -2 * angle * ((angle * e23 - e21) / (r23_len))
            self.h2 = -(self.h1 + self.h3)
        def adjust_positions(self, oldpositions, newpositions):
            r21 = newpositions[self.indices[0]] - newpositions[self.indices[1]]
            r21_len = np.linalg.norm(r21)
            e21 = r21 / r21_len
            r23 = newpositions[self.indices[2]] - newpositions[self.indices[1]]
            r23_len = np.linalg.norm(r23)
            e23 = r23 / r23_len
            angle = np.dot(e21, e23)
            # Residual expressed via cos^2 difference:
            # (cos - cos0)(cos + cos0) = cos^2 - cos0^2.
            self.sigma = (angle - self.angle) * (angle + self.angle)
            h1 = self.h1 / self.a1m
            h3 = self.h3 / self.a3m
            h2 = self.h2 / self.a2m
            h21 = h1 - h2
            h23 = h3 - h2
            # Calculating new positions
            deriv = (((np.dot(r21, h23) + np.dot(r23, h21))
                      / (r21_len * r23_len))
                     - (np.dot(r21, h21) / (r21_len * r21_len)
                        + np.dot(r23, h23) / (r23_len * r23_len)) * angle)
            deriv *= 2 * angle
            lamda = -self.sigma / deriv
            newpositions[self.indices[0]] += lamda * h1
            newpositions[self.indices[1]] += lamda * h2
            newpositions[self.indices[2]] += lamda * h3
        def adjust_forces(self, positions, forces):
            # Recompute the gradient at the current geometry and store
            # it flattened/normalized for the QR projection.
            r21 = positions[self.indices[0]] - positions[self.indices[1]]
            r21_len = np.linalg.norm(r21)
            e21 = r21 / r21_len
            r23 = positions[self.indices[2]] - positions[self.indices[1]]
            r23_len = np.linalg.norm(r23)
            e23 = r23 / r23_len
            angle = np.dot(e21, e23)
            self.h1 = -2 * angle * (angle * e21 - e23) / r21_len
            self.h3 = -2 * angle * (angle * e23 - e21) / r23_len
            self.h2 = -(self.h1 + self.h3)
            self.h = np.zeros([len(positions) * 3])
            self.h[(self.indices[0]) * 3] = self.h1[0]
            self.h[(self.indices[0]) * 3 + 1] = self.h1[1]
            self.h[(self.indices[0]) * 3 + 2] = self.h1[2]
            self.h[(self.indices[1]) * 3] = self.h2[0]
            self.h[(self.indices[1]) * 3 + 1] = self.h2[1]
            self.h[(self.indices[1]) * 3 + 2] = self.h2[2]
            self.h[(self.indices[2]) * 3] = self.h3[0]
            self.h[(self.indices[2]) * 3 + 1] = self.h3[1]
            self.h[(self.indices[2]) * 3 + 2] = self.h3[2]
            self.h /= np.linalg.norm(self.h)
        def __repr__(self):
            return 'FixAngle(%s, %f)' % (tuple(self.indices),
                                         np.arccos(self.angle))
    class FixDihedral:
        """Constraint object for fixing an dihedral using
        the shake algorithm. This one allows also other constraints."""
        def __init__(self, angle, indices, masses):
            """Fix atom movement to construct a constant dihedral angle."""
            self.indices = indices
            self.a1m, self.a2m, self.a3m, self.a4m = masses
            self.angle = np.cos(angle)  # target stored as cos(angle)
            self.h1 = self.h2 = self.h3 = self.h4 = None
            self.h = []  # flattened constraint gradient, set in adjust_forces
            self.sigma = 1.  # current constraint residual
        def set_h_vectors(self, pos):
            # Gradients of the dihedral cosine w.r.t. the four atoms at
            # the reference geometry; a and b are the bond components
            # perpendicular to the central axis e23.
            r12 = pos[self.indices[1]] - pos[self.indices[0]]
            r12_len = np.linalg.norm(r12)
            e12 = r12 / r12_len
            r23 = pos[self.indices[2]] - pos[self.indices[1]]
            r23_len = np.linalg.norm(r23)
            e23 = r23 / r23_len
            r34 = pos[self.indices[3]] - pos[self.indices[2]]
            r34_len = np.linalg.norm(r34)
            e34 = r34 / r34_len
            a = -r12 - np.dot(-r12, e23) * e23
            a_len = np.linalg.norm(a)
            ea = a / a_len
            b = r34 - np.dot(r34, e23) * e23
            b_len = np.linalg.norm(b)
            eb = b / b_len
            angle = np.dot(ea, eb).clip(-1.0, 1.0)
            self.h1 = (eb - angle * ea) / a_len
            self.h4 = (ea - angle * eb) / b_len
            self.h2 = self.h1 * (np.dot(-r12, e23) / r23_len - 1)
            self.h2 += np.dot(r34, e23) / r23_len * self.h4
            self.h3 = -self.h4 * (np.dot(r34, e23) / r23_len + 1)
            self.h3 += np.dot(r12, e23) / r23_len * self.h1
        def adjust_positions(self, oldpositions, newpositions):
            r12 = newpositions[self.indices[1]] - newpositions[self.indices[0]]
            r12_len = np.linalg.norm(r12)
            e12 = r12 / r12_len
            r23 = newpositions[self.indices[2]] - newpositions[self.indices[1]]
            r23_len = np.linalg.norm(r23)
            e23 = r23 / r23_len
            r34 = newpositions[self.indices[3]] - newpositions[self.indices[2]]
            r34_len = np.linalg.norm(r34)
            e34 = r34 / r34_len
            # Dihedral cosine from the two plane normals n1, n2.
            n1 = np.cross(r12, r23)
            n1_len = np.linalg.norm(n1)
            n1e = n1 / n1_len
            n2 = np.cross(r23, r34)
            n2_len = np.linalg.norm(n2)
            n2e = n2 / n2_len
            angle = np.dot(n1e, n2e).clip(-1.0, 1.0)
            # Residual: cos^2 - cos0^2 (same convention as FixAngle).
            self.sigma = (angle - self.angle) * (angle + self.angle)
            h1 = self.h1 / self.a1m
            h2 = self.h2 / self.a2m
            h3 = self.h3 / self.a3m
            h4 = self.h4 / self.a4m
            h12 = h2 - h1
            h23 = h3 - h2
            h34 = h4 - h3
            deriv = ((np.dot(n1, np.cross(r34, h23) + np.cross(h34, r23))
                      + np.dot(n2, np.cross(r23, h12) + np.cross(h23, r12)))
                     / (n1_len * n2_len))
            deriv -= (((np.dot(n1, np.cross(r23, h12) + np.cross(h23, r12))
                        / n1_len**2)
                       + (np.dot(n2, np.cross(r34, h23) + np.cross(h34, r23))
                          / n2_len**2)) * angle)
            deriv *= -2 * angle
            lamda = -self.sigma / deriv
            newpositions[self.indices[0]] += lamda * h1
            newpositions[self.indices[1]] += lamda * h2
            newpositions[self.indices[2]] += lamda * h3
            newpositions[self.indices[3]] += lamda * h4
        def adjust_forces(self, positions, forces):
            # Recompute gradient at the current geometry; the h3 update
            # below uses -= with -r12, which equals the += with r12 form
            # used in set_h_vectors.
            r12 = positions[self.indices[1]] - positions[self.indices[0]]
            r12_len = np.linalg.norm(r12)
            e12 = r12 / r12_len
            r23 = positions[self.indices[2]] - positions[self.indices[1]]
            r23_len = np.linalg.norm(r23)
            e23 = r23 / r23_len
            r34 = positions[self.indices[3]] - positions[self.indices[2]]
            r34_len = np.linalg.norm(r34)
            e34 = r34 / r34_len
            a = -r12 - np.dot(-r12, e23) * e23
            a_len = np.linalg.norm(a)
            ea = a / a_len
            b = r34 - np.dot(r34, e23) * e23
            b_len = np.linalg.norm(b)
            eb = b / b_len
            angle = np.dot(ea, eb).clip(-1.0, 1.0)
            self.h1 = (eb - angle * ea) / a_len
            self.h4 = (ea - angle * eb) / b_len
            self.h2 = self.h1 * (np.dot(-r12, e23) / r23_len - 1)
            self.h2 += np.dot(r34, e23) / r23_len * self.h4
            self.h3 = -self.h4 * (np.dot(r34, e23) / r23_len + 1)
            self.h3 -= np.dot(-r12, e23) / r23_len * self.h1
            self.h = np.zeros([len(positions) * 3])
            self.h[(self.indices[0]) * 3] = self.h1[0]
            self.h[(self.indices[0]) * 3 + 1] = self.h1[1]
            self.h[(self.indices[0]) * 3 + 2] = self.h1[2]
            self.h[(self.indices[1]) * 3] = self.h2[0]
            self.h[(self.indices[1]) * 3 + 1] = self.h2[1]
            self.h[(self.indices[1]) * 3 + 2] = self.h2[2]
            self.h[(self.indices[2]) * 3] = self.h3[0]
            self.h[(self.indices[2]) * 3 + 1] = self.h3[1]
            self.h[(self.indices[2]) * 3 + 2] = self.h3[2]
            self.h[(self.indices[3]) * 3] = self.h4[0]
            self.h[(self.indices[3]) * 3 + 1] = self.h4[1]
            self.h[(self.indices[3]) * 3 + 2] = self.h4[2]
            self.h /= np.linalg.norm(self.h)
        def __repr__(self):
            return 'FixDihedral(%s, %f)' % (tuple(self.indices), self.angle)
class Hookean(FixConstraint):
    """Applies a Hookean restorative force between a pair of atoms, an atom
    and a point, or an atom and a plane."""

    def __init__(self, a1, a2, k, rt=None):
        """Forces two atoms to stay close together by applying no force if
        they are below a threshold length, rt, and applying a Hookean
        restorative force when the distance between them exceeds rt. Can
        also be used to tether an atom to a fixed point in space or to a
        distance above a plane.

        Parameters
        ----------
        a1 : int
           Index of atom 1
        a2 : one of three options
           1) index of atom 2
           2) a fixed point in cartesian space to which to tether a1
           3) a plane given as (A, B, C, D) in A x + B y + C z + D = 0.
        k : float
           Hooke's law (spring) constant to apply when distance
           exceeds threshold_length. Units of eV A^-2.
        rt : float
           The threshold length below which there is no force. The
           length is 1) between two atoms, 2) between atom and point.
           This argument is not supplied in case 3. Units of A.

        If a plane is specified, the Hooke's law force is applied if the atom
        is on the normal side of the plane. For instance, the plane with
        (A, B, C, D) = (0, 0, 1, -7) defines a plane in the xy plane with a z
        intercept of +7 and a normal vector pointing in the +z direction.
        If the atom has z > 7, then a downward force would be applied of
        k * (atom.z - 7). The same plane with the normal vector pointing in
        the -z direction would be given by (A, B, C, D) = (0, 0, -1, 7).
        """
        # BUGFIX: was ``type(a2) == int``, which silently rejected numpy
        # integer indices (e.g. the result of np.argmin) and fell through
        # to the len() checks.  isinstance also accepts int subclasses.
        if isinstance(a2, (int, np.integer)):
            self._type = 'two atoms'
            self.indices = [a1, a2]
        elif len(a2) == 3:
            self._type = 'point'
            self.index = a1
            self.origin = np.array(a2)
        elif len(a2) == 4:
            self._type = 'plane'
            self.index = a1
            self.plane = a2
        else:
            raise RuntimeError('Unknown type for a2')
        self.threshold = rt
        self.spring = k

    def todict(self):
        """Serialize the constraint to a dict for round-tripping."""
        dct = {'name': 'ase.constraints.Hookean'}
        dct['kwargs'] = {'rt': self.threshold,
                         'k': self.spring}
        if self._type == 'two atoms':
            dct['kwargs']['a1'] = self.indices[0]
            dct['kwargs']['a2'] = self.indices[1]
        elif self._type == 'point':
            dct['kwargs']['a1'] = self.index
            dct['kwargs']['a2'] = self.origin
        elif self._type == 'plane':
            dct['kwargs']['a1'] = self.index
            dct['kwargs']['a2'] = self.plane
        else:
            raise NotImplementedError('Bad type: %s' % self._type)
        return dct

    def adjust_positions(self, oldpositions, newpositions):
        """No position adjustment: this is a restoring force, not a rigid
        constraint."""
        pass

    def adjust_forces(self, positions, forces):
        """Add the Hookean restoring force (in place) to ``forces``.

        Plane case: pushes the atom back along -normal when it is on the
        normal side of the plane.  Two-atom / point case: pulls the pair
        together when their separation exceeds the threshold.
        """
        if self._type == 'plane':
            A, B, C, D = self.plane
            x, y, z = positions[self.index]
            # Signed distance of the atom from the plane.
            d = ((A * x + B * y + C * z + D) /
                 np.sqrt(A**2 + B**2 + C**2))
            if d < 0:
                # Atom is on the allowed side; no force.
                return
            magnitude = self.spring * d
            direction = - np.array((A, B, C)) / np.linalg.norm((A, B, C))
            forces[self.index] += direction * magnitude
            return
        if self._type == 'two atoms':
            p1, p2 = positions[self.indices]
        elif self._type == 'point':
            p1 = positions[self.index]
            p2 = self.origin
        displace = p2 - p1
        bondlength = np.linalg.norm(displace)
        if bondlength > self.threshold:
            magnitude = self.spring * (bondlength - self.threshold)
            direction = displace / np.linalg.norm(displace)
            if self._type == 'two atoms':
                forces[self.indices[0]] += direction * magnitude
                forces[self.indices[1]] -= direction * magnitude
            else:
                forces[self.index] += direction * magnitude

    def adjust_momenta(self, positions, momenta):
        """Momenta are unaffected by a restoring force."""
        pass

    def adjust_potential_energy(self, positions, energy):
        """Returns the difference to the potential energy due to an active
        constraint. (That is, the quantity returned is to be added to the
        potential energy.)"""
        if self._type == 'plane':
            A, B, C, D = self.plane
            x, y, z = positions[self.index]
            d = ((A * x + B * y + C * z + D) /
                 np.sqrt(A**2 + B**2 + C**2))
            if d > 0:
                return 0.5 * self.spring * d**2
            else:
                return 0.
        if self._type == 'two atoms':
            p1, p2 = positions[self.indices]
        elif self._type == 'point':
            p1 = positions[self.index]
            p2 = self.origin
        displace = p2 - p1
        bondlength = np.linalg.norm(displace)
        if bondlength > self.threshold:
            return 0.5 * self.spring * (bondlength - self.threshold)**2
        else:
            return 0.

    def __repr__(self):
        if self._type == 'two atoms':
            return 'Hookean(%d, %d)' % tuple(self.indices)
        elif self._type == 'point':
            return 'Hookean(%d) to cartesian' % self.index
        else:
            return 'Hookean(%d) to plane' % self.index

    def copy(self):
        """Return a fresh Hookean with the same parameters.

        The plane variant carries no threshold, so rt is omitted there.
        """
        if self._type == 'two atoms':
            return Hookean(a1=self.indices[0], a2=self.indices[1],
                           rt=self.threshold, k=self.spring)
        elif self._type == 'point':
            return Hookean(a1=self.index, a2=self.origin,
                           rt=self.threshold, k=self.spring)
        else:
            return Hookean(a1=self.index, a2=self.plane,
                           k=self.spring)
class Filter:
    """Expose only a subset of an Atoms object's atoms.

    Getters and setters operate on the visible subset; cell-related and
    whole-system queries are delegated unchanged to the wrapped object.
    """
    def __init__(self, atoms, indices=None, mask=None):
        """Wrap *atoms*, hiding some degrees of freedom.

        Parameters
        ----------
        indices : list of int
            Indices for those atoms that should remain visible.
        mask : list of bool
            One boolean per atom indicating if the atom should remain
            visible or not.
        """
        self.atoms = atoms
        self.constraints = []
        # Exactly one of the two selection mechanisms must be supplied.
        if indices is None and mask is None:
            raise ValueError('Use "indices" or "mask".')
        if indices is not None and mask is not None:
            raise ValueError('Use only one of "indices" and "mask".')
        if mask is None:
            self.index = np.asarray(indices, int)
            self.n = len(self.index)
        else:
            self.index = np.asarray(mask, bool)
            self.n = self.index.sum()

    def get_cell(self):
        """Return the computational cell (same as the full system)."""
        return self.atoms.get_cell()

    def get_pbc(self):
        """Return the periodic boundary conditions of the full system."""
        return self.atoms.get_pbc()

    def get_positions(self):
        """Return the positions of the visible atoms only."""
        return self.atoms.get_positions()[self.index]

    def set_positions(self, positions):
        """Write *positions* into the visible atoms, keeping the rest."""
        full = self.atoms.get_positions()
        full[self.index] = positions
        self.atoms.set_positions(full)

    positions = property(get_positions, set_positions,
                         doc='Positions of the atoms')

    def get_momenta(self):
        """Return the momenta of the visible atoms only."""
        return self.atoms.get_momenta()[self.index]

    def set_momenta(self, momenta):
        """Write *momenta* into the visible atoms, keeping the rest."""
        full = self.atoms.get_momenta()
        full[self.index] = momenta
        self.atoms.set_momenta(full)

    def get_atomic_numbers(self):
        """Return the atomic numbers of the visible atoms only."""
        return self.atoms.get_atomic_numbers()[self.index]

    def set_atomic_numbers(self, atomic_numbers):
        """Write *atomic_numbers* into the visible atoms."""
        full = self.atoms.get_atomic_numbers()
        full[self.index] = atomic_numbers
        self.atoms.set_atomic_numbers(full)

    def get_tags(self):
        """Return the tags of the visible atoms only."""
        return self.atoms.get_tags()[self.index]

    def set_tags(self, tags):
        """Write *tags* into the visible atoms."""
        full = self.atoms.get_tags()
        full[self.index] = tags
        self.atoms.set_tags(full)

    def get_forces(self, *args, **kwargs):
        """Return the forces on the visible atoms only."""
        return self.atoms.get_forces(*args, **kwargs)[self.index]

    def get_stress(self):
        """Return the stress of the full system."""
        return self.atoms.get_stress()

    def get_stresses(self):
        """Return the per-atom stresses of the visible atoms only."""
        return self.atoms.get_stresses()[self.index]

    def get_masses(self):
        """Return the masses of the visible atoms only."""
        return self.atoms.get_masses()[self.index]

    def get_potential_energy(self):
        """Return the potential energy of the *full* system."""
        return self.atoms.get_potential_energy()

    def get_chemical_symbols(self):
        """Return the chemical symbols of the full system."""
        return self.atoms.get_chemical_symbols()

    def get_initial_magnetic_moments(self):
        """Return the initial magnetic moments of the full system."""
        return self.atoms.get_initial_magnetic_moments()

    def get_calculator(self):
        """Return the wrapped Atoms' calculator.

        WARNING: The calculator is unaware of this filter, and sees a
        different number of atoms.
        """
        return self.atoms.get_calculator()

    def get_celldisp(self):
        """Return the cell displacement of the full system."""
        return self.atoms.get_celldisp()

    def has(self, name):
        """Check whether the wrapped Atoms object has array *name*."""
        return self.atoms.has(name)

    def __len__(self):
        """Number of visible (movable) atoms."""
        return self.n

    def __getitem__(self, i):
        """Return the i-th visible atom from the wrapped Atoms."""
        return self.atoms[self.index[i]]
class StrainFilter(Filter):
    """Modify the supercell while keeping the scaled positions fixed.

    Presents the strain of the supercell as the generalized positions,
    and the global stress tensor (times the volume) as the generalized
    force.

    This filter can be used to relax the unit cell until the stress is
    zero. If MDMin is used for this, the timestep (dt) to be used
    depends on the system size. 0.01/x where x is a typical dimension
    seems like a good choice.

    The stress and strain are presented as 6-vectors, the order of the
    components follow the standard engineering practice: xx, yy, zz,
    yz, xz, xy.
    """
    def __init__(self, atoms, mask=None):
        """Create a filter applying a homogeneous strain to a list of atoms.

        The first argument, atoms, is the atoms object.

        The optional second argument, mask, is a list of six booleans,
        indicating which of the six independent components of the
        strain that are allowed to become non-zero.  It defaults to
        [1, 1, 1, 1, 1, 1].
        """
        self.atoms = atoms
        self.strain = np.zeros(6)

        if mask is None:
            self.mask = np.ones(6)
        else:
            self.mask = np.array(mask)

        self.index = np.arange(len(atoms))
        # BUGFIX: this used to be ``self.index.sum()``, i.e. the sum of
        # 0..natoms-1 — not the count of visible atoms that the Filter
        # API defines ``n`` to be (len(indices) for index-based filters).
        self.n = len(self.index)
        self.origcell = atoms.get_cell()

    def get_positions(self):
        """Return the current strain as a (2, 3) array (copy)."""
        return self.strain.reshape((2, 3)).copy()

    def set_positions(self, new):
        """Apply the masked strain *new* (shape (2, 3)) to the cell.

        The atoms are scaled with the cell (scale_atoms=True), so the
        scaled positions stay fixed.
        """
        new = new.ravel() * self.mask
        # Symmetric strain tensor in engineering (Voigt) ordering:
        # xx, yy, zz on the diagonal; yz, xz, xy halved off-diagonal.
        eps = np.array([[1.0 + new[0], 0.5 * new[5], 0.5 * new[4]],
                        [0.5 * new[5], 1.0 + new[1], 0.5 * new[3]],
                        [0.5 * new[4], 0.5 * new[3], 1.0 + new[2]]])
        self.atoms.set_cell(np.dot(self.origcell, eps), scale_atoms=True)
        self.strain[:] = new

    def get_forces(self):
        """Return -volume * masked stress, reshaped to (2, 3)."""
        stress = self.atoms.get_stress()
        return -self.atoms.get_volume() * (stress * self.mask).reshape((2, 3))

    def get_potential_energy(self):
        """Return the potential energy of the wrapped atoms."""
        return self.atoms.get_potential_energy()

    def has(self, x):
        """Delegate array existence check to the wrapped atoms."""
        return self.atoms.has(x)

    def __len__(self):
        """Two generalized 'atoms' carry the six strain components."""
        return 2
class UnitCellFilter(Filter):
    """Modify the supercell and the atom positions."""
    def __init__(self, atoms, mask=None):
        """Create a filter that returns the atomic forces and unit cell
        stresses together, so they can simultaneously be minimized.

        The first argument, atoms, is the atoms object. The optional second
        argument, mask, is a list of booleans, indicating which of the six
        independent components of the strain are relaxed.

        - True = relax to zero
        - False = fixed, ignore this component

        You can still use constraints on the atoms, e.g. FixAtoms, to
        control the relaxation of the atoms.

        >>> # this should be equivalent to the StrainFilter
        >>> atoms = Atoms(...)
        >>> atoms.set_constraint(FixAtoms(mask=[True for atom in atoms]))
        >>> ucf = UnitCellFilter(atoms)

        You should not attach this UnitCellFilter object to a
        trajectory. Instead, create a trajectory for the atoms, and
        attach it to an optimizer like this:

        >>> atoms = Atoms(...)
        >>> ucf = UnitCellFilter(atoms)
        >>> qn = QuasiNewton(ucf)
        >>> traj = PickleTrajectory('TiO2.traj', 'w', atoms)
        >>> qn.attach(traj)
        >>> qn.run(fmax=0.05)

        Helpful conversion table:

        - 0.05 eV/A^3   = 8 GPa
        - 0.003 eV/A^3  = 0.48 GPa
        - 0.0006 eV/A^3 = 0.096 GPa
        - 0.0003 eV/A^3 = 0.048 GPa
        - 0.0001 eV/A^3 = 0.02 GPa
        """
        Filter.__init__(self, atoms, indices=range(len(atoms)))
        self.atoms = atoms
        self.strain = np.zeros(6)

        if mask is None:
            self.mask = np.ones(6)
        else:
            self.mask = np.array(mask)

        self.origcell = atoms.get_cell()
        # Delegate copy() and expose the raw arrays of the wrapped atoms.
        self.copy = self.atoms.copy
        self.arrays = self.atoms.arrays

    def get_positions(self):
        """Return an array with shape (natoms + 2, 3).

        The first natoms rows are the positions of the atoms; the last
        two rows are the strains associated with the unit cell.
        """
        atom_positions = self.atoms.get_positions()
        strains = self.strain.reshape((2, 3))

        natoms = len(self.atoms)
        # BUGFIX: ``np.float`` was removed in NumPy 1.24; it was only
        # ever an alias for the builtin ``float``.
        all_pos = np.zeros((natoms + 2, 3), float)
        all_pos[0:natoms, :] = atom_positions
        all_pos[natoms:, :] = strains
        return all_pos

    def set_positions(self, new):
        """Set positions and cell from an array of shape (natoms + 2, 3).

        The first natoms rows are the positions of the atoms; the last
        two rows are the strains used to change the cell shape.  The
        atom positions are set first, then the unit cell is changed
        keeping the atoms in their scaled positions.
        """
        natoms = len(self.atoms)
        atom_positions = new[0:natoms, :]
        self.atoms.set_positions(atom_positions)
        new = new[natoms:, :]  # this is only the strains
        new = new.ravel() * self.mask
        eps = np.array([[1.0 + new[0], 0.5 * new[5], 0.5 * new[4]],
                        [0.5 * new[5], 1.0 + new[1], 0.5 * new[3]],
                        [0.5 * new[4], 0.5 * new[3], 1.0 + new[2]]])
        self.atoms.set_cell(np.dot(self.origcell, eps), scale_atoms=True)
        self.strain[:] = new

    def get_forces(self, apply_constraint=False):
        """Return an array with shape (natoms + 2, 3) of the atomic
        forces and unit cell stresses.

        The first natoms rows are the forces on the atoms; the last two
        rows are the stresses on the unit cell, reshaped to look like
        "atomic forces", i.e.:

        - f[-2] = -vol * [sxx, syy, szz] * mask[0:3]
        - f[-1] = -vol * [syz, sxz, sxy] * mask[3:]

        apply_constraint is an argument expected by ase.
        """
        stress = self.atoms.get_stress()
        atom_forces = self.atoms.get_forces()

        natoms = len(self.atoms)
        # BUGFIX: np.float -> float (alias removed in NumPy 1.24).
        all_forces = np.zeros((natoms + 2, 3), float)
        all_forces[0:natoms, :] = atom_forces

        vol = self.atoms.get_volume()
        stress_forces = -vol * (stress * self.mask).reshape((2, 3))
        all_forces[natoms:, :] = stress_forces
        return all_forces

    def get_potential_energy(self):
        """Return the potential energy of the wrapped atoms."""
        return self.atoms.get_potential_energy()

    def has(self, x):
        """Delegate array existence check to the wrapped atoms."""
        return self.atoms.has(x)

    def __len__(self):
        """Atoms plus the two strain rows."""
        return (2 + len(self.atoms))
| askhl/ase | ase/constraints.py | Python | gpl-2.0 | 46,818 | [
"ASE"
] | c9f378a07b1e7c30931fa7943dfef2f9e63b6b89233c0d97813ebc093dec4a35 |
__RCSID__ = "$Id$"
from DIRAC import gLogger, S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
#from DIRAC.StorageManagementSystem.Client.StorageManagerClient import StorageManagerClient
from DIRAC.Core.Utilities.List import sortList
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.StorageManagementSystem.DB.StorageManagementDB import THROTTLING_STEPS, THROTTLING_TIME
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
import re
AGENT_NAME = 'StorageManagement/StageRequestAgent'
class StageRequestAgent( AgentModule ):
def initialize( self ):
  """ Set up the agent: stager client, pin lifetime and shifter proxy. """
  # BUGFIX: the module-level import of StorageManagerClient is commented
  # out at the top of this file, so referencing it here raised NameError.
  # Import it locally instead.
  from DIRAC.StorageManagementSystem.Client.StorageManagerClient import StorageManagerClient
  self.stagerClient = StorageManagerClient()
  # pin lifetime = 1 day
  self.pinLifetime = self.am_getOption( 'PinLifetime', THROTTLING_TIME )
  # Resources helper
  self.resources = Resources()
  # This sets the Default Proxy to used as that defined under
  # /Operations/Shifter/DataManager
  # the shifterProxy option in the Configuration can be used to change this default.
  self.am_setOption( 'shifterProxy', 'DataManager' )
  return S_OK()
def execute( self ):
  """ Agent cycle: refresh the per-SE cache usage snapshot, then submit
      any new stage requests.
  """
  usage = self.getStorageUsage()
  if usage['OK']:
    return self.submitStageRequests()
  return usage
def getStorageUsage( self ):
  """ Fill the current Status of the SE Caches from the DB.

      Side effects: resets self.storageElementCache and stores the
      submitted stage/pin usage in self.storageElementUsage, with each
      SE's 'TotalSize' converted in place from bytes to GB.
  """
  self.storageElementCache = {}
  res = self.stagerClient.getSubmittedStagePins()
  if not res['OK']:
    gLogger.fatal( "StageRequest.getStorageUsage: Failed to obtain submitted requests from StorageManagementDB.", res['Message'] )
    return res
  self.storageElementUsage = res['Value']
  if not self.storageElementUsage:
    gLogger.info( "StageRequest.getStorageUsage: No active stage/pin requests found." )
    return S_OK()
  gLogger.info( "StageRequest.getStorageUsage: Active stage/pin requests found at the following sites:" )
  for storageElement in sortList( self.storageElementUsage.keys() ):
    seDict = self.storageElementUsage[storageElement]
    # Convert to GB for printout
    seDict['TotalSize'] = seDict['TotalSize'] / ( 1000 * 1000 * 1000.0 )
    gLogger.info( "StageRequest.getStorageUsage: %s: %s replicas with a size of %.3f GB." %
                  ( storageElement.ljust( 15 ), str( seDict['Replicas'] ).rjust( 6 ), seDict['TotalSize'] ) )
  return S_OK()
def submitStageRequests( self ):
  """ This manages the following transitions of the Replicas
      * Waiting -> Offline (if the file is not found Cached)
      * Waiting -> StageSubmitted (if the file is found Cached)
      * Offline -> StageSubmitted (if there are not more Waiting replicas)
  """
  # Retry Replicas that have not been Staged in a previous attempt
  res = self._getMissingReplicas()
  if not res['OK']:
    gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
    return res
  seReplicas = res['Value']['SEReplicas']
  allReplicaInfo = res['Value']['AllReplicaInfo']
  if seReplicas:
    gLogger.info( "StageRequest.submitStageRequests: Completing partially Staged Tasks" )
    # Submit immediately for the left-behind replicas, per SE.
    for storageElement, seReplicaIDs in seReplicas.items():
      gLogger.debug( 'Staging at %s:' % storageElement, seReplicaIDs )
      self._issuePrestageRequests( storageElement, seReplicaIDs, allReplicaInfo )
  # Check Waiting Replicas and select those found Online and all other Replicas from the same Tasks
  res = self._getOnlineReplicas()
  if not res['OK']:
    gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
    return res
  seReplicas = res['Value']['SEReplicas']
  allReplicaInfo = res['Value']['AllReplicaInfo']
  # Check Offline Replicas that fit in the Cache and all other Replicas from the same Tasks
  res = self._getOfflineReplicas()
  if not res['OK']:
    gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
    return res
  # Merge info from both results: add SEs missing in the Online result,
  # and deduplicate replica IDs for SEs present in both.
  for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
    if storageElement not in seReplicas:
      seReplicas[storageElement] = seReplicaIDs
    else:
      for replicaID in seReplicaIDs:
        if replicaID not in seReplicas[storageElement]:
          seReplicas[storageElement].append( replicaID )
  allReplicaInfo.update( res['Value']['AllReplicaInfo'] )
  gLogger.info( "StageRequest.submitStageRequests: Obtained %s replicas for staging." % len( allReplicaInfo ) )
  # Issue one prestage batch per SE for the merged selection.
  for storageElement, seReplicaIDs in seReplicas.items():
    gLogger.debug( 'Staging at %s:' % storageElement, seReplicaIDs )
    self._issuePrestageRequests( storageElement, seReplicaIDs, allReplicaInfo )
  return S_OK()
def _getMissingReplicas( self ):
  """ This recovers Replicas that were not Staged on a previous attempt
      (the stage request failed or timed out), while other Replicas of the
      same task are already Staged.  If left behind they can produce a
      deadlock.  All SEs are considered, even if their Cache is full.
  """
  # Get Replicas that are in Staged/StageSubmitted
  gLogger.info( 'StageRequest._getMissingReplicas: Checking Staged Replicas' )
  res = self.__getStagedReplicas()
  if not res['OK']:
    gLogger.fatal( "StageRequest._getMissingReplicas: Failed to get replicas from StorageManagementDB.", res['Message'] )
    return res
  seReplicas = {}
  allReplicaInfo = res['Value']['AllReplicaInfo']
  # Collect every staged replica ID regardless of the SE cache state.
  replicasToStage = []
  for seReplicaIDs in res['Value']['SEReplicas'].values():
    replicasToStage.extend( seReplicaIDs )
  # Pull in the remaining Replicas belonging to the same Tasks.
  res = self.__addAssociatedReplicas( replicasToStage, seReplicas, allReplicaInfo )
  if not res['OK']:
    gLogger.fatal( "StageRequest._getMissingReplicas: Failed to get associated Replicas.", res['Message'] )
  return res
def _getOnlineReplicas( self ):
  """ This manages the transition
      * Waiting -> Offline (if the file is not found Cached)
      and returns the list of Cached Replicas for which the pin time has to be extended
      SEs for which the cache is currently full are not considered
  """
  # Get all Replicas in Waiting Status associated to Staging Tasks
  gLogger.verbose( 'StageRequest._getOnlineReplicas: Checking Online Replicas to be handled' )
  res = self.__getWaitingReplicas()
  if not res['OK']:
    gLogger.fatal( "StageRequest._getOnlineReplicas: Failed to get replicas from StorageManagementDB.", res['Message'] )
    return res
  seReplicas = {}
  allReplicaInfo = res['Value']['AllReplicaInfo']
  if not len( allReplicaInfo ):
    gLogger.info( "StageRequest._getOnlineReplicas: There were no Waiting replicas found" )
    # Nothing to do: pass the (empty) result straight back.
    return res
  gLogger.info( "StageRequest._getOnlineReplicas: Obtained %s replicas Waiting for staging." % len( allReplicaInfo ) )
  replicasToStage = []
  for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
    if not self.__usage( storageElement ) < self.__cache( storageElement ):
      gLogger.info( 'StageRequest._getOnlineReplicas: Skipping %s, current usage above limit ( %s GB )' % ( storageElement, self.__cache( storageElement ) ) )
      # Do not consider those SE that have the Cache full
      continue
    # Check if the Replica Metadata is OK and find out if they are Online or Offline
    # NOTE: ``res`` is reused here; after the loop it no longer holds
    # the __getWaitingReplicas result.
    res = self.__checkIntegrity( storageElement, seReplicaIDs, allReplicaInfo )
    if not res['OK']:
      gLogger.error( 'StageRequest._getOnlineReplicas: Failed to check Replica Metadata', '(%s): %s' % ( storageElement, res['Message'] ) )
    else:
      # keep only Online Replicas
      seReplicas[storageElement] = res['Value']['Online']
      replicasToStage.extend( res['Value']['Online'] )
  # Get Replicas from the same Tasks as those selected
  res = self.__addAssociatedReplicas( replicasToStage, seReplicas, allReplicaInfo )
  if not res['OK']:
    gLogger.fatal( "StageRequest._getOnlineReplicas: Failed to get associated Replicas.", res['Message'] )
  return res
def _getOfflineReplicas( self ):
  """ This checks Replicas in Offline status
      and returns the list of Replicas to be Staged
      SEs for which the cache is currently full are not considered
  """
  # Get all Replicas in Waiting Status associated to Staging Tasks
  gLogger.verbose( 'StageRequest._getOfflineReplicas: Checking Offline Replicas to be handled' )
  res = self.__getOfflineReplicas()
  if not res['OK']:
    gLogger.fatal( "StageRequest._getOfflineReplicas: Failed to get replicas from StorageManagementDB.", res['Message'] )
    return res
  seReplicas = {}
  allReplicaInfo = res['Value']['AllReplicaInfo']
  if not len( allReplicaInfo ):
    gLogger.info( "StageRequest._getOfflineReplicas: There were no Offline replicas found" )
    # Nothing to do: pass the (empty) result straight back.
    return res
  gLogger.info( "StageRequest._getOfflineReplicas: Obtained %s replicas Offline for staging." % len( allReplicaInfo ) )
  replicasToStage = []
  for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
    if not self.__usage( storageElement ) < self.__cache( storageElement ):
      gLogger.info( 'StageRequest._getOfflineReplicas: Skipping %s, current usage above limit ( %s GB )' % ( storageElement, self.__cache( storageElement ) ) )
      # Do not consider those SE that have the Cache full
      continue
    seReplicas[storageElement] = []
    # Account each replica's size against the SE cache as it is selected,
    # and stop selecting for this SE as soon as the cache fills up.
    for replicaID in sorted( seReplicaIDs ):
      seReplicas[storageElement].append( replicaID )
      replicasToStage.append( replicaID )
      self.__add( storageElement, allReplicaInfo[replicaID]['Size'] )
      if not self.__usage( storageElement ) < self.__cache( storageElement ):
        # Stop adding Replicas when the cache is full
        break
  # Get Replicas from the same Tasks as those selected
  res = self.__addAssociatedReplicas( replicasToStage, seReplicas, allReplicaInfo )
  if not res['OK']:
    gLogger.fatal( "StageRequest._getOfflineReplicas: Failed to get associated Replicas.", res['Message'] )
  return res
def __usage( self, storageElement ):
  """ Retrieve the currently accounted stage/pin usage (GB) for an SE. """
  # Lazily create a zeroed entry so an unknown SE reports 0 usage.
  seDict = self.storageElementUsage.setdefault( storageElement, {'TotalSize': 0.} )
  return seDict['TotalSize']
def __cache( self, storageElement ):
  """ Retrieve (and memoize) the cache size for an SE, in GB per
      throttling step.
  """
  try:
    return self.storageElementCache[storageElement]
  except KeyError:
    # DiskCacheTB is configured in TB: convert to GB, then divide over
    # the throttling steps.
    diskCache = self.resources.getStorageElementValue( storageElement, 'DiskCacheTB', 1. )
    cacheSize = diskCache * 1000. / THROTTLING_STEPS
    self.storageElementCache[storageElement] = cacheSize
    return cacheSize
def __add( self, storageElement, size ):
  """ Add size (in bytes) to the current usage of storageElement and
      return the added amount in GB.
  """
  usage = self.storageElementUsage.setdefault( storageElement, {'TotalSize': 0.} )
  sizeGB = size / ( 1000 * 1000 * 1000.0 )
  usage['TotalSize'] += sizeGB
  return sizeGB
def _issuePrestageRequests( self, storageElement, seReplicaIDs, allReplicaInfo ):
  """ Submit the prestage (bring-online) requests to the SE and update the DB.

      On success the submitted replicas are recorded per SRM request ID in
      the StorageManagementDB and moved to status 'StageSubmitted'.
  """
  # Map PFN -> replicaID for the replicas to stage at this SE.
  pfnRepIDs = {}
  for replicaID in seReplicaIDs:
    pfn = allReplicaInfo[replicaID]['PFN']
    pfnRepIDs[pfn] = replicaID
  # Now issue the prestage requests for the remaining replicas
  stageRequestMetadata = {}
  updatedPfnIDs = []
  if pfnRepIDs:
    gLogger.info( "StageRequest._issuePrestageRequests: Submitting %s stage requests for %s." % ( len( pfnRepIDs ), storageElement ) )
    res = StorageElement( storageElement ).prestageFile( pfnRepIDs, lifetime = self.pinLifetime )
    gLogger.debug( "StageRequest._issuePrestageRequests: StorageElement.prestageStorageFile: res=", res )
    # NOTE: prestageFile may return OK with per-PFN errors in
    # res['Value']['Failed']; only the 'Successful' entries are recorded.
    if not res['OK']:
      gLogger.error( "StageRequest._issuePrestageRequests: Completely failed to submit stage requests for replicas.", res['Message'] )
    else:
      # Group the successfully submitted replicas by SRM request ID.
      for pfn, requestID in res['Value']['Successful'].items():
        # BUGFIX: dict.has_key() does not exist in Python 3; setdefault
        # is the equivalent (and works on Python 2 as well).
        stageRequestMetadata.setdefault( requestID, [] ).append( pfnRepIDs[pfn] )
        updatedPfnIDs.append( pfnRepIDs[pfn] )
  if stageRequestMetadata:
    gLogger.info( "StageRequest._issuePrestageRequests: %s stage request metadata to be updated." % len( stageRequestMetadata ) )
    res = self.stagerClient.insertStageRequest( stageRequestMetadata, self.pinLifetime )
    if not res['OK']:
      gLogger.error( "StageRequest._issuePrestageRequests: Failed to insert stage request metadata.", res['Message'] )
      return res
    res = self.stagerClient.updateReplicaStatus( updatedPfnIDs, 'StageSubmitted' )
    if not res['OK']:
      gLogger.error( "StageRequest._issuePrestageRequests: Failed to insert replica status.", res['Message'] )
  return
def __sortBySE( self, replicaDict ):
  """ Group a {replicaID: info} dict by StorageElement.

      Returns S_OK with:
      * 'SEReplicas':     {SE: [replicaID, ...]}
      * 'AllReplicaInfo': {replicaID: {'LFN', 'PFN', 'Size', 'StorageElement'}}
  """
  seReplicas = {}
  replicaIDs = {}
  for replicaID, info in replicaDict.items():
    replicaIDs[replicaID] = {'LFN': info['LFN'], 'PFN': info['PFN'],
                             'Size': info['Size'], 'StorageElement': info['SE']}
    # BUGFIX: dict.has_key() does not exist in Python 3; setdefault is
    # the equivalent (and works on Python 2 as well).
    seReplicas.setdefault( info['SE'], [] ).append( replicaID )
  return S_OK( {'SEReplicas':seReplicas, 'AllReplicaInfo':replicaIDs} )
def __getStagedReplicas( self ):
  """ This obtains the Staged replicas from the Replicas table and for each LFN the requested storage element """
  # First obtain the Staged replicas from the Replicas table
  res = self.stagerClient.getStagedReplicas()
  if not res['OK']:
    # BUGFIX: these log messages used to say 'Waiting' — a copy/paste
    # error from __getWaitingReplicas; this method handles Staged ones.
    gLogger.error( "StageRequest.__getStagedReplicas: Failed to get replicas with Staged status.", res['Message'] )
    return res
  if not res['Value']:
    gLogger.debug( "StageRequest.__getStagedReplicas: No Staged replicas found to process." )
  else:
    gLogger.debug( "StageRequest.__getStagedReplicas: Obtained %s Staged replicas(s) to process." % len( res['Value'] ) )
  return self.__sortBySE( res['Value'] )
def __getWaitingReplicas( self ):
  """ This obtains the Waiting replicas from the Replicas table and for each LFN the requested storage element """
  res = self.stagerClient.getWaitingReplicas()
  if not res['OK']:
    gLogger.error( "StageRequest.__getWaitingReplicas: Failed to get replicas with Waiting status.", res['Message'] )
    return res
  found = res['Value']
  if found:
    gLogger.debug( "StageRequest.__getWaitingReplicas: Obtained %s Waiting replicas(s) to process." % len( found ) )
  else:
    gLogger.debug( "StageRequest.__getWaitingReplicas: No Waiting replicas found to process." )
  # Group the flat result by StorageElement before returning.
  return self.__sortBySE( found )
def __getOfflineReplicas( self ):
  """ This obtains the Offline replicas from the Replicas table and for each LFN the requested storage element """
  # First obtain the Offline replicas from the Replicas table
  res = self.stagerClient.getOfflineReplicas()
  if not res['OK']:
    # BUGFIX: these log messages used to say 'Waiting' — a copy/paste
    # error from __getWaitingReplicas; this method handles Offline ones.
    gLogger.error( "StageRequest.__getOfflineReplicas: Failed to get replicas with Offline status.", res['Message'] )
    return res
  if not res['Value']:
    gLogger.debug( "StageRequest.__getOfflineReplicas: No Offline replicas found to process." )
  else:
    gLogger.debug( "StageRequest.__getOfflineReplicas: Obtained %s Offline replicas(s) to process." % len( res['Value'] ) )
  return self.__sortBySE( res['Value'] )
def __addAssociatedReplicas( self, replicasToStage, seReplicas, allReplicaInfo ):
  """ Retrieve the list of Replicas that belong to the same Tasks as the provided list.

      Extends ``seReplicas``, ``replicasToStage`` and ``allReplicaInfo``
      in place with the associated Waiting and Offline replicas, accounts
      their sizes against the SE caches and returns
      S_OK( {'SEReplicas': ..., 'AllReplicaInfo': ...} ).
  """
  res = self.stagerClient.getAssociatedReplicas( replicasToStage )
  if not res['OK']:
    gLogger.fatal( "StageRequest.__addAssociatedReplicas: Failed to get associated Replicas.", res['Message'] )
    return res
  # Group the associated replicas by status and SE; other statuses are ignored.
  addReplicas = {'Offline': {}, 'Waiting': {}}
  replicaIDs = {}
  for replicaID, info in res['Value'].items():
    status = info['Status']
    if status not in ( 'Waiting', 'Offline' ):
      continue
    replicaIDs[replicaID] = {'LFN': info['LFN'], 'PFN': info['PFN'],
                             'Size': info['Size'], 'StorageElement': info['SE']}
    # BUGFIX: dict.has_key() does not exist in Python 3; setdefault is
    # the equivalent (and works on Python 2 as well).
    addReplicas[status].setdefault( info['SE'], [] ).append( replicaID )
  waitingReplicas = addReplicas['Waiting']
  offlineReplicas = addReplicas['Offline']
  allReplicaInfo.update( replicaIDs )
  # First handle Waiting Replicas for which metadata is to be checked
  for storageElement, seReplicaIDs in waitingReplicas.items():
    # Drop replicas that are already selected for staging.
    for replicaID in list( seReplicaIDs ):
      if replicaID in replicasToStage:
        seReplicaIDs.remove( replicaID )
    res = self.__checkIntegrity( storageElement, seReplicaIDs, allReplicaInfo )
    if not res['OK']:
      gLogger.error( 'StageRequest.__addAssociatedReplicas: Failed to check Replica Metadata', '(%s): %s' % ( storageElement, res['Message'] ) )
    else:
      # keep all Replicas (Online and Offline)
      seReplicas.setdefault( storageElement, [] )
      seReplicas[storageElement].extend( res['Value']['Online'] )
      replicasToStage.extend( res['Value']['Online'] )
      seReplicas[storageElement].extend( res['Value']['Offline'] )
      replicasToStage.extend( res['Value']['Offline'] )
  # Then handle Offline Replicas for which metadata is already checked
  for storageElement, seReplicaIDs in offlineReplicas.items():
    seReplicas.setdefault( storageElement, [] )
    for replicaID in sorted( seReplicaIDs ):
      if replicaID in replicasToStage:
        seReplicaIDs.remove( replicaID )
    seReplicas[storageElement].extend( seReplicaIDs )
    replicasToStage.extend( seReplicaIDs )
  # Discard info for replicas that will not be staged.
  # BUGFIX: iterate over a copy of the keys; deleting while iterating
  # the live keys() view raises RuntimeError on Python 3.
  for replicaID in list( allReplicaInfo ):
    if replicaID not in replicasToStage:
      del allReplicaInfo[replicaID]
  # Account the selected sizes against the SE caches and report totals.
  totalSize = 0
  for storageElement in sorted( seReplicas.keys() ):
    replicaIDs = seReplicas[storageElement]
    size = 0
    for replicaID in replicaIDs:
      size += self.__add( storageElement, allReplicaInfo[replicaID]['Size'] )
    gLogger.info( 'StageRequest.__addAssociatedReplicas: Considering %s GB to be staged at %s' % ( size, storageElement ) )
    totalSize += size
  gLogger.info( "StageRequest.__addAssociatedReplicas: Obtained %s GB for staging." % totalSize )
  return S_OK( {'SEReplicas':seReplicas, 'AllReplicaInfo':allReplicaInfo} )
def __checkIntegrity( self, storageElement, seReplicaIDs, allReplicaInfo ):
  """ Check the integrity of the files to ensure they are available
      Updates status of Offline Replicas for a later pass
      Return list of Online replicas to be Stage

      :param storageElement: name of the SE to query
      :param seReplicaIDs: list of replica IDs located at this SE
      :param allReplicaInfo: dict replicaID -> metadata (must contain 'PFN' and 'Size')
      :return: S_OK( {'Online': [...], 'Offline': [...]} ) of replica IDs, or an error structure
  """
  # Nothing to check: return empty Online/Offline partitions.
  if not seReplicaIDs:
    return S_OK( {'Online': [], 'Offline': []} )
  # Build a PFN -> replicaID map so the SE response (keyed by PFN) can be
  # translated back into replica IDs.
  pfnRepIDs = {}
  for replicaID in seReplicaIDs:
    pfn = allReplicaInfo[replicaID]['PFN']
    pfnRepIDs[pfn] = replicaID
  gLogger.info( "StageRequest.__checkIntegrity: Checking the integrity of %s replicas at %s." % ( len( pfnRepIDs ), storageElement ) )
  res = StorageElement( storageElement ).getFileMetadata( pfnRepIDs )
  if not res['OK']:
    gLogger.error( "StageRequest.__checkIntegrity: Completely failed to obtain metadata for replicas.", res['Message'] )
    return res
  # Partition replicas into: terminally failed (with a reason string),
  # cached on disk (Online) and on tape only (Offline).
  terminalReplicaIDs = {}
  onlineReplicaIDs = []
  offlineReplicaIDs = []
  for pfn, metadata in res['Value']['Successful'].items():
    if metadata['Size'] != allReplicaInfo[pfnRepIDs[pfn]]['Size']:
      # Size mismatch between SE and catalog: mark terminal and drop from the map.
      gLogger.error( "StageRequest.__checkIntegrity: PFN StorageElement size does not match FileCatalog", pfn )
      terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN StorageElement size does not match FileCatalog'
      pfnRepIDs.pop( pfn )
    elif metadata['Lost']:
      gLogger.error( "StageRequest.__checkIntegrity: PFN has been Lost by the StorageElement", pfn )
      terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN has been Lost by the StorageElement'
      pfnRepIDs.pop( pfn )
    elif metadata['Unavailable']:
      gLogger.error( "StageRequest.__checkIntegrity: PFN is declared Unavailable by the StorageElement", pfn )
      terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN is declared Unavailable by the StorageElement'
      pfnRepIDs.pop( pfn )
    else:
      # 'Cached' means the file is already on the disk cache (Online);
      # otherwise it still needs staging from tape (Offline).
      if metadata['Cached']:
        gLogger.verbose( "StageRequest.__checkIntegrity: Cache hit for file." )
        onlineReplicaIDs.append( pfnRepIDs[pfn] )
      else:
        offlineReplicaIDs.append( pfnRepIDs[pfn] )
  # Failed metadata queries: only a missing file is treated as terminal here.
  for pfn, reason in res['Value']['Failed'].items():
    if re.search( 'File does not exist', reason ):
      gLogger.error( "StageRequest.__checkIntegrity: PFN does not exist in the StorageElement", pfn )
      terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN does not exist in the StorageElement'
      pfnRepIDs.pop( pfn )
  # Update the states of the replicas in the database #TODO Send status to integrity DB
  if terminalReplicaIDs:
    gLogger.info( "StageRequest.__checkIntegrity: %s replicas are terminally failed." % len( terminalReplicaIDs ) )
    res = self.stagerClient.updateReplicaFailure( terminalReplicaIDs )
    if not res['OK']:
      gLogger.error( "StageRequest.__checkIntegrity: Failed to update replica failures.", res['Message'] )
  if onlineReplicaIDs:
    gLogger.info( "StageRequest.__checkIntegrity: %s replicas found Online." % len( onlineReplicaIDs ) )
  if offlineReplicaIDs:
    gLogger.info( "StageRequest.__checkIntegrity: %s replicas found Offline." % len( offlineReplicaIDs ) )
    # NOTE(review): the result of updateReplicaStatus is not checked — confirm
    # a failure here is acceptable (Offline replicas would be retried later).
    res = self.stagerClient.updateReplicaStatus( offlineReplicaIDs, 'Offline' )
  return S_OK( {'Online': onlineReplicaIDs, 'Offline': offlineReplicaIDs} )
| sposs/DIRAC | StorageManagementSystem/Agent/StageRequestAgent.py | Python | gpl-3.0 | 23,332 | [
"DIRAC"
] | 343cebb8b695ea4b27600d9253456727b89ed564dd9f0eae566bbfb4e5e7a57f |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import pytest
import os.path as osp
from sisl.io.vasp.locpot import *
import numpy as np
# Mark every test in this module as both an 'io' and a 'vasp' test.
pytestmark = [pytest.mark.io, pytest.mark.vasp]
# Sub-directory of the shared test-files tree holding the VASP fixtures.
_dir = osp.join('sisl', 'io', 'vasp')
def test_graphene_locpot(sisl_files):
    """LOCPOT reads honor the requested dtype and carry the geometry."""
    path = sisl_files(_dir, 'graphene', 'LOCPOT')
    grid_double = locpotSileVASP(path).read_grid()
    grid_single = locpotSileVASP(path).read_grid(dtype=np.float32)
    geometry = locpotSileVASP(path).read_geometry()
    assert grid_double.dtype == np.float64
    assert grid_single.dtype == np.float32
    assert geometry == grid_single.geometry
def test_graphene_locpot_index_float(sisl_files):
    """Reading with a fractional index scales the grid values accordingly."""
    path = sisl_files(_dir, 'graphene', 'LOCPOT')
    full_grid = locpotSileVASP(path).read_grid()
    half_grid = locpotSileVASP(path).read_grid(index=[0.5])
    assert full_grid.grid.sum() / 2 == pytest.approx(half_grid.grid.sum())
| zerothi/sisl | sisl/io/vasp/tests/test_locpot.py | Python | mpl-2.0 | 987 | [
"VASP"
] | 52a8d4f35e866ba2f7c36aa34120bfc97e5a0fa6ebeb9c593cec46df83668250 |
import _vlfeat
import numpy
from quickshift import vl_quickseg,vl_quickvis
def vl_sift(data, frames=None,
            octaves=-1, levels=-1, first_octave=0,
            peak_thresh=-1.0, edge_thresh=-1.0, norm_thresh=-1.0,
            magnif=-1.0, window_size=-1.0, orientations=False,
            verbose=0):
    """Computes SIFT frames (keypoints) F and descriptors D of an image.

    `data` is a gray-scale image in single precision. Each column of F is a
    feature frame [X;Y;S;TH]: (fractional) center X,Y, scale S and orientation
    TH in radians. Each column of D is the corresponding 128-dimensional
    UINT8 descriptor.

    @param data          A gray-scale image in single precision (float array).
    @param frames        Optional (4, n) array of frames to use, bypassing the
                         detector. Frames not in order of increasing scale are
                         re-ordered.
    @param octaves       Number of octaves of the DoG scale space.
    @param levels        Number of levels per octave (default 3).
    @param first_octave  Index of the first octave (default 0).
    @param peak_thresh   Peak selection threshold (default 0).
    @param edge_thresh   Non-edge selection threshold (default 10).
    @param norm_thresh   Minimum l2-norm of a descriptor before normalization;
                         descriptors below it are set to zero.
    @param magnif        Descriptor magnification factor (default 3).
    @param window_size   Variance of the Gaussian window.
    @param orientations  Compute frame orientations, overriding `frames`.
    @param verbose       Verbosity level.
    """
    if frames is None:
        frames = numpy.zeros(1)
    else:
        d, _ = frames.shape
        assert d == 4, "Frames must have shape (4, ?)"
    # BUG FIX: the original tested `frames.dtype is numpy.float64`, which is
    # always False (a dtype object is never the scalar type), and the fallback
    # `numpy.array(frames)` did not set dtype= — so integer frames were passed
    # through unconverted. Compare dtypes with != and convert explicitly.
    if frames.dtype != numpy.float64:
        frames = numpy.array(frames, dtype=numpy.float64)
    if not frames.flags['F_CONTIGUOUS']:
        frames = numpy.array(frames, order='F')
    # Same dtype-comparison fix for the image data.
    if data.dtype != numpy.float32:
        data = numpy.array(data, dtype=numpy.float32)
    if not data.flags['F_CONTIGUOUS']:
        data = numpy.array(data, order='F')
    return _vlfeat.vl_sift(data, frames, octaves, levels, first_octave,
                           peak_thresh, edge_thresh, norm_thresh, magnif,
                           window_size, orientations, verbose)
def vl_mser(data, delta=5, max_area=.75, min_area=.0002,
            max_variation=.25, min_diversity=.2):
    """Computes Maximally Stable Extremal Regions (MSER) of image `data`.

    Returns a vector of region seeds R and fitted ellipsoid frames F. A
    (maximally stable) extremal region is a connected component of a level
    set of the image; it can be recovered from a seed X as the connected
    component of {Y: I(Y) <= I(X)} containing X. Each column of F holds an
    ellipsoid center followed by the independent elements of its covariance
    matrix, in the matrix reference frame of `data`.

    @param data          A gray-scale image (UINT8).
    @param delta         Stability threshold: region stability is the relative
                         area variation when intensity changes by +/- delta/2.
    @param max_area      Maximum region area relative to the image area.
    @param min_area      Minimum region area relative to the image area.
    @param max_variation Maximum absolute stability score of the regions.
    @param min_diversity Minimum diversity between nested regions; below this
                         only the most stable of the pair is kept.
    """
    # asfortranarray is a no-op for already Fortran-contiguous input and
    # otherwise produces the same F-ordered copy as numpy.array(order='F').
    fortran_data = numpy.asfortranarray(data)
    return _vlfeat.vl_mser(fortran_data, delta, max_area, min_area,
                           max_variation, min_diversity)
def vl_erfill(data, r):
    """Returns the pixels belonging to the extremal region seeded at `r`.

    The selected region is the connected component of the level set
    {Y: I(Y) <= I(r)} that contains pixel `r`. `data` must be UINT8 and `r`
    a scalar index of the region representative point.
    """
    return _vlfeat.vl_erfill(numpy.asfortranarray(data), r)
def vl_dsift(data, step=-1, bounds=None, size=-1, fast=True, verbose=False,
             norm=False):
    """Computes Dense SIFT (DSIFT) descriptors of a gray-scale image.

    A DSIFT descriptor is equivalent to a SIFT descriptor (see vl_sift());
    this function quickly computes many of them on a dense grid with
    identical size and orientation. Returns the frames F (two rows: X, Y
    centers; orientation fixed to zero) and descriptors D.

    @param data    A gray-scale image in single precision.
    @param step    Extract a descriptor each `step` pixels.
    @param bounds  Optional float32 bounds array; when omitted a fresh
                   zeros(1) placeholder is passed to the native code.
    @param size    A spatial bin covers `size` pixels.
    @param fast    Use a flat rather than Gaussian window. Much faster.
    @param verbose Be verbose.
    @param norm    Append the per-descriptor normalization factor as a third
                   row of F; useful to suppress low-contrast descriptors.
    """
    # BUG FIX: the default was a module-level numpy array shared by every
    # call (classic mutable-default-argument pitfall). Use None as the
    # sentinel and allocate a fresh placeholder per call.
    if bounds is None:
        bounds = numpy.zeros(1, 'f')
    if not data.flags['F_CONTIGUOUS']:
        data = numpy.array(data, order='F')
    return _vlfeat.vl_dsift(data, step, bounds, size, fast, verbose, norm)
def vl_siftdescriptor(grad, frames):
    """Computes SIFT descriptors for keypoints `frames` on gradient `grad`.

    `grad` is a 2xMxN array: layer 0 holds the gradient modulus and layer 1
    the gradient angle (radians, clockwise from the X axis, assuming Y points
    down). Each column of `frames` is a keypoint (X, Y, SIGMA, ANGLE).

    To match the standard SIFT descriptor, `grad` should be computed after
    smoothing the image to the keypoint scale (SIFT assumes the input is
    already pre-smoothed at scale 0.5).
    """
    fortran_grad = numpy.asfortranarray(grad)
    fortran_frames = numpy.asfortranarray(frames)
    return _vlfeat.vl_siftdescriptor(fortran_grad, fortran_frames)
def vl_imsmooth(I, sigma):
    """Convolves image I with an isotropic Gaussian kernel of std-dev sigma.

    I must be an array of doubles. A 3D array is treated as a multi-channel
    image (e.g. R,G,B) and each channel is convolved independently.
    """
    return _vlfeat.vl_imsmooth(numpy.asfortranarray(I), sigma)
def vl_ikmeans(data, K, max_niters=200, method='lloyd', verbose=0):
    """Integer K-means: partitions UINT8 data into K clusters.

    Returns the UINT32 cluster centers C and the data-to-cluster map I.

    @param max_niters Maximum number of iterations before giving up (stops
                      early once the assignments no longer change).
    @param method     Algorithm to use ('Lloyd', 'Elkan').
    @param verbose    Increase the verbosity level.
    """
    return _vlfeat.vl_ikmeans(numpy.asfortranarray(data), K, max_niters,
                              method, verbose)
def vl_ikmeanspush(data, centers, method='lloyd', verbose=0):
    """Projects `data` onto the integer K-means clusters of `centers`.

    Returns the cluster index for each data point.
    """
    fortran_data = numpy.asfortranarray(data)
    fortran_centers = numpy.asfortranarray(centers)
    return _vlfeat.vl_ikmeanspush(fortran_data, fortran_centers, method,
                                  verbose)
def vl_binsum(H, X, B, DIM=-1):
    """Thin wrapper over the native VLFeat binned-sum routine.

    All array arguments are made Fortran-contiguous before dispatching to
    _vlfeat.vl_binsum (see the VLFeat vl_binsum documentation for semantics).
    """
    return _vlfeat.vl_binsum(numpy.asfortranarray(H),
                             numpy.asfortranarray(X),
                             numpy.asfortranarray(B), DIM)
def vl_hikmeans(data, K, nleaves, verb=0, max_iters=200, method='lloyd'):
    """Hierarchical integer K-means (native _vlfeat.vl_hikmeans wrapper)."""
    return _vlfeat.vl_hikmeans(numpy.asfortranarray(data), K, nleaves, verb,
                               max_iters, method)
def vl_hikmeanspush(tree, data, verb=0, method='lloyd'):
    """Pushes `data` through an existing HIKM `tree` (native wrapper)."""
    return _vlfeat.vl_hikmeanspush(tree, numpy.asfortranarray(data), verb,
                                   method)
def vl_rgb2gray(data):
    """Converts an RGB image to gray scale, matching MATLAB's conversion.

    @param data A color image as a 3D numpy array (last axis is R, G, B).
    @return A 2D numpy array of rounded luma values (float dtype).
    """
    red = data[:, :, 0]
    green = data[:, :, 1]
    blue = data[:, :, 2]
    # ITU-R BT.601 luma weights, as used by MATLAB's rgb2gray.
    return numpy.round(0.2989 * red + 0.5870 * green + 0.1140 * blue)
| fish2000/python-vlfeatures | vlfeat/__init__.py | Python | gpl-2.0 | 11,856 | [
"Gaussian"
] | 58dfe97b5c0552bea79b5f8267b3de4965be7b0f56164a2187afed82498dbf68 |
# Lint as: python2, python3
# -*- coding: utf-8 -*-
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as py_collections
import contextlib
import functools
import hashlib
import inspect
import math
import numbers
import os
import pkgutil
import re
import threading
import traceback
from REDACTED.tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding
import REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.compat as tf
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import hyperparams
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import ops
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import retry
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import symbolic
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import tshape
import numpy as np
import six
from six.moves import range
from six.moves import zip
from REDACTED.google_research.model_pruning.python import pruning
# pylint: disable=g-direct-tensorflow-import
from REDACTED.tensorflow.core.framework import node_def_pb2
from REDACTED import rewriter_config_pb2
from REDACTED.tensorflow.python.framework import func_graph
from REDACTED.tensorflow.python.framework import function
from REDACTED.tensorflow.python.ops import init_ops
from REDACTED.tensorflow.python.tpu import tpu_function
from REDACTED.tensorflow.python.util import deprecation
# pylint: enable=g-direct-tensorflow-import
# Module-level flags (legacy; see the NOTE below about avoiding flags in
# libraries).
tf.flags.DEFINE_bool('enable_asserts', True,
                     'If False, we disable all asserts.')
tf.flags.DEFINE_bool('enable_check_numerics', True,
                     'If False, we bypass calls to CheckNumerics.')
tf.flags.DEFINE_bool('print_debug_tensors', False,
                     'Whether to print debug tensors.')
tf.flags.DEFINE_string(
    'xla_device', '', 'If non-empty, can be cpu, gpu, or tpu (case sensitive)')
tf.flags.DEFINE_bool(
    'use_resource_var', True,
    'Use ResourceVariable instead of RefVariable; this option is '
    'enabled by default and will be removed in the future.')
tf.flags.DEFINE_bool(
    'tpu_compatible', False, 'Create variables in a way compatible with TPU. '
    'This should be true for any job that will interact '
    'with variables or a checkpoint that will be produced '
    'or consumed by TPU')
tf.flags.DEFINE_bool(
    'pin_vars_to_cpu', False,
    'Pin variables to cpu:0. This is useful for weight-sharing / multi-core '
    'inference on TPUs in which TPU core variables are managed via '
    'TPUPartitionedCallOp.')
tf.flags.DEFINE_bool(
    'no_identity_on_vars', False,
    'Do not add tf.identity() on vars. This allows TPUPartitionedCallOp to use'
    'variable handles directly for weight-sharing / multi-core '
    'inference on TPUs.')
tf.flags.DEFINE_bool('disable_py_utils_debug', False,
                     'If True disables all py_utils.Debug() logs.')

# NOTE: Using absl flags in libraries are frowned upon for several reasons:
#
# 1) They require app.run() or explicit flag parsing, preventing the use of
# these libraries in environments that don't look like normal binaries (colab
# notebooks).
#
# 2) They are process-level globals that cannot be scoped or configured except
# once during binary startup.
#
# Because py_utils is a library, no more flags should be used in this file; the
# existing flags are present for backwards compatibility. Instead, consider
# using a stack-scoped configuration object such as the Cluster object. We guard
# against issue 1 above by using _FromGlobal below, which uses the default value
# of the FLAG even if flags are unparsed.
FLAGS = tf.flags.FLAGS


def _FromGlobal(field_name):
  """Get 'field_name' from a global configuration object.

  Currently the global configuration object used is FLAGS, but this may
  change to Cluster() or an equivalent stack-scoped config object.

  Args:
    field_name: The string field name to look up.

  Returns:
    The value associated with the global configuration string 'field_name'.
  """
  # TODO(b/145831327): check the field name in the current cluster object.
  # If explicitly set, use that value instead of using the FLAG value.

  # Now check the FLAGS object for backwards compatibility.
  #
  # If not explicitly set, get the field from the FLAGS object. If FLAGS
  # have not been parsed yet, the default value of the flag will be used.
  return FLAGS[field_name].value


# Graph-collection keys used to stash enqueue and TPU-embedding ops.
ENQUEUE_OPS = '__lingvo_enqueue_ops'

TPU_EMBEDDING_LOAD_OPS = '__lingvo_tpu_embedding_load_ops'
TPU_EMBEDDING_RETRIEVE_OPS = '__lingvo_tpu_embedding_retrieve_ops'
TPU_EMBEDDING = '__tpu_embedding'
TPU_EMBEDDING_ACTIVATIONS = '__tpu_embedding_activations'

# Disable printing of TF deprecation warnings module-wide.
# pylint: disable=protected-access
deprecation._PRINT_DEPRECATION_WARNINGS = False
# pylint: enable=protected-access
def Assert(condition, data, *args, **kwargs):
  """tf.Assert that becomes a no-op when asserts are globally disabled."""
  if not _FromGlobal('enable_asserts'):
    return tf.no_op()
  return tf.Assert(condition, data, *args, **kwargs)


def assert_equal(*args, **kwargs):  # pylint: disable=invalid-name
  """tf.assert_equal guarded by the global 'enable_asserts' flag."""
  if not _FromGlobal('enable_asserts'):
    return tf.no_op()
  return tf.assert_equal(*args, **kwargs)


def assert_greater_equal(*args, **kwargs):  # pylint: disable=invalid-name
  """tf.debugging.assert_greater_equal guarded by 'enable_asserts'."""
  if not _FromGlobal('enable_asserts'):
    return tf.no_op()
  return tf.debugging.assert_greater_equal(*args, **kwargs)


def assert_greater(*args, **kwargs):  # pylint: disable=invalid-name
  """tf.assert_greater guarded by 'enable_asserts'."""
  if not _FromGlobal('enable_asserts'):
    return tf.no_op()
  return tf.assert_greater(*args, **kwargs)


def assert_less_equal(*args, **kwargs):  # pylint: disable=invalid-name
  """tf.debugging.assert_less_equal guarded by 'enable_asserts'."""
  if not _FromGlobal('enable_asserts'):
    return tf.no_op()
  return tf.debugging.assert_less_equal(*args, **kwargs)


def assert_less(*args, **kwargs):  # pylint: disable=invalid-name
  """tf.assert_less guarded by 'enable_asserts'."""
  if not _FromGlobal('enable_asserts'):
    return tf.no_op()
  return tf.assert_less(*args, **kwargs)
def assert_between(x, l, r, *args, **kwargs):  # pylint: disable=invalid-name
  """Asserts that every element of x lies in the half-open interval [l, r)."""
  lower_ok = Assert(tf.reduce_all(tf.greater_equal(x, l)), [x], *args, **kwargs)
  upper_ok = Assert(tf.reduce_all(tf.less(x, r)), [x], *args, **kwargs)
  return tf.group(lower_ok, upper_ok)
def assert_shape_match(*args, **kwargs):  # pylint: disable=invalid-name
  """ops.assert_shape_match guarded by 'enable_asserts'; tags the message with the caller's file:line."""
  if _FromGlobal('enable_asserts'):
    # limit=3 / [-2] picks the caller's frame; do not add/remove stack frames
    # inside this function or the reported location will be wrong.
    filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]
    kwargs['msg'] = 'BABELFISH ASSERT %s:%s(%s)' % (re.sub(
        r'.*/', '', filepath), line, func)
    return ops.assert_shape_match(*args, **kwargs)
  else:
    return tf.no_op()
def assert_same_dim0(xs, *args, **kwargs):  # pylint: disable=invalid-name
  """ops.assert_same_dim0 guarded by the global 'enable_asserts' flag."""
  if not _FromGlobal('enable_asserts'):
    return tf.no_op()
  return ops.assert_same_dim0(xs, *args, **kwargs)
def assert_even_divide(denorm, num):  # pylint: disable=invalid-name
  """Asserts that denorm is evenly divided by num.

  Args:
    denorm: an integer tensor (tf.int32 or tf.int64).
    num: an integer tensor (tf.int32 or tf.int64) with the same shape.

  Returns:
    An assert op that fires if (denorm // num) * num != denorm.

  Raises:
    ValueError: if either argument is not tf.int32/tf.int64.
  """
  denorm = tf.convert_to_tensor(denorm)
  num = tf.convert_to_tensor(num)

  if denorm.dtype not in (tf.int32, tf.int64):
    # Fixed typo in the error message ('denorminator' -> 'denominator').
    raise ValueError('denominator.dtype is not tf.int32 or tf.int64.')
  if num.dtype not in (tf.int32, tf.int64):
    raise ValueError('numerator.dtype is not tf.int32 or tf.int64.')

  num = HasShape(num, GetShape(denorm))
  quo = denorm // num
  return assert_equal(quo * num, denorm)
def _CheckNumerics(x, message=None, *args, **kwargs):
  """Wraps a floating-point tensor in check_numerics; passes others through."""
  if not x.dtype.is_floating:
    return x
  if 'name' not in kwargs:
    # Strip the ':<output-index>' suffix from the tensor name.
    kwargs['name'] = re.sub(r':\d+', '', x.name) + '_CheckNumerics'
  return tf.debugging.check_numerics(x, message or x.name, *args, **kwargs)
def CheckNumerics(inp, message=None, *args, **kwargs):
  """Checks numerics for a tensor, or element-wise for a list/tuple of tensors."""
  if not _FromGlobal('enable_check_numerics'):
    return inp
  check = lambda x: _CheckNumerics(x, message, *args, **kwargs)
  if isinstance(inp, list):
    return list(map(check, inp))
  if isinstance(inp, tuple):
    return tuple(map(check, inp))
  return check(inp)
def with_dependencies(dependencies, output_tensor):  # pylint: disable=invalid-name
  """Returns an identity of output_tensor that runs after `dependencies`."""
  with tf.control_dependencies(dependencies):
    return tf.identity(output_tensor)
@contextlib.contextmanager
def _PrintOptions(*args, **kwargs):
original = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
yield
np.set_printoptions(**original)
def _Print(name, x):
  """Logs 'name = <array repr>'; py_func target used by Log() below."""
  # Wide line width keeps the whole array repr on as few lines as possible.
  with _PrintOptions(linewidth=1000):
    tf.logging.info('%s = %s', name, np.array_repr(x))
def Log(value, prefix, **kwargs):
  """Prints out values of tensors for debugging.

  E.g.::

      z = compute(x, y)
      z = Log(z, 'debug compute()', x=x, y=y)

  Args:
    value: A Tensor. Logging happens after this tensor is computed.
    prefix: Every tensor is logged with this prefix.
    **kwargs: keywords and tensors; tensors are logged in the sorted order
      of the keywords.

  Returns:
    `value`, with control dependencies that force the logging to run first.
  """
  # Chain control dependencies so tensors print in deterministic order.
  dep = value
  for key in sorted(kwargs):
    with tf.control_dependencies([dep]):
      dep = tf.py_func(_Print, [prefix + ' : ' + key, kwargs[key]], [])
  with tf.control_dependencies([dep]):
    return tf.identity(value)
def Debug(tensor, message='', enabled=True, summarize=100, more=None):
  """Wrapper around tf.Print() and tf.logging.info() to simplify debug printing.

      x = py_utils.Debug(x)

  When the graph is built a regular log info line will be printed:
      -DBG- py_utils_test.py:429 x=Tensor(...
  Then when the tensor node is evaluated it will print lines like:
      -DBG- py_utils_test.py:429 x Const:0[x.shape=][2 2][x=][[1 2][3 4]]

  WARNING: The code that parses local variable names can fail. E.g. don't write
  two Debug() calls on one line or a Debug() call that spans more than one line.

  Args:
    tensor: A tensor to print.
    message: A message to print.
    enabled: To enable the debugging.
    summarize: Integer with number of tensor values to print.
    more: An optional list of additional tensors.

  Returns:
    The tensor.
  """
  if not enabled or _FromGlobal('disable_py_utils_debug'):
    return tensor
  if more is None:
    more = []

  # Frame of the caller; [1] assumes Debug() is called directly (no wrapper).
  stack = inspect.stack()[1][0]
  caller = inspect.getframeinfo(stack)

  # Recover the argument expressions by regex-parsing the caller's source
  # line. Rough and likely to fail (see WARNING above), but better than
  # nothing.
  caller_var = ''
  caller_more_vars = []
  if caller.code_context:
    caller_var = re.compile(r'Debug\((.*?)(\)|,).*$').search(
        caller.code_context[0]).groups()[0]
    if more:
      more_vars = re.compile(r'more=\[(.*?)\].*$').search(
          caller.code_context[0]).groups()[0]
      caller_more_vars = more_vars.split(',')

  # Best-effort class name if Debug() was called from a method.
  the_class = ''
  if 'self' in stack.f_locals:
    the_class = stack.f_locals['self'].__class__.__name__

  header = '-DBG- {}:{}:{}:{} {} '.format(
      os.path.basename(caller.filename), the_class, caller.function,
      caller.lineno, message)

  # Graph-construction-time log line.
  info = '{}{}={}'.format(header, caller_var, tensor)
  for name, val in zip(caller_more_vars, more):
    info += ' {}={}'.format(name.strip(), val)
  tf.logging.info(info)

  if isinstance(tensor, tf.Tensor):
    # Run-time print: shapes first, then values, for the main and extra
    # tensors.
    tensors = []
    tensors += [tf.constant('{}.shape='.format(caller_var)), tf.shape(tensor)]
    for name, val in zip(caller_more_vars, more):
      tensors += [tf.constant('{}.shape='.format(name.strip())), tf.shape(val)]
    tensors += [tf.constant('{}='.format(caller_var)), tensor]
    for name, val in zip(caller_more_vars, more):
      tensors += [tf.constant('{}='.format(name.strip())), val]
    info = '{}{} {}'.format(header, caller_var, tensor.name)
    return tf.Print(tensor, tensors, info, summarize=summarize)
  return tensor
def _Save(steps, prefix, key, val):
  """Writes `val` to '<prefix>.<steps>.<key>.npy'; py_func target for Save()."""
  out_name = '%s.%08d.%s.npy' % (six.ensure_text(prefix), steps,
                                 six.ensure_text(key))
  with tf.io.gfile.GFile(out_name, 'w') as out_stream:
    np.save(out_stream, val)
def Save(value, filename_prefix, **kwargs):
  """Saves values of tensors into files for debugging.

  E.g.::

      z = compute(x, y)
      z = Save(z, '/path/tmp', x=x, y=y, z=z)

  Args:
    value: A Tensor. Saving happens after this tensor is computed.
    filename_prefix: Every tensor is saved with this filename prefix.
    **kwargs: keywords and tensors; tensors are saved in the sorted order
      of the keywords.

  Returns:
    `value`, with control dependencies that force the saves to run first.
  """
  dep = value
  steps = GetGlobalStep()
  for key in sorted(kwargs):
    with tf.control_dependencies([dep]):
      dep = tf.py_func(_Save, [steps, filename_prefix, key, kwargs[key]], [])
  with tf.control_dependencies([dep]):
    return tf.identity(value)
def HasRank(tensor, expected_rank):
  """Syntactic sugar for asserting that tensor has the expected rank.

  Checks statically when the rank is known at graph-build time; otherwise
  inserts a runtime assert (unless asserts are globally disabled).
  """
  static_rank = tensor.shape.ndims
  if static_rank is not None and isinstance(expected_rank, int):
    assert static_rank == expected_rank, ('Ranks did not match, got %d, '
                                          'expected %d') % (static_rank,
                                                            expected_rank)
    return tensor
  if not _FromGlobal('enable_asserts'):
    return tensor
  return with_dependencies([tf.assert_equal(tf.rank(tensor), expected_rank)],
                           tensor)
def HasAtLeastRank(tensor, expected_rank):
  """Syntactic sugar for asserting that tensor has rank >= expected_rank."""
  static_rank = tensor.shape.ndims
  if static_rank is not None and isinstance(expected_rank, int):
    assert static_rank >= expected_rank, (
        'Rank of tensor %d did not exceed the expected value %d.') % (
            static_rank, expected_rank)
    return tensor
  if not _FromGlobal('enable_asserts'):
    return tensor
  return with_dependencies(
      [tf.debugging.assert_greater_equal(tf.rank(tensor), expected_rank)],
      tensor)
def GetRank(tensor):
  """Returns tensor's rank as an int if it's available, otherwise a Tensor.

  Args:
    tensor: The input tensor.

  Returns:
    Either an int (static rank) or a scalar Tensor (dynamic rank).
  """
  static_rank = tensor.shape.ndims
  return static_rank if static_rank is not None else tf.rank(tensor)
def HasShape(tensor, expected_shape, ndims=None):
  """Syntactic sugar for asserting that tensor has the expected shape.

  Args:
    tensor: A Tensor.
    expected_shape: A Python list or a 1D tensor.
    ndims: If not None, check only the first `ndims` dimensions of `tensor`.
      Must be equal to the length of `expected_shape` if not None.

  Returns:
    The input `tensor` (with a runtime shape-check dependency when asserts
    are enabled; a pass-through otherwise).

  Raises:
    A runtime error if the assertion fails.
  """
  if _FromGlobal('enable_asserts'):
    # limit=3 / [-2] picks the caller's frame; do not add/remove stack frames
    # inside this function or the reported location will be wrong.
    filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]
    msg = 'BABELFISH ASSERT %s:%s(%s)' % (re.sub(r'.*/', '',
                                                 filepath), line, func)
    return with_dependencies([
        ops.assert_shape_match(
            tf.shape(tensor)[:ndims], expected_shape, msg=msg)
    ], tensor)
  else:
    return tensor
def GetShape(tensor, ndims=None):
  """Returns tensor's shape as a list which can be unpacked, unlike tf.shape.

  Tries to return static shape if it's available. Note that this means
  some of the outputs will be ints while the rest will be Tensors.

  Args:
    tensor: The input tensor.
    ndims: If not None, returns the shapes for the first `ndims` dimensions.
  """
  tensor = tf.convert_to_tensor(tensor)
  dynamic_shape = tf.shape(tensor)

  rank = tensor.shape.ndims
  if rank is None:
    # Unranked tensor: everything must come from the dynamic shape.
    if ndims is None:
      return dynamic_shape
    return [dynamic_shape[i] for i in range(ndims)]

  ndims = rank if ndims is None else min(ndims, rank)
  static_shape = tensor.shape.as_list()
  # Prefer static dims; fall back to dynamic ones where unknown.
  return [
      static_shape[i] if static_shape[i] is not None else dynamic_shape[i]
      for i in range(ndims)
  ]
def GetSize(tensor):
  """Returns the element count: an int when fully static, else a Tensor."""
  shape = GetShape(tensor)
  is_dynamic = (
      isinstance(shape, tf.Tensor) or
      any(isinstance(dim, tf.Tensor) for dim in shape))
  return tf.size(tensor) if is_dynamic else np.prod(shape)
def use_xla():  # pylint: disable=invalid-name
  """Returns the global 'xla_device' setting ('', 'cpu', 'gpu' or 'tpu')."""
  device = _FromGlobal('xla_device')
  if device:
    assert device in ('', 'cpu', 'gpu', 'tpu')
  return device


def use_tpu():  # pylint: disable=invalid-name
  """Returns True iff the global xla_device is 'tpu'."""
  on_tpu = _FromGlobal('xla_device') == 'tpu'
  if on_tpu:
    assert not _FromGlobal('enable_asserts')  # asserts not supported on tpu
  return on_tpu


def tpu_compat():  # pylint: disable=invalid-name
  """Returns True when running on TPU or in TPU-compatible mode."""
  return use_tpu() or _FromGlobal('tpu_compatible')


def use_resource_variables():  # pylint: disable=invalid-name
  """Returns True when ResourceVariables should be used."""
  return _FromGlobal('use_resource_var') or tpu_compat()
@contextlib.contextmanager
def outside_all_rewrites():  # pylint: disable=invalid-name
  """Context manager that clears all existing control dependencies."""
  with tf.control_dependencies(None):
    yield


class _ThreadLocalStack(threading.local):
  """A stack that is local to each thread (each thread sees its own list)."""

  def __init__(self):
    super(_ThreadLocalStack, self).__init__()
    self.stack = []


# TODO(jamesqin): remove once b/147439702 is fixed.
# Thread-local marker read/written by RunOnTpuHost below to avoid nesting
# tf.tpu.outside_compilation calls on the same thread.
_OUTSIDE_COMPILATION = threading.local()
def RunOnTpuHost(func, *args, **kwargs):
  r"""Runs the given function call on TPU host.

  Invokes func(\*args, \*\*kwargs) directly if not running on tpu.

  Args:
    func: the function to invoke.
    *args: args of func
    **kwargs: kwargs of func

  Returns:
    The function return value.
  """
  if use_tpu() and not getattr(_OUTSIDE_COMPILATION, 'on', False):
    _OUTSIDE_COMPILATION.on = True
    # BUG FIX: reset the thread-local flag in a `finally` block. Previously an
    # exception from outside_compilation left the flag True, so every later
    # call on this thread silently skipped outside_compilation.
    try:
      res = tf.tpu.outside_compilation(func, *args, **kwargs)
    finally:
      _OUTSIDE_COMPILATION.on = False
  else:
    res = func(*args, **kwargs)
  return res
def tpu_host(func):  # pylint: disable=invalid-name
  r"""Decorates a python function to only run on TPU hosts.

  This function has no effect when running on CPU/GPU.

  Example::

      @py_utils.tpu_host
      def ComputeWER(self):
        # Call a custom op computing WER.

  Args:
    func: the function to invoke.

  Returns:
    A TPU-host only function.
  """
  # Local import keeps the module-level import block untouched.
  import functools

  # functools.wraps preserves func's __name__/__doc__ so logging and
  # introspection of the decorated function keep working.
  @functools.wraps(func)
  def Wrapped(*args, **kwargs):
    return RunOnTpuHost(func, *args, **kwargs)

  return Wrapped
_tpu_device_assignment = None
def SetTpuDeviceAssignment(tpu_device_assignment):
  """Records the global TPU device assignment, warning on overwrite."""
  global _tpu_device_assignment
  if _tpu_device_assignment is not None:
    # Overwriting is allowed but likely unintentional; surface it in the log.
    tf.logging.warning('tpu_device_assignment was already set, '
                       'overwriting with new assignment.')
  _tpu_device_assignment = tpu_device_assignment
# This function should be called in unittest only.
def ClearTpuDevice():
  """Resets the global TPU device assignment (test-only helper)."""
  global _tpu_device_assignment
  _tpu_device_assignment = None
def GetTpuDeviceAssignment():
  """Returns the global TPU device assignment, or None if never set."""
  return _tpu_device_assignment
def SessionConfig(soft_placement=True,
                  inline=True,
                  cluster_def=None,
                  disable_meta_optimizer=False):
  """Returns a session config proto.

  Args:
    soft_placement: Turns allow_soft_placement on iff True.
    inline: Turns do_function_inlining on iff True.
    cluster_def: A tf.train.ClusterDef describing the cluster.
    disable_meta_optimizer: Turns off grappler/metagraph optimizer.

  Returns:
    A TF session config proto.
  """
  graph_options = tf.GraphOptions(
      optimizer_options=tf.OptimizerOptions(
          opt_level=tf.OptimizerOptions.L1, do_function_inlining=inline))
  session_config = tf.config_pb2.ConfigProto(
      allow_soft_placement=soft_placement,
      graph_options=graph_options,
      cluster_def=cluster_def)
  rewrite_options = session_config.graph_options.rewrite_options
  if disable_meta_optimizer:
    # Useful if start-up time is critical.
    rewrite_options.disable_meta_optimizer = True
  # Disable layout optimizer which increases GPU memory usage.
  rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.OFF
  return session_config
def AssertIsCompatible(a, b):
  """Asserts that `a` reports itself compatible with `b`."""
  assert a.IsCompatible(b), '%s vs %s' % (a, b)
def SetShapes(dst_nmap, src_nmap):
  """Set shapes in dst_nmap using those in src_nmap."""
  # Structures must match so the flattened element-wise zip is meaningful.
  AssertIsCompatible(src_nmap, dst_nmap)
  for src_t, dst_t in zip(src_nmap.Flatten(), dst_nmap.Flatten()):
    dst_t.set_shape(src_t.shape)
def Dtypes(nmap_list):
  """Returns all tensors' data types in a list."""
  return [tensor.dtype for tensor in Flatten(nmap_list)]
def Flatten(x):
  """Flattens 'x' by extracting tensors from nested structures to a list.

  Thin wrapper over tf.nest.flatten; descends into dicts, lists, tuples and
  namedtuples.
  """
  return tf.nest.flatten(x)
def Pack(tmpl, values):
  """Packs 'values' according to 'tmpl' (tf.nest.pack_sequence_as wrapper)."""
  return tf.nest.pack_sequence_as(tmpl, values)
def Transform(fn, *v):
  """Replaces every nested value x in 'v' with fn(x) and returns the result.

  Thin wrapper over tf.nest.map_structure; all structures in *v must match.
  """
  return tf.nest.map_structure(fn, *v)
def IsCompatible(lhs, rhs):
  """Returns true if lhs and rhs have the same nested structure."""
  try:
    tf.nest.assert_same_structure(lhs, rhs)
    return True
  except (ValueError, TypeError):
    # tf.nest raises ValueError/TypeError on structure mismatch.
    return False
_NAME_PATTERN = re.compile('[A-Za-z_][A-Za-z0-9_]*')
class NestedMap(dict):
  """A simple helper to maintain a dict.

  It is a sub-class of dict with the following extensions/restrictions:
  - It supports attr access to its members (see examples below).
  - Member keys have to be valid identifiers.

  E.g.::

      >>> foo = NestedMap()
      >>> foo['x'] = 10
      >>> foo.y = 20
      >>> assert foo.x * 2 == foo.y
  """
  # Disable pytype attribute checking.
  _HAS_DYNAMIC_ATTRIBUTES = True
  # keys in this list are not allowed in a NestedMap.
  _RESERVED_KEYS = set(dir(dict))
  # sentinel value for deleting keys used in Filter.
  _DELETE = object()
  def __init__(self, *args, **kwargs):
    # The dict constructor does not route through __setitem__, so keys that
    # arrive via NestedMap(existing_dict) must be re-validated here.
    super(NestedMap, self).__init__(*args, **kwargs)
    for key in self.keys():
      assert isinstance(key, six.string_types), (
          'Key in a NestedMap has to be a six.string_types. Currently type: %s,'
          ' value: %s' % (str(type(key)), str(key)))
      NestedMap.CheckKey(key)
      assert key not in NestedMap._RESERVED_KEYS, ('%s is a reserved key' % key)
  def __setitem__(self, key, value):
    # Make sure key is a valid expression and is not one of the reserved
    # attributes.
    assert isinstance(key, six.string_types), (
        'Key in a NestedMap has to be a six.string_types. Currently type: %s, '
        'value: %s' % (str(type(key)), str(key)))
    NestedMap.CheckKey(key)
    assert key not in NestedMap._RESERVED_KEYS, ('%s is a reserved key' % key)
    super(NestedMap, self).__setitem__(key, value)
  def __setattr__(self, name, value):
    # Attribute writes route through item assignment so they get the same
    # key validation.
    self.__setitem__(name, value)
  def __getattr__(self, name):
    try:
      return self[name]
    except KeyError as e:
      # Raise AttributeError (not KeyError) so getattr()/hasattr() semantics
      # work as expected for attribute-style access.
      raise AttributeError('%s; available attributes: %s' %
                           (e, sorted(list(self.keys()))))
  def __delattr__(self, name):
    try:
      del self[name]
    except KeyError as e:
      raise AttributeError('%s; available attributes: %s' %
                           (e, sorted(list(self.keys()))))
  def copy(self):  # Don't delegate w/ super: dict.copy() -> dict.
    # Shallow copy that preserves the NestedMap type.
    return NestedMap(self)
  def __deepcopy__(self, unused_memo):
    """Deep-copies the structure but not the leaf objects."""
    return self.DeepCopy()
  def DeepCopy(self):
    """Deep-copies the structure but not the leaf objects."""
    # Pack(Flatten()) rebuilds the nested container structure while reusing
    # the same leaf values.
    return self.Pack(self.Flatten())
  @staticmethod
  def FromNestedDict(x):
    """Converts every dict in nested structure 'x' to a NestedMap."""
    if isinstance(x, dict):
      res = NestedMap()
      for k, v in six.iteritems(x):
        res[k] = NestedMap.FromNestedDict(v)
      return res
    elif isinstance(x, (list, tuple)):
      # Preserve the original sequence type (list vs tuple).
      return type(x)(NestedMap.FromNestedDict(v) for v in x)
    else:
      return x
  @staticmethod
  def CheckKey(key):
    """Asserts that key is valid NestedMap key."""
    # NOTE(review): _NAME_PATTERN is applied with re.match, which anchors only
    # at the start; keys with a valid prefix but trailing invalid characters
    # are accepted — confirm this laxness is intended.
    if not (isinstance(key, six.string_types) and _NAME_PATTERN.match(key)):
      raise ValueError('Invalid NestedMap key \'{}\''.format(key))
  def GetItem(self, key):
    """Gets the value for the nested `key`.

    Note that indexing lists is not supported, names with underscores will be
    considered as one key.

    Args:
      key: str of the form
        `([A-Za-z_][A-Za-z0-9_]*)(.[A-Za-z_][A-Za-z0-9_]*)*.`.

    Returns:
      The value for the given nested key.

    Raises:
      KeyError if a key is not present.
    """
    current = self
    # Note: This can't support lists. List keys are ambiguous as underscore is
    # not reserved for list indexing but also allowed to be used in keys.
    # E.g., this is a valid nested map where the key 'a_0' is not well defined
    # {'a_0': 3, 'a': [4]}.
    for k in key.split('.'):
      current = current[k]
    return current
  def Get(self, key, default=None):
    """Gets the value for nested `key`, returns `default` if key does not exist.

    Note that indexing lists is not supported, names with underscores will be
    considered as one key.

    Args:
      key: str of the form
        `([A-Za-z_][A-Za-z0-9_]*)(.[A-Za-z_][A-Za-z0-9_]*)*.`.
      default: Optional default value, defaults to None.

    Returns:
      The value for the given nested key or `default` if the key does not exist.
    """
    try:
      return self.GetItem(key)
    # TypeError is raised when an intermediate item is a list and we try to
    # access an element of it with a string.
    except (KeyError, TypeError):
      return default
  def Set(self, key, value):
    """Sets the value for a nested key.

    Note that indexing lists is not supported, names with underscores will be
    considered as one key.

    Args:
      key: str of the form
        `([A-Za-z_][A-Za-z0-9_]*)(.[A-Za-z_][A-Za-z0-9_]*)*.`.
      value: The value to insert.

    Raises:
      ValueError if a sub key is not a NestedMap or dict.
    """
    current = self
    sub_keys = key.split('.')
    for i, k in enumerate(sub_keys):
      self.CheckKey(k)
      # We have reached the terminal node, set the value.
      if i == (len(sub_keys) - 1):
        current[k] = value
      else:
        # Create intermediate NestedMaps on demand.
        if k not in current:
          current[k] = NestedMap()
        if not isinstance(current[k], (dict, NestedMap)):
          raise ValueError('Error while setting key {}. Sub key "{}" is of type'
                           ' {} but must be a dict or NestedMap.'
                           ''.format(key, k, type(current[k])))
        current = current[k]
  def _RecursiveMap(self, fn, flatten=False):
    """Traverse recursively into lists and NestedMaps applying `fn`.

    Args:
      fn: The function to apply to each item (leaf node).
      flatten: If true, the result should be a single flat list. Otherwise the
        result will have the same structure as this NestedMap.

    Returns:
      The result of applying fn.
    """
    def Recurse(v, key=''):
      """Helper function for _RecursiveMap."""
      if isinstance(v, NestedMap):
        ret = [] if flatten else NestedMap()
        deleted = False
        # Keys are visited in sorted order so Flatten()/Pack() round-trip
        # deterministically.
        for k in sorted(v.keys()):
          res = Recurse(v[k], key + '.' + k if key else k)
          if res is self._DELETE:
            deleted = True
            continue
          elif flatten:
            ret += res
          else:
            ret[k] = res
        if not ret and deleted:
          # All children were deleted: propagate the deletion upward.
          return self._DELETE
        return ret
      elif isinstance(v, list):
        ret = []
        deleted = False
        for i, x in enumerate(v):
          res = Recurse(x, '%s[%d]' % (key, i))
          if res is self._DELETE:
            deleted = True
            continue
          elif flatten:
            ret += res
          else:
            ret.append(res)
        if not ret and deleted:
          return self._DELETE
        return ret
      else:
        # Leaf node: apply fn; fn may return the _DELETE sentinel.
        ret = fn(key, v)
        if flatten:
          ret = [ret]
        return ret
    res = Recurse(self)
    if res is self._DELETE:
      # Everything was filtered out.
      return [] if flatten else NestedMap()
    return res
  def Flatten(self):
    """Returns a list containing the flattened values in the `.NestedMap`.

    Unlike py_utils.Flatten(), this will only descend into lists and NestedMaps
    and not dicts, tuples, or namedtuples.
    """
    return self._RecursiveMap(lambda _, v: v, flatten=True)
  def FlattenItems(self):
    """Flatten the `.NestedMap` and returns <key, value> pairs in a list.

    Returns:
      A list of <key, value> pairs, where keys for nested entries will be
      represented in the form of `foo.bar[10].baz`.
    """
    return self._RecursiveMap(lambda k, v: (k, v), flatten=True)
  def Pack(self, lst):
    """Returns a copy of this with each value replaced by a value in lst."""
    assert len(self.FlattenItems()) == len(lst)
    # _RecursiveMap visits leaves in the same (sorted) order as Flatten(), so
    # consuming the iterator leaf-by-leaf restores the structure.
    v_iter = iter(lst)
    return self._RecursiveMap(lambda unused_k, unused_v: next(v_iter))
  def Transform(self, fn):
    """Returns a copy of this `.NestedMap` with fn applied on each value."""
    return self._RecursiveMap(lambda _, v: fn(v))
  def IsCompatible(self, other):
    """Returns true if self and other are compatible.

    If x and y are two compatible `.NestedMap`, `x.Pack(y.Flatten())` produces y
    and vice versa.

    Args:
      other: Another `.NestedMap`.
    """
    # Compatibility is defined as having an identical flattened key list.
    items = self._RecursiveMap(lambda k, _: k, flatten=True)
    other_items = other._RecursiveMap(lambda k, _: k, flatten=True)  # pylint: disable=protected-access
    return items == other_items
  def Filter(self, fn):
    """Returns a copy with entries where fn(entry) is True."""
    return self.FilterKeyVal(lambda _, v: fn(v))
  def FilterKeyVal(self, fn):
    """Returns a copy of this `.NestedMap` filtered by fn.

    If fn(key, entry) is True, the entry is copied into the returned NestedMap.
    Otherwise, it is not copied.

    Args:
      fn: a callable of (string, entry)->boolean.

    Returns:
      A `.NestedMap` contains copied entries from this `'.NestedMap`.
    """
    return self._RecursiveMap(lambda k, v: v if fn(k, v) else self._DELETE)
  def _ToStrings(self):
    """Returns debug strings in a list for this `.NestedMap`."""
    kv = self.FlattenItems()
    # Pad keys so values line up in a column.
    maxlen = max([len(k) for k, _ in kv]) if kv else 0
    return sorted([k + ' ' * (4 + maxlen - len(k)) + str(v) for k, v in kv])
  def DebugString(self):
    """Returns a debug string for this `.NestedMap`."""
    return '\n'.join(self._ToStrings())
  def VLog(self, level=None, prefix=None):
    """Logs the debug string at the level."""
    if level is None:
      level = 0
    if prefix is None:
      prefix = 'nmap: '
    for l in self._ToStrings():
      tf.logging.vlog(level, '%s %s', prefix, l)
class _Unique(object):
"""A helper to uniqify variables in a NestedMap."""
def __init__(self):
self._vset = set()
def __call__(self, v):
if (v is None) or (id(v) in self._vset):
return False
else:
self._vset.add(id(v))
return True
def ToUniqueList(nmap):
  """Returns the flattened `nmap` with duplicates (and None values) removed.

  Duplicates are detected by object identity via the _Unique predicate.
  """
  return nmap.Filter(_Unique()).Flatten()
def ReadOnlyAttrDictView(backing):
  """Wraps a dict to provide a read-only view of its contents.

  Dict keys can also be accessed by attribute.

  Args:
    backing: Dict-like object to wrap.

  Returns:
    Read-only Mapping that can be accessed by index (['foo']) or attr (d.foo).
  """

  class _ReadOnlyView(object):
    """Read-only proxy over `backing` (captured via closure)."""
    # Disable pytype attribute checking.
    _HAS_DYNAMIC_ATTRIBUTES = True

    def __len__(self):
      return len(backing)

    def __iter__(self):
      return iter(backing)

    def __getitem__(self, key):
      return backing[key]

    def __getattr__(self, key):
      return backing[key]

    def __hasattr__(self, key):
      return key in backing

    def __setitem__(self, key, value):
      raise AttributeError('Dictionary is read-only.')

    def __setattr__(self, key, value):
      raise AttributeError('Dictionary is read-only.')

  return _ReadOnlyView()
def ToStaticShape(shape):
  """Converts 'shape' to a static shape, resolving symbolic dimensions."""
  if not isinstance(shape, (list, tuple)):
    # Scalar case: unwrap a tf.Dimension, pass anything else through.
    return shape.value if isinstance(shape, tf.Dimension) else shape
  # First unwrap tf.Dimension entries, then resolve symbolic expressions.
  unwrapped = [
      dim.value if isinstance(dim, tf.Dimension) else dim for dim in shape
  ]
  return [
      symbolic.ToStatic(dim) if symbolic.IsExpr(dim) else dim
      for dim in unwrapped
  ]
def Zeros(shape, *args, **kwargs):
  """Like tf.zeros(), but accepts symbolic shapes (see ToStaticShape)."""
  return tf.zeros(ToStaticShape(shape), *args, **kwargs)
class UniformSampler(object):
  """A reservoir sampler.

  This class implements reservoir sampling: Given a limit of `num_samples` total
  samples, this class maintains a uniform probability (1 / `num_samples`) of
  keeping any item dynamically added to the sampler.

  See https://en.wikipedia.org/wiki/Reservoir_sampling for details.
  """

  def __init__(self, num_samples):
    assert num_samples > 0
    self._num_samples = num_samples
    self._num_seen_items = 0
    self._samples = []

  def Add(self, item):
    """Add item to sampler."""
    self._num_seen_items += 1
    if len(self._samples) >= self._num_samples:
      # Reservoir is full: keep the item with probability
      # num_samples / num_seen_items by overwriting a random slot.
      slot = np.random.randint(0, self._num_seen_items)
      if slot < self._num_samples:
        self._samples[slot] = item
      return
    # Reservoir not yet full: always keep the item.
    self._samples.append(item)

  @property
  def samples(self):
    """Fetch the current samples from the sampler."""
    return self._samples
class RNNCellStateInit(object):
  """State initialization functions for RNN cell init state."""

  @staticmethod
  def _Params(method, seed):
    # Builds a frozen Params holding the initialization method and seed;
    # consumed by InitRNNCellState().
    p = hyperparams.Params()
    p.Define('method', method,
             'Initialization method. Should be one of zeros, random_normal.')
    p.Define('seed', seed, 'Random seed used to generate initial values.')
    p.Freeze()
    return p

  @staticmethod
  def Zeros():
    """tf.zeros()."""
    return RNNCellStateInit._Params('zeros', seed=None)

  @staticmethod
  def RandomNormal(seed=None):
    """tf.random.normal()."""
    return RNNCellStateInit._Params('random_normal', seed)
def DefaultRNNCellStateInit():
  """Returns the default RNN cell state initializer (all zeros)."""
  return RNNCellStateInit.Zeros()
def InitRNNCellState(shape, init=None, dtype=None, name=None, is_eval=False):
  """Initial state definitions for RNN cell implementations.

  Args:
    shape: A array of ints/symbols for specifying the shape of the state.
    init: Hyperparameters as returned by one of the static implemetaitons in
      RNNCellStateInit.
    dtype: The dype of the states. Defaults to tf.float32.
    name: An optional name for the operation.
    is_eval: Bool, set to True if we need special behavior in eval mode.

  Returns:
    A Tensor of the specified shape, and sampled from the distribution as
    defined by the init parameters.
  """
  shape = ToStaticShape(shape)
  if init is None:
    init = DefaultRNNCellStateInit()
  if dtype is None:
    dtype = tf.float32
  method = init.method
  # In eval mode, random_normal degrades to zeros for determinism.
  if method == 'zeros' or (method == 'random_normal' and is_eval):
    return tf.zeros(shape=shape, dtype=dtype, name=name)
  if method == 'random_normal':
    return tf.random.normal(
        shape=shape, dtype=dtype, name=name, seed=init.seed)
  raise ValueError('Initialization method (%s) not supported.' % method)
class WeightInit(object):
  """Static class providing weight initialization config params.

  Each factory method returns a frozen hyperparams.Params with fields
  (method, scale, seed); the actual initializer is constructed from these
  in CreateVariable().
  """

  @staticmethod
  def _Params(method, scale, seed):
    p = hyperparams.Params()
    p.Define('method', method, 'Initialization method.')
    p.Define('scale', scale, 'Initialization scale.')
    p.Define('seed', seed, 'Random seed used to generate initial values.')
    p.Freeze()
    return p

  @staticmethod
  def Gaussian(scale=1.0, seed=None):
    """scale * tf.random.normal(0, 1.0)."""
    return WeightInit._Params('gaussian', scale, seed)

  @staticmethod
  def Uniform(scale=1.0, seed=None):
    """scale * tf.random.uniform(-1.0, 1.0)."""
    return WeightInit._Params('uniform', scale, seed)

  @staticmethod
  def UniformPositive(scale=1.0, seed=None):
    """scale * tf.random.uniform(0., 1.0)."""
    return WeightInit._Params('uniform_positive', scale, seed)

  @staticmethod
  def Xavier(scale=1.0, seed=None):
    """Xavier initialization (x = sqrt(6. / (in + out)); [-x, x])."""
    return WeightInit._Params('xavier', scale, seed)

  @staticmethod
  def XavierWithFixupParams(scale=1.0,
                            depth=1.0,
                            layers_per_residual_block=1.0,
                            seed=None):
    """Xavier initialization with Fixup."""
    # Fixup rescales by depth^(-1 / (2 * m)) where m is the number of layers
    # in a residual block; see "Fixup Initialization" (Zhang et al., 2019).
    scale = scale * math.pow(depth, (-1.0 / (2 * layers_per_residual_block)))
    return WeightInit._Params('xavier', scale, seed)

  @staticmethod
  def GeoMeanXavier(scale=1.0, seed=None):
    """A variant of Xavier (x = sqrt(3. / sqrt(in * out)); [-x, x])."""
    return WeightInit._Params('geo_mean_xavier', scale, seed)

  @staticmethod
  def Constant(scale=1.0):
    """scale."""
    return WeightInit._Params('constant', scale, 0)

  @staticmethod
  def TruncatedGaussian(scale=1.0, seed=None):
    """scale * tf.random.truncated_normal(0, 1.0)."""
    return WeightInit._Params('truncated_gaussian', scale, seed)

  @staticmethod
  def GaussianSqrtDim(scale=1.0, seed=None):
    """scale * tf.random.normal(0, 1 / sqrt(dim0))."""
    return WeightInit._Params('gaussian_sqrt_dim', scale, seed)

  @staticmethod
  def GaussianSqrtFanIn(scale=1.0, seed=None):
    """scale * tf.random.normal(0, 1 / sqrt(fan_in))."""
    return WeightInit._Params('gaussian_sqrt_fanin', scale, seed)

  @staticmethod
  def GaussianSqrtFanOut(scale=1.0, seed=None):
    """scale * tf.random.normal(0, 1 / sqrt(fan_out))."""
    return WeightInit._Params('gaussian_sqrt_fanout', scale, seed)

  @staticmethod
  def UniformSqrtDim(scale=1.0, seed=None):
    """scale * tf.uniform(-1 / sqrt(dim0), 1 / sqrt(dim0))."""
    return WeightInit._Params('uniform_sqrt_dim', scale, seed)

  @staticmethod
  def UniformUnitScaling(scale=1.0, seed=None):
    """scale * sqrt(3) / sqrt(dim0) * tf.uniform(-1, 1)."""
    return WeightInit._Params('uniform_unit_scaling', scale, seed)

  @staticmethod
  def TruncatedGaussianSqrtDim(scale=1.0, seed=None):
    """scale * tf.random.truncated_normal(0, 1 / sqrt(dim0))."""
    return WeightInit._Params('truncated_gaussian_sqrt_dim', scale, seed)

  @staticmethod
  def TruncatedGaussianSqrtFanIn(scale=1.0, seed=None):
    """scale * tf.random.truncated_normal(0, 1 / sqrt(fan_in))."""
    return WeightInit._Params('truncated_gaussian_sqrt_fanin', scale, seed)

  @staticmethod
  def TruncatedGaussianSqrtFanOut(scale=1.0, seed=None):
    """scale * tf.random.truncated_normal(0, 1 / sqrt(fan_out))."""
    return WeightInit._Params('truncated_gaussian_sqrt_fanout', scale, seed)

  @staticmethod
  def KaimingUniformFanInRelu(scale=1.0, seed=None):
    """Kaiming uniform initialization for fan-in with ReLU nonlinearity."""
    return WeightInit._Params('kaiming_uniform_fanin_relu', scale, seed)

  @staticmethod
  def KaimingUniformFanInLeakyRelu(scale=np.sqrt(5.), seed=None):
    """Kaiming uniform initialization for fan-in with LeakyReLU nonlinearity."""
    return WeightInit._Params('kaiming_uniform_fanin_leakyrelu', scale, seed)
_DEFAULT_XAVIER_INIT = 1.000001
def DefaultParamInit():
  """Returns the default weight initializer (Xavier with a sentinel scale)."""
  # Here we use 1.000001 as a signature for user picking up the
  # default param initializer.
  return WeightInit.Xavier(_DEFAULT_XAVIER_INIT)
def IsDefaultParamInit(p):
  """True iff `p` is the sentinel initializer produced by DefaultParamInit()."""
  return (p.method == 'xavier' and p.scale == _DEFAULT_XAVIER_INIT and
          p.seed is None)
def WeightParams(shape,
                 init=None,
                 dtype=None,
                 collections=None,
                 xla_num_partitions=None,
                 xla_partition_dim=None):
  """Returns a hyperparams for a weight variable given the shape/init/dtype.

  Args:
    shape: The weight variable shape.
    init: A WeightInit params; defaults to the sentinel Xavier initializer.
    dtype: The weight data type; defaults to tf.float32.
    collections: Variable collections this weight belongs to; defaults to [].
    xla_num_partitions: Optional number of XLA partitions for this weight.
    xla_partition_dim: Optional dimension along which to partition.

  Returns:
    A hyperparams.Params describing the weight variable.
  """
  if init is None:
    init = WeightInit.Xavier(_DEFAULT_XAVIER_INIT)
  if dtype is None:
    dtype = tf.float32
  if collections is None:
    collections = []
  p = hyperparams.Params()
  p.Define('dtype', dtype, 'The weight data type.')
  p.Define('shape', shape, 'The weight shape.')
  p.Define('init', init, 'Initialization method.')
  p.Define('collections', collections,
           'Variable collections this weight belongs to.')
  # Fixed typo in the help strings ('partitoning' -> 'partitioning').
  p.Define('xla_num_partitions', xla_num_partitions, 'partitioning')
  p.Define('xla_partition_dim', xla_partition_dim, 'partitioning')
  return p
def FindNeeded(endpoints):
  """List names of tensors and operations required to compute endpoints."""
  names_seen = set()
  # Seed the worklist with the op behind each endpoint tensor/operation.
  queue = [
      e if isinstance(e, tf.Operation) else e.op for e in Flatten(endpoints)
  ]
  while queue:
    op = queue.pop()
    if op.name in names_seen:
      continue
    names_seen.add(op.name)
    names_seen.update(o.name for o in op.outputs)
    # Walk both data inputs and control inputs.
    queue.extend(i.op for i in op.inputs)
    queue.extend(op.control_inputs)
  return names_seen
def FindNeededInList(tensor_list, endpoints):
  """Return tensors from tensor_list needed to compute any of endpoints."""
  # Membership test is against the full transitive closure of op/tensor names.
  all_needed = FindNeeded(endpoints)
  return [t for t in tensor_list if t.name in all_needed]
class _CollectionGetter(object):
  """Lazily creates and returns a singleton value stored in a graph collection."""

  def __init__(self, key, default_factory):
    self._key = key
    self._default_factory = default_factory

  def __call__(self):
    existing = tf.get_collection(self._key)
    if existing:
      # The collection must hold exactly one value.
      assert len(existing) == 1
      return existing[0]
    value = self._default_factory()
    tf.add_to_collection(self._key, value)
    return value
def SanitizeScopeKey(key):
  """Removes invalid symbols from name_scope keys ('[' -> '_', ']' dropped)."""
  return key.replace(']', '').replace('[', '_')
# Global variable to control multitask variable reuse.
# If False (default) the default tf.get_variable is used, that is:
# - Reusing scopes only allow getting existing variables
# - Non-reusing scopes only allow getting new variables
# With OPPORTUNISTIC_VARIABLE_REUSE==True:
# - Reusing scopes only allow getting existing variables, as usual
# - Non-reusing scopes reuse existing variables when present, or create new
#   ones otherwise
# The value is stored as a single-element list so the flag can be mutated
# in place by OpportunisticVariableReuseScope().
_OPPORTUNISTIC_VARIABLE_REUSE_KEY = ('__lingvo_opportunistic_variable_reuse',)
_get_opportunistic_variable_reuse = _CollectionGetter(
    _OPPORTUNISTIC_VARIABLE_REUSE_KEY, lambda: [False])
# Graph-local stack of variable renaming rules; see VariableRenameScope().
_VARIABLE_RENAME_RULES_KEY = ('__lingvo_variable_rename_rules',)
_get_rename_rules_stack = _CollectionGetter(_VARIABLE_RENAME_RULES_KEY,
                                            lambda: [])
@contextlib.contextmanager
def OpportunisticVariableReuseScope(enable_opportunistic_reuse=True):
  """Scope controlling whether non-reusing scopes may opportunistically reuse.

  Args:
    enable_opportunistic_reuse: the flag value within the scope.

  Yields:
    A scope within which the setting applies; the previous value is restored
    on exit.
  """
  opportunistic_var_reuse = _get_opportunistic_variable_reuse()
  old_val = opportunistic_var_reuse[0]
  opportunistic_var_reuse[0] = enable_opportunistic_reuse
  try:
    yield
  finally:
    # Restore even if the body raises; otherwise the setting would leak
    # past the scope.
    opportunistic_var_reuse[0] = old_val
def GetOpportunisticVariableReuse():
  """Get the current variable reuse setting."""
  # The setting is stored as a single-element list; see the module globals.
  opportunistic_var_reuse = _get_opportunistic_variable_reuse()
  return opportunistic_var_reuse[0]
@contextlib.contextmanager
def VariableRenameScope(renames):
  """Append the renaming rules to the stack of renames.

  Args:
    renames: pairs of (regexp, new_name_format). If the regexp matches, the
      new_name_format will be interpolated using the matched groups.

  Yields:
    scope in which the renaming rules are applied
  """
  rename_rules_stack = _get_rename_rules_stack()
  rename_rules_stack.append(renames)
  try:
    yield
  finally:
    # Pop even if the body raises; otherwise the rules would leak past
    # the scope.
    rename_rules_stack.pop()
def GetVariableName(name):
  """Get variable name after application of all renaming rules.

  Args:
    name: untransformed variable name with scope_name prepended.

  Returns:
    name possibly modified using renaming rules.
  """
  matched = False
  new_name = name
  # Later rules override earlier ones; every rule is always matched against
  # the ORIGINAL name, not the intermediate result.
  for renames in _get_rename_rules_stack():
    for regexp, name_format in renames:
      match = re.match(regexp, name)
      if not match:
        continue
      if matched:
        tf.logging.warning('Multiple matches for: %s', name)
      matched = True
      new_name = name_format % match.groups()
  if new_name != name:
    tf.logging.info("WARNING!!! Renaming variable '%s' to '%s'", name, new_name)
  return new_name
def GenerateSeedFromName(name):
  """Generate a random seed from a name string."""
  # Fold the 128-bit md5 digest of the name into a non-negative int32 seed,
  # so the same name always produces the same seed.
  digest = hashlib.md5(six.ensure_binary(name)).hexdigest()
  return int(digest, 16) % (2**31 - 1)
# To keep track of all the variables ever created by the CreateVariable
# routine below.
_ALL_VARS_KEY = ('__lingvo_all_vars',)
_get_all_vars = _CollectionGetter(_ALL_VARS_KEY, lambda: {})
# NOTE(review): grabbing `.stack` off a fresh _ThreadLocalStack here binds the
# importing thread's list to a plain module global, so every thread that reads
# this global shares the SAME list — confirm whether per-thread isolation was
# actually intended here.
_VARIABLE_SHAPE_PREFIXES = _ThreadLocalStack().stack
@contextlib.contextmanager
def VariableShapePrefixContext(shape_prefix):
  """Add a shape prefix to variable created by CreateVariable().

  Args:
    shape_prefix: a positive integer of shape prefix.

  Yields:
    None.
  """
  assert shape_prefix > 0, ('%s' % shape_prefix)
  _VARIABLE_SHAPE_PREFIXES.append(shape_prefix)
  try:
    yield
  finally:
    # Pop even if the body raises; otherwise the prefix would leak into
    # unrelated CreateVariable() calls.
    _VARIABLE_SHAPE_PREFIXES.pop()
def GetVariableShapePrefixes():
  """Return the list of shape prefixes for CreateVariable()."""
  # Returns the live list; callers must not mutate it directly.
  return _VARIABLE_SHAPE_PREFIXES
def GetFanInFanOut(shape):
  """Returns (fan_in, fan_out) of a weight variable of the given shape.

  Follows _compute_fans() from TF's init_ops.py.

  Args:
    shape: a sequence of ints, or None.

  Returns:
    (None, None) for None/empty shapes; (d, d) for rank-1 shapes [d]; otherwise
    (shape[-2] * r, shape[-1] * r) where r = prod(shape[:-2]) is the
    receptive field size.
  """
  if not shape:
    # Covers both None and []. (The old `len(shape) < 1` branch returning
    # (1, 1) was unreachable after this check and has been removed.)
    return None, None
  if len(shape) == 1:
    return shape[0], shape[0]
  receptive_field_size = 1
  for dim in shape[:-2]:
    receptive_field_size *= dim
  fan_in = shape[-2] * receptive_field_size
  fan_out = shape[-1] * receptive_field_size
  return fan_in, fan_out
# TODO(yonghui): Add support for partitioned Variables.
def CreateVariable(name,
                   params,
                   reuse=None,
                   trainable=True,
                   collections=None,
                   default_seed=None,
                   synchronization=tf.VariableSynchronization.AUTO,
                   aggregation=tf.VariableAggregation.NONE):
  """Creates tf.Variable according to param_config.

  Args:
    name: A string, name of the variable.
    params: A WeightParams specifying the details of how this variable should be
      constructed and initialized.
    reuse: Whether or not to reuse an existing variable. It has the same
      semantics as the reuse arg in tf.variable_scope.
    trainable: Whether or not the variable is trainable.
    collections: Override the default variable collection (
      tf.GraphKeys.GLOBAL_VARIABLES).
    default_seed: Seed to use for initialization if not specified in params.
      Used for deterministic initialization in tests.
    synchronization: Indicates when a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      tf.VariableSynchronization. By default the synchronization is set to AUTO
      and the current DistributionStrategy chooses when to synchronize.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class tf.VariableAggregation.

  Returns:
    tf.identity(var), var pair. The tf.identity() node is colocated
    with var. In the case of FLAGS.no_identity_on_vars, simply returns
    a var, var pair.
  """
  p = params.Copy()
  assert isinstance(p, hyperparams.Params)
  dtype = p.dtype
  # Resolve any symbolic dims into a concrete static shape.
  shape = tf.TensorShape(ToStaticShape(p.shape)).as_list()
  p.Set(shape=shape)
  dim0 = 1
  if shape:
    assert all([dim_size > 0 for dim_size in shape]), shape
    dim0 = shape[0]
  # Only the 'constant' method may use a negative scale.
  assert p.init.method == 'constant' or np.all(np.asarray(p.init.scale) >= 0)
  method = p.init.method
  scale = p.init.scale
  seed = p.init.seed
  if IsDefaultParamInit(p.init):
    tf.logging.warning(
        'WARNING!!! var %s is using the default xavier initializer.'
        ' Make sure this is intended.', name)
  if tf.get_default_graph().seed is not None:
    # We are in a program/test which need determistic randomization.
    if seed is None:
      if default_seed is not None:
        seed = default_seed
      else:
        # We are not given a per-variable random seed. We use hash of
        # variable name as a stable random seed.
        with tf.variable_scope(name) as scope:
          var_name = GetVariableName(scope.name)
          seed = GenerateSeedFromName(var_name)
  # The *_sqrt_dim / *_sqrt_fanin / *_sqrt_fanout families fold the dimension
  # factor directly into the scale before picking an initializer below.
  if (method in [
      'gaussian_sqrt_dim', 'uniform_sqrt_dim', 'truncated_gaussian_sqrt_dim'
  ]):
    if len(shape) > 2:
      # This is probably not the right method to use when len(shape) > 2,
      # e.g. dim0 will be 3 with a 3x3 conv2d kernel.
      tf.logging.warning(
          'Initializing %s of shape %s with method %s: dim0=%s. '
          'Make sure that it is intended.', name, shape, method, dim0)
    scale *= 1.0 / math.sqrt(dim0)
  if method in ['gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanin']:
    fan_in, _ = GetFanInFanOut(shape)
    if fan_in is not None:
      scale *= 1.0 / math.sqrt(fan_in)
  if method in ['gaussian_sqrt_fanout', 'truncated_gaussian_sqrt_fanout']:
    _, fan_out = GetFanInFanOut(shape)
    if fan_out is not None:
      scale *= 1.0 / math.sqrt(fan_out)
  # Random values are drawn in the real dtype; complex dtypes are handled by
  # ComplexWrapper below.
  init_dtype = dtype.real_dtype
  if method in [
      'gaussian', 'gaussian_sqrt_dim', 'gaussian_sqrt_fanin',
      'gaussian_sqrt_fanout'
  ]:
    v_init = init_ops.random_normal_initializer(
        mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)
  elif method in ['uniform', 'uniform_sqrt_dim']:
    v_init = init_ops.random_uniform_initializer(
        minval=-scale, maxval=scale, seed=seed, dtype=init_dtype)
  elif method in ['uniform_positive']:
    v_init = init_ops.random_uniform_initializer(
        minval=0.0, maxval=scale, seed=seed, dtype=init_dtype)
  elif method in ['uniform_unit_scaling']:
    v_init = init_ops.uniform_unit_scaling_initializer(
        factor=scale, seed=seed, dtype=init_dtype)
  elif method in [
      'truncated_gaussian', 'truncated_gaussian_sqrt_dim',
      'truncated_gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanout'
  ]:
    v_init = init_ops.truncated_normal_initializer(
        mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)
  elif method in ['constant']:
    v_init = init_ops.constant_initializer(value=scale, dtype=init_dtype)
  elif method in ['xavier', 'geo_mean_xavier']:
    # pylint: disable=unused-argument
    def XavierUniform(shape, dtype, partition_info):
      """Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x])."""
      if not shape:
        raise ValueError(
            '\'shape\' must not be \'None\' or 0 for XavierUniform')
      fan_in, fan_out = GetFanInFanOut(shape)
      if method == 'xavier':
        limit = math.sqrt(6. / (fan_in + fan_out))
      elif method == 'geo_mean_xavier':
        limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))
      return scale * tf.random.uniform(shape, -limit, limit, dtype, seed)
    # pylint: enable=unused-argument
    v_init = XavierUniform
  elif method in [
      'kaiming_uniform_fanin_relu', 'kaiming_uniform_fanin_leakyrelu'
  ]:
    fan_in = np.prod(shape[:-1])
    if method == 'kaiming_uniform_fanin_leakyrelu':
      # Assume the 'a' parameter is the 'scale' argument.
      gain = np.sqrt(2. / (1 + scale**2))
    else:
      gain = np.sqrt(2.)
    std_dev = gain / np.sqrt(fan_in)
    bound = np.sqrt(3.0) * std_dev
    v_init = init_ops.random_uniform_initializer(
        minval=-bound, maxval=bound, seed=seed, dtype=init_dtype)
  else:
    assert False, 'init_type not supported.'
  if dtype == tf.complex64:
    # Complex variables are initialized by drawing a [2, ...] real tensor and
    # combining the two halves as real and imaginary parts.
    def ComplexWrapper(init):
      def _Wrapper(shape, dtype, partition_info):
        # A more complex alternative may be to use the init function for
        # magnitudes and uniform random for phases instead.
        shape = [2] + shape
        value = init(shape, init_dtype, partition_info)
        return tf.complex(value[0], value[1])
      return _Wrapper
    v_init = ComplexWrapper(v_init)
  # TODO(yonghui): Possibly get away from variable_scope and implement our own
  # variable sharing mechanism.
  def GetVar(reuse=reuse):
    """reuse: Whether to reuse the variables."""
    var_shape = GetVariableShapePrefixes() + list(shape)
    with tf.variable_scope(name) as scope:
      var_name = GetVariableName(scope.name)
      var_scope = tf.VariableScope(
          scope.reuse,
          custom_getter=scope.custom_getter,
          caching_device=scope.caching_device,
          use_resource=scope.use_resource or use_resource_variables())
    with tf.variable_scope(var_scope), \
        tf.variable_scope(var_name, reuse=reuse) as scope:
      if _FromGlobal('pin_vars_to_cpu'):
        with tf.device('/cpu:0'):
          return tf.get_variable(
              'var',
              var_shape,
              dtype,
              v_init,
              collections=collections,
              trainable=trainable,
              validate_shape=True if var_shape is not None else False,
              synchronization=synchronization,
              aggregation=aggregation)
      else:
        with tf.device(''):
          return tf.get_variable(
              'var',
              var_shape,
              dtype,
              v_init,
              collections=collections,
              trainable=trainable,
              validate_shape=True if var_shape is not None else False,
              synchronization=synchronization,
              aggregation=aggregation)
  if _get_opportunistic_variable_reuse()[0]:
    try:
      var = GetVar()
    except ValueError:  # Possibly the variable already exists
      var = GetVar(reuse=True)
  else:
    var = GetVar()
  # Partitioning annotation
  var_ref = var.experimental_ref()  # For key in dict/set.
  all_vars = _get_all_vars()
  if var_ref in all_vars:
    tf.logging.info('Reusing var %s', var.name)
    cached = all_vars[var_ref]
    # A reused variable must have been created with an identical config.
    assert cached == p, ('Cached config:\n %s vs new config:\n %s' %
                         (cached.ToText(), p.ToText()))
  else:
    tf.logging.info('Creating var %s shape=%s on device %s', var.name,
                    var.shape, var.device)
    all_vars[var_ref] = p.Copy()
  # NOTE: collections are (re-)added on every call, including reuse.
  for col in p.collections:
    tf.add_to_collection(col, var)
  if _FromGlobal('no_identity_on_vars'):
    #with tf.device(var.device):
    return var, var
  else:
    # This tf.identity colocated with var.
    #with tf.device(var.device):
    #if p.xla_num_partitions:
    #  xla_ref = xla_sharding.split(var, p.xla_partition_dim, p.xla_num_partitions, use_sharding_op=True)
    #  return xla_ref, var
    return tf.identity(var), var
_global_variable_scope = None
def GetGlobalVariableScope():
  """Gets the global variable scope (as if no variable_scope has been set).

  Returns:
    The VariableScope corresponding to as if no tf.variable_scope is in effect.
  """
  if not _global_variable_scope:
    # tf.get_variable_scope() is per-thread, and a brand-new thread starts
    # with an empty scope stack, i.e. the top-level scope. Capturing it from a
    # helper thread therefore yields the top-level scope without touching
    # global TF state at module-import time -- which matters because flags
    # flipped after import can still affect core TensorFlow behavior.
    def _CaptureTopLevelScope():
      global _global_variable_scope
      _global_variable_scope = tf.get_variable_scope()

    capture_thread = threading.Thread(target=_CaptureTopLevelScope)
    capture_thread.start()
    capture_thread.join()
  return _global_variable_scope
_GLOBAL_STEP_STACK = []
@contextlib.contextmanager
def GlobalStepContext(global_step_tensor):
  """Makes `global_step_tensor` the innermost global step for this context.

  While active, GetGlobalStep() returns `global_step_tensor` instead of
  TF's default global step.

  Args:
    global_step_tensor: The tensor to push onto the global-step stack.

  Yields:
    None.
  """
  _GLOBAL_STEP_STACK.append(global_step_tensor)
  try:
    yield
  finally:
    # The original 'except: raise' clause was a no-op: 'finally' already
    # guarantees the pop on both normal and exceptional exits.
    _GLOBAL_STEP_STACK.pop()
def GetGlobalStep():
  """Returns the innermost overridden global step, or TF's default one."""
  stack = _GLOBAL_STEP_STACK
  return stack[-1] if stack else tf.train.get_global_step()
def GetOrCreateGlobalStepVar():
  """Return the global_step variable, creating it if it does not exist.

  Prefer GetGlobalStep if a tensor rather than a tf.Variable is sufficient.

  Returns:
    The global_step variable, or a new created one if it does not exist.
  """
  # Create the variable at the top-level scope so the result is independent
  # of whatever variable_scope the caller happens to be in.
  top_scope = GetGlobalVariableScope()
  with tf.variable_scope(top_scope, use_resource=use_resource_variables()):
    return tf.train.get_or_create_global_step()
def LogMultiLines(label, lines):
  """Logs each line of `lines` (a string or a sequence) prefixed by `label`."""
  if isinstance(lines, (list, tuple)):
    line_seq = lines
  else:
    line_seq = lines.split('\n')
  for one_line in line_seq:
    tf.logging.info('%s: %s', label, one_line)
def _LogPlacement(label, theta, copy):
  """Logs theta and its copy's device placement."""

  def DevicesOf(nested):
    # Flatten the `.NestedMap` and collect each value's device string.
    return [t.device for t in nested.Flatten()]

  tf.logging.info('=== %s ===', label)
  placement_strs = [
      '%s -> %s' % (src, dst)
      for src, dst in zip(DevicesOf(theta), DevicesOf(copy))
  ]
  LogMultiLines(label, theta.Pack(placement_strs).DebugString())
  tf.logging.info('==========')
def CreateLocalTheta(theta, device_list=None, label=None):
  """Creates local copy of theta and shards across devices device list.

  Leaves variables intact.

  Args:
    theta: a `.NestedMap` of variables.
    device_list: list of devices to shard across. If None, defaults to a list
      [''].
    label: Logging label.

  Returns:
    A `.NestedMap` of identity() wrapped theta
  """

  class _RoundRobinIdentity(object):
    """Wraps non-variable values in tf.identity, cycling through devices."""

    def __init__(self, devices):
      self._devices = devices if devices else ['']
      self._next = 0

    def __call__(self, value):
      # Variables pass through untouched.
      if isinstance(value, tf.Variable):
        return value
      device = self._devices[self._next % len(self._devices)]
      with tf.device(device):
        self._next += 1
        return tf.identity(value)

  local_copy = theta.Transform(_RoundRobinIdentity(device_list))
  _LogPlacement(label, theta, local_copy)
  return local_copy
def _GetVarsToLoad(all_vars, variable_loading_rules, var_ignore_rules):
  """Determines variables to load and their names in checkpoint.

  Args:
    all_vars: Iterable of model variables.
    variable_loading_rules: List of (regexp, name_format) pairs. The first
      regexp matching a variable's name determines its checkpoint name via
      `name_format % match.groups()`.
    var_ignore_rules: List of regexps; variables matching any of them are
      never loaded.

  Returns:
    A list of (checkpoint_var_name, model_var) pairs.
  """
  # This list contains mappings from var names as they appear in the checkpoint
  # to the vars in our model they correspond to.
  vars_to_load = []
  for model_var in all_vars:
    # The ignore rules do not depend on which loading rule is being tried, so
    # evaluate them once per variable rather than once per (var, rule) pair
    # as the original code did.
    if any(re.match(r, model_var.name) for r in var_ignore_rules):
      continue
    for regexp, name_format in variable_loading_rules:
      match = re.match(regexp, model_var.name)
      if not match:
        continue
      checkpoint_var_name = name_format % match.groups()
      # Strip the tensor-output suffix ('name:0' -> 'name').
      if checkpoint_var_name.endswith(':0'):
        checkpoint_var_name = checkpoint_var_name[:-2]
      tf.logging.info('Loading %s from %s', model_var, checkpoint_var_name)
      vars_to_load.append((checkpoint_var_name, model_var))
      break
  return vars_to_load
def OverrideVarsFromCheckpoint(sess, all_vars, checkpoint_path,
                               variable_loading_rules, var_ignore_rules):
  """Overrides variables from a provided checkpoint."""
  vars_to_load = _GetVarsToLoad(all_vars, variable_loading_rules,
                                var_ignore_rules)
  if not vars_to_load:
    raise ValueError(('Variable loading rules did not match any vars. '
                      'All known: %r') % [v.name for v in all_vars])
  load_var_names = sorted([v.name for _, v in vars_to_load])
  tf.logging.info('Overriding vars from checkpoint: %r', load_var_names)
  # The same checkpoint value may be restored into several model variables
  # (e.g. during distillation), but tf.train.Saver requires checkpoint names
  # to be unique per call. Restore in rounds, each round handling at most one
  # variable per checkpoint name.
  pending = list(vars_to_load)
  while pending:
    this_round = {}
    leftover = []
    for ckpt_name, model_var in pending:
      if ckpt_name in this_round:
        leftover.append((ckpt_name, model_var))
      else:
        this_round[ckpt_name] = model_var
    tf.train.Saver(var_list=this_round).restore(sess, checkpoint_path)
    pending = leftover
def OverrideVarsFromCheckpoints(session, all_vars, ckpts_loading_rules):
  """Overrides model variables from checkpoints.

  Args:
    session: Tensorflow session.
    all_vars: List of all the parameters in the model.
    ckpts_loading_rules: A dictionary of checkpoint path: loading rules.
      Checkpoint path must be a path to a pretrained model, and loading rules
      is expected to be a tuple of two lists. The first consisting of tuples
      of strings defining (regex to match parameter names in the model to
      override, format string to determine the corresponding var in the
      checkpoint), and the second list consisting of a list of regexes to
      match parameter names in the model which should not be overridden, even
      if they match those in the loading rules.

  Raises:
    ValueError: if colliding vars exist or loading rules is not a list.
  """
  if len(ckpts_loading_rules) > 1:
    tf.logging.info('Overriding vars from multiple checkpoints.')

  var_refs_overridden = set()
  for ckpt_path, loading_rules in ckpts_loading_rules.items():
    tf.logging.info('Overriding vars from checkpoint: %s', ckpt_path)

    # Validate the rules structure in one place: it must be a tuple of
    # exactly two lists. (The original raised the identical error from two
    # separate checks.)
    if (not isinstance(loading_rules, tuple) or len(loading_rules) != 2 or
        not all(isinstance(l, list) for l in loading_rules)):
      raise ValueError('Loading rules for %s must be a tuple of two lists!' %
                       ckpt_path)

    # Filter the model variables to be overridden.
    var_refs_to_override = [
        var[1].experimental_ref()
        for var in _GetVarsToLoad(all_vars, loading_rules[0], loading_rules[1])
    ]

    # A variable must not be overridden by two different checkpoints.
    overlap_refs = set.intersection(var_refs_overridden, var_refs_to_override)
    if overlap_refs:
      raise ValueError('Colliding variables to override: %s' % overlap_refs)

    OverrideVarsFromCheckpoint(session, all_vars, ckpt_path, loading_rules[0],
                               loading_rules[1])
    var_refs_overridden.update(var_refs_to_override)
  tf.logging.info('Model variables overridden: %s', var_refs_overridden)
def ComputeGradientsSimple(loss, all_vars, grad_aggregation_method,
                           colocate_gradients_with_ops, gate_gradients):
  """Computes gradients of `loss` w.r.t. `all_vars` via tf.gradients."""
  gradient_kwargs = dict(
      aggregation_method=grad_aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops,
      gate_gradients=gate_gradients)
  return tf.gradients(loss, all_vars, **gradient_kwargs)
def ComputeTpuEmbeddingGradients(loss, activation_dict, tpu_embedding):
  """Returns a TpuEmbedding SendGradient op.

  Args:
    loss: The loss to backprop from.
    activation_dict: String feature -> embedding activations dict.
    tpu_embedding: TPUEmbedding instance.
  """
  # Scale the loss to account for the full batch size.
  num_shards = tpu_function.get_tpu_context().number_of_shards
  loss *= tf.constant(1.0 / num_shards, dtype=loss.dtype)

  # Differentiate the loss w.r.t. each feature's activations, preserving the
  # feature order of activation_dict.
  feature_names = list(activation_dict.keys())
  activation_grads = tf.gradients(loss, list(activation_dict.values()))
  grads_by_feature = py_collections.OrderedDict(
      zip(feature_names, activation_grads))
  return tpu_embedding.generate_send_gradients_op(grads_by_feature)
def _ComputeGradientsTpu(loss,
                         all_vars,
                         grad_aggregation_method,
                         colocate_gradients_with_ops,
                         gate_gradients,
                         skip_zero_gradients=None,
                         use_bf16_gradients_ar=False):
  """Computes gradients for local loss across whole TPU cluster.

  This implementation specializes for the case where weight params maybe used
  for different number of times in the forward computation, so that gradients
  should be normalized by the actual number of times they are being computed.

  TODO(yonghui): Maybe merge this implementation with the _ComputeGradientsTpu
  one.  (NOTE(review): this TODO references this function's own name;
  presumably ComputeGradientsSimple was meant -- confirm with the author.)

  Args:
    loss: The loss to backprop from.
    all_vars: Vars with respect to which gradients are to be computed.
    grad_aggregation_method: aggregation method to use when calling
      tf.gradients.
    colocate_gradients_with_ops: boolean, whether or not to colocate gradient
      op with the original op.
    gate_gradients: boolean, flag to be passed to tf.gradients.
    skip_zero_gradients: whether to skip zero gradients during aggregation.
    use_bf16_gradients_ar: Whether to use bfloat16 dtype for gradients
      all-reduce.

  Returns:
    Gradients to be passed back.

  Raises:
    ValueError: upon invalid arguments.
  """
  if not skip_zero_gradients:
    # Scale the loss to account for the full batch size: each replica computes
    # a per-shard loss, and cross_replica_sum below adds them up. (When
    # skipping zero gradients, normalization by num_updates replaces this.)
    shards = tpu_function.get_tpu_context().number_of_shards
    assert shards
    loss *= tf.constant(1.0 / shards, dtype=loss.dtype)

  # Computes the gradients.
  # Sum the grads so that we can compute statistics across the whole batch.
  all_grads = ComputeGradientsSimple(loss, all_vars, grad_aggregation_method,
                                     colocate_gradients_with_ops,
                                     gate_gradients)

  # NOTE: We can't use tpu_optimizer.CrossShardOptimizer since
  # we need to scale the grads *after* the cross_replica_sum to
  # match GPU version!

  # TODO(cwhipkey): should we do something different here? - we could do
  # some operations on the gradients before the aggregation (see comments in
  # tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py - see compute_gradients -
  # for some more details).

  aggregated_grads = []
  for g in all_grads:
    if g is None:
      # Keep placeholder Nones so the result stays aligned 1:1 with all_vars.
      aggregated_grads.append(None)
      continue
    if use_bf16_gradients_ar:
      # Cast before the all-reduce to reduce cross-replica bandwidth.
      g = tf.cast(g, tf.bfloat16)
    with tf.ops.colocate_with(g):
      if skip_zero_gradients is None:
        # loss is already scaled by 1/shards.
        normalized_g = tf.tpu.cross_replica_sum(g)
      else:
        # Compute the cross-replica mean of 'g', skipping zero gradients.

        # Q(yonghui): Is there a better way to detect a non-zero gradient?
        # Note(yonghui): gradient of a weight can be zero if that
        # weight is not used in the forward computation, e.g. as in
        # switchable layers in neural architecture search, pruned by channel
        # mask, or sparsified.
        if skip_zero_gradients == 'weight':
          # Same shape as 'g': per-weight 0/1 indicator.
          g_is_non_zero = tf.cast(tf.math.abs(g) > 1e-8, g.dtype)
        elif skip_zero_gradients == 'variable':
          # A variable-wide 0/1 scalar.
          g_is_non_zero = tf.cast(
              tf.reduce_sum(tf.math.abs(g)) > 1e-24, g.dtype)
        else:
          raise ValueError('Unknown skip_zero_gradients: %s' %
                           skip_zero_gradients)
        # Average only over the replicas that produced a non-zero gradient;
        # the max with 1.0 guards against division by zero.
        num_updates = tf.maximum(tf.tpu.cross_replica_sum(g_is_non_zero), 1.0)
        normalized_g = tf.tpu.cross_replica_sum(g) / num_updates
      aggregated_grads.append(normalized_g)
  return aggregated_grads
class VarGrad(object):
  """A class that holds a variable and a gradient.

  Behaves like a (var, grad) named tuple: supports indexing, attribute
  access (`.var` / `.grad`), iteration, and unpacking.
  """

  _VAR_GRAD = py_collections.namedtuple('VarGradNamedTuple', ['var', 'grad'])

  def __init__(self, *args, **kwargs):
    self._pair = self._VAR_GRAD(*args, **kwargs)

  def __getattr__(self, key):
    # Delegate unknown attribute lookups ('var', 'grad', namedtuple helpers)
    # to the underlying named tuple.
    return getattr(self._pair, key)

  def __getitem__(self, key):
    return self._pair[key]

  def __iter__(self):
    return iter(self._pair)

  def __repr__(self):
    return 'VarGrad(%r, %r)' % (self._pair.var, self._pair.grad)
def ComputeGradients(
    loss,
    vmap,
    grad_aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE,
    colocate_gradients_with_ops=True,
    gate_gradients=False,
    compute_gradients_fn=None,
    skip_zero_gradients=None,
    use_bf16_gradients_ar=False):
  """Computes gradients of variables in vmap w.r.t loss.

  Args:
    loss: A scalar Tensor.
    vmap: A `.NestedMap` of variables.
    grad_aggregation_method: Specifies the method used to combine gradient
      terms. Accepted values are constants defined in the class
      AggregationMethod.
    colocate_gradients_with_ops: If True, try colocating gradients with the
      corresponding op.
    gate_gradients: If True, add a tuple around the gradients returned for an
      operations. This avoids some race conditions.
    compute_gradients_fn: Function to use to compute gradients. If None, use
      default. compute_gradients_fn should have the same signature as this
      function, but without the last argument.
    skip_zero_gradients: Whether to skip aggregating zero gradients. This helps
      in case where some weights may not be used in forward computation, e.g.,
      sparsely activated networks or switchable layers in neural architectural
      search. Only applicable on TPU.
      Possible values are:

      * None: do not skip zero gradients;
      * `variable`: skip if the entire variable's gradients are almost zero;
        reduce_sum(abs(grads)) < 1e-8.
      * `weight`: skip if the individual weight's gradients are almost zero:
        abs(grad) < 1e-8.
    use_bf16_gradients_ar: Whether to use bfloat16 dtype for gradients
      all-reduce. This applies to TPU only.

  Returns:
    var_grad - a `.NestedMap` of VarGrad. You can view
    var_grad as an ordered list of (key, (var, grad)) tuples. Every
    key of var_grad exists in vmap. Every variable in vmap that
    contributes to loss must exist in var_grad. Every var of var_grad
    must exist in vmap. grad is the corresponding gradient computed
    for var. grad is guaranteed to be not None.
  """
  loss = HasRank(loss, 0)
  assert isinstance(vmap, NestedMap)
  assert skip_zero_gradients in (None, 'variable', 'weight')

  # Uniqify and remove None.
  filtered_vmap = vmap.Filter(_Unique())
  assert filtered_vmap is not None

  # Filter out variables not contributing to 'loss'.
  trainable_variables = set(tf.trainable_variables())
  dependent_ops_and_tensors = set(FindNeeded([loss]))

  def Needed(v):
    # Non-variable entries (e.g. plain tensors) are kept as-is.
    if isinstance(v, tf.Variable):
      if v not in trainable_variables:
        # Skip non-trainable variables. Otherwise,
        # tf.Optimizer.apply_gradients throws up an exception instead
        # of skipping the update.
        return False
    return True

  filtered_vmap = filtered_vmap.Filter(Needed)
  assert filtered_vmap is not None
  filtered_vlist = filtered_vmap.Flatten()

  # Use caller-supplied gradient function if supplied.
  if compute_gradients_fn is not None:
    take_grad = compute_gradients_fn
  else:
    # tpu vs non-tpu is slightly different.
    if use_tpu():
      take_grad = functools.partial(
          _ComputeGradientsTpu,
          skip_zero_gradients=skip_zero_gradients,
          use_bf16_gradients_ar=use_bf16_gradients_ar)
    else:
      take_grad = ComputeGradientsSimple

  grads = take_grad(loss, filtered_vlist, grad_aggregation_method,
                    colocate_gradients_with_ops, gate_gradients)

  # Formulate pairs of (var, grad) and pack them into the same
  # structure as filtered_vmap.
  var_grads = filtered_vmap.Pack(
      [VarGrad(v, g) for v, g in zip(filtered_vlist, grads)])

  # TPU training is not compatible with the variable name check below when
  # control flow v2 is enabled. The main reason is the body function will be
  # encapsulated as a TF function while variables will be lifted out, and as a
  # result dependent_ops_and_tensors will not contain any variables. See
  # b/150689507 for more info.
  if not tf.compat.v1.control_flow_v2_enabled():
    # Check that gradients for variables that are not needed by current task is
    # empty.
    def CheckGrad(vg):
      if vg.var.name not in dependent_ops_and_tensors and vg.grad is not None:
        err_msg = ('Variable %s is not a dependent of %s, expect '
                   'gradient be None, but got %s. This should not happen, '
                   'please contact the owner of b/150689507 for further '
                   'investigation.' % (str(vg.var), str(loss), str(vg.grad)))
        assert False, err_msg
      return True

    var_grads = var_grads.Filter(CheckGrad)

  # Removes pairs whose grad is None (logged here, filtered below).
  for key, (_, g) in var_grads.FlattenItems():
    if g is None:
      tf.logging.info('ComputeGradients drops %s', key)
  return var_grads.Filter(lambda var_grad: var_grad.grad is not None)
def MaskGradients(var_grad, grad_mask):
  """Computes gradients of non-masked variables in vmap w.r.t loss.

  Args:
    var_grad: A `.NestedMap` of (variable, gradient)
    grad_mask: A dict of (variable name, mask).

  Returns:
    var_grad - a `.NestedMap` of (variable, mask * gradient).
  """

  def _Masked(entry):
    v, g = entry
    mask = grad_mask[v.name]
    if isinstance(g, tf.IndexedSlices):
      # Sparse gradient: scale only the values, keep the indices untouched.
      return VarGrad(v, tf.IndexedSlices(g.values * mask, g.indices))
    return VarGrad(v, g * mask)

  return var_grad.Transform(_Masked)
def ApplyGradMultiplier(vs_gs, grad_scale=None):
  """Scale gradients by grad_scale on same device as corresponding variables.

  Args:
    vs_gs: A `.NestedMap` of VarGrad. NOTE(review): when grad_scale is None,
      each entry must additionally expose a `.scale` attribute (so it is not a
      plain two-field VarGrad in that case) -- confirm against callers.
    grad_scale: If None, each vs_gs entry has the scale. Otherwise, grad_scale
      applies to every entry.

  Returns:
    A `.NestedMap` of (variable, gradient * grad_scale). In particular, if
    grad_scale is 0, the result gradient is always 0, even if the input
    gradient is inf or nan.
  """

  def ScaleOrZero(var, grad, scale):
    # Fail fast on non-finite gradients, then force an exact zero result when
    # scale == 0 (plain multiplication would give nan for inf/nan grads).
    grad = CheckNumerics(grad, 'Gradient for %s is not finite.' % var.name)
    return tf.where(
        tf.equal(scale, 0.), tf.zeros_like(grad),
        tf.cast(scale, grad.dtype) * grad)

  def Scale(item):
    """Scales the gradient."""
    var, grad = item
    assert grad is not None, ('No grad found for ', var.name)
    if grad_scale is None:
      scale = item.scale
    else:
      scale = grad_scale
    # Perform the scaling math on the variable's device.
    with tf.device(var.device):
      if isinstance(grad, tf.IndexedSlices):
        # Sparse gradient: scale only the values, keep indices/dense_shape.
        grad = tf.IndexedSlices(
            ScaleOrZero(var, grad.values, scale), grad.indices,
            grad.dense_shape)
      else:
        grad = ScaleOrZero(var, grad, scale)
    return VarGrad(var, grad)

  return vs_gs.Transform(Scale)
def HasNanOrInfGradient(var_grads):
  """Returns a bool tensor to indicate if `var_grads` contains NaNs or Infs.

  Args:
    var_grads: A `.NestedMap` with (var, grad) tuple as the map value.

  Returns:
    A bool scalar tensor to indicate if the `var_grads` contains NaNs or Infs.
  """

  def _AnyNonFinite(t):
    if isinstance(t, tf.IndexedSlices):
      t = t.values
    with tf.device(t.device):
      if t.dtype.is_complex:
        # Check real and imaginary parts independently.
        return tf.reduce_any(
            [_AnyNonFinite(tf.math.real(t)),
             _AnyNonFinite(tf.math.imag(t))])
      return tf.reduce_any(
          tf.math.logical_or(tf.math.is_nan(t), tf.math.is_inf(t)))

  return tf.reduce_any([_AnyNonFinite(g) for (_, g) in var_grads.Flatten()])
def ApplyGradNormClipping(vs_gs, norm=1.0):
  """Clip gradients to norm on same device as corresponding variables.

  Args:
    vs_gs: A `.NestedMap` of VarGrad.
    norm: Each tensor's gradient will be scaled down to have a maximum L2-norm
      value of `norm`.

  Returns:
    A `.NestedMap` of VarGrad(variable, scaled_gradient). In particular, if
    grad_scale is 0, the result gradient is always 0, even if the input
    gradient is inf or nan.
  """

  def _CheckAndClip(var, grad, max_norm):
    # Fail fast on non-finite gradients before clipping.
    grad = CheckNumerics(grad, 'Gradient for %s is not finite.' % var.name)
    return tf.clip_by_norm(grad, max_norm)

  def Clip(item):
    """Scales the gradient."""
    var, grad = item
    assert grad is not None, ('No grad found for ', var.name)
    # Run the clipping math on the variable's device.
    with tf.device(var.device):
      if isinstance(grad, tf.IndexedSlices):
        # Sparse gradient: clip only the values, keep indices/dense_shape.
        grad = tf.IndexedSlices(
            _CheckAndClip(var, grad.values, norm), grad.indices,
            grad.dense_shape)
      else:
        grad = _CheckAndClip(var, grad, norm)
    return VarGrad(var, grad)

  return vs_gs.Transform(Clip)
SKIP_LP_REGULARIZATION = '__lingvo_skip_lp_regularization'
def AdjustGradientsWithLpLoss(var_grads, lp_regularizer_weight, p=2.0):
  """Adjusts the map of (var, grad) with Lp regularization, where p=1.0 or 2.0.

  Args:
    var_grads: a `.NestedMap` or list of (variable, gradient).
    lp_regularizer_weight: Lp regularization weight.
    p: For now we support 1.0 or 2.0.

  Returns:
    A tuple (lp_loss, var_grads).

    - lp_loss: A scalar. The lp loss.
    - var_grads: a `.NestedMap` or list of (variable, gradient) regulated
      by Lp.
  """
  # TODO(yuancao): For now we support p=1 or 2, but this can be extended to
  # lp-norm in general.
  assert p in [2.0, 1.0], 'For now we only support L1/L2 regularization.'

  def GetVar(item):
    # For sparse (IndexedSlices) gradients only the rows touched in this step
    # contribute to the Lp loss, so gather the unique rows of the variable.
    var, grad = item
    if isinstance(grad, tf.IndexedSlices):
      with tf.device(var.device):
        ids = HasRank(grad.indices, 1)
        uniq_ids = tf.unique(ids).y
        return tf.gather(var, uniq_ids)
    else:
      return var

  def ShouldAdjust(v):
    # Variables registered in SKIP_LP_REGULARIZATION are exempt.
    return v not in tf.get_collection(SKIP_LP_REGULARIZATION)

  filtered_var_grads = [
      var_grad for var_grad in Flatten(var_grads) if ShouldAdjust(var_grad.var)
  ]
  filtered_vars = Transform(GetVar, filtered_var_grads)
  for v in filtered_vars:
    tf.logging.info('AdjustGradientsWithLpLoss: %s', v.name)
  # p is asserted to be 1.0 or 2.0 above, so lp_loss is always defined.
  if p == 2.0:
    lp_loss = 0.5 * lp_regularizer_weight * SumSquared(filtered_vars)
  elif p == 1.0:
    lp_loss = lp_regularizer_weight * SumAbs(filtered_vars)

  def LpGrad(var_grad):
    """Adjusts item's grad w/ Lp loss term."""
    var, grad = var_grad
    if isinstance(grad, tf.IndexedSlices):
      # Question(rpang): do we apply Lp loss here even if 'var' is in
      # SKIP_LP_REGULARIZATION?
      #
      # Note: IndexedSlices appears for embedding lookups.
      # Embedding lookup ids can have duplicate. For duplicated ids, we
      # only want to consider once for each ids.
      with tf.device(var.device):
        emb = HasRank(var, 2)
        vocab_size = tf.shape(emb)[0]
        ids = HasRank(grad.indices, 1)
        values = tf.gather(emb, ids)  # [#ids, dims]
      with tf.device(grad.device):
        # Counts is a vector of size vocab_size. counts[i] is i-th words
        # occurances in 'ids'.
        counts = tf.math.unsorted_segment_sum(
            tf.ones_like(ids, dtype=values.dtype), ids, vocab_size)

        # Gradients for duplicated ids will be summed when they get
        # applied, and hence we account for that by first dividing
        # gradient resulting from lp loss by how many times the id is
        # duplicated.
        #
        # For each id in 'ids', we know counts[id] is non-zero,
        # hence, it's always safe to take reciprocal.
        weights = tf.math.reciprocal(tf.gather(counts, ids))
        weights = tf.expand_dims(weights, -1)  # [#ids, 1]
        if p == 2.0:
          grad_v = values
        elif p == 1.0:
          grad_v = tf.sign(values)
        delta = lp_regularizer_weight * weights * grad_v
        grad = tf.IndexedSlices(grad.values + delta, ids)
    elif var not in tf.get_collection(SKIP_LP_REGULARIZATION):
      with tf.device(var.device):
        if p == 2.0:
          grad_v = var
        elif p == 1.0:
          grad_v = tf.sign(var)
        delta = lp_regularizer_weight * grad_v
      with tf.device(grad.device):
        grad += delta
    return VarGrad(var, grad)

  return lp_loss, Transform(LpGrad, var_grads)
def SplitRecursively(x, num_splits, axis=-1):
  """Splits Tensors in 'x' recursively.

  Args:
    x: a Tensor, or a list or NestMap containing Tensors to split.
    num_splits: number of splits per Tensor.
    axis: the split axis.

  Returns:
    A list of split values of length 'num_splits'.

    - If 'x' is a Tensor, a list of split Tensors.
    - If 'x' is a list, a list of lists, where each sublist has the same
      length as 'x' and the k'th element in each sublist corresponds to a
      split of the k'th element from 'x'.
    - If 'x' is a `.NestedMap`, a list of `.NestedMap`, where each field
      corresponds to a split from the same field of 'x'.
  """
  if isinstance(x, tf.Tensor):
    return tf.split(x, num_splits, axis=axis)
  if isinstance(x, list):
    per_element = [SplitRecursively(elem, num_splits, axis) for elem in x]
    # Transpose: one list of element-splits per output split.
    return [list(group) for group in zip(*per_element)]
  if isinstance(x, NestedMap):
    out = [NestedMap() for _ in range(num_splits)]
    for key, val in six.iteritems(x):
      for i, piece in enumerate(SplitRecursively(val, num_splits, axis)):
        out[i][key] = piece
    return out
  raise TypeError('Unexpected type for SplitRecursively: %s' % type(x))
def ConcatRecursively(splits, axis=-1):
  """Concatenates tensors from 'splits'.

  This is the inverse function of SplitRecursively.

  Args:
    splits: a list of splits to concatenate, where elements can be Tensors,
      lists, or `.NestedMap`. The elements must share the same type and
      structure. For example, list elements must have the same length;
      `.NestedMap` must have the same set of fields.
    axis: the concatenation axis.

  Returns:
    Concatenated data.

    - If input 'splits' are Tensors, returns a concatenated Tensor.
    - If input 'splits' are lists, returns a list of the same length where the
      k'th element represents concatenated data of the k'th element from each
      split.
    - If input 'splits' are `.NestedMap`, returns a `.NestedMap` with each
      field concatenated from corresponding fields of input splits.

  Raises:
    TypeError: if 'splits' is not a list or elements of 'splits' do not have
      known or matching types.
    ValueError: if 'splits' is empty or elements of 'splits' do not have
      matching structures.
  """
  if not isinstance(splits, list):
    raise TypeError('Non-list inputs for ConcatRecursively: %s' % splits)
  if not splits:
    raise ValueError('Empty inputs for ConcatRecursively: %s' % splits)

  # The first split determines the expected type/structure of the rest.
  first = splits[0]
  if isinstance(first, tf.Tensor):
    return tf.concat(splits, axis=axis)
  if isinstance(first, list):
    if not all(isinstance(s, list) for s in splits):
      raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)
    if not all(len(s) == len(first) for s in splits):
      raise ValueError('Length mismatch for ConcatRecursively: %s' % splits)
    return [
        ConcatRecursively([s[i] for s in splits], axis)
        for i in range(len(first))
    ]
  if isinstance(first, NestedMap):
    if not all(isinstance(s, NestedMap) for s in splits):
      raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)
    merged = NestedMap()
    for key in first:
      merged[key] = ConcatRecursively([s[key] for s in splits], axis)
    return merged
  raise TypeError('Unexpected type for ConcatRecursively: %s' % type(splits))
def AddToPruningCollections(weight,
                            mask,
                            threshold,
                            gradient=None,
                            old_weight=None,
                            old_old_weight=None):
  """Add mask, threshold, and weight vars to their respective collections."""
  # Only register each mask (and its associated tensors) once.
  if mask in tf.get_collection(pruning.MASK_COLLECTION):
    return
  tf.add_to_collection(pruning.WEIGHT_COLLECTION, weight)
  tf.add_to_collection(pruning.MASK_COLLECTION, mask)
  tf.add_to_collection(pruning.THRESHOLD_COLLECTION, threshold)

  # Add gradient, old_weight, and old_old_weight to collections approximating
  # gradient and hessian, where old_weight is the weight tensor one step
  # before and old_old_weight is the weight tensor two steps before.
  if gradient is not None:
    assert old_weight is not None
    assert old_old_weight is not None
    tf.add_to_collection(pruning.WEIGHT_GRADIENT_COLLECTION, gradient)
    tf.add_to_collection(pruning.OLD_WEIGHT_COLLECTION, old_weight)
    tf.add_to_collection(pruning.OLD_OLD_WEIGHT_COLLECTION, old_old_weight)
def WeightedAvg(values, weights, sum_reduction_fn=tf.reduce_sum, name=''):
  """Computes weighted average of values from a tensor.

  Args:
    values: a tensor of values
    weights: a tensor of weights
    sum_reduction_fn: called to reduce the values and weights to single value
    name: name of metric.

  Returns:
    A tuple (avg, total_weight).

    - avg: weighted average value
    - total_weight: sum of all weights
  """
  msg = 'shape of values and weights tensors must match for metric ' + name
  # Attach the shape-equality assertion as a control dependency on 'values'.
  values = with_dependencies(
      [assert_equal(tf.shape(values), tf.shape(weights), message=msg)], values)
  total_weight = sum_reduction_fn(weights)
  weighted_sum = sum_reduction_fn(values * tf.cast(weights, values.dtype))
  avg = weighted_sum / tf.cast(total_weight, values.dtype)
  return avg, total_weight
def WeightedAvgOfMetrics(metrics):
  """Computes the weighted average of metrics in the list.

  Args:
    metrics: list of dictionaries of metrics

  Returns:
    ret_dict - dictionary of weighted averages of each metrics.
  """
  # Group (value, weight) pairs by metric name across all input dicts.
  grouped = {}
  for metric_dict in metrics:
    for name, (value, weight) in six.iteritems(metric_dict):
      grouped.setdefault(name, []).append((value, weight))

  ret_dict = {}
  for name, pairs in sorted(six.iteritems(grouped)):
    values = tf.stack([v for v, _ in pairs])
    weights = tf.stack([w for _, w in pairs])
    ret_dict[name] = WeightedAvg(values, weights, tf.reduce_sum, name)
  return ret_dict
def ConcatPerExampleTensors(per_example):
  """Concatenate per-example tensors from many hosts into one large block.

  Args:
    per_example: list of dictionaries of per-example tensors.

  Returns:
    ret_dict - string -> concatenated tensors.
  """
  # Group tensors by name across all input dicts, preserving input order.
  grouped = {}
  for tensor_dict in per_example:
    for name, value in six.iteritems(tensor_dict):
      grouped.setdefault(name, []).append(value)

  # Concatenate each group along the leading (example) axis.
  return {
      name: tf.concat(values, 0)
      for name, values in sorted(six.iteritems(grouped))
  }
def CombineMetrics(loss_metric_weight_pairs):
  """Combines metrics from `loss_metric_weight_pairs` according to weights.

  Keys must either exist in all metrics, in which it will be processed as a
  weighted sum, or exist in only one metrics, in which case it will be copied.

  Args:
    loss_metric_weight_pairs: a list of (metrics, weight) pairs, where each
      weight is a float and each metrics is a dict with str keys and
      (metric_value, target_weight) values.

  Returns:
    A dict with the same set of keys as input metrics and values of
    (weighted_sum(metric_value), weighted_sum(target_weight)).

  Raises:
    ValueError: if there exists a metric that exists in more than one element
      of `loss_metric_weight_pairs` but not in all of them.
  """
  all_keys = set(
      k for loss_metrics, _ in loss_metric_weight_pairs for k in loss_metrics)
  result = {}
  for k in all_keys:
    count = sum(
        1 for loss_metrics, _ in loss_metric_weight_pairs if k in loss_metrics)
    if count > 1 and count != len(loss_metric_weight_pairs):
      # Fixed: the original message concatenated without a space, producing
      # '...more than onebut not all...'.
      raise ValueError('Found metric %s which exists in more than one '
                       'but not all loss metrics.' % k)
    total_val = 0
    total_target_weight = 0
    for loss_metrics, weight in loss_metric_weight_pairs:
      if k not in loss_metrics:
        continue
      val, target_weight = loss_metrics[k]
      if count == 1:
        # Single metric, don't multiply by weight.
        total_val = val * target_weight
        total_target_weight = target_weight
      else:
        # Total weighted sum of all predictions.
        total_val += weight * val * target_weight
        total_target_weight += weight * target_weight
    result[k] = (total_val / total_target_weight, total_target_weight)
  return result
def _AddVN(p, x, step=None):
  """Adds Gaussian variational noise (scaled by p.vn.scale) to 'x'.

  Args:
    p: layer params; reads p.vn.scale (noise std multiplier) and p.vn.seed.
    x: the tensor to perturb.
    step: optional step value used to vary the seed per step.

  Returns:
    x plus noise, with the same shape and dtype as x.
  """
  assert p.vn.scale is not None
  seed = p.vn.seed
  # NOTE(review): a seed of 0 or a step of 0 is falsy, so the per-step seed
  # offset is skipped in those cases -- confirm this is intended.
  if seed and step:
    seed += step * 203984
  noises = tf.cast(p.vn.scale, x.dtype) * tf.random.normal(
      tf.shape(x), stddev=1.0, seed=seed, dtype=x.dtype)
  return x + noises
def AddGlobalVN(params, weights):
  """Adds variational noise to weights if specified by params."""
  if not params.vn.global_vn:
    return weights
  return _AddVN(params, weights)
def AddPerStepVN(params, weights, step=None):
  """Adds per-step variational noise to weights if specified by params."""
  if not params.vn.per_step_vn:
    return weights
  return _AddVN(params, weights, step)
def VariationalNoiseParams(scale, global_vn=False, per_step_vn=False,
                           seed=None):
  """Returns a hyperparams for variational noise.

  Args:
    scale: std of the variational noise; a scalar or scalar tensor.
    global_vn: whether to add global variational noise every training step.
    per_step_vn: whether to add per-timestep variational noise.
    seed: random seed used to generate the noise.
  """
  p = hyperparams.Params()
  # Help-text fixes below: 'apply .' -> 'apply.', 'setp' -> 'step',
  # 'per-timesetp' -> 'per-timestep'.
  p.Define(
      'scale', scale,
      'Std of the variational noise to apply. This can be a scalar,'
      ' or a scalar tensor.')
  p.Define('global_vn', global_vn,
           'Adds global variational noise every training step iff True.')
  p.Define('per_step_vn', per_step_vn,
           'Adds per-timestep variational noise iff True.')
  p.Define('seed', seed, 'Random seed used to generate noise.')
  return p
# To disable VN for a layer, pass 1.0 as the first parameter of the following
# function. Otherwise the result would be identical to base_layer's
# DefaultVN() configuration, which gets overwritten by the parent
# configuration in CopyBaseParams().
def DisableVN():
  """Returns a variational-noise config that disables VN for a layer."""
  return VariationalNoiseParams(scale=1.0, global_vn=False, per_step_vn=False)
def GetStepSeed():
  """Gets step_seed."""
  seeds = tf.get_default_graph().get_collection_ref('step_seed')
  if len(seeds) == 1:
    return seeds[0]
  if not seeds:
    # Lazily initialize the collection, then retry.
    ResetStepSeed()
    return GetStepSeed()
  raise ValueError('Multiple tensors in step_seed collection.')
def ResetStepSeed(seed=0):
  """Resets step_seed to specified value."""
  new_seed = tf.convert_to_tensor(seed, dtype=tf.int64)
  seeds = tf.get_default_graph().get_collection_ref('step_seed')
  if not seeds:
    tf.add_to_collection('step_seed', new_seed)
  elif len(seeds) == 1:
    # Mutate the collection in place through the reference.
    seeds[0] = new_seed
  else:
    raise ValueError('Multiple tensors in step_seed collection.')
def GetIncStepSeed():
  """Returns and increments the step_seed."""
  current_seed = GetStepSeed()
  # TODO(lepikhin): introduce a routine filling a queue of uint32 random seeds
  # independent of underlying PRNG used by tensorflow.
  ResetStepSeed(current_seed + 1)
  return current_seed
def GenerateStepSeedPair(p, global_step, op_seed=None):
  """Generates a seed pair for deterministic random operations in functional loops.

  This function retrieves a unique seed pair on each call, based off the current
  global step and step seed. The step seed ensures this function returns a
  unique seed pair on each call: calling this function automatically increments
  the step seed. The step seed is automatically reset at the beginning of each
  global step in the model's FProp and works transparently through recurrent.py.

  Args:
    p: A hyperparams.Params object, containing keys 'random_seed' and
      'is_inference'.
    global_step: The global step.
    op_seed: An additional operation-level seed to apply.

  Returns:
    A size 2 tensor of op seeds to use for stateless_random ops.
  """
  # TPU stateless random ops take int32 seeds; elsewhere int64 is used.
  seed_dtype = tf.int32 if use_tpu() else tf.int64
  if p.is_inference and p.random_seed is None:
    # Ensure GetIncStepSeed is called even inside the shortcut.
    # This ensures if p.random_seed is set for other ops that use this function
    # that they will get the same seed pair whether or not p.random_seed is set
    # for this specific call.
    GetIncStepSeed()
    # Unlike tf.random*, stateless random ops are completely determined by the
    # passed-in seeds. This means at inference time the same inputs will produce
    # the same outputs, even if the model is supposed to have randomness such as
    # dropout during inference. We inject additional randomness only during
    # inference if the graph is exported with random_seed=None as a workaround.
    return tf.random.uniform([2], maxval=seed_dtype.max, dtype=seed_dtype)
  global_step = tf.cast(global_step, seed_dtype)
  # GetIncStepSeed() bumps the step seed, making each call's pair unique.
  step_seed = tf.cast(GetIncStepSeed(), seed_dtype)
  seeds = tf.stack([global_step, step_seed])
  if p.random_seed is not None:
    seeds += p.random_seed
  if op_seed is not None:
    seeds += op_seed
  return seeds
def DeterministicDropout(x, keep_prob, seeds, noise_shape=None, name=None):
  """Similar to `tf.nn.dropout()`, but fully deterministic.

  Args:
    x: A float Tensor on which to apply dropout.
    keep_prob: A scalar `Tensor` of keep probability.
    seeds: A Tensor of shape [2]. 2 seeds for deterministic random number
      generator.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
      randomly generated keep/drop flags.
    name: An optional name for this operation.

  Returns:
    A Tensor with the same shape as `x`.

  Raises:
    InvalidArgumentError: if keep_prob is invalid.
  """
  if isinstance(keep_prob, numbers.Real):
    if keep_prob <= 0 or keep_prob > 1:
      # NOTE: tf.errors.InvalidArgumentError requires (node_def, op, message);
      # previously only the message was passed, so the raise itself failed
      # with a TypeError instead of the intended error.
      raise tf.errors.InvalidArgumentError(
          None, None,
          'keep_prob must be in range (0, 1]. Value: {}'.format(keep_prob))
    if keep_prob == 1:
      # Nothing is dropped; no-op.
      return x
  with tf.name_scope(name, 'dropout', [x]) as name:
    if use_tpu():
      # StatelessRandomUniform does not support non-int32 seed types on TPU.
      seeds = tf.cast(seeds, tf.int32)
    keep_prob = tf.convert_to_tensor(
        keep_prob, dtype=tf.float32, name='keep_prob')
    # uniform in [keep_prob, 1.0 + keep_prob)
    # StatelessRandomUniform op does not support non-float (e.g. bfloat16)
    # dtype, so sample in float32 and cast afterwards.
    noise_shape = noise_shape or GetShape(x)
    random_tensor = keep_prob + tf.random.stateless_uniform(
        noise_shape, seed=seeds, dtype=tf.float32)
    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    binary_tensor = tf.floor(random_tensor)
    if x.dtype != tf.float32:
      binary_tensor = tf.cast(binary_tensor, x.dtype)
      keep_prob = tf.cast(keep_prob, dtype=x.dtype)
    result = tf.div(x, keep_prob) * binary_tensor
    result.set_shape(x.get_shape())
    return result
def DeterministicVN(params, seeds, noise_shape, mean=0.0, std=1.0, name=None):
  """Produces fully deterministic Gaussian noise from shape, mean and std.

  Args:
    params: Nested map of params.
    seeds: A Tensor of shape [2]. 2 seeds for deterministic random number
      generator.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
      randomly generated Gaussian noise.
    mean: Mean for the Gaussian noise.
    std: Standard deviation for noise.
    name: An optional name for this operation.

  Returns:
    A Tensor with the shape noise_shape and type fprop_dtype.
  """
  with tf.name_scope(name, 'gaussian_noise') as name:
    if use_tpu():
      # Stateless random ops require int32 seeds on TPU.
      seeds = tf.cast(seeds, tf.int32)
    noise = mean + (std * tf.random.stateless_normal(noise_shape, seed=seeds))
    fprop_dtype = FPropDtype(params)
    if fprop_dtype != tf.float32:
      noise = tf.cast(noise, fprop_dtype)
    return noise
# Graph collection holding batch-norm moving-average update ops.
BATCH_NORM_UPDATES = 'batch_norm_updates'
# Collection key for a dict mapping a batch-norm update op's name to the
# (moving-average variable, batch stats tensor) pair it was built from.
_BATCH_NORM_UPDATES_DICT = '__batch_norm_update_dict'
_get_batch_norm_updates_dict = _CollectionGetter(_BATCH_NORM_UPDATES_DICT,
                                                 lambda: {})
def UpdateBatchNormVars(batch_norm_var, batch_norm_stats, decay):
  """Update batch normalization moving averages."""
  with tf.name_scope(
      'AssignMovingAvg', values=[
          batch_norm_var,
          batch_norm_stats,
          decay,
      ]) as scope:
    with tf.ops.colocate_with(batch_norm_var):
      one_minus_decay = tf.convert_to_tensor(
          1.0 - decay, dtype=batch_norm_var.dtype.base_dtype)
      update_delta = (batch_norm_var - batch_norm_stats) * one_minus_decay
      # Guard against NaN/Inf stats poisoning the moving average: replace a
      # bad delta with zeros (i.e. skip the update).
      has_bad_values = tf.reduce_any(
          tf.math.logical_or(
              tf.math.is_nan(update_delta), tf.math.is_inf(update_delta)))
      update_delta = tf.cond(has_bad_values,
                             lambda: tf.zeros_like(update_delta),
                             lambda: update_delta)
      bn_update = tf.assign_sub(batch_norm_var, update_delta, name=scope)
  tf.add_to_collection(BATCH_NORM_UPDATES, bn_update)
  # Record the (var, stats) pair so FindRelevantBatchNormUpdates can relate
  # the update op back to the stats it consumes.
  updates_dict = _get_batch_norm_updates_dict()
  assert bn_update.name not in updates_dict
  updates_dict[bn_update.name] = (batch_norm_var, batch_norm_stats)
  return bn_update
def FindRelevantBatchNormUpdates(loss, batch_norm_updates):
  """Finds and returns a list of relevant batch-normalization updates.

  Args:
    loss: The loss that is being optimized for. A tensor or a list of tensors.
    batch_norm_updates: A list of batch normalization updates.

  Returns:
    A pair of lists. The first list contains all the batch normalization
    updates that are relevant to the loss being optimized, and the second list
    contains all in batch_norm_updates but not in the first list.
  """
  dependent_ops_and_tensors = set(FindNeeded(loss))
  relevant_updates = []
  irrelevant_updates = []
  bn_update_dict = _get_batch_norm_updates_dict()
  for bn_update in batch_norm_updates:
    # BUG FIX: the assert message previously contained an unformatted '%s';
    # interpolate the op name so failures are actionable.
    assert bn_update.name in bn_update_dict, (
        '%s is probably not a valid batch normalization update op.'
        ' Make sure batch normalization is done through calling'
        ' the py_utils.UpdateBatchNormVars helper routine.' % bn_update.name)
    bn_stat_name = bn_update_dict[bn_update.name][1].name
    if bn_stat_name in dependent_ops_and_tensors:
      # If a batch normalization stat is computed in the forward pass in
      # computing loss, then the corresponding batch normalization update is
      # relevant. Otherwise, it is not.
      relevant_updates.append(bn_update)
    else:
      irrelevant_updates.append(bn_update)
  return relevant_updates, irrelevant_updates
# Graph-collection key used as a stack of active sample-step tensors.
_SAMPLE_STEP_KEY = 'sample_step'
@contextlib.contextmanager
def SampleStep(step):
  """A context for a sample step during decoding.

  Example usage::

    with py_utils.SampleStep(step):
      sample = self.DecodeOneStep()

  Args:
    step: the step tensor.

  Yields:
    a context manager for the step scope.
  """
  # The collection acts as a stack so nested SampleStep scopes expose the
  # innermost step via _GetSampleStep().
  stack = tf.get_collection_ref(_SAMPLE_STEP_KEY)
  try:
    stack.append(step)
    yield step
  finally:
    # Always unwind, even if the body raises.
    stack.pop()
def _GetSampleStep():
  """Returns the innermost SampleStep tensor, or None if outside any scope."""
  steps = tf.get_collection(_SAMPLE_STEP_KEY)
  if not steps:
    return None
  return steps[-1]
def AddDebugTensor(tensor, summarize=None, name=None):
  """Adds `tensor` to the debug collection.

  Prints the tensor if `--print_debug_tensors` is True.

  Args:
    tensor: A tensor.
    summarize: Only print this many entries of each tensor. If None, then a
      maximum of 3 elements are printed per input tensor.
    name: An optional name for the tensor.

  Returns:
    A Tensor that evaluates to the same value as the input tensor.
  """
  if not _FromGlobal('print_debug_tensors'):
    return tensor
  step = _GetSampleStep()
  # Include the current sample step (when inside a SampleStep scope) so the
  # printout can be correlated with the decode step.
  to_print = ([] if step is None else [step]) + [tensor]
  with tf.name_scope(name) as s:
    tensor = tf.Print(
        tensor,
        to_print,
        message='DEBUG tensor %s' % s,
        name=name,
        summarize=summarize)
  return tensor
def ArgMax(inputs):
  """tf.argmax wrapper.

  Args:
    inputs: A tensor, whose last dimension is being reduced on.

  Returns:
    A tensor of rank tf.rank(logits)-1. If i == ret[indices],
    logits[indices, i] is the maximum among logits[indices, :].
  """
  # TPU prefers int32 indices; elsewhere use tf.argmax's default int64.
  output_dtype = tf.int32 if use_tpu() else tf.int64
  return tf.argmax(inputs, axis=-1, output_type=output_dtype)
def _EnsureMatrixShape(x):
  """Asserts `x` is a matrix; marks fully-unknown shapes as rank-2."""
  ndims = x.shape.ndims
  if ndims is None:
    x.set_shape([None, None])
  else:
    assert ndims == 2
  return x
def Matmul(x, y, *args, **kwargs):
  """tf.matmul wrapper expecting x and y are actually matrices."""
  return tf.matmul(
      _EnsureMatrixShape(x), _EnsureMatrixShape(y), *args, **kwargs)
def clip_by_value(t, clip_value_min, clip_value_max, name=None):  # pylint: disable=invalid-name
  """tf.clip_by_value that also supports complex tensors.

  For complex `t`, the real and imaginary parts are clipped independently.

  Args:
    t: A Tensor (real or complex).
    clip_value_min: Minimum value to clip to.
    clip_value_max: Maximum value to clip to.
    name: An optional name for the operation.

  Returns:
    The clipped Tensor.
  """
  if t.dtype.is_complex:
    # BUG FIX: previously '%s_real' % None produced ops literally named
    # 'None_real'/'None_imag'; only derive suffixed names when a name is
    # provided.
    real_name = '%s_real' % name if name else None
    imag_name = '%s_imag' % name if name else None
    return tf.complex(
        tf.clip_by_value(
            tf.math.real(t), clip_value_min, clip_value_max, real_name),
        tf.clip_by_value(
            tf.math.imag(t), clip_value_min, clip_value_max, imag_name))
  return tf.clip_by_value(t, clip_value_min, clip_value_max, name)
def _TransformAndSum(tensor_list, transform):
  """Returns sum(reduce_sum(transform(t)) for t in tensor_list)."""
  with tf.name_scope('TransformAndSum'):
    partial_sums = []
    for t in tensor_list:
      # Keep each partial reduction on the tensor's own device.
      with tf.device(t.device):
        dense_values = t.values if isinstance(t, tf.IndexedSlices) else t
        partial_sums.append(tf.reduce_sum(transform(dense_values)))
    return tf.add_n(partial_sums)
def SumSquared(tensor_list):
  """Returns the sum of squared magnitudes of all elements in tensor_list."""
  return _TransformAndSum(tensor_list, lambda v: tf.abs(v)**2)
def SumAbs(tensor_list):
  """Returns the sum of absolute values of all elements in tensor_list."""
  return _TransformAndSum(tensor_list, tf.abs)
def PiecewiseConstant(x_in, boundaries, values, vdtype):
  """Returns the piecewise value of x_in."""
  assert len(values) == len(boundaries) + 1
  assert sorted(boundaries) == list(boundaries)
  x = tf.cast(tf.convert_to_tensor(x_in), tf.float32)
  bounds = tf.convert_to_tensor(boundaries, dtype=tf.float32)
  vals = tf.convert_to_tensor(values, dtype=vdtype)
  # The number of boundaries <= x is the index of the piece x falls into.
  index = tf.reduce_sum(tf.cast(tf.greater_equal(x, bounds), tf.int32))
  # The following is equivalent to 'return vals[index]'.
  one_hot_vec = tf.one_hot(
      tf.expand_dims(index, 0), depth=len(values), dtype=vdtype)
  return Matmul(tf.reshape(vals, (1, -1)), tf.transpose(one_hot_vec))[0][0]
def PadSequenceDimension(x, length, pad_val, shape=None):
  """Pads x to `length` using `pad_val` along the second dim.

  Assumes `x` is a tensor with rank >= 2, and it only pads `x` to `length`
  along the second dim. Explicitly sets the returned tensor shape to `shape` if
  given. Raises runtime errors if x.shape[1] > length or x.shape[i] != shape[i]
  where i != 1.

  Args:
    x: the tensor to be padded with shape [batch, seq_len, ...].
    length: an int to specify the length to pad x to.
    pad_val: an int or float used to pad x.
    shape: an int array specifying the shape of the padded tensor if specified.

  Returns:
    The padded tensor with shape [batch, seq_len, ...], where
    ret[:, :seq_len, ...] == x.
  """
  ndims = x.shape.ndims
  if ndims is not None:
    # Static-rank path: build the padding spec as a Python list.
    assert ndims >= 2
    seq_len = GetShape(x, ndims)[1]
    paddings = [[0, 0] for _ in range(ndims)]
    paddings[1] = [0, length - seq_len]
  else:
    # Dynamic-rank path: build the padding spec as a tensor.
    ndims = tf.rank(x)
    with tf.control_dependencies([assert_greater_equal(ndims, 2)]):
      seq_len = tf.shape(x)[1]
      paddings = tf.scatter_nd([[1, 1]], [length - seq_len], [ndims, 2])
  x = tf.pad(x, paddings, constant_values=pad_val)
  if x.shape.ndims is not None and isinstance(length, int):
    # Restore as much static shape information as possible.
    static_shape = x.shape.as_list()
    static_shape[1] = length
    x.set_shape(static_shape)
  if shape:
    if not isinstance(shape, (list, tuple)):
      raise TypeError('Shape must be a list or tuple.')
    x = HasRank(x, len(shape))
    x = tf.ensure_shape(x, shape)
  return x
def PadSequenceTo(xs, padding, length, pad_val):
  """Pads `xs` and `padding` to `length` using `pad_val` along the 2nd dim.

  Pads `xs` to `length` using `pad_val`, and `padding` using 1.
  Raise error if `x.shape[:2]` and `padding.shape` are not the same.

  Args:
    xs: A Tensor or a list of Tensors of shape [batch, seqlen] or [batch,
      seqlen, ...].
    padding: A 0/1 Tensor of shape [batch, seqlen]. 1 is for padded locations.
    length: A Python int, the length to pad to.
    pad_val: A Python numeric, used for padding x.

  Returns:
    A tuple of padded xs and padding.
  """
  single_input = not isinstance(xs, (list, tuple))
  xs_list = [xs] if single_input else xs
  padded = []
  for x in xs_list:
    batch, seq_len = GetShape(x, 2)
    # Every tensor must agree with `padding` on [batch, seqlen].
    padding = HasRank(padding, 2)
    padding = HasShape(padding, [batch, seq_len])
    padded.append(PadSequenceDimension(x, length, pad_val))
  # Padded positions are marked 1 in the padding tensor.
  padding = PadSequenceDimension(padding, length, tf.cast(1, padding.dtype))
  if single_input:
    assert len(padded) == 1
    return padded[0], padding
  return tuple(padded), padding
def ApplyPadding(padding, x, padded=None, broadcast=True, use_select=True):
  """Applies padding to a tensor.

  This is preferable to using arithmetic means for masking out padded values
  such as::

    # Equiv to ApplyPadding(padding, x))
    x *= 1.0 - padding
    # Equiv to ApplyPadding(padding, new, old)
    new = old * padding + new * (1 - padding)

  Aside from just being easier to read and reason about, using this function
  is friendly to quantized representations because it does not mix arithmetic
  on the padding values with the values in the tensor being padded (which can
  have a very different range than the 0..1 padding tensor).

  In addition, this works around issues in quantized schemes where we are
  guaranteed to have an exact 0 but not necessarily any other number (i.e. 1).

  Args:
    padding: Tensor of padding values where 0 == keep and 1 == pad.
    x: Tensor to apply padding to.
    padded: Optional. Values to include for padded elements. Defaults to zeros.
      Must be the same shape as 'x' if specified.
    broadcast: Whether to broadcast the padding shape to the shape of 'x'. You
      almost certainly want this to be true as it matches how padding would be
      expanded if applied arithmetically.
    use_select: Controls whether padding is applied with a select-mask
      (True/default) or arithmetically (False). Some platforms have a
      sensitivity to one or the other and this is used to work around such
      issues.

  Returns:
    A tensor with the same shape as x with padded values masked.
  """
  # Runtime sanity check: padding must be strictly 0/1 valued.
  padding = with_dependencies([
      Assert(
          tf.reduce_all(
              tf.math.logical_or(
                  tf.equal(padding, 0.0), tf.equal(padding, 1.0))), [padding])
  ], padding)
  if use_select:
    if padded is None:
      padded = tf.zeros_like(x)
    if broadcast:
      # Broadcast padding to the full shape of x.
      padding = tf.cast(padding, x.dtype) * tf.ones_like(x)
    return tf.where(padding > tf.zeros_like(padding), padded, x)
  # Arithmetic path: keep = x * (1 - padding), plus the padded fill values.
  result = x * tf.cast(1.0 - padding, x.dtype)
  if padded is not None:
    result += padded * tf.cast(padding, padded.dtype)
  return result
def LengthsFromPaddings(paddings):
  """Computes lengths of each sequence in a batch, ignoring trailing padding.

  Args:
    paddings: a tensor with shape [batch, length].

  Returns:
    lengths tensor shaped [batch] containing the unpadded length of each
    sequence in the batch.
  """
  paddings = HasRank(paddings, 2)
  paddings = tf.cast(paddings, tf.int32)
  # tf.reduce_sum alone would be wrong with leading paddings, so locate the
  # last unpadded value instead: every position at or after it shares the
  # final cumulative-sum value below.
  non_padding_cumsum = tf.cumsum(1 - paddings, axis=1)
  matches_last = tf.equal(non_padding_cumsum, non_padding_cumsum[:, -1:])
  # Counting the number of elements with the same value gives us
  # num_padded + 1, and so counting the number that differs gives us
  # length - 1.
  lengths = tf.reduce_sum(
      1 - tf.cast(matches_last, tf.int32), axis=1) + 1
  # Special case: rows that are entirely padding have length 0, not 1.
  is_all_padding = tf.equal(tf.reduce_sum(1 - paddings, axis=1), 0)
  return tf.where(is_all_padding, tf.zeros_like(lengths), lengths)
def TrimTrailingPaddings(inputs, paddings):
  """Trims trailing paddings from inputs.

  Since the number of dimensions is not fixed, this will not work on TPU.

  Args:
    inputs: a tensor with shape [batch, length, ...].
    paddings: a tensor with shape [batch, length].

  Returns:
    Trimmed inputs and paddings. For compatibility reasons, the trimmed
    tensors will always have length at least 1.
  """
  paddings = HasRank(paddings, 2)
  # Keep at least one time step even for all-padding batches.
  trimmed_len = tf.maximum(tf.reduce_max(LengthsFromPaddings(paddings)), 1)
  full_shape = tf.shape(inputs)
  trimmed_shape = tf.concat([[full_shape[0], trimmed_len], full_shape[2:]],
                            axis=0)
  trimmed_inputs = tf.slice(inputs, tf.zeros_like(trimmed_shape),
                            trimmed_shape)
  trimmed_paddings = tf.slice(paddings, [0, 0],
                              tf.stack([trimmed_shape[0], trimmed_len]))
  return trimmed_inputs, trimmed_paddings
def ReversePaddedSequence(inputs, paddings):
  """Reverse inputs based on paddings.

  Only reverse the unpadded portion of `inputs`. It assumes inputs are only
  padded in the end.

  Args:
    inputs: a tensor of [seq_length, batch_size, num_input_nodes].
    paddings: a tensor of float32/float64 zero or one of shape [seq_length,
      batch_size, 1].

  Returns:
    A reversed tensor of the same shape as `inputs`.
  """
  non_padding = 1.0 - tf.squeeze(paddings, 2)
  # rint guards against accumulated float error in the sum before the cast.
  lengths = tf.cast(
      tf.math.rint(tf.reduce_sum(non_padding, axis=0)), tf.int32)
  return tf.reverse_sequence(inputs, lengths, seq_axis=0, batch_axis=1)
def ConcatenatePaddedSequences(input0, input1, padding0, padding1, seq_dim=1):
  """Concatenates input sequences with varying lengths as defined by paddings.

  This is a helper function for concatenating 2 batches of input sequences,
  where each example in the batch can have different lengths, as defined by
  the corresponding paddings. To concatenate correctly, it makes use of
  tf.reverse_sequence to partially reverse the sequences before
  concatenating them together.

  NOTE: We assume that the tensors have no leading paddings.

  Args:
    input0: A tensor of size [batch, max_length, ...] or [max_length, batch,
      ...] depending on the value set for axis.
    input1: A tensor of size [batch, max_length, ...] or [max_length, batch,
      ...] depending on the value set for axis.
    padding0: A Tensor of size [batch, max_length] or [max_length, batch]
      corresponding to the padding for input0.
    padding1: A Tensor of size [batch, max_length] or [max_length, batch]
      corresponding to the padding for input1.
    seq_dim: int, the time axis along which the tensors will be concatenated.
      Should be 0 or 1. Assumes that batch_dim is 1 - seq_dim.

  Returns:
    The concatenation of input0 and input1, and the corresponding padding.

  Raises:
    tf.errors.InvalidArgumentError when seq_dim is not 0 or 1.
  """
  if seq_dim != 0 and seq_dim != 1:
    raise tf.errors.InvalidArgumentError(None, None, 'seq_dim must be 0 or 1.')
  batch_dim = 1 - seq_dim
  # input0 and input1 should have the same batch size and same rank.
  input0 = with_dependencies([
      assert_equal(GetShape(input0)[batch_dim],
                   GetShape(input1)[batch_dim]),
      assert_equal(GetRank(input0), GetRank(input1))
  ], input0)
  batch_size = GetShape(padding0)[batch_dim]
  # batch dimension of inputs and paddings should match.
  input0 = with_dependencies([
      assert_equal(GetShape(input0)[batch_dim], batch_size),
      assert_equal(GetShape(padding1)[batch_dim], batch_size)
  ], input0)
  # Max (padded) sequence lengths, tiled to [batch] as tf.reverse_sequence
  # expects per-example lengths.
  input0_seq_dim = tf.cast(
      tf.tile([tf.shape(padding0)[seq_dim]], [batch_size]), dtype=tf.int32)
  input1_seq_dim = tf.cast(
      tf.tile([tf.shape(padding1)[seq_dim]], [batch_size]), dtype=tf.int32)
  # LengthsFromPaddings assumes that paddings is of size [batch, max_length].
  if seq_dim == 1:
    seq_length0 = LengthsFromPaddings(padding0)
    seq_length1 = LengthsFromPaddings(padding1)
  else:
    seq_length0 = LengthsFromPaddings(tf.transpose(padding0))
    seq_length1 = LengthsFromPaddings(tf.transpose(padding1))
  # We assume that the tensors have no leading paddings.
  # TODO(arunnt): Concatenate tensors with leading paddings correctly.
  seq_length0 = with_dependencies([
      assert_equal(
          seq_length0,
          tf.cast(tf.reduce_sum(1.0 - padding0, seq_dim), dtype=tf.int32))
  ], seq_length0)
  seq_length1 = with_dependencies([
      assert_equal(
          seq_length1,
          tf.cast(tf.reduce_sum(1.0 - padding1, seq_dim), dtype=tf.int32))
  ], seq_length1)
  # Concatenate input sequences.
  # Reversing input0 by its actual length moves its trailing padding to the
  # front; input1 is reversed in full.
  reversed_input0 = tf.reverse_sequence(
      input0, seq_length0, seq_axis=seq_dim, batch_axis=batch_dim)
  reversed_input1 = tf.reverse_sequence(
      input1, input1_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)
  reversed_concat = tf.concat([reversed_input1, reversed_input0], axis=seq_dim)
  # Undoing the reversal over the first seq_length0 + max_length(input1)
  # positions restores order: input0 contents, input1, then trailing padding.
  concat_inputs = tf.reverse_sequence(
      reversed_concat,
      seq_length0 + input1_seq_dim,
      seq_axis=seq_dim,
      batch_axis=batch_dim)
  # Concatenate paddings. Note that paddings are always a Tensor of 0s and 1s,
  # so, unlike the inputs, we don't have to reverse padding1, we can simply
  # concatenate reversed padding0 and padding1.
  reversed_padding0 = tf.reverse_sequence(
      padding0, input0_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)
  reversed_concat_padding = tf.concat([reversed_padding0, padding1],
                                      axis=seq_dim)
  concat_paddings = tf.reverse_sequence(
      reversed_concat_padding,
      input0_seq_dim + seq_length1,
      seq_axis=seq_dim,
      batch_axis=batch_dim)
  return concat_inputs, concat_paddings
def Retry(*args, **kwargs):
  """Thin wrapper around the retry.Retry decorator; see that for arguments."""
  return retry.Retry(*args, **kwargs)
# TF errors considered transient (safe to retry):
# FailedPreconditionError: variables are not initialized.
# AbortedError: processes restarts.
# UnavailableError: Bad hardware status: 0x1
transient_tf_errors = (tf.errors.FailedPreconditionError,
                       tf.errors.AbortedError, tf.errors.UnavailableError)
def RetryOnTransientTfError(*args, **kwargs):
  """Returns a Retry decorator that retries on transient TF errors."""
  return Retry(transient_tf_errors, *args, **kwargs)
def PadOrTrimTo(x, shape, pad_val=0, pad_after_contents=True):
  """Pad and slice x to the given shape.

  Args:
    x: A tensor.
    shape: The shape of the returned tensor.
    pad_val: An int or float used to pad x.
    pad_after_contents: Whether to pad and trim after the original contents of
      each dimension.

  Returns:
    'x' is padded with pad_val and sliced so that the result has the given
    shape.

  Raises:
    ValueError: if shape is a tf.TensorShape and not fully defined.
  """
  # Determine the expected rank from whichever form `shape` takes.
  if isinstance(shape, (list, tuple)):
    expected_rank = len(shape)
  elif isinstance(shape, tf.TensorShape):
    if not shape.is_fully_defined():
      raise ValueError('shape %s padding %s must be fully defined.' %
                       (shape, x))
    expected_rank = shape.rank
  else:
    shape = HasRank(shape, 1)
    expected_rank = tf.size(shape)
  x = HasRank(x, expected_rank)
  pad_amounts = shape - tf.minimum(tf.shape(x), shape)
  no_pad = tf.zeros_like(pad_amounts)
  if pad_after_contents:
    # If dim_i is less than shape[i], pads after contents.
    paddings = tf.stack([no_pad, pad_amounts], axis=1)
    # If dim_i is larger than shape[i], we slice [0:shape[i]] for dim_i.
    slice_begin = no_pad
  else:
    # If dim_i is less than shape[i], pads before contents.
    paddings = tf.stack([pad_amounts, no_pad], axis=1)
    # If dim_i is larger than shape[i], we slice [dim_i - shape[i]:dim_i].
    slice_begin = tf.shape(x) + pad_amounts - shape
  x = tf.pad(x, paddings, constant_values=pad_val)
  x = tf.slice(x, slice_begin, shape)
  return tf.reshape(x, shape)
def RepeatDim(tensor, multiple, axis):
  """Copies elements in tensor's axis "multiple" times, like np.repeat."""
  # Example: x = [[1, 2, 3], [4, 5, 6]]
  # RepeatDim(x, multiple=2, axis=1) gives:
  #   [[1, 1, 2, 2, 3, 3]. [4, 4, 5, 5, 6, 6]]
  # whereas tf.tile(x, multiples=[1, 2]) gives:
  #   [[1, 2, 3, 1, 2, 3], [4, 5, 6, 4, 5, 6]]
  if multiple == 1:
    return tensor
  original_shape = tf.shape(tensor)
  # Expand a new axis right after `axis`, tile along it, then collapse it
  # back into `axis` — interleaving the copies.
  expanded = tf.expand_dims(tensor, axis + 1)
  tile_multiples = tf.concat([
      tf.fill([axis + 1], 1), [multiple],
      tf.fill([tf.rank(tensor) - axis - 1], 1)
  ], 0)
  target_shape = tf.concat([
      original_shape[:axis], [original_shape[axis] * multiple],
      original_shape[axis + 1:]
  ], 0)
  return tf.reshape(tf.tile(expanded, tile_multiples), target_shape)
def StackTensorsRecursively(values):
  """Recursively stacks Tensors in a list of `.NestedMap`.

  Args:
    values: a list of `.NestedMap` or Tensors to stacks.

  Returns:
    A `.NestedMap` with stacked values or a stacked Tensor.
  """
  flattened = [v.Flatten() for v in values]
  # zip(*...) transposes [num_values][num_fields] into per-field tuples.
  stacked = [tf.stack(list(elems)) for elems in zip(*flattened)]
  return values[0].Pack(stacked)
def MixByWeight(inputs, weights, seed=None):
  """Returns a weighted random choice and bprop type from the give inputs.

  Args:
    inputs: a list of callables, where each callable returns a tf.Tensor or a
      nested structure containing tf.Tensor. Function return types must be
      consistent across elements. The tf.Operation to compute the result tensor
      will only be invoked for one input at a time. For example, if each fn
      represents an input record stream, a record will be drawn only from a
      selected stream while the other streams will remain unchanged.
    weights: a 1D tensor of float > 0 of the same length as inputs.
    seed: random seed.

  Returns:
    A probablistic sample from the inputs proportional to the weights. The
    return type will be the same as return type of individual 'fn' from the
    inputs.
    A one-hot vector of the source selected.
  """
  weights = tf.convert_to_tensor(weights, dtype=tf.float32)
  # Validate: one weight per input, and all weights non-negative.
  weights = with_dependencies([
      assert_equal(tf.shape(weights), [len(inputs)]),
      assert_greater_equal(tf.reduce_min(weights), 0.0)
  ], weights)
  # [lower[i], upper[i]) partitions [0, sum(weights)) into one bucket per
  # input; a uniform draw r selects the bucket it lands in.
  lower = tf.cumsum(weights, exclusive=True)
  upper = tf.cumsum(weights, exclusive=False)
  r = tf.random.uniform(shape=[], maxval=upper[-1], seed=seed)
  # tf.case invokes only the branch whose predicate is true, so only the
  # selected input callable runs.
  return_input = tf.case(
      [(tf.math.logical_and(lower[i] <= r, r < upper[i]), inputs[i])
       for i in range(len(inputs))],
      exclusive=True)
  # `lambda i=i` binds the loop index eagerly to avoid Python's late-binding
  # closure pitfall.
  selected_index = tf.case(
      [(tf.math.logical_and(lower[i] <= r, r < upper[i]), lambda i=i: i)
       for i in range(len(inputs))],
      exclusive=True)
  bprop_index = tf.one_hot(selected_index, len(inputs), dtype=tf.float32)
  return return_input, bprop_index
def CheckShapes(shapes):
  """Asserts that shapes is a tuple of NestedMap or tshape.Shape."""
  assert isinstance(shapes, tuple), str(shapes)
  for s in shapes:
    if isinstance(s, NestedMap):
      # Every leaf of the NestedMap must itself be a symbolic shape.
      assert all(
          isinstance(t, tshape.Shape) for t in Flatten(s)
      ), '{} contains non-tensor value.'.format(s)
    else:
      assert isinstance(s, tshape.Shape), '{}: {}'.format(type(s), s)
def FPropDtype(params):
  """Returns params.fprop_dtype when set, falling back to params.dtype."""
  if params.fprop_dtype is not None:
    return params.fprop_dtype
  return params.dtype
def UpdateFpropDtype(params, fprop_dtype):
  """Recursively update the fprop_dtype of the Params."""
  # Non-Params values (e.g. items in SequentialLayer's "sub" list that are
  # not Params) are silently ignored.
  if not isinstance(params, hyperparams.Params):
    return
  for key, val in params.IterParams():
    if isinstance(val, hyperparams.Params):
      UpdateFpropDtype(val, fprop_dtype)
    elif isinstance(val, (list, tuple)):
      # Recurse into container values, e.g. sub-layer Params lists.
      for element in val:
        UpdateFpropDtype(element, fprop_dtype)
    elif key == 'fprop_dtype':
      params.fprop_dtype = fprop_dtype
def UpdateDtype(params, dtype):
  """Recursively update the dtype of the Params."""
  # Non-Params values (e.g. items in SequentialLayer's "sub" list that are
  # not Params) are silently ignored.
  if not isinstance(params, hyperparams.Params):
    return
  for key, val in params.IterParams():
    if isinstance(val, hyperparams.Params):
      UpdateDtype(val, dtype)
    elif isinstance(val, (list, tuple)):
      # Recurse into container values, e.g. sub-layer Params lists.
      for element in val:
        UpdateDtype(element, dtype)
    elif key == 'dtype':
      params.dtype = dtype
def NameScopeDecorator(name_scope):
  """Decorates a python function to introduce a tf.name_scope.

  Example::

    @py_utils.NameScopeDecorator('foobar')
    def MyFoobarMethod(self):
      # ... Do TF things

  Args:
    name_scope: The name scope to introduce.

  Returns:
    A function decorator.
  """
  def Decorator(f):
    # Function-scope import to avoid touching module-level imports.
    import functools  # pylint: disable=g-import-not-at-top
    # functools.wraps preserves the wrapped function's __name__ and __doc__,
    # which the previous version clobbered with 'Wrapped'.
    @functools.wraps(f)
    def Wrapped(*args, **kwargs):
      with tf.name_scope(name_scope):
        return f(*args, **kwargs)
    return Wrapped
  return Decorator
def SequencesToDebugStrings(ids, lens, summarize=5):
  """Returns debug strings for the given sequences.

  Args:
    ids: int32 of [batch, len].
    lens: int32 of [batch].
    summarize: number of ids to summarize per sequence.

  Returns:
    A string tensor of [batch].
  """
  batch = tf.shape(lens)[0]
  def _AppendRow(i, acc):
    # Format only the unpadded prefix of row i.
    row = tf.strings.format('{}', ids[i, :lens[i]], summarize=summarize)
    return i + 1, tf.concat([acc, tf.reshape(row, [1])], axis=0)
  init_i = tf.zeros(shape=[], dtype=tf.int32)
  init_acc = tf.constant('', shape=[0], dtype=tf.string)
  _, result = tf.while_loop(
      lambda i, acc: i < batch,
      _AppendRow, (init_i, init_acc),
      shape_invariants=(init_i.shape, tf.TensorShape([None])))
  return result
def RematerializeFn(fn, *xs):
  """Calls fn and rematerializes fn in the backward pass.

  `fn(*xs) -> ys`, where xs and ys can be a single tensor or a tuple of
  tensors.

  Args:
    fn: A python function to be rematerialized in the backprop pass.
    *xs: A single tensor or a list/tuple of tensors. `xs` are input args to the
      fn function.

  Returns:
    `fn(*xs)`
  """
  initial_step_seed = GetStepSeed()
  final_step_seed = GenerateSeedFromName(tf.no_op(name='new_step_seed').name)
  def Backward(op, *dy):
    """The backward function that rematerializes forward outputs."""
    # A trivially-true predicate; the tf.where below forces a data dependency
    # on op.inputs without changing any values.
    always_true = tf.random.uniform([]) < 2.0
    # Alternatively, can do this:
    # tf.where(tf.math.is_nan(x),
    #          tf.constant(float('nan'), dtype=x.dtype) * tf.ones_like(x),
    #          x)
    # Skip op.inputs[0] which is initial_step_seed.
    bak_xs = [tf.where(always_true, x, tf.zeros_like(x)) for x in op.inputs[1:]]
    for dst, src in zip(bak_xs, xs):
      dst.set_shape(src.shape)
    # Re-run the forward pass under the same step seed so any step-seeded
    # randomness (e.g. dropout) matches the original forward computation.
    ResetStepSeed(initial_step_seed)
    ys = fn(*bak_xs)
    ResetStepSeed(final_step_seed)
    dxs = tf.gradients(ys, bak_xs, grad_ys=dy)
    dxs_final = []
    # Replace None gradients with zeros so the returned tuple aligns 1:1
    # with the forward inputs.
    for dx, x in zip(dxs, bak_xs):
      if dx is None:
        dxs_final.append(tf.zeros_like(x))
      else:
        dxs_final.append(dx)
    assert len(dxs_final) == len(bak_xs)
    return (tf.zeros_like(initial_step_seed),) + tuple(dxs_final)
  xs_dtypes = [x.dtype for x in xs]
  ys_shapes = []
  # TODO(huangyp, yonghui): Check Forward doesn't use any stateful random ops.
  @tf.Defun(initial_step_seed.dtype, *xs_dtypes, python_grad_func=Backward)
  def Forward(initial_step_seed, *fwd_xs):
    """Forward function plus sanity checks."""
    for dst, src in zip(fwd_xs, xs):
      dst.set_shape(src.shape)
    ResetStepSeed(initial_step_seed)
    ys = fn(*fwd_xs)
    # Some sanity check.
    assert not GetExtraInputs()
    assert not GetExtraArgs()
    assert not GetExtraVars()
    if isinstance(ys, tuple):
      for y in ys:
        assert isinstance(y, tf.Tensor)
        ys_shapes.append(y.shape)
    else:
      assert isinstance(ys, tf.Tensor)
      ys_shapes.append(ys.shape)
    return ys
  ys = Forward(initial_step_seed, *xs)
  # Tensors coming out of a Defun lose static shapes; restore them from the
  # shapes recorded inside Forward.
  if isinstance(ys, tuple):
    for y, s in zip(ys, ys_shapes):
      y.set_shape(s)
  else:
    ys.set_shape(ys_shapes[0])
  # TODO(b/129159299): The ResetStepSeed below is needed to work around this
  # bug, which is a problem with global tensors being shared by different
  # inference graphs. It should be replaced with the new step seed value
  # returned from the Forward function when the bug is fixed.
  ResetStepSeed(final_step_seed)
  return ys
# A set of names of stateful random number generator ops.
# See tensorflow/core/ops/random_ops.cc
# Consumed by StatefulRandomOpsInDefun below to flag ops whose values would
# differ between FProp and BProp inside Recurrent().
_STATEFUL_RANDOM_OPS = {
    # pyformat: disable
    'RandomUniform',
    'RandomUniformInt',
    'RandomStandardNormal',
    'ParameterizedTruncatedNormal',
    'TruncatedNormal',
    'RandomShuffle',
    'Multinomial',
    'RandomGamma',
    'RandomPoisson',
    'RandomPoissonV2',
    # pyformat: enable
}
def StatefulRandomOpsInDefun(func, graph=None):
  """Checks whether the Defun depends on stateful random number ops.

  Stateful random number generator ops should be avoided in Recurrent() call.
  Otherwise, these ops produce inconsistent values between FProp and BProp.

  Args:
    func: a _DefinedFunction to check.
    graph: a Graph. Set None to use the default graph.

  Returns:
    A list of names of the stateful random ops.

  Raises:
    InvalidArgumentError: if the input func/graph is invalid.
  """
  if not isinstance(func, function._DefinedFunction):  # pylint: disable=protected-access
    raise tf.errors.InvalidArgumentError(None, None,
                                         'func is not a _DefinedFunction.')
  if graph is None:
    graph = tf.get_default_graph()
  func.add_to_graph(graph)
  graph_def = graph.as_graph_def()
  # A dict from function name to FunctionDef.
  func_defs = {x.signature.name: x for x in graph_def.library.function}
  if func.definition.signature.name not in func_defs:
    raise tf.errors.InvalidArgumentError(
        None, None,
        'Defun {} is not in the graph.'.format(func.definition.signature.name))
  stateful_ops = []
  nodes = py_collections.deque(func.definition.node_def)

  def _AddDefunNodes(func_name):
    """If the given func_name is a Defun, add its sub-nodes into nodes."""
    if func_name in func_defs:
      nodes.extend(func_defs[func_name].node_def)

  # Recursively search for stateful random ops. The helper above is defined
  # once instead of being re-created on every loop iteration.
  while nodes:
    node = nodes.pop()
    assert isinstance(node, node_def_pb2.NodeDef), node
    if node.op in _STATEFUL_RANDOM_OPS:
      stateful_ops.append(node.op)
      continue
    # For functional.{While|For|If} ops, add their Defun attr into search.
    if node.op == 'While':
      _AddDefunNodes(node.attr['body'].func.name)
      _AddDefunNodes(node.attr['cond'].func.name)
    elif node.op == 'For':
      _AddDefunNodes(node.attr['body'].func.name)
    elif node.op == 'If':
      _AddDefunNodes(node.attr['then_branch'].func.name)
      _AddDefunNodes(node.attr['else_branch'].func.name)
    else:
      # For other op, check whether itself is a Defun op.
      _AddDefunNodes(node.op)
  return stateful_ops
def ToPlaceholders(nmap, dtype=None):
  """Returns a copy of `nmap` with every Tensor replaced by a placeholder.

  The placeholder keeps the tensor's static last dimension but leaves all
  leading dimensions unconstrained (None). If `dtype` is given it overrides
  each tensor's own dtype.
  """
  def _MakePlaceholder(tensor):
    # Pin only the innermost dim; everything else stays dynamic.
    dynamic_shape = [None for _ in tensor.shape[:-1]] + [tensor.shape[-1]]
    return tf.placeholder(dtype=dtype or tensor.dtype, shape=dynamic_shape)
  return nmap.Transform(_MakePlaceholder)
def SoftmaxCrossEntropyFocalLoss(logits,
                                 label_ids=None,
                                 label_probs=None,
                                 alpha=None,
                                 gamma=None):
  u"""Focal loss for multinomial (softmax) logistic loss.
  [1] Focal loss https://arxiv.org/abs/1708.02002
  Exactly one of `label_ids` or `label_probs` should be provided; when
  `label_probs` is set the dense-label branch is taken.
  Args:
    logits: [..., C]. Logits for the multinomial logistic regression. C is the
      number of classes.
    label_ids: [...]. Each entry in labels must be an index in [0, C).
    label_probs: [..., C]. Each vector along last dimension must be a valid
      probability distribution.
    alpha: [C]. The weighting factor alpha. Eq (3) in [1].
    gamma: []. Tunable focusing parameter. Eq (4) in [1].
  Returns:
    loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].
  """
  if label_probs is not None:
    # Dense-label branch: cross entropy computed explicitly per class.
    log_probs = tf.nn.log_softmax(logits)
    loss = -(label_probs * log_probs)
    if gamma is not None and gamma != 0:
      # Modulating factor (1-p)ˠ from Eq (4).
      probs = tf.exp(log_probs)
      loss *= tf.pow(1.0 - probs, gamma)
    if alpha is not None:
      # Reshape alpha to [1, ..., 1, C] so it broadcasts over leading dims.
      loss *= tf.reshape(
          alpha, tf.concat([tf.ones(tf.rank(loss) - 1, tf.int32), [-1]],
                           axis=0))
    loss = tf.reduce_sum(loss, axis=-1)
  else:
    # Sparse-label branch.
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=label_ids, logits=logits)
    if gamma is not None and gamma != 0:
      # loss is -log(p) for the target class, so p = exp(-loss).
      probs = tf.exp(-loss)
      loss *= tf.pow(1.0 - probs, gamma)
    if alpha is not None:
      loss *= tf.gather(alpha, label_ids)
  return loss
def SigmoidCrossEntropyFocalLoss(logits, labels, alpha=None, gamma=None):
  u"""Focal loss for binary (sigmoid) logistic loss.
  [1] Focal loss https://arxiv.org/abs/1708.02002
  Args:
    logits: [..., C]. Logits for the sigmoid logistic regression.
    labels: [..., C]. 0/1 labels.
    alpha: The weighting factor alpha. Eq (3) in [1].
    gamma: Tunable focusing parameter. Eq (4) in [1].
  Returns:
    loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].
  """
  # [1] Eq (4).
  #
  # The numerically-stable way to compute
  # log(p) for positives;
  # log(1 - p) for negatives.
  loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
  if gamma is not None and gamma != 0:
    # The modulating factor. Note that
    # (1 - p)ˠ = [1 - σ(x)]ˠ = [σ(-x)]ˠ, for positives.
    # pˠ = [σ(x)]ˠ, for negatives.
    # (1 - 2*labels) flips the sign of the logits for positive labels.
    loss *= tf.pow(tf.sigmoid(logits * (1 - labels * 2)), gamma)
  if alpha is not None:
    # [1] Eq (3): alpha for positives, (1 - alpha) for negatives.
    loss *= (alpha * labels + (1 - alpha) * (1 - labels))
  return loss
_RECORD_FORMAT_RE = re.compile('(^[A-Za-z]+):(.*)')


def RecordFormatFromFilePattern(file_pattern):
  """Splits a Lingvo file pattern into its record format and bare pattern.

  Lingvo file patterns take the form of:
    tfrecord:/path/to/bar -> tfrecord is the record_format.

  Args:
    file_pattern: String file pattern.

  Returns:
    Tuple (string, string):
      - record_format: String record format, e.g., "tfrecord", etc.
      - file_pattern: The file pattern without any prefixes.
  """
  match = _RECORD_FORMAT_RE.match(file_pattern)
  if match is not None:
    # The regexp guarantees exactly two groups: the record format followed by
    # the remainder of the pattern.
    return match.groups()
  # TODO(vrv): Fix all callers so that file_pattern must contain
  # the record format prefix.
  return 'sstable', file_pattern
def ReadFileLines(file_path):
  """Read a text file and return the lines.
  If the file cannot be found at the given path, attempt to load it from the
  Lingvo package (useful for data dependencies in par files).
  Args:
    file_path: path to file, either absolute or relative to the REDACTED workspace.
  Returns:
    A list of lines from the file.
  """
  if not tf.io.gfile.exists(file_path):
    try:
      # Fall back to package data; strip the workspace prefix (first
      # occurrence only) so the path is relative to the lingvo package.
      lines = pkgutil.get_data(
          'lingvo', file_path.replace('third_party/py/lingvo/', '',
                                      1)).splitlines(True)
    except IOError:
      # If pkgutil can't find the file, continue and let GFile raise the error.
      lines = None
  else:
    lines = None
  if not lines:
    with tf.io.gfile.GFile(file_path, 'r') as f:
      lines = f.readlines()
  return lines
# Partially borrowed from
# https://github.com/tensorflow/tensor2tensor/blob/32929305e1a4ec926eff24123758b794df35492b/tensor2tensor/layers/common_layers.py#L349
def CumSum(x, axis=0, exclusive=False):
  """A TPU efficient implementation of tf.cumsum().
  This is equivalent to tf.cumsum and is faster on TPU as of 08/2019 unless
  the axis dimension is very large. The current Tensorflow implementation is
  based on scanning and reducing which is not efficient on TPU.
  Args:
    x: An input Tensor.
    axis: An int for the axis.
    exclusive: A bool for performing exclusive cumsum.
  Returns:
    A Tensor of the same shape as x.
  Raises:
    ValueError: if the input axis is invalid.
  """
  if x.dtype not in (tf.float32, tf.bfloat16) or not use_tpu():
    # Fallback to tf.cumsum when inputs are not floats or not running on TPU.
    return tf.cumsum(x, axis=axis, exclusive=exclusive)
  rank = GetRank(x)
  # Needs to know the rank for the final transpose if axis is not the last
  # dimension. Otherwise, falls back to tf.cumsum.
  if not isinstance(rank, int) and axis != -1:
    return tf.cumsum(x, axis=axis, exclusive=exclusive)
  if axis < -1:
    # Normalize a negative axis to its positive equivalent.
    if axis + rank < 0:
      raise ValueError('Unexpected axis: %d (rank = %d)' % (axis, rank))
    axis += rank
  length = GetShape(x)[axis]
  my_range = tf.range(length)
  # Lower-triangular (or strictly-lower for exclusive) 0/1 mask; cumsum is
  # then a single matmul (tensordot) against this mask, which is fast on TPU.
  comparator = tf.less if exclusive else tf.less_equal
  mask = tf.cast(
      comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
      x.dtype)
  result = tf.tensordot(x, mask, axes=[[axis], [0]])
  if axis != -1 and axis != rank - 1:
    # tensordot moved the summed axis to the end; transpose it back in place.
    result = tf.transpose(
        result,
        list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
  return result
def ProjectLastDim(inputs, weight, input_dim, output_dim):
  """Linear projection on the last dim of the input tensor.
  This is a TPU efficient implementation to avoid reshaping inputs to Rank-2
  tensor by using Einsum for the compute.
  Args:
    inputs: An input Tensor, the last dimension of which is input_dim.
    weight: A weight matrix with shape [input_dim, output_dim].
    input_dim: An integer or a symbolic dim, the last dimension of the inputs.
    output_dim: An integer or a symbolic dim, the last dimension of the outputs.
  Returns:
    An output Tensor of the same rank as inputs, the last dimension is
    output_dim.
  """
  # Resolve symbolic dims into concrete ints before graph construction.
  input_dim = int(
      symbolic.ToStatic(input_dim) if symbolic.IsExpr(input_dim) else input_dim)
  output_dim = int(
      symbolic.ToStatic(output_dim) if symbolic.IsExpr(output_dim
                                                      ) else output_dim)
  # Assert input_dim and output_dim
  inputs = with_dependencies([assert_equal(GetShape(inputs)[-1], input_dim)],
                             inputs)
  weight = with_dependencies([
      assert_equal(GetShape(weight)[0], input_dim),
      assert_equal(GetShape(weight)[-1], output_dim)
  ], weight)
  # rank < 26 because the einsum equation below spends one lowercase letter
  # per input dimension (plus 'y'/'z' for the contraction).
  if (use_tpu() and inputs.shape is not None and
      inputs.shape.rank is not None and inputs.shape.rank < 26):
    # Avoids reshape if feasible and uses Einsum.
    if inputs.shape.rank == 2:
      outputs = tf.matmul(inputs, weight)
    else:
      s = ''.join([chr(x) for x in range(97, 123)])  # abc...xyz
      r = inputs.shape.rank
      outputs = tf.einsum('{0}y,yz->{0}z'.format(s[:r - 1]), inputs, weight)
  else:
    # Generic path: flatten to rank-2, matmul, then restore leading dims.
    outputs = Matmul(tf.reshape(inputs, ToStaticShape([-1, input_dim])), weight)
    outputs = tf.reshape(
        outputs,
        tf.concat([
            tf.cast(GetShape(inputs)[:-1], tf.int32),
            ToStaticShape([output_dim])
        ],
                  axis=0))
  return outputs
@contextlib.contextmanager
def RemoveAssertContext(remove=True):
  """Hacks to replace certain unwanted tensorflow ops.

  While active (and `remove` is True), `tf.check_ops.assert_equal` is
  monkey-patched to a no-op; the original is restored on exit, even if the
  body raises. Without the try/finally, an exception inside the `with` body
  would leak the no-op patch into all subsequent graph construction.

  Args:
    remove: If True, patch out tf.check_ops.assert_equal for the duration of
      the context; if False, this context manager does nothing.
  """
  # TODO(zhifengc/huangyp): Consider implementing assert_equal
  # op replacement for lingvo. As assert_equal doesn't support String on GPUs.
  # Hack to replace tf.assert_equal
  # TODO(b/136040013): Remove this after migration to tf.function.
  if remove:
    saved_assert_equal = tf.check_ops.assert_equal
    # pylint: disable=unused-argument
    def NoOp(*args, **kwargs):
      return tf.no_op()
    # pylint: enable=unused-argument
    tf.check_ops.assert_equal = NoOp  # Make assert_equal a no op.
    try:
      yield
    finally:
      # Restore unconditionally so exceptions can't leave the patch in place.
      tf.check_ops.assert_equal = saved_assert_equal
  else:
    yield
def _DefineDefun(fwd, bak, args):
  """Wraps fwd in a defun with custom gradient bak.
  Args:
    fwd: A callable xs: Nested Structure -> ys: Nested Structure.
    bak: A callable xs, ys, dys: Nested Structure -> dxs: Nested Structure. The
      custom backprop function for fwd.
    args: A Nested Structure of tf.Tensor.
  Returns:
    A NestedMap w/ fields:
      defun: A tf.Defun wraps fwd
      args: A Nested Structure of tf.DType
      rets: A Nested Structure of tf.DType
  """
  assert fwd is not None
  # fwd signature (tf.Tensor dtypes).
  get_dtype = lambda x: x.dtype
  sigs = NestedMap(args=Transform(get_dtype, args))
  get_shape = lambda x: x.shape
  arg_shapes = Transform(get_shape, args)
  compiled = use_xla()
  # Under XLA the Defun is inlined; otherwise keep it noinline.
  noinline = not compiled
  def Backward(op, *args):
    # python_grad_func: receives the forward op and the output gradients.
    assert bak is not None
    xs = Pack(sigs.args, op.inputs)
    # Note: sigs.rets will be set during the Forward call.
    ys = Pack(sigs.rets, op.outputs)
    dys = Pack(sigs.rets, args)
    with RemoveAssertContext(remove=noinline):
      dxs = bak(xs, ys, dys)
    return Flatten(dxs)
  @tf.Defun(*Flatten(sigs.args), python_grad_func=Backward, noinline=noinline)
  def Forward(*args):
    # Re-attach static shapes lost when crossing the Defun boundary.
    for arg, shape in zip(args, Flatten(arg_shapes)):
      arg.set_shape(shape)
    with RemoveAssertContext(remove=noinline):
      rets = fwd(Pack(sigs.args, args))
    # Record the return signature for Backward (and callers) to unpack with.
    sigs.rets = Transform(get_dtype, rets)
    return Flatten(rets)
  sigs.defun = Forward
  return sigs
def CallDefun(fwd, bak, args):
  """Wraps fwd in a Defun with custom gradient bak and invokes it on args.

  Args:
    fwd: A callable xs: Nested Structure -> ys: Nested Structure.
    bak: A callable xs, ys, dys: Nested Structure -> dxs: Nested Structure;
      the custom backprop function for fwd.
    args: A Nested Structure of tf.Tensor.

  Returns:
    A Nested Structure equivalent to what fwd(args) computes.
  """
  sigs = _DefineDefun(fwd, bak, args)
  rets = sigs.defun(*Flatten(args))
  # A single-output Defun returns a bare tensor; normalize before packing.
  flat_rets = rets if isinstance(rets, (tuple, list)) else [rets]
  return Pack(sigs.rets, flat_rets)
def _Itype():
  """Loop iterator data type: int32 under XLA, int64 otherwise."""
  if use_xla():
    return tf.int32
  return tf.int64
def WhileLoop(cond, body, loop_state):
  """Helper to construct a while loop.
  Args:
    cond: A callable NestedMap -> tf.bool.
    body: A callable NestedMap -> NestedMap.
    loop_state: A flattenable (NestedMap, list, tuple, etc.) representing the
      loop state.
  Returns:
    The final loop state in the same structure as loop_state.
  """
  state = NestedMap(loop_state=loop_state)
  # tf.While requires flat tensor lists; record the dtypes for the Defuns.
  dtypes = state.Transform(lambda x: x.dtype).Flatten()
  @tf.Defun(*dtypes)
  def LoopCond(*args):
    # Repack the flat args into the structured state before calling cond.
    s = state.Pack(args)
    return cond(s.loop_state)
  @tf.Defun(*dtypes)
  def LoopBody(*args):
    s = state.Pack(args)
    s.loop_state = body(s.loop_state)
    return s.Flatten()
  return state.Pack(
      tf.While(input_=state.Flatten(), cond=LoopCond, body=LoopBody)).loop_state
def ForLoop(body, start, limit, delta, loop_state):
  """Helper to construct a for loop on top of WhileLoop.

  Args:
    body: A callable (tf.int, NestedMap) -> NestedMap.
    start: Loop variable's initial value.
    limit: Loop variable's limit value.
    delta: Loop variable's change per iteration.
    loop_state: A flattenable (NestedMap, list, tuple, etc.) representing the
      loop state.

  Returns:
    The final loop state in the same structure as loop_state.
  """
  itype = _Itype()
  # The induction variable and its bounds travel with the user state.
  state = NestedMap(
      iter=tf.cast(start, itype),
      limit=tf.cast(limit, itype),
      delta=tf.cast(delta, itype),
      loop_state=loop_state)
  def _Cond(s):
    # Continue while the induction variable is below the limit.
    return tf.less(s.iter, s.limit)
  def _Body(s):
    s.loop_state = body(s.iter, s.loop_state)
    s.iter = tf.add(s.iter, s.delta)
    return s
  return WhileLoop(_Cond, _Body, state).loop_state
def TopK(x_in, k):
  """Equivalent to tf.math.top_k(x_in, k) but more efficient on tpu."""
  assert k <= 2, 'This implementation is only efficient for small k.'
  # TODO(yonghui): Try out an alternative idea where we first reshape x_in as a
  # 2d tensor, then call tf.math.top_k, and then reshape back.
  x_in_shape = x_in.shape
  x_rank = x_in_shape.rank
  # Requires a statically-known, non-empty last dimension.
  assert x_rank and x_in_shape.as_list()[x_rank - 1] > 0
  last_dim_size = x_in_shape.as_list()[x_rank - 1]
  # A value strictly below every element, used to mask out found maxima.
  min_value = tf.math.reduce_min(x_in) - 1.0
  out_indices = []
  out_values = []
  for unused_i in range(k):
    # Iteratively extract the argmax, then overwrite it with min_value so the
    # next iteration finds the runner-up.
    index_i = tf.math.argmax(x_in, axis=-1, output_type=tf.int32)
    mask_i = tf.one_hot(index_i, last_dim_size)
    # TODO(yonghui): Would tf.gather be more efficient and numerically stable
    # here?
    value_i = tf.reduce_sum(mask_i * x_in, -1, keepdims=True)
    x_in = (1.0 - mask_i) * x_in + mask_i * min_value
    out_indices.append(tf.expand_dims(index_i, -1))
    out_values.append(value_i)
  if k == 1:
    return out_values[0], out_indices[0]
  else:
    return tf.concat(out_values, x_rank - 1), tf.concat(out_indices, x_rank - 1)
def ReadVariable(var_op):
  """Returns the value of the given variable operation.

  Args:
    var_op: The variable's TF `Operation`. It could be one of VarHandleOp,
      Variable and VariableV2.

  Returns:
    A `Tensor` containing the value of the variable.
  """
  if var_op.type == 'VarHandleOp':
    # Pick a ReadVariableOp with no control dependencies so that fetching it
    # has no side-effects when the user runs it.
    readers = [
        op for op in var_op.outputs[0].consumers()
        if op.type == 'ReadVariableOp' and not op.control_inputs
    ]
    assert readers
    return readers[0].outputs[0]
  # Legacy ref variables expose their value directly as output 0.
  assert var_op.type in ('Variable', 'VariableV2')
  return var_op.outputs[0]
# Graph-collection key and getter for the global list of TPU summary tensors.
_TPU_SUMMARY_TENSORS_KEY = ('__lingvo_tpu_summary_tensors')
_get_tpu_summary_tensors = _CollectionGetter(_TPU_SUMMARY_TENSORS_KEY,
                                             lambda: [])
def AddTpuSummaryTensor(name, value, weight=1.0):
  """Adds tensor to global collection of summaries.
  This needs to be used in situations where tf.summary() could be used but
  currently tf.summary is not supported. Use py_utils.AddTpuSummaryTensor() in
  low level code to add summary tensors to global collection of summaries.
  Then recover all summary tensors from global collection by calling
  py_utils.GetTpuSummaryTensors() from top level code (for example from
  ComputeLoss method of BaseTask).
  In addition to 'name' argument, current tensorflow name scope is also
  captured and added to the metric name. This way for example summaries from
  a repeated layer will appear as separate graphs in the tensorboard.
  Weight argument is optional and defaults to 1.0. See BaseTask.ComputeLoss for
  the exact definition of weight for eval metrics.
  Args:
    name: metric name
    value: metric value tensor
    weight: weight tensor for weighted metrics
  """
  tpu_summary_tensors = _get_tpu_summary_tensors()
  x = NestedMap()
  x.name = name
  # Stored as a (value, weight) tuple; weight is converted to a tensor here.
  x.value = value, tf.convert_to_tensor(weight)
  # Captured so GetTpuSummaryTensors can disambiguate repeated layers.
  x.name_scope = tf.get_default_graph().get_name_scope()
  tpu_summary_tensors.append(x)
def GetTpuSummaryTensors():
  """Returns summary tensors from global collection.

  Returns:
    A dict mapping '<name>/<sanitized name scope>' to (metric, weight) pairs.
  """
  result = {}
  for entry in _get_tpu_summary_tensors():
    key = '%s/%s' % (entry.name, SanitizeScopeKey(entry.name_scope))
    result[key] = entry.value
  return result
def ComputationShape(split_size):
  """Decides the computation shape based on the split_size."""
  # Table of supported device counts and their corresponding topology shapes.
  shapes = {
      1: [1, 1, 1, 1],
      2: [1, 1, 1, 2],
      4: [2, 1, 1, 2],
      8: [2, 2, 1, 2],
      16: [4, 2, 1, 2],
      32: [4, 4, 1, 2],
      64: [4, 8, 1, 2],
      128: [8, 8, 1, 2],
      256: [8, 16, 1, 2],
      512: [16, 16, 1, 2],
      2048: [32, 32, 1, 2],
  }
  assert split_size in shapes, ('Model parallelism with %d devices is currently'
                                ' not supported.' % split_size)
  # Return a fresh list so callers can't mutate the table.
  return list(shapes[split_size])
def GetExtraVars():
  """Returns the captured variables by the function."""
  graph = tf.get_default_graph()
  if isinstance(graph, func_graph.FuncGraph):
    return graph.variable_captures
  return function.get_extra_vars()
def GetExtraInputs():
  """Returns the captured input tensors by the function."""
  graph = tf.get_default_graph()
  if isinstance(graph, func_graph.FuncGraph):
    return graph.external_captures
  return function.get_extra_inputs()
def GetExtraArgs():
  """Returns the corresponding function arguments for the captured inputs."""
  graph = tf.get_default_graph()
  if isinstance(graph, func_graph.FuncGraph):
    return graph.internal_captures
  return function.get_extra_args()
| mlperf/training_results_v0.7 | Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v4-16/lingvo/core/py_utils.py | Python | apache-2.0 | 145,392 | [
"Gaussian"
] | f1995767f6ca3ad7725c1398c638ae850496e70b7ea91d503b19deb6641508a9 |
###############################################################################
# Copyright (c) 2007-2018, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""Unified interface to SciPy function fitting routines.
This module provides a unified interface to the fitting of functions to data
with SciPy. All fitting routines conform to the following simple method
interface:
- __init__(p) : set parameters of interpolation function e.g. polynomial degree
- fit(x, y) : fit given input-output data
- __call__(x) / eval(x) : evaluate function on new input data
Each interpolation routine falls in one of two categories: scatter fitting or
grid fitting. They share the same interface, only differing in the definition
of input data x.
Scatter-fitters operate on unstructured scattered input data (i.e. not on a
grid). The input data consists of a sequence of ``x`` coordinates and a
sequence of corresponding ``y`` data, where the order of the ``x`` coordinates
does not matter and their location can be arbitrary. The ``x`` coordinates can
have an arbitrary dimension (although most classes are specialised for 1-D or
2-D data). If the dimension is bigger than 1, the coordinates are provided as
an array of column vectors. These fitters have ScatterFit as base class.
Grid-fitters operate on input data that lie on a grid. The input data consists
of a sequence of x-axis tick sequences and the corresponding array of y data.
These fitters have GridFit as base class.
The module is organised as follows:
Scatter fitters
---------------
- :class:`ScatterFit` : Abstract base class for scatter fitters
- :class:`LinearLeastSquaresFit` : Fit linear regression model to data with SVD
- :class:`Polynomial1DFit` : Fit polynomial to 1-D data
- :class:`Polynomial2DFit` : Fit polynomial to 2-D data
- :class:`PiecewisePolynomial1DFit` : Fit piecewise polynomial to 1-D data
- :class:`Independent1DFit` : Interpolate N-dimensional matrix along given axis
- :class:`Delaunay2DScatterFit` : Interpolate scalar function of 2-D data,
based on Delaunay triangulation
(scattered data version)
- :class:`NonLinearLeastSquaresFit` : Fit a generic function to data, based on
non-linear least squares optimisation.
- :class:`GaussianFit` : Fit Gaussian curve to multi-dimensional data
- :class:`Spline1DFit` : Fit a B-spline to 1-D data
- :class:`Spline2DScatterFit` : Fit a B-spline to scattered 2-D data
- :class:`RbfScatterFit` : Do radial basis function (RBF) interpolation
Grid fitters
------------
- :class:`GridFit` : Abstract base class for grid fitters
- :class:`Spline2DGridFit` : Fit a B-spline to 2-D data on a rectangular grid
Helper functions
----------------
- :func:`squash` : Flatten array, but not necessarily all the way to a 1-D array
- :func:`unsquash` : Restore an array that was reshaped by :func:`squash`
- :func:`sort_grid` : Ensure that the coordinates of a rectangular grid are in
ascending order
- :func:`desort_grid` : Undo the effect of :func:`sort_grid`
- :func:`vectorize_fit_func` : Factory that creates vectorised version of
function to be fitted to data
- :func:`randomise` : Randomise fitted function parameters by resampling
residuals
"""
from __future__ import absolute_import
import warnings
from .generic import * # noqa: F403 (simplifies API export)
from .utils import * # noqa: F403
from .delaunay import * # noqa: F403
from .gaussian import * # noqa: F403
from .linlstsq import * # noqa: F403
from .nonlinlstsq import * # noqa: F403
from .poly import * # noqa: F403
from .rbf import * # noqa: F403
from .spline import * # noqa: F403
def _setup_test():
    """Create test() method that will run unit tests via nose."""
    argv = ['', '--exe', '-w', __path__[0]]  # noqa: F405 (__path__ is special)
    try:
        import nose as _nose
    except ImportError:
        # Without nose, expose a stub that just warns when invoked.
        def test():
            warnings.warn('Could not import nose. Unit tests not available.')
        return test
    import functools
    return functools.partial(_nose.run, 'scikits.fitting', argv=argv)
test = _setup_test()
| ludwigschwardt/scikits.fitting | scikits/fitting/__init__.py | Python | bsd-3-clause | 4,864 | [
"Gaussian"
] | 857b475d34d32ca33dfef68f270465ccd9465bcfd29e7e300c30c87b997f9bb8 |
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.simpleapi import (DeleteWorkspace, CreateSampleWorkspace,
AddSampleLog, EditInstrumentGeometry,
CloneWorkspace, CompareWorkspaces, FindEPP)
from testhelpers import run_algorithm
from mantid.api import AnalysisDataService
from scipy.constants import N_A, hbar, k
import numpy as np
class ComputeCalibrationCoefVanTest(unittest.TestCase):
    """Unit tests for the ComputeCalibrationCoefVan Mantid algorithm."""
    def setUp(self):
        # Two-bank workspace: linear background plus a Gaussian peak at x=5;
        # FindEPP locates the elastic peak positions used by the algorithm.
        input_ws = CreateSampleWorkspace(
            Function="User Defined",
            UserDefinedFunction="name=LinearBackground, " +
            "A0=0.3;name=Gaussian, PeakCentre=5, Height=10, Sigma=0.3",
            NumBanks=2, BankPixelWidth=1, XMin=0, XMax=10, BinWidth=0.1,
            BankDistanceFromSample=4.0)
        self._input_ws = input_ws
        self._table = FindEPP(input_ws, OutputWorkspace="table")
        AddSampleLog(self._input_ws, LogName='wavelength', LogText='4.0',
                     LogType='Number', LogUnit='Angstrom')
    def test_output(self):
        """Output workspace carries the input's title and histogram count."""
        outputWorkspaceName = "output_ws"
        alg_test = run_algorithm("ComputeCalibrationCoefVan",
                                 VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table,
                                 OutputWorkspace=outputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
        # Output = Vanadium ws
        self.assertEqual(wsoutput.getRun().getLogData('run_title').value,
                         self._input_ws.getRun().getLogData('run_title').value)
        # Size of output workspace
        self.assertEqual(wsoutput.getNumberHistograms(),
                         self._input_ws.getNumberHistograms())
        DeleteWorkspace(wsoutput)
        return
    def test_sum(self):
        """For theta=0 (DWF=1) the output is the plain peak-window sum."""
        outputWorkspaceName = "output_ws"
        alg_test = run_algorithm("ComputeCalibrationCoefVan",
                                 VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table,
                                 OutputWorkspace=outputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
        # check whether sum is calculated correctly, for theta=0, dwf=1
        # Bins 27..74 cover the Gaussian peak region around x=5.
        y_sum = sum(self._input_ws.readY(0)[27:75])
        e_sum = np.sqrt(sum(np.square(self._input_ws.readE(0)[27:75])))
        self.assertAlmostEqual(y_sum, wsoutput.readY(0)[0])
        self.assertAlmostEqual(e_sum, wsoutput.readE(0)[0])
        DeleteWorkspace(wsoutput)
    def test_dwf_using_default_temperature(self):
        """Debye-Waller correction applied with the default 293K temperature."""
        outputWorkspaceName = "output_ws"
        # change theta to make dwf != 1
        EditInstrumentGeometry(self._input_ws, L2="4,8", Polar="0,15",
                               Azimuthal="0,0", DetectorIDs="1,2")
        alg_test = run_algorithm("ComputeCalibrationCoefVan",
                                 VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table,
                                 OutputWorkspace=outputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
        self._checkDWF(wsoutput, 293.0)
        DeleteWorkspace(wsoutput)
    def test_temperature_from_sample_log(self):
        """Temperature is read from the 'temperature' sample log when present."""
        self._input_ws.mutableRun().addProperty('temperature', 0.0, True)
        outputWorkspaceName = "output_ws"
        EditInstrumentGeometry(self._input_ws, L2="4,8", Polar="0,15",
                               Azimuthal="0,0", DetectorIDs="1,2")
        alg_test = run_algorithm("ComputeCalibrationCoefVan",
                                 VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table,
                                 OutputWorkspace=outputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
        self._checkDWF(wsoutput, 0.0)
        DeleteWorkspace(wsoutput)
    def test_temperature_input_overrides_sample_log(self):
        """Explicit Temperature property wins over the sample log value."""
        self._input_ws.mutableRun().addProperty('temperature', 567.0, True)
        outputWorkspaceName = "output_ws"
        EditInstrumentGeometry(self._input_ws, L2="4,8", Polar="0,15",
                               Azimuthal="0,0", DetectorIDs="1,2")
        alg_test = run_algorithm("ComputeCalibrationCoefVan",
                                 VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table,
                                 OutputWorkspace=outputWorkspaceName,
                                 Temperature=0.0)
        self.assertTrue(alg_test.isExecuted())
        wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
        self._checkDWF(wsoutput, 0.0)
        DeleteWorkspace(wsoutput)
    def test_input_not_modified(self):
        """Running the algorithm leaves the input workspace untouched."""
        backup = CloneWorkspace(self._input_ws)
        outputWorkspaceName = "output_ws"
        alg_test = run_algorithm("ComputeCalibrationCoefVan",
                                 VanadiumWorkspace=self._input_ws,
                                 EPPTable=self._table,
                                 OutputWorkspace=outputWorkspaceName)
        self.assertTrue(alg_test.isExecuted())
        self.assertTrue(CompareWorkspaces(backup, self._input_ws)[0])
        DeleteWorkspace(backup)
    def tearDown(self):
        # Remove fixtures from the ADS so tests don't leak workspaces.
        if AnalysisDataService.doesExist(self._input_ws.name()):
            DeleteWorkspace(self._input_ws)
        if AnalysisDataService.doesExist(self._table.name()):
            DeleteWorkspace(self._table)
    def _checkDWF(self, wsoutput, temperature):
        """Check spectrum 1 equals the peak sum divided by the expected
        Debye-Waller factor for the given temperature (0K or 293K only)."""
        if temperature == 0.0:
            integral = 0.5
        elif temperature == 293.0:
            integral = 4.736767162094296 / 3.0
        else:
            raise RuntimeError("Unsupported temperature supplied to " +
                               "_checkDWF(). Use 0K or 293K only.")
        y_sum = sum(self._input_ws.readY(1)[27:75])
        e_sum = np.sqrt(sum(np.square(self._input_ws.readE(1)[27:75])))
        # 50.942 g/mol is vanadium's molar mass; mvan is the atomic mass in kg.
        mvan = 0.001*50.942/N_A
        Bcoef = 3.0*integral*1e+20*hbar*hbar/(2.0*mvan*k*389.0)
        # Detector 2 sits at polar angle 15 degrees (see EditInstrumentGeometry
        # in the tests above); wavelength is 4.0 Angstrom.
        dwf = np.exp(
            -1.0*Bcoef*(4.0*np.pi*np.sin(0.5*np.radians(15.0))/4.0)**2)
        self.assertAlmostEqual(y_sum/dwf, wsoutput.readY(1)[0])
        self.assertAlmostEqual(e_sum/dwf, wsoutput.readE(1)[0])
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| wdzhou/mantid | Framework/PythonInterface/test/python/plugins/algorithms/ComputeCalibrationCoefVanTest.py | Python | gpl-3.0 | 6,573 | [
"Gaussian"
] | bda4122bf80d3ff88bb4c789d2f5f8630ecd659a897db02700efaac4826019dc |
"""
Convert a miniast to an XML document using ElementTree. This allows us to
write XPath unit tests, or just serialize the AST.
"""
__all__ = ['etree', 'tostring', 'XMLDumper']
import miniutils
# Locate an ElementTree implementation, preferring lxml (which adds
# pretty-printing support, recorded in have_lxml for tostring()).
try:
    from lxml import etree
    have_lxml = True
except ImportError:
    have_lxml = False
    try:
        # Python 2.5+ stdlib, C-accelerated implementation
        from xml.etree import cElementTree as etree
    except ImportError:
        try:
            # Python 2.5+ stdlib, pure-Python implementation
            from xml.etree import ElementTree as etree
        except ImportError:
            try:
                # normal cElementTree install
                import cElementTree as etree
            except ImportError:
                try:
                    # normal ElementTree install
                    import elementtree.ElementTree as etree
                except ImportError:
                    # Defer the failure until etree is actually used.
                    etree = miniutils.UnavailableImport("elementtree")
import minivisitor
class XMLDumper(minivisitor.PrintTree):
    """Render a miniast tree as an ElementTree XML document.

    Calling visit_FunctionNode (via visit()) on the root FunctionNode returns
    the root XML element, which can be serialized with tostring().
    """

    # Depth of nested ForNodes, recorded as an attribute on each loop element.
    loop_level = 0

    def visit_FunctionNode(self, node):
        """Entry point: build the XML tree and return its root element."""
        self.treebuilder = etree.TreeBuilder()
        self.visit_Node(node)
        return self.treebuilder.close()

    def start(self, node, attrs={}):
        """Open an XML element for node and return its tag name.

        `attrs` is read-only here (dict() below copies it), so the shared
        default is safe.
        """
        name = type(node).__name__
        format_value = self.format_value(node)
        if format_value:
            attrs = dict(attrs,
                         value=format_value,
                         id=hex(id(node)),
                         type=node.type)
        # TreeBuilder attribute values must be strings. Use items() instead of
        # the Python-2-only iteritems() so this works on Python 2 and 3.
        attrs = dict((k, str(v)) for k, v in attrs.items())
        self.treebuilder.start(name, attrs)
        return name

    def visit_BinaryOperationNode(self, node):
        """Emit the node with explicit <lhs>/<rhs> wrapper elements."""
        name = self.start(node)
        self.treebuilder.start('lhs', {})
        self.visit(node.lhs)
        self.treebuilder.end('lhs')
        self.treebuilder.start('rhs', {})
        self.visit(node.rhs)
        self.treebuilder.end('rhs')
        self.treebuilder.end(name)

    def visit_ForNode(self, node):
        """Emit a loop element annotated with nesting level and loop flags."""
        attrs = dict(loop_level=self.loop_level,
                     is_fixup=node.is_fixup,
                     is_controlling_loop=node.is_controlling_loop,
                     is_tiling_loop=node.is_tiling_loop)
        self.loop_level += 1
        self.visit_Node(node, attrs)
        self.loop_level -= 1

    def visit_Node(self, node, attrs={}):
        """Generic fallback: open an element, visit children, close it."""
        name = self.start(node, attrs)
        self.visitchildren(node)
        self.treebuilder.end(name)
def tostring(xml_root_element):
    """Serialize an XML root element to a UTF-8 string.

    Pretty-prints when lxml is the active ElementTree implementation.
    """
    tree = etree.ElementTree(xml_root_element)
    kwargs = {'pretty_print': True} if have_lxml else {}
    return etree.tostring(tree, encoding='UTF-8', **kwargs)
"VisIt"
] | 471ecc2adcce19deacd1a281d3e3798e4ed574ce4147b4dda04a3449822d213f |
from paraview.simple import *
import sys
from paraview import smtesting
# Create a cone source and check that constructor-set properties round-trip.
resolution = 15
cone = Cone(Resolution=resolution)
if cone.Resolution != resolution:
    raise smtesting.TestError('Test failed: Resolution has not been set properly.')
# Change the resolution after construction and re-check.
resolution = 12
cone.Resolution = resolution
if cone.Resolution != resolution:
    raise smtesting.TestError('Test failed: Problem changing resolution.')
cone.Center = [3.1, 4.2, 5.5]
if cone.Center[0] != 3.1 or cone.Center[1] != 4.2 or cone.Center[2] != 5.5:
    raise smtesting.TestError('Test failed: Problem setting center of cone.')
# Connect a Shrink filter and verify the pipeline input wiring.
shrinkFilter = Shrink(cone)
if shrinkFilter.Input.GetAddressAsString('') != cone.GetAddressAsString(''):
    raise smtesting.TestError('Test failed: Pipeline not properly set.')
shrinkFilter.UpdatePipeline()
# Expected data counts after shrinking: resolution+1 cells, 4 points per cell.
if shrinkFilter.GetDataInformation().GetNumberOfCells() != resolution+1 or shrinkFilter.GetDataInformation().GetNumberOfPoints() != resolution*4:
    raise smtesting.TestError('Test failed: Pipeline not operating properly.')
# Set Resolution through the low-level proxy property API and re-check counts.
resolution = 33
rp = cone.GetProperty("Resolution")
rp.SetElement(0, resolution)
cone.UpdateProperty("Resolution")
shrinkFilter.UpdatePipeline()
if shrinkFilter.GetDataInformation().GetNumberOfCells() != resolution+1 or shrinkFilter.GetDataInformation().GetNumberOfPoints() != resolution*4:
    raise smtesting.TestError('Test failed: Problem setting property directly.')
# Render the result and compare against the stored baseline image.
Show(shrinkFilter)
ren = Render()
if not smtesting.DoRegressionTesting(ren.SMProxy):
    raise smtesting.TestError('Image comparison failed.')
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/ParaViewCore/ServerManager/Default/Testing/Python/PythonPVSimpleCone.py | Python | gpl-3.0 | 1,537 | [
"ParaView"
] | 30fe9c78b0585827eddc2bcfb73bab4f9e747529f2b02b573f3ae6ba197b3174 |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 21, 2020
@author: alfoa, wangc
LinearDiscriminantAnalysis
Classifier implementing Discriminant Analysis (Linear) classification
"""
#Internal Modules (Lazy Importer)--------------------------------------------------------------------
#Internal Modules (Lazy Importer) End----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from SupervisedLearning.ScikitLearn import ScikitLearnBase
from utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class LinearDiscriminantAnalysisClassifier(ScikitLearnBase):
  """
    LinearDiscriminantAnalysisClassifier
    Classifier with a linear decision boundary, generated by fitting class
    conditional densities to the data and using Bayes' rule (wraps
    sklearn.discriminant_analysis.LinearDiscriminantAnalysis).
  """
  # ROM metadata consumed by the framework: classification problem,
  # input normalization not performed for this model
  info = {'problemtype':'classification', 'normalize':False}

  def __init__(self):
    """
      Constructor that will appropriately initialize a supervised learning object
      @ In, None
      @ Out, None
    """
    super().__init__()
    # lazy import: sklearn is only required when this ROM is actually constructed
    import sklearn
    import sklearn.discriminant_analysis
    # store the sklearn class itself; instantiation happens in initializeModel
    self.model = sklearn.discriminant_analysis.LinearDiscriminantAnalysis

  @classmethod
  def getInputSpecification(cls):
    """
      Method to get a reference to a class that specifies the input data for
      class cls.
      @ In, cls, the class for which we are retrieving the specification
      @ Out, inputSpecification, InputData.ParameterInput, class to use for
        specifying input of cls.
    """
    specs = super(LinearDiscriminantAnalysisClassifier, cls).getInputSpecification()
    specs.description = r"""The \xmlNode{LinearDiscriminantAnalysisClassifier} is a classifier with a linear decision boundary,
                         generated by fitting class conditional densities to the data and using Bayes' rule.
                         The model fits a Gaussian density to each class, assuming that all classes share the same covariance matrix.
                         The fitted model can also be used to reduce the dimensionality of the input by projecting it to the most discriminative
                         directions, using the transform method.
                         \zNormalizationNotPerformed{LinearDiscriminantAnalysisClassifier}
                         """
    specs.addSub(InputData.parameterInputFactory("solver", contentType=InputTypes.StringType,
                                                 descr=r"""Solver to use, possible values:
                                                 \begin{itemize}
                                                   \item svd: Singular value decomposition (default). Does not compute the covariance matrix,
                                                         therefore this solver is recommended for data with a large number of features.
                                                   \item lsqr: Least squares solution. Can be combined with shrinkage or custom covariance estimator.
                                                   \item eigen: Eigenvalue decomposition. Can be combined with shrinkage or custom covariance estimator.
                                                 \end{itemize}
                                                 """, default='svd'))
    # NOTE(review): the XML node name 'Shrinkage' is capitalized, unlike sklearn's
    # 'shrinkage' keyword — confirm that initializeModel maps the name correctly.
    specs.addSub(InputData.parameterInputFactory("Shrinkage", contentType=InputTypes.FloatOrStringType,
                                                 descr=r"""Shrinkage parameter, possible values: 1) None: no shrinkage (default),
                                                 2) `auto': automatic shrinkage using the Ledoit-Wolf lemma,
                                                 3) float between 0 and 1: fixed shrinkage parameter.
                                                 This should be left to None if covariance_estimator is used. Note that shrinkage works
                                                 only with `lsqr' and `eigen' solvers.""", default=None))
    specs.addSub(InputData.parameterInputFactory("priors", contentType=InputTypes.FloatListType,
                                                 descr=r"""The class prior probabilities. By default, the class proportions are inferred from the training data.""", default=None))
    specs.addSub(InputData.parameterInputFactory("n_components", contentType=InputTypes.IntegerType,
                                                 descr=r"""Number of components (<= min(n\_classes - 1, n\_features)) for dimensionality reduction.
                                                 If None, will be set to min(n\_classes - 1, n\_features). This parameter only affects the transform
                                                 method.""", default=None))
    specs.addSub(InputData.parameterInputFactory("store_covariance", contentType=InputTypes.BoolType,
                                                 descr=r"""If True, explicitly compute the weighted within-class covariance matrix when solver
                                                 is `svd'. The matrix is always computed and stored for the other solvers.""", default=False))
    specs.addSub(InputData.parameterInputFactory("tol", contentType=InputTypes.FloatType,
                                                 descr=r"""Absolute threshold for a singular value of X to be considered significant, used to estimate the rank of X.
                                                 Dimensions whose singular values are non-significant are discarded. Only used if solver is `svd'.""", default=1.0e-4))
    specs.addSub(InputData.parameterInputFactory("covariance_estimator", contentType=InputTypes.IntegerType,
                                                 descr=r"""covariance estimator (not supported)""", default=None))
    return specs

  def _handleInput(self, paramInput):
    """
      Function to handle the common parts of the distribution parameter input.
      @ In, paramInput, ParameterInput, the already parsed input.
      @ Out, None
    """
    super()._handleInput(paramInput)
    settings, notFound = paramInput.findNodesAndExtractValues(['solver', 'Shrinkage', 'priors',
                                                               'n_components', 'store_covariance','tol', 'covariance_estimator'])
    # every node above has a default in the spec, so notFound must be empty
    assert(not notFound)
    self.initializeModel(settings)
| joshua-cogliati-inl/raven | framework/SupervisedLearning/ScikitLearn/DiscriminantAnalysis/LinearDiscriminantAnalysis.py | Python | apache-2.0 | 7,028 | [
"Gaussian"
] | 48651e9764bbd783061f875fb5f51c337b0f51f9c5db44ad64cdc0ed6ea2a040 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import textwrap
from . import constants, geometry, helper
from .config import Config
class Translator:
def __init__(self, language=None):
    """
    Create a translator for the given language code.

    :param language: two-letter language code (e.g. "de", "en"); when None
        (or unsupported), the configured default language is used.
    """
    # Resolve the default lazily.  The previous signature
    # ``language=Config().default_language`` evaluated Config() once at
    # class-definition (import) time, so configuration changes made later
    # were never picked up and Config was instantiated as an import side
    # effect.  Passing an explicit language behaves exactly as before.
    if language is None:
        language = Config().default_language
    if language in constants.supported_language_list:
        self.language = language
    else:
        # unsupported language requested: fall back to the configured default
        self.language = Config().default_language
def translate(self, category, tag):
    """
    Translate the given category/tag pair into the configured language.

    Dispatches to the German table for language "de" and to the English
    table for every other configured language.
    """
    lookup = (self.translate_to_german if self.language == "de"
              else self.translate_to_english)
    return lookup(category, tag)
def translate_to_german(self, category, tag):
    """
    Return the German label for a (category, tag) pair.

    Categories cover application/server messages, route-description
    templates, and OpenStreetMap tag namespaces (amenity, highway, ...).
    Unknown pairs fall through to the raw tag value.
    """
    # application / server error and status messages
    if category == "message":
        if tag == "map_not_available_or_incompatible": return "Karte ist nicht verfügbar oder inkompatibel"
        if tag == "database_error": return "Fehler bei der Datenbankabfrage"
        if tag == "no_route_options": return "Keine Routenoptionen übermittelt"
        if tag == "no_route_factor_option": return "Der Faktor für die Berechnung des Umweges fehlt"
        if tag == "no_session_id_option": return "Die Session Id fehlt"
        if tag == "old_request_still_running": return "Eine Anfrage von Ihnen wird bereits bearbeitet. Sie können in 3 Minuten fortfahren."
        if tag == "server_busy": return "Der Server ist gerade ausgelastet, Bitte versuchen Sie es später erneut."
        if tag == "no_source_route": return "Keine Route übermittelt"
        if tag == "source_route_incomplete": return "Die übermittelte Routinganfrage ist unvollständig"
        if tag == "no_start_point": return "Kein Startpunkt übermittelt"
        if tag == "start_point_no_name": return "Dem übermittelten Startpunkt fehlt der Name"
        if tag == "start_point_no_latitude": return "Dem übermittelten Startpunkt fehlt der Breitengrad"
        if tag == "start_point_no_longitude": return "Dem übermittelten Startpunkt fehlt der Längengrad"
        if tag == "start_point_no_type": return "Dem übermittelten Startpunkt fehlt der Typ"
        if tag == "no_way_id": return "Keine Weg-Id übermittelt"
        if tag == "no_node_id": return "Keine Node-Id übermittelt"
        if tag == "no_next_node_id": return "Keine Next-Node-Id übermittelt"
        if tag == "node_id_invalid": return "Die übermittelte Wegpunkt-Id ist ungültig"
        if tag == "way_id_invalid": return "Die übermittelte Weg-Id ist ungültig"
        if tag == "source_route_no_transport_parts": return "Der Route fehlt ein, per ÖPNV zurückzulegender Routenabschnitt"
        if tag == "source_route_multiple_transport_parts": return "Die Route beinhaltet mehr als einen, per ÖPNV zurückzulegenden Routenabschnitt"
        if tag == "no_address_string": return "Keine Adresse übermittelt"
        if tag == "address_invalid": return "Für die gegebenen Adresse sind keine Koordinaten ermittelbar"
        if tag == "no_latitude_value": return "Kein Breitengrad übermittelt"
        if tag == "no_longitude_value": return "Kein Längengrad übermittelt"
        if tag == "no_range_value": return "Kein Wert für den Umkreis übermittelt"
        if tag == "no_tags_value": return "Keine POI Tags übermittelt"
        if tag == "no_public_transport_provider": return "Kein ÖPNV Anbieter übermittelt"
        if tag == "no_address_for_this_coordinates": return "Für die übergebenen Koordinaten konnte keine Adresse ermittelt werden"
        if tag == "no_station_for_this_coordinates": return "Die Haltestelle konnte nicht ermittelt werden"
        if tag == "bahn_server_down": return "Der Server der Deutschen Bahn ist nicht erreichbar"
        if tag == "no_bug_report_file_name": return "Kein Dateiname für den Fehlerbericht übertragen"
        if tag == "no_bug_report_contents": return "Der Fehlerbericht ist leer"
        if tag == "process_canceled": return "Prozess abgebrochen"
    # templates for walking-route descriptions (printf-style placeholders)
    if category == "footway_creator":
        if tag == "first_segment": return "Namenloses Wegsegment"
        if tag == "last_segment": return "Namenloses Wegsegment"
        if tag == "via_point_label": return "Zwischenziel %d: %s"
        if tag == "route_description_without_transport": return textwrap.dedent("""\
            Die Route ist %d Meter lang und besteht aus %d Kreuzungen.""")
        if tag == "route_description_with_single_transport": return textwrap.dedent("""\
            Die Route ist %d Meter lang. Sie besteht aus %d Kreuzungen und einer Fahrt mit dem ÖPNV.
            Abfahrt in %d Minuten von der Station %s mit der Linie %s nach %s.""")
        if tag == "route_description_with_multi_transport": return textwrap.dedent("""\
            Die Route ist %d Meter lang. Sie besteht aus %d Kreuzungen und %d Fahrten mit dem ÖPNV.
            Erste Abfahrt in %d Minuten von der Station %s mit der Linie %s nach %s.""")
    # templates for public-transport legs of a route
    if category == "transport_creator":
        if tag == "footway_place_holder": return "Platzhalter für eine zu Fuß zurückzulegende Strecke"
        if tag == "same_station": return "Weiterfahrt von gleicher Haltestelle"
        if tag == "opposite_station": return "Weiterfahrt von gegenüberliegender Haltestelle"
        if tag == "nearby_station": return "Weiterfahrt von benachbarter Haltestelle"
        if tag == "nearby_station_no_exact_pos": return "Weiterfahrt von benachbarter Haltestelle, genaue Haltestellenposition nicht bekannt"
        if tag == "within_station": return "Weiterfahrt innerhalb dieser Station"
        if tag == "different_station": return "Weiterfahrt von anderer Station"
        if tag == "transport_route_description": return textwrap.dedent("""\
            Abfahrt in %d Minuten, Fahrzeit: %d Minuten, %d mal umsteigen,
            Verkehrsmittel: %s, Fußweg: %d Meter""")
        if tag == "transport_route_description_no_time": return textwrap.dedent("""\
            Abfahrt in %d Minuten, Fahrzeit: %d Minuten, %d mal umsteigen (wenig Zeit),
            Verkehrsmittel: %s, Fußweg: %d Meter""")
    if category == "poi":
        if tag == "address": return "Adresse"
        if tag == "hiking_trail": return "Wanderweg"
        if tag == "way_point": return "Wegpunkt"
    # from here on: OpenStreetMap tag namespaces
    if category == "smoothness":
        if tag == "excellent": return "Sehr gut"
        if tag == "good": return "Gut"
        if tag == "intermediate": return "Mittelmäßig"
        if tag == "bad": return "Schlecht"
        if tag == "very_bad": return "Schlecht"
        if tag == "horrible": return "Sehr schlecht"
        if tag == "very_horrible": return "Sehr schlecht"
        if tag == "impassable": return "Für Fahrzeuge unpassierbar"
    if category == "surface":
        if tag == "unpaved": return "Unbefestigt"
        if tag == "paved": return "befestigt"
        if tag == "asphalt": return "Asphalt"
        if tag == "cobblestone:flattened": return "Kopfsteinpflaster"
        if tag == "cobblestone": return "Kopfsteinpflaster"
        if tag == "concrete:plates": return "Betonspurplatten"
        if tag == "concrete:lanes": return "Betonspurplatten"
        if tag == "concrete": return "Beton"
        if tag == "paving_stones:20": return "Pflastersteine"
        if tag == "paving_stones:30": return "Pflastersteine"
        if tag == "paving_stones": return "Pflastersteine"
        if tag == "sett": return "Pflastersteine"
        if tag == "compacted": return "Befestigter Kies"
        if tag == "fine_gravel": return "Befestigter Kies"
        if tag == "earth": return "Trampelpfad"
        if tag == "ground": return "Trampelpfad"
        if tag == "grass_paver": return "Rasengittersteine"
        if tag == "grass": return "Gras"
        if tag == "metal": return "Metall"
        if tag == "gravel": return "Grober Schotter"
        if tag == "pebblestone": return "Kies"
        if tag == "sand": return "Sand"
        if tag == "mud": return "Matsch"
        if tag == "ice": return "Eis"
        if tag == "snow": return "Schnee"
        if tag == "wood": return "Holz"
        if tag == "tartan": return "Tartanbelag"
        if tag == "artificial_turf": return "Kunstrasen"
        if tag == "decoturf": return "Kunstrasen"
        if tag == "clay": return "Ascheplatz"
    if category == "aeroway":
        if tag == "aerodrome": return "Flughafen"
        if tag == "terminal": return "Flughafenterminal"
    if category == "amenity":
        if tag == "animal_boarding": return "Tierheim"
        if tag == "animal_shelter": return "Tierheim"
        if tag == "arts_centre": return "Kulturzentrum"
        if tag == "atm": return "Geldautomat"
        if tag == "auditorium": return "Hörsaal"
        if tag == "bank": return "Bank"
        if tag == "bar": return "Bar"
        if tag == "bbq": return "Grillplatz"
        if tag == "bench": return "Bank"
        if tag == "bicycle_parking": return "Fahrradstellplatz"
        if tag == "bicycle_rental": return "Fahrradverleih"
        if tag == "brothel": return "Bordell"
        if tag == "bureau_de_change": return "Wechselstube"
        if tag == "bus_station": return "Busbahnhof"
        if tag == "cafe": return "Café"
        if tag == "car_rental": return "Autovermietung"
        if tag == "car_sharing": return "Carsharing"
        if tag == "car_wash": return "Autowaschanlage"
        if tag == "casino": return "Casino"
        if tag == "cinema": return "Kino"
        if tag == "clinic": return "Krankenhaus"
        if tag == "club": return "Verein"
        if tag == "college": return "Hochschule"
        if tag == "community_centre": return "Gemeinschaftszentrum"
        if tag == "courthouse": return "Gericht"
        if tag == "crematorium": return "Krematorium"
        if tag == "dentist": return "Zahnarzt"
        if tag == "doctors": return "Arzt"
        if tag == "dormitory": return "Studentenwohnheim"
        if tag == "drinking_water": return "Trinkwasser"
        if tag == "driving_school": return "Fahrschule"
        if tag == "embassy": return "Botschaft"
        if tag == "emergency_phone": return "Notrufsäule"
        if tag == "fast_food": return "Schnellimbiss"
        if tag == "ferry_terminal": return "Fähranlegestelle"
        if tag == "fire_hydrant": return "Hydrant"
        if tag == "fire_station": return "Feuerwehr"
        if tag == "fountain": return "Springbrunnen"
        if tag == "fuel": return "Tankstelle"
        if tag == "grave_yard": return "Friedhof"
        if tag == "gym": return "Turnhalle"
        if tag == "hall": return "Halle"
        if tag == "health_centre": return "Gesundheitszentrum"
        if tag == "hospital": return "Krankenhaus"
        if tag == "hotel": return "Hotel"
        if tag == "hunting_stand": return "Hochstand"
        if tag == "ice_cream": return "Eisdiele"
        if tag == "kindergarten": return "Kindergarten"
        if tag == "kneipp_water_cure": return "Kneipp"
        if tag == "library": return "Bücherei"
        if tag == "market": return "Markt"
        if tag == "marketplace": return "Marktplatz"
        if tag == "mountain_rescue": return "Bergrettung"
        if tag == "nightclub": return "Nachtklub"
        if tag == "nursery": return "Kinderkrippe"
        if tag == "nursing_home": return "Pflegeheim"
        if tag == "office": return "Büro"
        if tag == "park": return "Park"
        if tag == "parking": return "Parkplatz"
        if tag == "pharmacy": return "Apotheke"
        if tag == "place_of_worship": return "Andachtsstätte"
        if tag == "police": return "Polizei"
        if tag == "post_box": return "Briefkasten"
        if tag == "post_office": return "Postamt"
        if tag == "preschool": return "Vorschule"
        if tag == "prison": return "Gefängnis"
        if tag == "pub": return "Kneipe"
        if tag == "public_building": return "öffentliches Gebäude"
        if tag == "public_market": return "öffentlicher Markt"
        if tag == "reception_area": return "Empfangsbereich"
        if tag == "recycling": return "Recyclingeinrichtung"
        if tag == "restaurant": return "Restaurant"
        if tag == "retirement_home": return "Altersheim"
        if tag == "sauna": return "Sauna"
        if tag == "school": return "Schule"
        if tag == "shelter": return "Schutzhütte"
        if tag == "shop": return "Geschäft"
        if tag == "shopping": return "Einkaufszentrum"
        if tag == "social_club": return "Geselligkeitsverein"
        if tag == "studio": return "Studio"
        if tag == "supermarket": return "Supermarkt"
        if tag == "swingerclub": return "Swingerclub"
        if tag == "taxi": return "Taxistand"
        if tag == "telephone": return "Telefonzelle"
        if tag == "theatre": return "Theater"
        if tag == "toilets": return "WC"
        if tag == "townhall": return "Rathaus"
        if tag == "university": return "Universität"
        if tag == "vending_machine": return "Automat"
        if tag == "veterinary": return "Tierarzt"
        if tag == "village_hall": return "Gemeindezentrum"
        if tag == "waste_basket": return "Mülleimer"
        if tag == "wifi": return "WLAN-Access-Point"
        if tag == "youth_centre": return "Jugendzentrum"
    if category == "building":
        if tag == "apartments": return "Wohnblock"
        if tag == "block": return "Gebäudeblock"
        if tag == "bunker": return "Bunker"
        if tag == "chapel": return "Kapelle"
        if tag == "church": return "Kirche"
        if tag == "city_hall": return "Rathaus"
        if tag == "commercial": return "Gewerbegebäude"
        if tag == "dormitory": return "Studentenwohnheim"
        if tag == "entrance": return "Eingang"
        if tag == "faculty": return "Ausbildungsgebäude"
        if tag == "farm": return "Bauernhof"
        if tag == "flats": return "Wohnung"
        if tag == "garage": return "Garage"
        if tag == "hall": return "Halle"
        if tag == "hospital": return "Spital"
        if tag == "hotel": return "Hotel"
        if tag == "house": return "Haus"
        if tag == "industrial": return "Industriegebäude"
        if tag == "office": return "Bürogebäude"
        if tag == "public": return "öffentliches Gebäude"
        if tag == "residential": return "Wohngebäude"
        if tag == "retail": return "Einzelhandelsgebäude"
        if tag == "school": return "Schulgebäude"
        if tag == "shop": return "Geschäft"
        if tag == "stadium": return "Stadion"
        if tag == "store": return "Warenhäuser"
        if tag == "terrace": return "Terrasse"
        if tag == "tower": return "Turm"
        if tag == "train_station": return "Bahnhof"
        if tag == "university": return "Universitätsgebäude"
        if tag == "building": return "Gebäude"
    if category == "entrance":
        if tag == "yes": return "Eingang"
        if tag == "main": return "Haupteingang"
        if tag == "service": return "Hintereingang"
        if tag == "emergency": return "Notausgang"
        if tag == "exit": return "Nur Ausgang"
        if tag == "subway_entrance": return "U-Bahn-Zugang"
        if tag == "entrance": return "Eingang"
    if category == "highway":
        if tag == "bridleway": return "Reitweg"
        if tag == "bus_guideway": return "Busspur"
        if tag == "bus_stop": return "Bushaltestelle"
        if tag == "byway": return "Umgehungsstraße"
        if tag == "construction": return "Straße im Bau"
        if tag == "cycleway": return "Radweg"
        if tag == "distance_marker": return "Kilometerstein"
        if tag == "emergency_access_point": return "Notrufpunkt"
        if tag == "footway": return "Fußweg"
        if tag == "ford": return "Furt"
        if tag == "gate": return "Tor"
        if tag == "living_street": return "Spielstraße"
        if tag == "minor": return "Nebenstraße"
        if tag == "motorway": return "Autobahn"
        if tag == "motorway_junction": return "Autobahnkreuz"
        if tag == "motorway_link": return "Autobahnauffahrt"
        if tag == "path": return "Pfad"
        if tag == "pedestrian": return "Fußgängerzone"
        if tag == "platform": return "Plattform"
        if tag == "primary": return "Primärstraße"
        if tag == "primary_link": return "Primärauffahrt"
        if tag == "raceway": return "Rennweg"
        if tag == "residential": return "Wohnstraße"
        if tag == "road": return "Straße"
        if tag == "secondary": return "Landstraße"
        if tag == "secondary_link": return "Landstraße"
        if tag == "service": return "Anliegerstraße"
        if tag == "services": return "Autobahnraststätte"
        if tag == "steps": return "Treppe"
        if tag == "stile": return "Zaunübertritt"
        if tag == "tertiary": return "Nebenstraße"
        if tag == "tertiary_link": return "Nebenstraße"
        if tag == "track": return "Feldweg"
        if tag == "trail": return "Pfad"
        if tag == "trunk": return "Fernverkehrsstraße"
        if tag == "trunk_link": return "Fernstraßenauffahrt"
        if tag == "unclassified": return "Landstraße"
        if tag == "unsurfaced": return "nichtasphaltierte Straße"
        if tag == "crossing": return "Kreuzung"
        if tag == "roundabout": return "Kreisverkehr"
        if tag == "traffic_signals": return "Ampelkreuzung"
    if category == "railway":
        if tag == "abandoned": return "aufgelassene Bahnstrecke"
        if tag == "construction": return "Eisenbahn im Bau"
        if tag == "crossing": return "Bahnübergang"
        if tag == "disused": return "unbenutzte Bahnstrecke"
        if tag == "disused_station": return "unbenutzer Bahnhof"
        if tag == "funicular": return "Seilbahn"
        if tag == "halt": return "Haltepunkt"
        if tag == "historic_station": return "historischer Bahnhof"
        if tag == "junction": return "Bahnknotenpunkt"
        if tag == "level_crossing": return "Eisenbahnkreuzung"
        if tag == "light_rail": return "S-Bahn"
        if tag == "miniature": return "Miniaturbahn"
        if tag == "monorail": return "Einschienenbahn"
        if tag == "narrow_gauge": return "Schmalspurbahn"
        if tag == "platform": return "Bahnsteig"
        if tag == "preserved": return "bewahrte Bahnstrecke"
        if tag == "rail": return "Eisenbahn"
        if tag == "spur": return "Bahnstrecke"
        if tag == "station": return "Bahnhof"
        if tag == "subway_entrance": return "U-Bahn-Zugang"
        if tag == "subway": return "U-Bahn"
        if tag == "switch": return "Weiche"
        if tag == "tram": return "Straßenbahn"
        if tag == "tram_stop": return "Straßenbahnhaltestelle"
        if tag == "yard": return "Rangierbahnhof"
    if category == "public_transport":
        if tag == "bus": return "Bushaltestelle"
        if tag == "tram": return "Straßenbahnhaltestelle"
        if tag == "train": return "Bahnhof"
        if tag == "light_rail": return "S-Bahnhof"
        if tag == "subway": return "U-Bahnhof"
        if tag == "ferry": return "Fährstation"
        if tag == "aerialway": return "Liftstation"
        if tag == "unknown": return "Haltestelle"
    if category == "bridge":
        if tag == "yes": return "Brücke"
    if category == "crossing":
        if tag == "traffic_signals": return "Fußgängerampel"
        if tag == "pelican": return "Fußgängerampel"
        if tag == "toucan": return "Fußgängerampel"
        if tag == "island": return "Verkehrsinsel"
        if tag == "uncontrolled": return "Zebrastreifen"
        if tag == "zebra": return "Zebrastreifen"
        if tag == "unmarked": return "Ungeregelte Straßenquerung"
        if tag == "unknown": return "Straßenquerung"
    if category == "historic":
        if tag == "archaeological_site": return "Ausgrabungsstätte"
        if tag == "battlefield": return "Kampfgebiet"
        if tag == "boundary_stone": return "Grenzstein"
        if tag == "building": return "historisches Gebäude"
        if tag == "castle": return "Schloss"
        if tag == "church": return "Kirche"
        if tag == "house": return "historisches Haus"
        if tag == "icon": return "Symbol"
        if tag == "manor": return "Gut"
        if tag == "memorial": return "Gedenkstätte"
        if tag == "mine": return "historisches Bergwerk"
        if tag == "monument": return "Monument"
        if tag == "museum": return "Museum"
        if tag == "ruins": return "Ruine"
        if tag == "tower": return "Turm"
        if tag == "wayside_cross": return "Wegkreuz"
        if tag == "wayside_shrine": return "Schrein"
        if tag == "wreck": return "Schiffswrack"
        if tag == "yes": return "Historischer Ort"
    if category == "landuse":
        if tag == "allotments": return "Kleingartenanlage"
        if tag == "basin": return "Becken"
        if tag == "brownfield": return "Brachfläche"
        if tag == "cemetery": return "Friedhof"
        if tag == "commercial": return "Gewerbegebiet"
        if tag == "conservation": return "Naturschutzgebiet"
        if tag == "construction": return "Baustelle"
        if tag == "farm": return "Bauernhof"
        if tag == "farmland": return "Ackerland"
        if tag == "farmyard": return "Hof"
        if tag == "forest": return "Wald"
        if tag == "grass": return "Rasenfläche"
        if tag == "greenfield": return "unbebaute Fläche"
        if tag == "industrial": return "Industriegebiet"
        if tag == "landfill": return "Müllhalde"
        if tag == "meadow": return "Wiese"
        if tag == "military": return "Militärgebiet"
        if tag == "mine": return "Bergwerk"
        if tag == "mountain": return "Berg"
        if tag == "nature_reserve": return "Naturschutzgebiet"
        if tag == "park": return "Park"
        if tag == "piste": return "Piste"
        if tag == "plaza": return "großer Platz"
        if tag == "quarry": return "Steinbruch"
        if tag == "railway": return "Eisenbahn"
        if tag == "recreation_ground": return "Naherholungsgebiet"
        if tag == "reservoir": return "Reservoir"
        if tag == "residential": return "Siedlung"
        if tag == "retail": return "Einzelhandel"
        if tag == "village_green": return "Grünanlage"
        if tag == "vineyard": return "Weingut"
        if tag == "wetland": return "Feuchtgebiet"
        if tag == "wood": return "Wald"
    if category == "leisure":
        if tag == "beach_resort": return "Freibad"
        if tag == "common": return "Gemeindeland"
        if tag == "fishing": return "Fischereigrund"
        if tag == "garden": return "Garten"
        if tag == "golf_course": return "Golfplatz"
        if tag == "hackerspace": return "Hackerspace"
        if tag == "ice_rink": return "Eislaufplatz"
        if tag == "marina": return "Yachthafen"
        if tag == "miniature_golf": return "Minigolf"
        if tag == "nature_reserve": return "Naturschutzgebiet"
        if tag == "park": return "Park"
        if tag == "pitch": return "Sportplatz"
        if tag == "playground": return "Spielplatz"
        if tag == "recreation_ground": return "Naherholungsgebiet"
        if tag == "slipway": return "Slipanlage"
        if tag == "sports_centre": return "Sportzentrum"
        if tag == "stadium": return "Stadion"
        if tag == "swimming_pool": return "Schwimmbecken"
        if tag == "track": return "Rennstrecke"
        if tag == "water_park": return "Wasserpark"
    if category == "man_made":
        if tag == "surveillance": return "Überwachungskamera"
    if category == "natural":
        if tag == "bay": return "Bucht"
        if tag == "beach": return "Strand"
        if tag == "cape": return "Kap"
        if tag == "cave_entrance": return "Höhleneingang"
        if tag == "channel": return "Kanal"
        if tag == "cliff": return "Klippe"
        if tag == "coastline": return "Küstenlinie"
        if tag == "crater": return "Krater"
        if tag == "feature": return "Merkmal"
        if tag == "fell": return "kahler Berg"
        if tag == "fjord": return "Fjord"
        if tag == "geyser": return "Geysir"
        if tag == "glacier": return "Gletscher"
        if tag == "heath": return "Heide"
        if tag == "hill": return "Erhebung"
        if tag == "island": return "Insel"
        if tag == "land": return "Land"
        if tag == "marsh": return "Marschland"
        if tag == "moor": return "Moor"
        if tag == "mud": return "Schlamm"
        if tag == "peak": return "Gipfel"
        if tag == "point": return "Punkt"
        if tag == "reef": return "Riff"
        if tag == "ridge": return "Grat"
        if tag == "river": return "Fluss"
        if tag == "rock": return "Stein"
        if tag == "scree": return "Geröll"
        if tag == "scrub": return "Busch"
        if tag == "shoal": return "Untiefe"
        if tag == "spring": return "Quelle"
        if tag == "strait": return "Straße"
        if tag == "tree": return "Baum"
        if tag == "valley": return "Tal"
        if tag == "volcano": return "Vulkan"
        if tag == "water": return "Wasser"
        if tag == "wetland": return "Feuchtgebiet"
        if tag == "wetlands": return "Feuchtgebiet"
        if tag == "wood": return "Wald"
    if category == "place":
        if tag == "city": return "Stadt"
        if tag == "country": return "Land"
        if tag == "county": return "Bezirk"
        if tag == "farm": return "Gehöft"
        if tag == "hamlet": return "Weiler"
        if tag == "house": return "Haus"
        if tag == "houses": return "Häuser"
        if tag == "island": return "Insel"
        if tag == "islet": return "Inselchen"
        if tag == "locality": return "Ortschaft"
        if tag == "moor": return "Moor"
        if tag == "municipality": return "Gemeinde"
        if tag == "postcode": return "Postleitzahl"
        if tag == "region": return "Region"
        if tag == "sea": return "Meer"
        if tag == "state": return "Bundesland"
        if tag == "subdivision": return "Untergruppe"
        if tag == "suburb": return "Stadtteil"
        if tag == "town": return "Stadt"
        if tag == "unincorporated_area": return "gemeindefreies Gebiet"
        if tag == "village": return "Dorf"
    if category == "shop":
        if tag == "alcohol": return "Spirituosenladen"
        if tag == "apparel": return "Bekleidungsgeschäft"
        if tag == "art": return "Kunstladen"
        if tag == "bakery": return "Bäckerei"
        if tag == "beauty": return "Parfümerie"
        if tag == "beverages": return "Getränkemarkt"
        if tag == "bicycle": return "Fahrradgeschäft"
        if tag == "books": return "Buchgeschäft"
        if tag == "butcher": return "Fleischerei"
        if tag == "car": return "Autohaus"
        if tag == "car_dealer": return "Autohaus"
        if tag == "car_parts": return "Autoteilehändler"
        if tag == "car_repair": return "Autowerkstatt"
        if tag == "carpet": return "Teppichgeschäft"
        if tag == "charity": return "Wohltätigkeitsladen"
        if tag == "chemist": return "Drogerie"
        if tag == "clothes": return "Bekleidungsgeschäft"
        if tag == "computer": return "Computergeschäft"
        if tag == "confectionery": return "Süßwarengeschäft"
        if tag == "convenience": return "Tante-Emma-Laden"
        if tag == "copyshop": return "Copyshop"
        if tag == "cosmetics": return "Kosmetikgeschäft"
        if tag == "department_store": return "Warenhäuser"
        if tag == "discount": return "Diskontladen"
        if tag == "doityourself": return "Baumarkt"
        if tag == "drugstore": return "Drogerie"
        if tag == "dry_cleaning": return "Textilreinigung"
        if tag == "electronics": return "Elektronikgeschäft"
        if tag == "estate_agent": return "Imobilienhändler"
        if tag == "farm": return "Hofladen"
        if tag == "fashion": return "Modegeschäft"
        if tag == "fish": return "Angelladen"
        if tag == "florist": return "Blumenladen"
        if tag == "food": return "Lebensmittelladen"
        if tag == "funeral_directors": return "Bestattungsunternehmen"
        if tag == "furniture": return "Möbelgeschäft"
        if tag == "gallery": return "Galerie"
        if tag == "garden_centre": return "Gärtnerei"
        if tag == "general": return "Gemischtwarenladen"
        if tag == "gift": return "Geschenkeladen"
        if tag == "greengrocer": return "Obst- und Gemüsehandlung"
        if tag == "grocery": return "Lebensmittelladen"
        if tag == "hairdresser": return "Frisör"
        if tag == "hardware": return "Eisenwarenhandlung"
        if tag == "hifi": return "Unterhaltungselektronikladen"
        if tag == "insurance": return "Versicherungsbüro"
        if tag == "jewelry": return "Juwelier"
        if tag == "kiosk": return "Kiosk"
        if tag == "laundry": return "Waschsalon"
        if tag == "mall": return "Einkaufszentrum"
        if tag == "market": return "Markt"
        if tag == "mobile_phone": return "Handygeschäft"
        if tag == "motorcycle": return "Motorradgeschäft"
        if tag == "music": return "Musikladen"
        if tag == "newsagent": return "Zeitschriftenladen"
        if tag == "optician": return "Optiker"
        if tag == "organic": return "Bioladen"
        if tag == "outdoor": return "Outdoorladen"
        if tag == "pet": return "Tierhandlung"
        if tag == "photo": return "Fotoladen"
        if tag == "salon": return "Salon"
        if tag == "shoes": return "Schuhgeschäft"
        if tag == "shopping_centre": return "Einkaufszentrum"
        if tag == "sports": return "Sportgeschäft"
        if tag == "stationery": return "Schreibwarenladen"
        if tag == "supermarket": return "Supermarkt"
        if tag == "toys": return "Spielwarengeschäft"
        if tag == "travel_agency": return "Reisebüro"
        if tag == "video": return "Videothek"
        if tag == "wine": return "Weingeschäft"
    if category == "tourism":
        if tag == "alpine_hut": return "Berghütte"
        if tag == "artwork": return "Kunstwerk"
        if tag == "attraction": return "Attraktion"
        if tag == "bed_and_breakfast": return "Unterkunft"
        if tag == "cabin": return "Hütte"
        if tag == "camp_site": return "Campingplatz"
        if tag == "caravan_site": return "Wohnwagen-Stellplatz"
        if tag == "chalet": return "Almhütte"
        if tag == "guest_house": return "Pension"
        if tag == "hostel": return "Jugendherberge"
        if tag == "hotel": return "Hotel"
        if tag == "information": return "Touristen-Information"
        if tag == "lean_to": return "Anbau"
        if tag == "motel": return "Motel"
        if tag == "museum": return "Museum"
        if tag == "picnic_site": return "Piknikplatz"
        if tag == "theme_park": return "Vergnügungspark"
        if tag == "valley": return "Tal"
        if tag == "viewpoint": return "Aussichtspunkt"
        if tag == "zoo": return "Zoo"
    if category == "waterway":
        if tag == "boatyard": return "Werft"
        if tag == "canal": return "Kanal"
        if tag == "connector": return "Wasserstraßenverbindung"
        if tag == "dam": return "Staudamm"
        if tag == "derelict_canal": return "Aufgelassener Kanal"
        if tag == "ditch": return "Wassergraben"
        if tag == "dock": return "Dock"
        if tag == "drain": return "Entwässerungsgraben"
        if tag == "lock": return "Schleuse"
        if tag == "lock_gate": return "Schleusentor"
        if tag == "mineral_spring": return "Mineralquelle"
        if tag == "mooring": return "Anlegeplatz"
        if tag == "rapids": return "Stromschnellen"
        if tag == "river": return "Fluss"
        if tag == "riverbank": return "Flussufer"
        if tag == "stream": return "Bach"
        if tag == "wadi": return "Trockental"
        if tag == "water_point": return "Wasserpunkt"
        if tag == "waterfall": return "Wasserfall"
        if tag == "weir": return "Wehr"
    # no translation known: fall back to the untranslated tag
    return tag
def translate_to_english(self, category, tag):
if category == "message":
if tag == "map_not_available_or_incompatible": return "Map is not available or incompatible"
if tag == "database_error": return "Error during database access"
if tag == "no_route_options": return "No route options transmitted"
if tag == "no_route_factor_option": return "The factor for the route indirection calculation is missing"
if tag == "no_session_id_option": return "Missing session id"
if tag == "old_request_still_running": return "The server still calculates your prior request. Please wait at least 3 minutes."
if tag == "server_busy": return "Currently the server is busy. Please try again later."
if tag == "no_source_route": return "No route transmitted"
if tag == "source_route_incomplete": return "The transmitted route query is incomplete"
if tag == "no_start_point": return "No start point transmitted"
if tag == "start_point_no_name": return "The name of the start point is missing"
if tag == "start_point_no_latitude": return "The latitude value of the start point is missing"
if tag == "start_point_no_longitude": return "The longitude value of the start point is missing"
if tag == "start_point_no_type": return "The type value of the start point is missing"
if tag == "no_way_id": return "No Way-Id transmitted"
if tag == "no_node_id": return "No node id transmitted"
if tag == "no_next_node_id": return "No next node id transmitted"
if tag == "node_id_invalid": return "The transmitted Waypoint-Id is invalid"
if tag == "way_id_invalid": return "The transmitted Way-Id is invalid"
if tag == "source_route_no_transport_parts": return "The transmitted route lacks a public transport segment"
if tag == "source_route_multiple_transport_parts": return "The transmitted route consists of multiple public transport segments"
if tag == "no_address_string": return "No address string transmitted"
if tag == "address_invalid": return "Found no coordinates for the given address"
if tag == "no_latitude_value": return "The latitude value is missing"
if tag == "no_longitude_value": return "The longitude value is missing"
if tag == "no_range_value": return "The radius value is missing"
if tag == "no_tags_value": return "The poi tags are missing"
if tag == "no_public_transport_provider": return "The public transport provider is missing"
if tag == "no_address_for_this_coordinates": return "Found no address for the given coordinates"
if tag == "no_station_for_this_coordinates": return "Found no station for this coordinates"
if tag == "bahn_server_down": return "The server of the Deutsche Bahn is not accessible"
if tag == "no_bug_report_file_name": return "The bug report file name is missing"
if tag == "no_bug_report_contents": return "The bug report contents is missing"
if tag == "process_canceled": return "Process canceled"
if category == "footway_creator":
if tag == "first_segment": return "Nameless route segment"
if tag == "last_segment": return "Nameless route segment"
if tag == "via_point_label": return "Via point %d, %s"
if tag == "route_description_without_transport": return textwrap.dedent("""\
The total length of the route is %d meters and it consists of %d intersections""")
if tag == "route_description_with_single_transport": return textwrap.dedent("""\
The total length of the route is %d meters and it consists of %d intersections and one public transport part.
Departure in %d Minutes from the station %s with line %s to %s.""")
if tag == "route_description_with_multi_transport": return textwrap.dedent("""\
The total length of the route is %d meters and it consists of %d intersections and %d public transport parts.
Departure in %d Minutes from the station %s with line %s to %s.""")
if category == "transport_creator":
if tag == "footway_place_holder": return "Placeholder for a walking route segment"
if tag == "same_station": return "Continuation from same station"
if tag == "opposite_station": return "Continuation from opposite station"
if tag == "nearby_station": return "Continuation from nearby station"
if tag == "nearby_station_no_exact_pos": return "Continuation from nearby station, exact position unknown"
if tag == "within_station": return "Continuation within this station"
if tag == "different_station": return "Continuation from different station"
if tag == "transport_route_description": return textwrap.dedent("""\
Departure in %d minutes, Duration: %d minutes, %d changes,
Transportation vehicles: %s, Route length: %d meters""")
if tag == "transport_route_description_no_time": return textwrap.dedent("""\
Departure in %d minutes, Duration: %d minutes, %d changes (time limited),
Transportation vehicles: %s, Route length: %d meters""")
if category == "poi":
if tag == "address": return "Address"
if tag == "hiking_trail": return "Hiking trail"
if tag == "way_point": return "Waypoint"
if category == "smoothness":
if tag == "excellent": return "Excellent"
if tag == "good": return "Good"
if tag == "intermediate": return "Intermediate"
if tag == "bad": return "Bad"
if tag == "very_bad": return "Bad"
if tag == "horrible": return "Very bad"
if tag == "very_horrible": return "Very bad"
if tag == "impassable": return "Impassable for wheeled vehicles"
if category == "surface":
if tag == "unpaved": return "unpaved"
if tag == "paved": return "paved"
if tag == "asphalt": return "asphalt"
if tag == "cobblestone:flattened": return "cobblestone"
if tag == "cobblestone": return "cobblestone"
if tag == "concrete:lanes": return "concrete plates"
if tag == "concrete": return "concrete"
if tag == "paving_stones:20": return "paving stones"
if tag == "paving_stones:30": return "paving stones"
if tag == "paving_stones": return "paving stones"
if tag == "sett": return "paving stones"
if tag == "compacted": return "compacted gravel"
if tag == "fine_gravel": return "compacted gravel"
if tag == "earth": return "trail"
if tag == "ground": return "trail"
if tag == "grass_paver": return "grass paver"
if tag == "grass": return "grass"
if tag == "metal": return "metal"
if tag == "gravel": return "pebble stone"
if tag == "pebblestone": return "pebble stone"
if tag == "sand": return "sand"
if tag == "mud": return "mud"
if tag == "ice": return "ice"
if tag == "snow": return "snow"
if tag == "wood": return "wood"
if tag == "tartan": return "tartan surface"
if tag == "artificial_turf": return "artificial turf"
if tag == "decoturf": return "artificial turf"
if tag == "clay": return "tennis or soccer pitch surface"
if category == "aeroway":
if tag == "aerodrome": return "Airport"
if tag == "terminal": return "Airport terminal"
if category == "amenity":
if tag == "animal_boarding": return "Animal boarding facility"
if tag == "animal_shelter": return "Animal shelter"
if tag == "arts_centre": return "Arts Centre"
if tag == "atm": return "ATM"
if tag == "auditorium": return "Auditorium"
if tag == "bank": return "Bank"
if tag == "bar": return "Bar"
if tag == "bbq": return "Barbecue"
if tag == "bench": return "Bench"
if tag == "bicycle_parking": return "Cycle Parking"
if tag == "bicycle_rental": return "Cycle Rental"
if tag == "brothel": return "Brothel"
if tag == "bureau_de_change": return "Bureau de Change"
if tag == "bus_station": return "Bus Station"
if tag == "cafe": return "Cafe"
if tag == "car_rental": return "Car Rental"
if tag == "car_sharing": return "Car Share"
if tag == "car_wash": return "Car Wash"
if tag == "casino": return "Casino"
if tag == "cinema": return "Cinema"
if tag == "clinic": return "Clinic"
if tag == "club": return "Club"
if tag == "college": return "College"
if tag == "community_centre": return "Community Centre"
if tag == "courthouse": return "Courthouse"
if tag == "crematorium": return "Crematorium"
if tag == "dentist": return "Dentist"
if tag == "doctors": return "Doctor"
if tag == "dormitory": return "Dormitory"
if tag == "drinking_water": return "Drinking Water"
if tag == "driving_school": return "Driving School"
if tag == "embassy": return "Embassy"
if tag == "emergency_phone": return "Emergency Phone"
if tag == "fast_food": return "Fast Food"
if tag == "ferry_terminal": return "Ferry Terminal"
if tag == "fire_hydrant": return "Fire Hydrant"
if tag == "fire_station": return "Fire Station"
if tag == "fountain": return "Fountain"
if tag == "fuel": return "Fuel"
if tag == "grave_yard": return "Grave Yard"
if tag == "gym": return "Fitness Centre"
if tag == "hall": return "Hall"
if tag == "health_centre": return "Health Centre"
if tag == "hospital": return "Hospital"
if tag == "hotel": return "Hotel"
if tag == "hunting_stand": return "Hunting Stand"
if tag == "ice_cream": return "Ice Cream"
if tag == "kindergarten": return "Kindergarten"
if tag == "kneipp_water_cure": return "kneipp water cure"
if tag == "library": return "Library"
if tag == "market": return "Market"
if tag == "marketplace": return "Marketplace"
if tag == "mountain_rescue": return "Mountain Rescue"
if tag == "nightclub": return "Night Club"
if tag == "nursery": return "Nursery"
if tag == "nursing_home": return "Nursing Home"
if tag == "office": return "Office"
if tag == "park": return "Park"
if tag == "parking": return "Parking"
if tag == "pharmacy": return "Pharmacy"
if tag == "place_of_worship": return "Church"
if tag == "police": return "Police"
if tag == "post_box": return "Post Box"
if tag == "post_office": return "Post Office"
if tag == "preschool": return "Pre-School"
if tag == "prison": return "Prison"
if tag == "pub": return "Pub"
if tag == "public_building": return "Public Building"
if tag == "public_market": return "Public Market"
if tag == "reception_area": return "Reception Area"
if tag == "recycling": return "Recycling Point"
if tag == "restaurant": return "Restaurant"
if tag == "retirement_home": return "Retirement Home"
if tag == "sauna": return "Sauna"
if tag == "school": return "School"
if tag == "shelter": return "Shelter"
if tag == "shop": return "Shop"
if tag == "shopping": return "Shopping"
if tag == "social_club": return "Social Club"
if tag == "studio": return "Studio"
if tag == "supermarket": return "Supermarket"
if tag == "swingerclub": return "Swinger Club"
if tag == "taxi": return "Taxi"
if tag == "telephone": return "Telephone"
if tag == "theatre": return "Theatre"
if tag == "toilets": return "Toilet"
if tag == "townhall": return "Town Hall"
if tag == "university": return "University"
if tag == "vending_machine": return "Vending Machine"
if tag == "veterinary": return "Veterinary Surgery"
if tag == "village_hall": return "Village Hall"
if tag == "waste_basket": return "Waste Basket"
if tag == "wifi": return "WiFi Access"
if tag == "youth_centre": return "Youth Centre"
if category == "building":
if tag == "apartments": return "Apartment Block"
if tag == "block": return "Building Block"
if tag == "bunker": return "Bunker"
if tag == "chapel": return "Chapel"
if tag == "church": return "Church"
if tag == "city_hall": return "City Hall"
if tag == "commercial": return "Commercial Building"
if tag == "dormitory": return "Dormitory"
if tag == "entrance": return "Building Entrance"
if tag == "faculty": return "Faculty Building"
if tag == "farm": return "Farm Building"
if tag == "flats": return "Flats"
if tag == "garage": return "Garage"
if tag == "hall": return "Hall"
if tag == "hospital": return "Hospital Building"
if tag == "hotel": return "Hotel"
if tag == "house": return "House"
if tag == "industrial": return "Industrial Building"
if tag == "office": return "Office Building"
if tag == "public": return "Public Building"
if tag == "residential": return "Residential Building"
if tag == "retail": return "Retail Building"
if tag == "school": return "School Building"
if tag == "shop": return "Shop"
if tag == "stadium": return "Stadium"
if tag == "store": return "Store"
if tag == "terrace": return "Terrace"
if tag == "tower": return "Tower"
if tag == "train_station": return "Railway Station"
if tag == "university": return "University Building"
if tag == "building": return "Building"
if category == "entrance":
if tag == "yes": return "Entrance"
if tag == "main": return "Main Entrance"
if tag == "service": return "Service Entrance"
if tag == "emergency": return "Emergency Exit"
if tag == "exit": return "Only Exit"
if tag == "subway_entrance": return "Subway Entrance"
if tag == "entrance": return "Entrance"
if category == "highway":
if tag == "bridleway": return "Bridleway"
if tag == "bus_guideway": return "Guided Bus Lane"
if tag == "bus_stop": return "Bus Stop"
if tag == "byway": return "Byway"
if tag == "construction": return "Highway under Construction"
if tag == "cycleway": return "Cycle Path"
if tag == "distance_marker": return "Distance Marker"
if tag == "emergency_access_point": return "Emergency Access Point"
if tag == "footway": return "Footpath"
if tag == "ford": return "Ford"
if tag == "gate": return "Gate"
if tag == "living_street": return "Living Street"
if tag == "minor": return "Minor Road"
if tag == "motorway": return "Motorway"
if tag == "motorway_junction": return "Motorway Junction"
if tag == "motorway_link": return "Motorway Road"
if tag == "path": return "Path"
if tag == "pedestrian": return "Pedestrian Way"
if tag == "platform": return "Platform"
if tag == "primary": return "Primary Road"
if tag == "primary_link": return "Primary Road"
if tag == "raceway": return "Raceway"
if tag == "residential": return "Residential"
if tag == "rest_area": return "Rest Area"
if tag == "road": return "Road"
if tag == "secondary": return "Secondary Road"
if tag == "secondary_link": return "Secondary Road"
if tag == "service": return "Service Road"
if tag == "services": return "Service Area"
if tag == "steps": return "Steps"
if tag == "stile": return "Stile"
if tag == "tertiary": return "Tertiary Road"
if tag == "tertiary_link": return "Tertiary Road"
if tag == "track": return "Track"
if tag == "trail": return "Trail"
if tag == "trunk": return "Trunk Road"
if tag == "trunk_link": return "Trunk Road"
if tag == "unclassified": return "Unclassified Road"
if tag == "unsurfaced": return "Unsurfaced Road"
if tag == "crossing": return "Intersection"
if tag == "roundabout": return "Roundabout"
if tag == "traffic_signals": return "Intersection with traffic signals"
if category == "railway":
if tag == "abandoned": return "Abandoned Railway"
if tag == "construction": return "Railway under Construction"
if tag == "crossing": return "Pedestrian level crossing"
if tag == "disused": return "Disused Railway"
if tag == "disused_station": return "Disused Railway Station"
if tag == "funicular": return "Funicular Railway"
if tag == "halt": return "Train Stop"
if tag == "historic_station": return "Historic Railway Station"
if tag == "junction": return "Railway Junction"
if tag == "level_crossing": return "Level Crossing"
if tag == "light_rail": return "Light Rail"
if tag == "miniature": return "Miniature Railway"
if tag == "monorail": return "Monorail"
if tag == "narrow_gauge": return "Narrow Gauge Railway"
if tag == "platform": return "Railway Platform"
if tag == "preserved": return "Preserved Railway"
if tag == "rail": return "Railway"
if tag == "spur": return "Railway Spur"
if tag == "station": return "Railway Station"
if tag == "subway_entrance": return "Subway Entrance"
if tag == "subway": return "Subway Station"
if tag == "switch": return "Railway Points"
if tag == "tram": return "Tramway"
if tag == "tram_stop": return "Tram Stop"
if tag == "yard": return "Railway Yard"
if category == "public_transport":
if tag == "bus": return "Bus Stop"
if tag == "tram": return "Tram stop"
if tag == "train": return "Train Station"
if tag == "light_rail": return "Lightrail Station"
if tag == "subway": return "Subway Station"
if tag == "ferry": return "Ferry Station"
if tag == "aerialway": return "Aerial cableway station"
if tag == "unknown": return "Station"
if category == "bridge":
if tag == "yes": return "Bridge"
if category == "crossing":
if tag == "traffic_signals": return "Traffic signals"
if tag == "pelican": return "Traffic signals"
if tag == "toucan": return "Traffic signals"
if tag == "island": return "Traffic island"
if tag == "uncontrolled": return "Zebra crossing"
if tag == "zebra": return "Zebra crossing"
if tag == "unmarked": return "Unmarked crossing"
if tag == "unknown": return "Pedestrian crossing"
if category == "historic":
if tag == "archaeological_site": return "Archaeological Site"
if tag == "battlefield": return "Battlefield"
if tag == "boundary_stone": return "Boundary Stone"
if tag == "building": return "Building"
if tag == "castle": return "Castle"
if tag == "church": return "Church"
if tag == "house": return "House"
if tag == "icon": return "Icon"
if tag == "manor": return "Manor"
if tag == "memorial": return "Memorial"
if tag == "mine": return "Mine"
if tag == "monument": return "Monument"
if tag == "museum": return "Museum"
if tag == "ruins": return "Ruin"
if tag == "tower": return "Tower"
if tag == "wayside_cross": return "Wayside Cross"
if tag == "wayside_shrine": return "Wayside Shrine"
if tag == "wreck": return "Wreck"
if tag == "yes": return "Historic place"
if category == "landuse":
if tag == "allotments": return "Allotment"
if tag == "basin": return "Basin"
if tag == "brownfield": return "Brownfield Land"
if tag == "cemetery": return "Cemetery"
if tag == "commercial": return "Commercial Area"
if tag == "conservation": return "Conservation"
if tag == "construction": return "Construction"
if tag == "farm": return "Farm"
if tag == "farmland": return "Farmland"
if tag == "farmyard": return "Farmyard"
if tag == "forest": return "Forest"
if tag == "grass": return "Grass"
if tag == "greenfield": return "Greenfield Land"
if tag == "industrial": return "Industrial Area"
if tag == "landfill": return "Landfill"
if tag == "meadow": return "Meadow"
if tag == "military": return "Military Area"
if tag == "mine": return "Mine"
if tag == "mountain": return "Mountain"
if tag == "nature_reserve": return "Nature Reserve"
if tag == "park": return "Park"
if tag == "piste": return "Piste"
if tag == "plaza": return "Plaza"
if tag == "quarry": return "Quarry"
if tag == "railway": return "Railway"
if tag == "recreation_ground": return "Recreation Ground"
if tag == "reservoir": return "Reservoir"
if tag == "residential": return "Residential Area"
if tag == "retail": return "Retail"
if tag == "village_green": return "Village Green"
if tag == "vineyard": return "Vineyard"
if tag == "wetland": return "Wetland"
if tag == "wood": return "Wood"
if category == "leisure":
if tag == "beach_resort": return "Beach Resort"
if tag == "common": return "Common Land"
if tag == "fishing": return "Fishing Area"
if tag == "garden": return "Garden"
if tag == "golf_course": return "Golf Course"
if tag == "hackerspace": return "Hackerspace"
if tag == "ice_rink": return "Ice Rink"
if tag == "marina": return "Marina"
if tag == "miniature_golf": return "Miniature Golf"
if tag == "nature_reserve": return "Nature Reserve"
if tag == "park": return "Park"
if tag == "pitch": return "Sports Pitch"
if tag == "playground": return "Playground"
if tag == "recreation_ground": return "Recreation Ground"
if tag == "slipway": return "Slipway"
if tag == "sports_centre": return "Sports Centre"
if tag == "stadium": return "Stadium"
if tag == "swimming_pool": return "Swimming Pool"
if tag == "track": return "Running Track"
if tag == "water_park": return "Water Park"
if tag == "slipway": return "Boat Ramp"
if category == "man_made":
if tag == "surveillance": return "Surveillance camera"
if category == "natural":
if tag == "bay": return "Bay"
if tag == "beach": return "Beach"
if tag == "cape": return "Cape"
if tag == "cave_entrance": return "Cave Entrance"
if tag == "channel": return "Channel"
if tag == "cliff": return "Cliff"
if tag == "coastline": return "Coastline"
if tag == "crater": return "Crater"
if tag == "feature": return "Feature"
if tag == "fell": return "Fell"
if tag == "fjord": return "Fjord"
if tag == "geyser": return "Geyser"
if tag == "glacier": return "Glacier"
if tag == "heath": return "Heath"
if tag == "hill": return "Hill"
if tag == "island": return "Island"
if tag == "land": return "Land"
if tag == "marsh": return "Marsh"
if tag == "moor": return "Moor"
if tag == "mud": return "Mud"
if tag == "peak": return "Peak"
if tag == "point": return "Point"
if tag == "reef": return "Reef"
if tag == "ridge": return "Ridge"
if tag == "river": return "River"
if tag == "rock": return "Rock"
if tag == "scree": return "Scree"
if tag == "scrub": return "Scrub"
if tag == "shoal": return "Shoal"
if tag == "spring": return "Spring"
if tag == "strait": return "Strait"
if tag == "tree": return "Tree"
if tag == "valley": return "Valley"
if tag == "volcano": return "Volcano"
if tag == "water": return "Water"
if tag == "wetland": return "Wetland"
if tag == "wetlands": return "Wetlands"
if tag == "wood": return "Wood"
if category == "place":
if tag == "city": return "City"
if tag == "country": return "Country"
if tag == "county": return "County"
if tag == "farm": return "Farm"
if tag == "hamlet": return "Hamlet"
if tag == "house": return "House"
if tag == "houses": return "Houses"
if tag == "island": return "Island"
if tag == "islet": return "Islet"
if tag == "locality": return "Locality"
if tag == "moor": return "Moor"
if tag == "municipality": return "Municipality"
if tag == "postcode": return "Postcode"
if tag == "region": return "Region"
if tag == "sea": return "Sea"
if tag == "state": return "State"
if tag == "subdivision": return "Subdivision"
if tag == "suburb": return "Suburb"
if tag == "town": return "Town"
if tag == "unincorporated_area": return "Unincorporated Area"
if tag == "village": return "Village"
if category == "shop":
if tag == "alcohol": return "Off License"
if tag == "apparel": return "Apparel Shop"
if tag == "art": return "Art Shop"
if tag == "bakery": return "Bakery"
if tag == "beauty": return "Beauty Shop"
if tag == "beverages": return "Beverages Shop"
if tag == "bicycle": return "Bicycle Shop"
if tag == "books": return "Book Shop"
if tag == "butcher": return "Butcher"
if tag == "car": return "Car Shop"
if tag == "car_dealer": return "Car Dealer"
if tag == "car_parts": return "Car Parts"
if tag == "carpet": return "Carpet Shop"
if tag == "car_repair": return "Car Repair"
if tag == "charity": return "Charity Shop"
if tag == "chemist": return "Chemist"
if tag == "clothes": return "Clothes Shop"
if tag == "computer": return "Computer Shop"
if tag == "confectionery": return "Confectionery Shop"
if tag == "convenience": return "Convenience Store"
if tag == "copyshop": return "Copy Shop"
if tag == "cosmetics": return "Cosmetics Shop"
if tag == "department_store": return "Department Store"
if tag == "discount": return "Discount Items Shop"
if tag == "doityourself": return "Do-It-Yourself"
if tag == "drugstore": return "Drugstore"
if tag == "dry_cleaning": return "Dry Cleaning"
if tag == "electronics": return "Electronics Shop"
if tag == "estate_agent": return "Estate Agent"
if tag == "farm": return "Farm Shop"
if tag == "fashion": return "Fashion Shop"
if tag == "fish": return "Fish Shop"
if tag == "florist": return "Florist"
if tag == "food": return "Food Shop"
if tag == "funeral_directors": return "Funeral Director"
if tag == "furniture": return "Furniture"
if tag == "gallery": return "Gallery"
if tag == "garden_centre": return "Garden Centre"
if tag == "general": return "General Store"
if tag == "gift": return "Gift Shop"
if tag == "greengrocer": return "Greengrocer"
if tag == "grocery": return "Grocery Shop"
if tag == "hairdresser": return "Hairdresser"
if tag == "hardware": return "Hardware Store"
if tag == "hifi": return "Hi-Fi"
if tag == "insurance": return "Insurance"
if tag == "jewelry": return "Jewelry Shop"
if tag == "kiosk": return "Kiosk Shop"
if tag == "laundry": return "Laundry"
if tag == "mall": return "Mall"
if tag == "market": return "Market"
if tag == "mobile_phone": return "Mobile Phone Shop"
if tag == "motorcycle": return "Motorcycle Shop"
if tag == "music": return "Music Shop"
if tag == "newsagent": return "Newsagent"
if tag == "optician": return "Optician"
if tag == "organic": return "Organic Food Shop"
if tag == "outdoor": return "Outdoor Shop"
if tag == "pet": return "Pet Shop"
if tag == "photo": return "Photo Shop"
if tag == "salon": return "Salon"
if tag == "shoes": return "Shoe Shop"
if tag == "shopping_centre": return "Shopping Centre"
if tag == "sports": return "Sports Shop"
if tag == "stationery": return "Stationery Shop"
if tag == "supermarket": return "Supermarket"
if tag == "toys": return "Toy Shop"
if tag == "travel_agency": return "Travel Agency"
if tag == "video": return "Video Shop"
if tag == "wine": return "Off License"
if category == "tourism":
if tag == "alpine_hut": return "Alpine Hut"
if tag == "artwork": return "Artwork"
if tag == "attraction": return "Attraction"
if tag == "bed_and_breakfast": return "Bed and Breakfast"
if tag == "cabin": return "Cabin"
if tag == "camp_site": return "Camp Site"
if tag == "caravan_site": return "Caravan Site"
if tag == "chalet": return "Chalet"
if tag == "guest_house": return "Guest House"
if tag == "hostel": return "Hostel"
if tag == "hotel": return "Hotel"
if tag == "information": return "Information"
if tag == "lean_to": return "Lean To"
if tag == "motel": return "Motel"
if tag == "museum": return "Museum"
if tag == "picnic_site": return "Picnic Site"
if tag == "theme_park": return "Theme Park"
if tag == "valley": return "Valley"
if tag == "viewpoint": return "Viewpoint"
if tag == "zoo": return "Zoo"
if category == "waterway":
if tag == "boatyard": return "Boatyard"
if tag == "canal": return "Canal"
if tag == "connector": return "Waterway Connector"
if tag == "dam": return "Dam"
if tag == "derelict_canal": return "Derelict Canal"
if tag == "ditch": return "Ditch"
if tag == "dock": return "Dock"
if tag == "drain": return "Drain"
if tag == "lock": return "Lock"
if tag == "lock_gate": return "Lock Gate"
if tag == "mineral_spring": return "Mineral Spring"
if tag == "mooring": return "Mooring"
if tag == "rapids": return "Rapids"
if tag == "river": return "River"
if tag == "riverbank": return "Riverbank"
if tag == "stream": return "Stream"
if tag == "wadi": return "Wadi"
if tag == "waterfall": return "Waterfall"
if tag == "water_point": return "Water Point"
if tag == "weir": return "Weir"
return tag
| scheibler/WalkersGuide-Server | webserver/translator.py | Python | gpl-3.0 | 67,659 | [
"CASINO"
] | 921aea82ed888611843c1dd6b466506acfd113b9367084cc19e63c3139bad6f8 |
# https://leetcode.com/problems/first-missing-positive/
"""
Similar to Bloomberg's question for Brian.
In this question I am using the invariant that A[i] should contain value i+1.
The rest of the explanation is here:
Time: O(N)
Space: O(1)
http://stackoverflow.com/questions/1586858/find-the-smallest-integer-not-in-a-list
"""
class Solution(object):
    def firstMissingPositive(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Place each value v in 1..N into slot v-1 by repeated swapping;
        the first slot that ends up holding the wrong value names the
        missing positive.  O(N) time, O(1) extra space.
        """
        N = len(nums)
        for i in range(N):
            number = nums[i]
            # Keep swapping until this position holds an out-of-range or
            # already-placed value.  The old test
            # ``number in xrange(1, N+1)`` was a linear membership scan in
            # Python 2 (making the pass quadratic) and a NameError on
            # Python 3; a direct comparison is O(1) and portable.
            while 1 <= number <= N and number != nums[number - 1]:
                nums[number - 1], number = number, nums[number - 1]
        for i in range(N):
            if nums[i] != i + 1:
                return i + 1
        # after all else it must have been N+1 because we put everything in
        # their rightful places.
        return N + 1
| young-geng/leet_code | problems/41_first-missing-positive/main.py | Python | mit | 943 | [
"Brian"
] | 10770e168882140eae539d133d07c2e9d155690a48aace11b14afc0afb0834e6 |
from datetime import date, time, timedelta
from decimal import Decimal
import random
from django.test import TestCase
from django.utils import timezone
from .models import Person, Role
from .fixtures import create_fixtures
class BulkUpdateTests(TestCase):
def setUp(self):
self.now = timezone.now().replace(microsecond=0) # mysql doesn't do microseconds. # NOQA
self.date = date(2015, 3, 28)
self.time = time(13, 0)
create_fixtures()
def _test_field(self, field, idx_to_value_function):
'''
Helper to do repeative simple tests on one field.
'''
# set
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
value = idx_to_value_function(idx)
setattr(person, field, value)
# update
Person.objects.bulk_update(people, update_fields=[field])
# check
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
saved_value = getattr(person, field)
expected_value = idx_to_value_function(idx)
self.assertEqual(saved_value, expected_value)
def test_simple_fields(self):
fn = lambda idx: idx + 27
for field in ('default', 'big_age', 'age', 'positive_age',
'positive_small_age', 'small_age'):
self._test_field(field, fn)
def test_comma_separated_integer_field(self):
fn = lambda idx: str(idx) + ',27'
self._test_field('comma_separated_age', fn)
def test_boolean_field(self):
fn = lambda idx: [True, False][idx % 2]
self._test_field('certified', fn)
def test_null_boolean_field(self):
fn = lambda idx: [True, False, None][idx % 3]
self._test_field('null_certified', fn)
def test_char_field(self):
NAMES = ['Walter', 'The Dude', 'Donny', 'Jesus', 'Buddha', 'Clark']
fn = lambda idx: NAMES[idx % 5]
self._test_field('name', fn)
def test_email_field(self):
EMAILS = ['walter@mailinator.com', 'thedude@mailinator.com',
'donny@mailinator.com', 'jesus@mailinator.com',
'buddha@mailinator.com', 'clark@mailinator.com']
fn = lambda idx: EMAILS[idx % 5]
self._test_field('email', fn)
def test_file_path_field(self):
PATHS = ['/home/dummy.txt', '/Downloads/kitten.jpg',
'/Users/user/fixtures.json', 'dummy.png',
'users.json', '/home/dummy.png']
fn = lambda idx: PATHS[idx % 5]
self._test_field('file_path', fn)
def test_slug_field(self):
SLUGS = ['jesus', 'buddha', 'clark', 'the-dude', 'donny', 'walter']
fn = lambda idx: SLUGS[idx % 5]
self._test_field('slug', fn)
def test_text_field(self):
TEXTS = ['this is a dummy text', 'dummy text', 'bla bla bla bla bla',
'here is a dummy text', 'dummy', 'bla bla bla']
fn = lambda idx: TEXTS[idx % 5]
self._test_field('text', fn)
def test_url_field(self):
URLS = ['docs.djangoproject.com', 'news.ycombinator.com',
'https://docs.djangoproject.com', 'https://google.com',
'google.com', 'news.ycombinator.com']
fn = lambda idx: URLS[idx % 5]
self._test_field('url', fn)
def test_date_time_field(self):
fn = lambda idx: self.now - timedelta(days=1 + idx, hours=1 + idx)
self._test_field('date_time', fn)
def test_date_field(self):
fn = lambda idx: self.date - timedelta(days=1 + idx)
self._test_field('date', fn)
def test_time_field(self):
fn = lambda idx: time(1 + idx, idx)
self._test_field('time', fn)
def test_decimal_field(self):
fn = lambda idx: Decimal('1.%s' % (50 + idx * 7))
self._test_field('height', fn)
def test_float_field(self):
fn = lambda idx: float(idx) * 2.0
self._test_field('float_height', fn)
def test_generic_ipaddress_field(self):
IPS = ['127.0.0.1', '192.0.2.30', '2a02:42fe::4', '10.0.0.1',
'8.8.8.8']
fn = lambda idx: IPS[idx % 5]
self._test_field('remote_addr', fn)
def test_image_field(self):
IMGS = ['kitten.jpg', 'dummy.png', 'user.json', 'dummy.png', 'foo.gif']
fn = lambda idx: IMGS[idx % 5]
self._test_field('image', fn)
self._test_field('my_file', fn)
def test_custom_fields(self):
values = {}
people = Person.objects.all()
people_dict = {p.name: p for p in people}
person = people_dict['Mike']
person.data = {'name': 'mikey', 'age': 99, 'ex': -99}
values[person.pk] = {'name': 'mikey', 'age': 99, 'ex': -99}
person = people_dict['Mary']
person.data = {'names': {'name': []}}
values[person.pk] = {'names': {'name': []}}
person = people_dict['Pete']
person.data = []
values[person.pk] = []
person = people_dict['Sandra']
person.data = [{'name': 'Pete'}, {'name': 'Mike'}]
values[person.pk] = [{'name': 'Pete'}, {'name': 'Mike'}]
person = people_dict['Ash']
person.data = {'text': 'bla'}
values[person.pk] = {'text': 'bla'}
person = people_dict['Crystal']
values[person.pk] = person.data
Person.objects.bulk_update(people)
people = Person.objects.all()
for person in people:
self.assertEqual(person.data, values[person.pk])
def test_update_fields(self):
    """
    Only the fields in "update_fields" are updated
    """
    changed = Person.objects.order_by('pk').all()
    for person in changed:
        person.age += 1
        person.height += Decimal('0.01')
    # Persist only 'age'; 'height' changes must be discarded.
    Person.objects.bulk_update(changed, update_fields=['age'])
    stored = Person.objects.order_by('pk').all()
    for modified, reloaded in zip(changed, stored):
        self.assertEqual(modified.age, reloaded.age)
        self.assertNotEqual(modified.height, reloaded.height)
def test_update_foreign_key_fields(self):
    """Foreign-key assignments are persisted by a default bulk_update."""
    roles = [Role.objects.create(code=1), Role.objects.create(code=2)]
    changed = Person.objects.order_by('pk').all()
    for idx, person in enumerate(changed):
        person.age += 1
        person.height += Decimal('0.01')
        # Alternate the two roles across the ordered people.
        person.role = roles[idx % 2]
    Person.objects.bulk_update(changed)
    stored = Person.objects.order_by('pk').all()
    for modified, reloaded in zip(changed, stored):
        self.assertEqual(modified.role.code, reloaded.role.code)
        self.assertEqual(modified.age, reloaded.age)
        self.assertEqual(modified.height, reloaded.height)
def test_update_foreign_key_fields_explicit(self):
    """FK updates work when 'role' is listed explicitly in update_fields."""
    roles = [Role.objects.create(code=1), Role.objects.create(code=2)]
    changed = Person.objects.order_by('pk').all()
    for idx, person in enumerate(changed):
        person.age += 1
        person.height += Decimal('0.01')
        person.role = roles[idx % 2]
    Person.objects.bulk_update(
        changed, update_fields=['age', 'height', 'role'])
    stored = Person.objects.order_by('pk').all()
    for modified, reloaded in zip(changed, stored):
        self.assertEqual(modified.role.code, reloaded.role.code)
        self.assertEqual(modified.age, reloaded.age)
        self.assertEqual(modified.height, reloaded.height)
def test_update_foreign_key_fields_explicit_with_id_suffix(self):
    """FK updates also work when named by the DB column 'role_id'."""
    roles = [Role.objects.create(code=1), Role.objects.create(code=2)]
    changed = Person.objects.order_by('pk').all()
    for idx, person in enumerate(changed):
        person.age += 1
        person.height += Decimal('0.01')
        person.role = roles[idx % 2]
    Person.objects.bulk_update(
        changed, update_fields=['age', 'height', 'role_id'])
    stored = Person.objects.order_by('pk').all()
    for modified, reloaded in zip(changed, stored):
        self.assertEqual(modified.role.code, reloaded.role.code)
        self.assertEqual(modified.age, reloaded.age)
        self.assertEqual(modified.height, reloaded.height)
def test_update_foreign_key_exclude_fields_explicit(self):
    """An FK listed in exclude_fields is NOT written to the database."""
    roles = [Role.objects.create(code=1), Role.objects.create(code=2)]
    changed = Person.objects.order_by('pk').all()
    for idx, person in enumerate(changed):
        person.age += 1
        person.height += Decimal('0.01')
        person.role = roles[idx % 2]
    Person.objects.bulk_update(
        changed, update_fields=['age', 'height'], exclude_fields=['role'])
    stored = Person.objects.order_by('pk').all()
    for modified, reloaded in zip(changed, stored):
        # In memory the role was set, but the DB row keeps role = NULL.
        self.assertIsInstance(modified.role, Role)
        self.assertIsNone(reloaded.role)
        self.assertEqual(modified.age, reloaded.age)
        self.assertEqual(modified.height, reloaded.height)
def test_update_foreign_key_exclude_fields_explicit_with_id_suffix(self):
    """Excluding by DB column name 'role_id' also skips the FK write."""
    roles = [Role.objects.create(code=1), Role.objects.create(code=2)]
    changed = Person.objects.order_by('pk').all()
    for idx, person in enumerate(changed):
        person.age += 1
        person.height += Decimal('0.01')
        person.role = roles[idx % 2]
    Person.objects.bulk_update(
        changed, update_fields=['age', 'height'], exclude_fields=['role_id'])
    stored = Person.objects.order_by('pk').all()
    for modified, reloaded in zip(changed, stored):
        # In memory the role was set, but the DB row keeps role = NULL.
        self.assertIsInstance(modified.role, Role)
        self.assertIsNone(reloaded.role)
        self.assertEqual(modified.age, reloaded.age)
        self.assertEqual(modified.height, reloaded.height)
def test_exclude_fields(self):
    """
    Only the fields not in "exclude_fields" are updated
    """
    changed = Person.objects.order_by('pk').all()
    for person in changed:
        person.age += 1
        person.height += Decimal('0.01')
    # 'age' is excluded; only 'height' should reach the database.
    Person.objects.bulk_update(changed, exclude_fields=['age'])
    stored = Person.objects.order_by('pk').all()
    for modified, reloaded in zip(changed, stored):
        self.assertNotEqual(modified.age, reloaded.age)
        self.assertEqual(modified.height, reloaded.height)
def test_object_list(self):
    """
    Pass in a list instead of a queryset for bulk updating
    """
    changed = list(Person.objects.order_by('pk').all())
    for offset, person in enumerate(changed):
        person.big_age = 27 + offset
    Person.objects.bulk_update(changed)
    for offset, person in enumerate(Person.objects.order_by('pk').all()):
        self.assertEqual(person.big_age, 27 + offset)
def test_empty_list(self):
    """
    Update no elements, passed as a list
    """
    nobody = []
    # Must be a no-op rather than an error.
    Person.objects.bulk_update(nobody)
def test_empty_queryset(self):
    """
    Update no elements, passed as a queryset
    """
    # A filter guaranteed to match nobody yields an empty queryset.
    no_match = Person.objects.filter(
        name="Aceldotanrilsteucsebces ECSbd (funny name, isn't it?)")
    Person.objects.bulk_update(no_match)
def test_one_sized_list(self):
    """
    Update one sized list, check if have a syntax error for some db backends.
    """
    just_one = Person.objects.all()[:1]
    Person.objects.bulk_update(just_one)
def test_one_sized_queryset(self):
    """
    Update one sized list, check if have a syntax error for some db backends.
    """
    just_mike = Person.objects.filter(name='Mike')
    Person.objects.bulk_update(just_mike)
def test_wrong_field_names(self):
    """Unknown names in update_fields/exclude_fields raise TypeError."""
    bad_kwargs = [
        {'update_fields': ['somecolumn', 'name']},
        {'exclude_fields': ['somecolumn']},
        {'update_fields': ['somecolumn'],
         'exclude_fields': ['someothercolumn']},
    ]
    for kwargs in bad_kwargs:
        # Fresh queryset per attempt, mirroring independent call sites.
        people = Person.objects.order_by('pk').all()
        for offset, person in enumerate(people):
            person.big_age = 27 + offset
        with self.assertRaises(TypeError):
            Person.objects.bulk_update(people, **kwargs)
def test_batch_size(self):
    """batch_size=1 still updates every object and reports the row count."""
    changed = Person.objects.order_by('pk').all()
    for person in changed:
        person.age += 1
        person.height += Decimal('0.01')
    updated_count = Person.objects.bulk_update(changed, batch_size=1)
    self.assertEqual(updated_count, len(changed))
    stored = Person.objects.order_by('pk').all()
    for modified, reloaded in zip(changed, stored):
        self.assertEqual(modified.age, reloaded.age)
        self.assertEqual(modified.height, reloaded.height)
| HowGood/django-bulk-update | tests/tests.py | Python | mit | 13,421 | [
"CRYSTAL"
] | 38da9e8a44d94e380667b0414e59310af774021d461be125ffe148b2495ec03f |
../../../../../../../share/pyshared/orca/scripts/apps/gedit/__init__.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/gedit/__init__.py | Python | gpl-3.0 | 71 | [
"ORCA"
] | bd53e1bccc31d97bf3758ff4092aba71cd0df61eacb727bd5117e4ed68137e05 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on Jul 17, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 17, 2012"
import numpy as np
import pickle
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.periodic_table import Element, Specie
from pymatgen.core.sites import Site, PeriodicSite
from pymatgen.core.lattice import Lattice
from pymatgen.core.composition import Composition
class SiteTest(PymatgenTest):
    """Unit tests for the non-periodic (Cartesian) Site class."""

    def setUp(self):
        # Fixtures: a fully ordered site, a 50/50 disordered site, a site
        # carrying extra properties, and a dummy ("X") species site.
        self.ordered_site = Site("Fe", [0.25, 0.35, 0.45])
        self.disordered_site = Site({"Fe": 0.5, "Mn": 0.5},
                                    [0.25, 0.35, 0.45])
        self.propertied_site = Site("Fe2+", [0.25, 0.35, 0.45],
                                    {'magmom': 5.1, 'charge': 4.2})
        self.dummy_site = Site("X", [0, 0, 0])

    def test_properties(self):
        """Ordered sites expose .specie; disordered sites must not."""
        self.assertRaises(AttributeError, getattr, self.disordered_site,
                          'specie')
        self.assertIsInstance(self.ordered_site.specie, Element)
        # Arbitrary properties are reachable as attributes.
        self.assertEqual(self.propertied_site.magmom, 5.1)
        self.assertEqual(self.propertied_site.charge, 4.2)

    def test_to_from_dict(self):
        """as_dict/from_dict round-trips sites, properties and dummy species."""
        d = self.disordered_site.as_dict()
        site = Site.from_dict(d)
        self.assertEqual(site, self.disordered_site)
        self.assertNotEqual(site, self.ordered_site)
        d = self.propertied_site.as_dict()
        site = Site.from_dict(d)
        self.assertEqual(site.magmom, 5.1)
        self.assertEqual(site.charge, 4.2)
        d = self.dummy_site.as_dict()
        site = Site.from_dict(d)
        self.assertEqual(site.species_and_occu, self.dummy_site.species_and_occu)

    def test_hash(self):
        # Hash values are fixed by species content (26 = Fe Z number;
        # 51 presumably sums Fe and Mn contributions — tied to implementation).
        self.assertEqual(self.ordered_site.__hash__(), 26)
        self.assertEqual(self.disordered_site.__hash__(), 51)

    def test_cmp(self):
        """Ordering between sites is defined (ordered > disordered here)."""
        self.assertTrue(self.ordered_site > self.disordered_site)

    def test_distance(self):
        osite = self.ordered_site
        # Distance from the origin equals the coordinate vector norm.
        self.assertAlmostEqual(np.linalg.norm([0.25, 0.35, 0.45]),
                               osite.distance_from_point([0, 0, 0]))
        # Same coordinates => zero distance regardless of occupancy.
        self.assertAlmostEqual(osite.distance(self.disordered_site), 0)

    def test_pickle(self):
        """Sites with properties survive a pickle round-trip."""
        o = pickle.dumps(self.propertied_site)
        self.assertEqual(pickle.loads(o), self.propertied_site)
class PeriodicSiteTest(PymatgenTest):
    """Unit tests for PeriodicSite: lattice-aware coordinates, distances
    under periodic boundary conditions, and serialization."""

    def setUp(self):
        # 10 Angstrom cubic lattice keeps fractional -> Cartesian math simple.
        self.lattice = Lattice.cubic(10.0)
        self.si = Element("Si")
        self.site = PeriodicSite("Fe", [0.25, 0.35, 0.45],
                                 self.lattice)
        self.site2 = PeriodicSite({"Si": 0.5}, [0, 0, 0], self.lattice)
        self.assertEqual(self.site2.species_and_occu,
                         Composition({Element('Si'): 0.5}),
                         "Inconsistent site created!")
        self.propertied_site = PeriodicSite(Specie("Fe", 2),
                                            [0.25, 0.35, 0.45],
                                            self.lattice,
                                            properties={'magmom': 5.1,
                                                        'charge': 4.2})
        self.dummy_site = PeriodicSite("X", [0, 0, 0], self.lattice)

    def test_properties(self):
        """
        Test the properties for a site
        """
        # Fractional coordinates a/b/c and Cartesian x/y/z (x = a * 10).
        self.assertEqual(self.site.a, 0.25)
        self.assertEqual(self.site.b, 0.35)
        self.assertEqual(self.site.c, 0.45)
        self.assertEqual(self.site.x, 2.5)
        self.assertEqual(self.site.y, 3.5)
        self.assertEqual(self.site.z, 4.5)
        self.assertTrue(self.site.is_ordered)
        self.assertFalse(self.site2.is_ordered)
        self.assertEqual(self.propertied_site.magmom, 5.1)
        self.assertEqual(self.propertied_site.charge, 4.2)

    def test_distance(self):
        """Minimum-image distance between two sites in the cubic cell."""
        other_site = PeriodicSite("Fe", np.array([0, 0, 0]), self.lattice)
        self.assertAlmostEqual(self.site.distance(other_site), 6.22494979899,
                               5)

    def test_distance_from_point(self):
        # distance_from_point is plain Cartesian distance: no periodic
        # wrapping, hence it differs from the minimum-image value above.
        self.assertNotAlmostEqual(self.site.distance_from_point([0.1, 0.1,
                                                                 0.1]),
                                  6.22494979899, 5)
        self.assertAlmostEqual(self.site.distance_from_point([0.1, 0.1, 0.1]),
                               6.0564015718906887, 5)

    def test_distance_and_image(self):
        """Distance plus the lattice translation (image) that realizes it."""
        other_site = PeriodicSite("Fe", np.array([1, 1, 1]), self.lattice)
        (distance, image) = self.site.distance_and_image(other_site)
        self.assertAlmostEqual(distance, 6.22494979899, 5)
        self.assertTrue(([-1, -1, -1] == image).all())
        (distance, image) = self.site.distance_and_image(other_site, [1, 0, 0])
        self.assertAlmostEqual(distance, 19.461500456028563, 5)
        # Test that old and new distance algo give the same ans for
        # "standard lattices"
        lattice = Lattice(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
        site1 = PeriodicSite("Fe", np.array([0.01, 0.02, 0.03]), lattice)
        site2 = PeriodicSite("Fe", np.array([0.99, 0.98, 0.97]), lattice)
        self.assertAlmostEqual(get_distance_and_image_old(site1, site2)[0],
                               site1.distance_and_image(site2)[0])
        # Very skewed cell (b = 0.01): the old rounding heuristic is known
        # to overestimate here, so old >= new.
        lattice = Lattice.from_parameters(1, 0.01, 1, 10, 10, 10)
        site1 = PeriodicSite("Fe", np.array([0.01, 0.02, 0.03]), lattice)
        site2 = PeriodicSite("Fe", np.array([0.99, 0.98, 0.97]), lattice)
        self.assertTrue(get_distance_and_image_old(site1, site2)[0] >
                        site1.distance_and_image(site2)[0])
        site2 = PeriodicSite("Fe", np.random.rand(3), lattice)
        (dist_old, jimage_old) = get_distance_and_image_old(site1, site2)
        (dist_new, jimage_new) = site1.distance_and_image(site2)
        self.assertTrue(dist_old - dist_new > -1e-8,
                        "New distance algo should give smaller answers!")
        self.assertFalse((abs(dist_old - dist_new) < 1e-8) ^
                         (jimage_old == jimage_new).all(),
                         "If old dist == new dist, images must be the same!")
        latt = Lattice.from_parameters(3.0, 3.1, 10.0, 2.96, 2.0, 1.0)
        site = PeriodicSite("Fe", [0.1, 0.1, 0.1], latt)
        site2 = PeriodicSite("Fe", [0.99, 0.99, 0.99], latt)
        (dist, img) = site.distance_and_image(site2)
        self.assertAlmostEqual(dist, 0.15495358379511573)
        self.assertEqual(list(img), [-11, 6, 0])

    def test_is_periodic_image(self):
        """Sites differing by integer lattice translations are images."""
        other = PeriodicSite("Fe", np.array([1.25, 2.35, 4.45]), self.lattice)
        self.assertTrue(self.site.is_periodic_image(other),
                        "This other site should be a periodic image.")
        other = PeriodicSite("Fe", np.array([1.25, 2.35, 4.46]), self.lattice)
        self.assertFalse(self.site.is_periodic_image(other),
                         "This other site should not be a periodic image.")
        other = PeriodicSite("Fe", np.array([1.25, 2.35, 4.45]),
                             Lattice.rhombohedral(2, 60))
        self.assertFalse(self.site.is_periodic_image(other),
                         "Different lattices should not be periodic images.")

    def test_equality(self):
        other_site = PeriodicSite("Fe", np.array([1, 1, 1]), self.lattice)
        self.assertTrue(self.site.__eq__(self.site))
        self.assertFalse(other_site.__eq__(self.site))
        self.assertFalse(self.site.__ne__(self.site))
        self.assertTrue(other_site.__ne__(self.site))

    def test_as_from_dict(self):
        """as_dict/from_dict round-trips periodic sites of all flavors."""
        d = self.site2.as_dict()
        site = PeriodicSite.from_dict(d)
        self.assertEqual(site, self.site2)
        self.assertNotEqual(site, self.site)
        d = self.propertied_site.as_dict()
        site3 = PeriodicSite({"Si": 0.5, "Fe": 0.5}, [0, 0, 0], self.lattice)
        d = site3.as_dict()
        site = PeriodicSite.from_dict(d)
        self.assertEqual(site.species_and_occu, site3.species_and_occu)
        d = self.dummy_site.as_dict()
        site = PeriodicSite.from_dict(d)
        self.assertEqual(site.species_and_occu, self.dummy_site.species_and_occu)

    def test_to_unit_cell(self):
        # Fractional coordinates outside [0, 1) are wrapped back in.
        site = PeriodicSite("Fe", np.array([1.25, 2.35, 4.46]), self.lattice)
        site = site.to_unit_cell
        val = [0.25, 0.35, 0.46]
        self.assertArrayAlmostEqual(site.frac_coords, val)
def get_distance_and_image_old(site1, site2, jimage=None):
    """Distance between two sites under the legacy minimum-image heuristic.

    If ``jimage`` is None, the periodic image of ``site2`` nearest to
    ``site1`` is guessed by rounding the fractional-coordinate difference;
    otherwise the given lattice translation is used as-is.

    Args:
        site1: reference site (must provide ``frac_coords`` and ``lattice``).
        site2: other site (must provide ``frac_coords``).
        jimage: optional periodic image as integer lattice translations,
            e.g. ``[1, 0, 0]`` for one a-lattice vector away.

    Returns:
        (distance, jimage): the distance and the lattice translation of the
        other site for which that distance applies.

    .. note::
        Assumes the primitive cell vectors are sufficiently not skewed such
        that the condition \|a\|cos(ab_angle) < \|b\| holds for all cell
        vector pairs. ** this method does not check this condition **
    """
    if jimage is None:
        # Legacy heuristic: round the fractional separation to pick the
        # nearest image (exact only for near-orthogonal cells).
        frac_sep = site2.frac_coords - site1.frac_coords
        jimage = -np.array(np.around(frac_sep), int)
    displacement = site1.lattice.get_cartesian_coords(
        jimage + site2.frac_coords - site1.frac_coords)
    return np.linalg.norm(displacement), jimage
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    # unittest2 is the backport used for Python 2.6 compatibility.
    import unittest2 as unittest
    unittest.main()
| aykol/pymatgen | pymatgen/core/tests/test_sites.py | Python | mit | 10,450 | [
"pymatgen"
] | 524428c7c0bc260a925e3f27d7f7a4ea15dcd2a0b276494405e1fe7b1d07b1c5 |
# $Id$
#
# Copyright (C) 2016 Novartis Institute of BioMedical Research
# All Rights Reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from rdkit import Chem
from rdkit.Chem import rdStructChecker
import unittest
# Test fixture: an MDL V2000 molblock (PubChem CID-style header) with 55
# atoms / 60 bonds. NOTE(review): whitespace inside this literal looks
# collapsed relative to the fixed-width MOL format — verify against the
# original file before relying on column positions.
data = """310929550
-OEChem-07211613022D
55 60 0 0 0 0 0 0 0999 V2000
-3.6737 1.4194 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
-4.8298 2.0868 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
0.7144 1.2798 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
-3.5968 2.8875 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
-2.8579 0.8673 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
6.4302 1.2798 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
5.0013 -1.1952 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
8.5736 0.8673 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
10.7170 1.2798 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
12.8605 0.8673 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
-4.0093 2.1731 0.0000 N 0 3 0 0 0 0 0 0 0 0 0 0
-3.5724 -0.3702 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
-2.1434 -0.3702 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
2.1434 1.2798 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
3.5724 1.2798 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
2.8579 0.0423 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
-4.2868 0.8673 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-5.0013 1.2798 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-4.2868 0.0423 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-5.7158 0.0423 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-5.7158 0.8673 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-5.0013 -0.3702 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-6.4302 -0.3702 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7144 -0.3702 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.0423 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.4290 0.0423 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.8673 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-2.8579 0.0423 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.4290 0.8673 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7144 1.2798 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7144 -1.1952 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7144 -0.3702 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1.4290 0.8673 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-6.4302 -1.1952 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-7.1447 -0.7827 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-7.1447 0.0423 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 -1.6077 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7144 -1.1952 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
2.8579 0.8673 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1.4290 0.0423 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
4.2868 0.8673 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
2.1434 -0.3702 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
5.0013 1.2798 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
5.7158 0.8673 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
4.2868 0.0423 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
5.0013 -0.3702 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
5.7158 0.0423 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
7.1447 0.8673 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
4.2868 -1.6077 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
7.8592 1.2798 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
9.2881 1.2798 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
10.0026 0.8673 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
11.4316 0.8673 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
12.1460 1.2798 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
13.5750 1.2798 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 11 1 0 0 0 0
1 17 1 0 0 0 0
2 11 1 0 0 0 0
2 18 1 0 0 0 0
3 27 1 0 0 0 0
3 33 1 0 0 0 0
4 11 2 0 0 0 0
5 28 2 0 0 0 0
6 44 1 0 0 0 0
6 48 1 0 0 0 0
7 46 1 0 0 0 0
7 49 1 0 0 0 0
8 50 1 0 0 0 0
8 51 1 0 0 0 0
9 52 1 0 0 0 0
9 53 1 0 0 0 0
10 54 1 0 0 0 0
10 55 1 0 0 0 0
12 19 1 0 0 0 0
12 28 1 0 0 0 0
13 26 1 0 0 0 0
13 28 1 0 0 0 0
14 33 2 0 0 0 0
14 39 1 0 0 0 0
15 39 1 0 0 0 0
15 41 1 0 0 0 0
16 39 2 0 0 0 0
16 42 1 0 0 0 0
17 18 1 0 0 0 0
17 19 2 0 0 0 0
18 21 2 0 0 0 0
19 22 1 0 0 0 0
20 21 1 0 0 0 0
20 22 2 0 0 0 0
20 23 1 0 0 0 0
23 34 1 0 0 0 0
23 35 1 0 0 0 0
23 36 1 0 0 0 0
24 25 1 0 0 0 0
24 26 2 0 0 0 0
24 31 1 0 0 0 0
25 27 2 0 0 0 0
25 32 1 0 0 0 0
26 29 1 0 0 0 0
27 30 1 0 0 0 0
29 30 2 0 0 0 0
31 37 2 0 0 0 0
32 38 2 0 0 0 0
33 40 1 0 0 0 0
37 38 1 0 0 0 0
40 42 2 0 0 0 0
41 43 2 0 0 0 0
41 45 1 0 0 0 0
43 44 1 0 0 0 0
44 47 2 0 0 0 0
45 46 2 0 0 0 0
46 47 1 0 0 0 0
48 50 1 0 0 0 0
51 52 1 0 0 0 0
53 54 1 0 0 0 0
M CHG 1 11 1
M END
"""
class TestCase(unittest.TestCase):
    """Smoke tests for the RDKit rdStructChecker Python wrapper."""

    def testStructOptions(self):
        """StructCheckerOptions is constructible with default settings."""
        # Assert on the result instead of discarding it, so a None-returning
        # wrapper regression is caught.
        opts = rdStructChecker.StructCheckerOptions()
        self.assertIsNotNone(opts)

    def testStructChecker(self):
        """CheckMolStructure flags the charged-atom fixture molblock."""
        checker = rdStructChecker.StructChecker()
        m = Chem.MolFromMolBlock(data)
        self.assertTrue(m)
        res = checker.CheckMolStructure(m)
        # assertEquals is a deprecated alias of assertEqual (removed in
        # Python 3.12); use the canonical name.
        self.assertEqual(res, rdStructChecker.StructureFlags.ATOM_CHECK_FAILED)
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| bp-kelley/rdkit | Code/GraphMol/StructChecker/Wrap/rough_test.py | Python | bsd-3-clause | 7,432 | [
"RDKit"
] | 9c7b98682d9a41f75c60ad97b1eaa3d22a38b9f124d750bb17080668f28c6d95 |
import pandas as pd
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.manipulation.modify import convert_to_irreversible
import numpy as np
from model_addons import add_to_model
from copy import deepcopy
class rates(object):
    """Derive in-vivo catalytic rates (kapp/kmax) and specific activities
    for E. coli enzymes by combining pFBA fluxes with proteomics data.

    Python 2 code (print statements, dict.iteritems); relies on data files
    under ../data/ and the cobra/pandas stacks.
    """

    def __init__(self):
        self.model = create_cobra_model_from_sbml_file("../data/iJO1366.xml")
        # Split reversible reactions into forward/backward pairs so each
        # direction gets its own flux.
        convert_to_irreversible(self.model)
        self.rxns = dict([(r.id, r) for r in self.model.reactions])
        self.genes = dict([(g.id, g) for g in self.model.genes])
        add_to_model(self.model)
        self.include_specific_isozmyes()
        self.gc = pd.DataFrame.from_csv("../data/growth_conditions.csv")
        flux = pd.DataFrame.from_csv('../data/flux[mmol_gCDW_h].csv')
        self.v = self._convert_mmol_gCDW_h_to_mmol_gCDW_s(flux)
        # PPKr_reverse reaction is used for ATP generation from ADP
        # in the FBA model. Nevertheless, according to EcoCyc, it is used
        # to generate polyP (inorganic phosphate) chains from ATP and it is
        # not part of oxidative phosphorylation, thus removed from rate
        # calculations.
        if 'PPKr_reverse' in self.v.index:
            self.v.drop('PPKr_reverse', axis=0, inplace=True)
        self.enzymatic_reactions = self._enzymatic_reactions()
        self.homomeric_reactions = self.reactions_by_homomeric_enzymes()
        proteins_copies_fL = pd.DataFrame.from_csv('../data/meta_abundance[copies_fL].csv')
        self.proteins_mmol_gCDW = self._convert_copies_fL_to_mmol_gCDW(proteins_copies_fL)
        self.E = self.map_expression_by_reaction()
        self.kapp = self.get_kapp() # per subunit
        self.SA = self.get_specific_activity()
        self.kcat = pd.DataFrame.from_csv("../data/kcat_data.csv")
        # Polypeptide chains per catalytic site, used to convert per-chain
        # rates into per-active-site rates.
        self.p_per_as = (self.kcat['polypeptides per complex']
                         / self.kcat['catalytic sites per complex'])
        self.kmax = self.get_kmax(self.kapp)
        self.SAmax = self.get_maximum_specific_activity(self.SA)

    def include_specific_isozmyes(self):
        '''
        Possible add-ons to the list of unique homomeric enzymes
        obtained by the function "reactions_to_unique_enzyme".
        These isoenzymes are known to have only one active isoenzyme
        across all tested conditions and therefore were manually added
        '''
        pairs = [
            ('METS','b3829'),# metE - cobalamin-independent homocysteine transmethylase - The aerobic enzyme - the other isoenzyme operates under anaerobic conditions
            ('HCO3E','b0126'),# can - carbonic anhydrase
            ('PFK','b3916'), # 6-phosphofructokinase - pfkB accounts for above 90% of enzymatic activity (EcoCyc)
            ('RPI','b2914'), # ribose-5-phosphate isomerase A
            ('RPE', 'b3386') # ribulose-5-phosphate 3-epimerase - other isozyme is according to predicted activity.
        ]
        # Overwrite the gene association so the reaction maps to a single
        # gene (mutates private cobra attribute _genes deliberately).
        for (r,g) in pairs:
            self.rxns[r]._genes = [self.model.genes.get_by_id(g)]
            self.rxns[r].gene_reaction_rule = '('+g+')'

    def _enzymatic_reactions(self):
        '''
        Returns a list of cobra Reaction objects catalyzed by enzymes.
        '''
        reactions = filter(lambda r:len(r.genes)>=1, self.model.reactions)
        genes = [list(r.genes) for r in reactions]
        return dict(zip(reactions,genes))

    def reactions_by_unique_enzyme(self):
        '''
        Returns a list of reactions (as cobra REACTION objects)
        in the model catalyzed by unique enzymes. Enzymes can either
        be homomeric or hetorometic complexes.
        '''
        # 'or' in the gene rule marks isozymes; require a single enzyme.
        one_enzyme_reac = filter(lambda r: 'or' not in r.gene_reaction_rule,
                                 self.enzymatic_reactions.keys())
        genes = [list(r.genes) for r in one_enzyme_reac]
        return dict(zip(one_enzyme_reac,genes))

    def reactions_by_homomeric_enzymes(self):
        '''
        Returns a list of reactions (as cobra REACTION objects)
        in the model catalyzed by unique enzymes which are composed
        of a single polypeptide chain, i.e., unique homomeric enzymes.
        '''
        homomers = filter(lambda r: len(r.genes)==1,
                          self.enzymatic_reactions.keys())
        genes = [list(r.genes)[0] for r in homomers]
        return dict(zip(homomers,genes))

    def _convert_copies_fL_to_mmol_gCDW(self, expression_data):
        '''
        Convertes the units of proteomics data (usually reported in
        copies per fL of cytoplasm) to units of mmol per gCDW.
        This unit conversion is performed to match flux units from
        metabolic models (usually given in mmol/gCDW/h)
        '''
        rho = 1100 # average cell density gr/liter
        DW_fraction = 0.3 # fraction of DW of cells
        Avogadro = 6.02214129 # Avogadro's number "exponent-less"
        # Copy numbers below 10 are treated as below detection limit.
        expression_data[expression_data<10] = np.nan
        expression_data /= (Avogadro*1e5)
        expression_data /= (rho * DW_fraction)
        return expression_data

    def _convert_mmol_gCDW_h_to_mmol_gCDW_s(self, flux_data):
        '''
        Convertes the units of flux data (usually reported in
        mmol/gCDW/h) to units of mmol/gCDW per second.
        This unit conversion is performed to allow calculation of
        turnover rates in units of s^-1, as traditioanlly excepted.
        '''
        # NOTE: mutates the passed DataFrame in place and returns it.
        flux_data /= 3600
        return flux_data

    def _convert_mmol_gCDW_to_mg_gCDW(self, expression_data):
        # Weight each protein's abundance by its molecular weight (MW in
        # the gene annotation) to obtain mass concentrations.
        genes = set(self.genes.keys()) & (set(expression_data.index))
        mass = [self.genes[g].MW for g in genes]
        MW = pd.Series(index=genes, data=mass)
        return expression_data.loc[MW.index].mul(MW, axis=0)

    def map_expression_by_reaction(self):
        # Build a reaction-by-condition abundance matrix for homomeric
        # enzymes: each reaction row carries its single gene's abundance.
        gc = self.v.columns & self.proteins_mmol_gCDW.columns
        tmp = {k.id:v.id for k,v in self.reactions_by_homomeric_enzymes().iteritems()}
        E = pd.DataFrame(index=tmp.keys(),columns=gc)
        for i in E.index:
            if tmp[i] in self.proteins_mmol_gCDW.index:
                E.loc[i] = self.proteins_mmol_gCDW.loc[tmp[i]]
        E.dropna(how='all', inplace=True)
        return E

    def get_kapp(self):
        '''
        Calculates the catalytic rate of a single subunit of a homomeric
        enzyme for a given reaction, by dividing the flux through the
        reaction by the abundance of the polypeptide chain that comprises
        the enzyme.

        Arguments:
            flux [mmol/gCDW/s]
            proteomics [mmol/gCDW]

        Returns:
            pandas dataframe with catalytic rates per polypeptide chain
            in units of s^-1. Rows are reactions, columns are conditions
        '''
        rate = self.v.div(self.E)
        # Zero flux or division artifacts are not informative rates.
        rate.replace([0, np.inf, -np.inf], np.nan, inplace=True)
        rate.dropna(how='all', inplace=True)
        return rate

    def get_kmax(self, kapp, minimal_conditions=5):
        '''
        Take the maximum rate of a given enzyme-reaction pair
        across all conditions.

        Arguments:
            catalytic rate of enzyme-reaction pairs across conditions
            as a pandas dataframe. Rows are reactions, columns are conditions

        Returns:
            Maximal rate for each enzyme-reaction pair, the condition in
            which it was found, the metabolic pathway associated with
            the reaction and the carbon source on which the cells were grown.
            Notice that maximal rates are given per polypeptide chain and
            per active site in two seperate columns.
            Rate units are s^-1.
        '''
        # Require data in at least `minimal_conditions` conditions to call
        # a maximum meaningful.
        kapp.dropna(thresh=minimal_conditions, inplace=True)
        kmax = pd.DataFrame(index=kapp.index)
        subsystems =[self.rxns[r].subsystem for r in kmax.index]
        genes = [list(self.rxns[r].genes)[0].id for r in kmax.index]
        names = [list(self.rxns[r].genes)[0].name for r in kmax.index]
        kmax.index.name = 'reaction'
        kmax['bnumber'] = genes
        kmax['primary gene name (uniprot)'] = names
        kmax['kmax per chain [s^-1]'] = kapp.max(axis=1)
        # Scale per-chain rates by chains-per-active-site.
        tmp = self.kapp.loc[kmax.index].mul(self.p_per_as[kmax.index], axis=0)
        kmax['kmax per active site [s-1]'] = tmp.max(axis=1)
        kmax['subsystem'] = subsystems
        kmax['condition'] = kapp.idxmax(axis=1)
        return kmax

    def get_specific_activity(self):
        '''
        Calculates the specific activity in units of umol/mg/min
        for all reactions in the model. The sum of all associated
        polypeptide chains is used as the molecular weight of the enzyme
        and the flux through the reaction is divided by this weight.

        Notice that if a reaction can be carried by several different enzymes,
        i.e., isoenzymes, the returned values are a weighted average of the
        rate of the enzymes by their mass.

        Arguments:
            flux [mmol/gCDW/s]
            proteomics [mmol/gCDW]

        Returns:
            pandas dataframe with specific activeites of enzymes
            in units of umol/mg/min. Rows are reactions, columns are conditions
        '''
        weighted_mass = self._convert_mmol_gCDW_to_mg_gCDW(self.proteins_mmol_gCDW)
        reactions = map(lambda x: x.id, self.enzymatic_reactions)
        SA = pd.DataFrame(index=reactions, columns=self.gc.index)
        for r in self.enzymatic_reactions:
            genes = map(lambda x: x.id, r.genes)
            try:
                SA.loc[r.id] = self.v.loc[r.id] / weighted_mass.loc[genes].sum()
            except KeyError:
                # Reaction has no flux row or no measured proteins; skip it.
                continue
        SA.replace([0, np.inf, -np.inf], np.nan, inplace=True)
        SA.dropna(how='all', inplace=True)
        # mmol/mg/s -> umol/mg/min.
        return SA * 1000 * 60

    def get_maximum_specific_activity(self, specific_activity, minimal_conditions=5):
        '''
        Take the maximum rate of a given enzyme-reaction pair
        across all conditions.

        Arguments:
            specific activities of enzyme-reaction pairs across conditions
            as a pandas dataframe. Rows are reactions, columns are conditions

        Returns:
            Maximal specific activity for each enzyme-reaction pair,
            the condition in which it was found, the metabolic pathway
            associated with the reaction and the carbon source on which
            the cells were grown.

            Notice that maximal specific activities are given for the sum
            of all associated enzymes, thus represent the weighted average
            of the specific activites of the isoenzymes. Being a weighted
            average, it means that the values underestimate the maximal
            potential rate.
        '''
        specific_activity.dropna(thresh=minimal_conditions, inplace=True)
        SAmax = pd.DataFrame(index=specific_activity.index)
        reactions = map(self.model.reactions.get_by_id, SAmax.index)
        subsystems = map(lambda r: r.subsystem, reactions)
        SAmax['max specific activity [umol/mg/min]'] = specific_activity.max(axis=1)
        SAmax['subsystem'] = subsystems
        SAmax['condition'] = specific_activity.idxmax(axis=1)
        return SAmax

    def get_second_max(self):
        '''
        Finds the second maximal kapp value by reaction

        Arguments:
            self

        Returns:
            Pandas Series with reactions as index and snd max as values
        '''
        rate = self.kapp.mul(self.p_per_as, axis=0)
        rate.dropna(how='all', inplace=True)
        second = pd.Series(index=rate.index)
        for r in rate.index:
            # Second-largest value across conditions for this reaction.
            array = sorted(rate.loc[r])
            second[r] = array[-2]
        return second

    def _perform_pFBA(self, model, cs='glc', gr=1, ur=10):
        # Run parsimonious FBA for one growth condition.
        #   cs: carbon source key, gr: growth rate [h-1],
        #   ur: uptake rate [mmol/gCDW/h].
        from cobra.flux_analysis.parsimonious import optimize_minimal_flux
        rxns = dict([(r.id, r) for r in model.reactions])
        rxns['EX_glc_e'].lower_bound = 0 # uptake of carbon source reaction is initialized
        try:
            rxns['EX_' + cs + '_e'].lower_bound = -ur # redefine sole carbon source uptake reaction in mmol/gr/h
        except:
            # Unknown carbon-source key: fall back to glucose uptake.
            print cs, ur
            rxns['EX_glc_e'].lower_bound = -ur
        rxns['Ec_biomass_iJO1366_core_53p95M'].upper_bound = gr
        print "solving pFBA",
        optimize_minimal_flux(model, already_irreversible=True)
        flux_dist = pd.DataFrame(model.solution.x_dict.items()).set_index(0)
        return flux_dist

    def _overwrite_pFBA_file(self):
        # Recompute pFBA fluxes for every growth condition and overwrite
        # the cached CSV used by __init__.
        reactions = [r.id for r in self.model.reactions]
        fluxes = pd.DataFrame(index=reactions, columns=self.gc.index)
        for c in self.gc.iterrows():
            gr = c[1]['growth rate [h-1]']
            cs = c[1]['media_key']
            ur = c[1]['uptake rate [mmol gCDW-1 h-1]']
            if np.isnan(ur):
                # Default uptake rate when the measurement is missing.
                ur = 18.5
            model = deepcopy(self.model)
            fluxes[c[0]] = self._perform_pFBA(model, cs, gr, ur)
            print "- %s" %c[0]
        fluxes.index.name = 'reaction'
        ''' export results '''
        fluxes.to_csv('../data/flux[mmol_gCDW_h].csv')
if __name__ == "__main__":
    # Compare literature kcat values with in-vivo kmax on the shared set
    # of reactions, using three correlation measures on the log/raw data.
    R = rates()
    kcat = R.kcat['kcat per active site [s-1]'].dropna()
    kmax = R.kmax['kmax per active site [s-1]'].dropna()
    # Restrict both series to reactions present in each dataset.
    index = kcat.index & kmax.index
    kcat = kcat[index]
    kmax = kmax[index]
    from scipy import stats
    pearson = stats.pearsonr(np.log(kcat), np.log(kmax))
    spearman = stats.spearmanr(kcat, kmax)
    kendal = stats.kendalltau(kcat, kmax)
| dandanvidi/in-vivo-catalytic-rates | scripts/catalytic_rates.py | Python | mit | 14,326 | [
"Avogadro"
] | 70224ee8b07975bcaa4d3c927096581fa360d3bc437d30fb2f04b7340302ce59 |
import casadi as ca
import pylab as pl
import casiopeia as cp
import os
# (Model and data taken from: Diehl, Moritz: Course on System Identification,
# exercise 7, SYSCOP, IMTEK, University of Freiburg, 2014/2015)
# Defining constant problem parameters:
#
# - m: representing the ball of the mass in kg
# - L: the length of the pendulum bar in meters
# - g: the gravity constant in m/s^2
# - psi: the actuation angle of the manuver in radians, which stays
# constant for this problem
m = 1.0
L = 3.0
g = 9.81
# psi = pl.pi / 2.0
# small actuation angle: keeps the pendulum near the linear regime
psi = pl.pi / (180.0 * 2)
# System: state x = (angle, angular velocity), one parameter p (spring
# constant of the actuation), one control u (actuation angle)
x = ca.MX.sym("x", 2)
p = ca.MX.sym("p", 1)
u = ca.MX.sym("u", 1)
# f = ca.vertcat([x[1], p[0]/(m*(L**2))*(u-x[0]) - g/L * pl.sin(x[0])])
# linearized dynamics: sin(x[0]) replaced by x[0]
f = ca.vertcat(x[1], p[0]/(m*(L**2))*(u-x[0]) - g/L * x[0])
phi = x
system = cp.system.System(x = x, u = u, p = p, f = f, phi = phi)
# measurement file columns: time, angle, angular velocity
data = pl.loadtxt('data_pendulum.txt')
time_points = data[:500, 0]
numeas = data[:500, 1]
wmeas = data[:500, 2]
N = time_points.size
ydata = pl.array([numeas,wmeas])
# constant control over all N-1 intervals
udata = [psi] * (N-1)
ptrue = [3.0]
# simulate the noise-free "true" trajectory once; reused in every repetition
sim_true = cp.sim.Simulation(system, ptrue)
sim_true.run_system_simulation(time_points = time_points, \
    x0 = ydata[:, 0], udata = udata)
# pl.figure()
# pl.plot(time_points, pl.squeeze(sim_true.simulation_results[0,:]))
# pl.plot(time_points, pl.squeeze(sim_true.simulation_results[1,:]))
# pl.show()
p_test = []
sigma = 0.1
# least-squares weights = 1 / sigma^2 for every measurement
wv = (1. / sigma**2) * pl.ones(ydata.shape)
repetitions = 100
# Monte-Carlo: perturb the true trajectory with N(0, sigma^2) noise,
# re-estimate p each time, and collect the estimates
for k in range(repetitions):
    y_randn = sim_true.simulation_results + \
        sigma * (pl.randn(*sim_true.simulation_results.shape))
    pe_test = cp.pe.LSq(system = system, time_points = time_points,
        udata = udata, xinit = y_randn, ydata = y_randn, wv = wv, pinit = 1)
    pe_test.run_parameter_estimation()
    p_test.append(pe_test.estimated_parameters)
# empirical statistics over the Monte-Carlo estimates
p_mean = pl.mean(p_test)
p_std = pl.std(p_test, ddof=0)
# covariance matrix from the *last* experiment only, for comparison
pe_test.compute_covariance_matrix()
pe_test.print_estimation_results()
# Generate report: compare the Monte-Carlo standard deviation against the
# standard deviation predicted by the single covariance-matrix computation
print("\np_mean = " + str(ca.DM(p_mean)))
print("phat_last_exp = " + str(ca.DM(pe_test.estimated_parameters)))
print("\np_sd = " + str(ca.DM(p_std)))
print("sd_from_covmat = " + str(ca.diag(ca.sqrt(pe_test.covariance_matrix))))
print("beta = " + str(pe_test.beta))
print("\ndelta_abs_sd = " + str(ca.fabs(ca.DM(p_std) - \
    ca.diag(ca.sqrt(pe_test.covariance_matrix)))))
print("delta_rel_sd = " + str(ca.fabs(ca.DM(p_std) - \
    ca.diag(ca.sqrt(pe_test.covariance_matrix))) / ca.DM(p_std)))
# write an rst report named after this script
fname = os.path.basename(__file__)[:-3] + ".rst"
report = open(fname, "w")
report.write( \
'''Concept test: covariance matrix computation
===========================================
Simulate system. Then: add gaussian noise N~(0, sigma^2), estimate,
store estimated parameter, repeat.
.. code-block:: python
    y_randn = sim_true.simulation_results + sigma * \
        (np.random.randn(*sim_true.estimated_parameters.shape))
Afterwards, compute standard deviation of estimated parameters,
and compare to single covariance matrix computation done in PECas.
''')
prob = "ODE, 2 states, 1 control, 1 param, (pendulum linear)"
report.write(prob)
report.write("\n" + "-" * len(prob) + "\n\n.. code-block:: python")
report.write( \
'''.. code-block:: python
    ---------------------- casiopeia system definition -----------------------
    The system is a dynamic system defined by a set of
    explicit ODEs xdot which establish the system state x:
        xdot = f(t, u, x, p, we, wu)
    and by an output function phi which sets the system measurements:
        y = phi(t, x, p).
    Particularly, the system has:
        1 inputs u
        1 parameters p
        2 states x
        2 outputs phi
    Where xdot is defined by:
        xdot[0] = x[1]
        xdot[1] = (((p/9)*(u-x[0]))-(3.27*x[0]))
    And where phi is defined by:
        y[0] = x[0]
        y[1] = x[1]
''')
# numeric results section of the report
report.write("\n**Test results:**\n\n.. code-block:: python")
report.write("\n\n    repetitions = " + str(repetitions))
report.write("\n    sigma = " + str(sigma))
report.write("\n\n    p_true = " + str(ca.DM(ptrue)))
report.write("\n\n    p_mean = " + str(ca.DM(p_mean)))
report.write("\n    phat_last_exp = " + \
        str(ca.DM(pe_test.estimated_parameters)))
report.write("\n\n    p_sd = " + str(ca.DM(p_std)))
report.write("\n    sd_from_covmat = " \
        + str(ca.diag(ca.sqrt(pe_test.covariance_matrix))))
report.write("\n    beta = " + str(pe_test.beta))
report.write("\n\n    delta_abs_sd = " + str(ca.fabs(ca.DM(p_std) - \
        ca.diag(ca.sqrt(pe_test.covariance_matrix)))))
report.write("\n    delta_rel_sd = " + str(ca.fabs(ca.DM(p_std) - \
        ca.diag(ca.sqrt(pe_test.covariance_matrix))) / ca.DM(p_std)) \
        + "\n")
report.close()
# best-effort PDF rendering of the rst report
# NOTE(review): os.system does not raise when rst2pdf is missing — it
# returns a non-zero exit code; this except likely never fires. Confirm.
try:
    os.system("rst2pdf " + fname)
except:
    print("Generating PDF report failed, is rst2pdf installed correctly?")
| adbuerger/casiopeia | concept_tests/sd_check_pendulum_linear.py | Python | lgpl-3.0 | 4,929 | [
"Gaussian"
] | 20b63eb2ee8038a30b9690d52c13d43018ae24b343253344f45707d33c8fc33e |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 10:34:07 2016
@author: Radu
"""
from cell import Cell
from neuron import h
class ClarkeRelay(Cell): #### Inherits from Cell
    """Two-section NEURON cell model: a soma with active ('clarke')
    channels plus a delayed-afterpotential synapse, and a dendrite with
    passive properties carrying the input synapses."""
    #### __init__ is gone and handled in Cell.
    #### We can override __init__ completely, or do some of
    #### our own initialization first, and then let Cell do its
    #### thing, and then do a bit more ourselves with "super".
    ####
    #### def __init__(self):
    #### # Do some stuff
    #### super(Cell, self).__init__()
    #### # Do some more stuff
    #
    def create_sections(self):
        """Create the sections of the cell."""
        self.soma = h.Section(name='soma', cell=self)
        self.dend = h.Section(name='dend', cell=self)
    #
    def build_topology(self):
        """Connect the sections of the cell to build a tree."""
        # dendrite attaches to the distal (1) end of the soma
        self.dend.connect(self.soma(1))
    #
    def define_geometry(self):
        """Set the 3D geometry of the cell."""
        self.soma.L = 35 # microns
        self.soma.diam = 25 # microns
        self.dend.L = 400 # microns
        self.dend.diam = 1 # microns
        self.dend.nseg = 9
        self.shape_3D()
    #
    def define_biophysics(self):
        """Assign the membrane properties across the cell."""
        for sec in self.all: # 'all' exists in parent object.
            sec.Ra = 70 # Axial resistance in Ohm * cm
            sec.cm = 1 # Membrane capacitance in micro Farads / cm^2
        # Insert active Hodgkin-Huxley current in the soma
        # NOTE(review): comment above says Hodgkin-Huxley, but the
        # mechanism inserted below is the custom 'clarke' channel.
        # Self-connected Exp2Syn: the soma's own spike (via NetCon below)
        # drives a depolarizing afterpotential-like conductance.
        self.dap_syn_ = h.Exp2Syn(self.soma(0.5))
        self.dap_syn_.tau1 = 2
        self.dap_syn_.tau2 = 5
        self.dap_syn_.e = 50
        self.dap_nc_ = h.NetCon(self.soma(0.5)._ref_v,\
                                self.dap_syn_, sec=self.soma)
        self.dap_nc_.delay = 0
        self.dap_nc_.threshold = 10
        self.soma.insert('clarke')
        self.soma.gl_clarke = 0.003
        self.soma.tau_n_bar_clarke = 7
        self.dap_nc_.weight[0] = 7.5e-3
        self.soma.gkrect_clarke = 0.6
        self.soma.insert('extracellular')
        # Insert passive current in the dendrite
        self.dend.insert('pas')
        self.dend.g_pas = 0.001 # Passive conductance in S/cm2
        self.dend.e_pas = -54.3 # Leak reversal potential mV
    #
    def shape_3D(self):
        """
        Set the default shape of the cell in 3D coordinates.
        Set soma(0) to the origin (0,0,0) and dend extending along
        the X-axis.
        """
        len1 = self.soma.L
        h.pt3dclear(sec=self.soma)
        h.pt3dadd(0, 0, 0, self.soma.diam, sec=self.soma)
        h.pt3dadd(len1, 0, 0, self.soma.diam, sec=self.soma)
        len2 = self.dend.L
        h.pt3dclear(sec=self.dend)
        h.pt3dadd(len1, 0, 0, self.dend.diam, sec=self.dend)
        h.pt3dadd(len1 + len2, 0, 0, self.dend.diam, sec=self.dend)
    #
    #### build_subsets, rotateZ, and set_location are gone. ####
    #
    #### NEW STUFF ####
    #
    def create_synapses(self):
        """Attach input synapses to the dendrite: two group-I synapses
        (excitatory and inhibitory) at 0.4 and one group-II excitatory
        synapse at 0.8; all appended to self.synlist (from Cell).
        """
        self.syn_I= h.ExpSyn(self.dend(0.4))
        self.syn_I.tau = 17
        self.syn_I.e = 0
        self.synlist.append(self.syn_I)
        self.syn_I_inh= h.ExpSyn(self.dend(0.4))
        self.syn_I_inh.tau = 5
        self.syn_I_inh.e = -70 # inhibitory reversal potential (mV)
        self.synlist.append(self.syn_I_inh)
        self.syn_II= h.ExpSyn(self.dend(0.8))
        self.syn_II.tau = 18
        self.syn_II.e = 0
        self.synlist.append(self.syn_II) # synlist is defined in Cell
"NEURON"
] | aacb5dab0a106df6167c0397fb7ff154192767de748a82613c7f31ee51c10986 |
from abc import ABCMeta, abstractmethod
"""Module description: This module contain the abstract base-classes for the
repositories in shyft, defining the contracts that a repository should
implement.
Brief intro to Repository pattern here:
http://martinfowler.com/eaaCatalog/repository.html
http://www.codeproject.com/Articles/526874/Repositorypluspattern-cplusdoneplusright
https://msdn.microsoft.com/en-us/library/ff649690.aspx
http://www.remondo.net/repository-pattern-example-csharp/
According to architecture diagram/current code we do have
repositories for
* region-model - for reading/providing the region-model, consisting of
cell/catchment information, (typicall GIS system) for a given
region/model spec.
* state - for reading region model-state, cell-level (snapshot of
internal state variables of the models).
* geo-located time-series
- for input observations,forecasts, run-off time-series, that is
useful/related to the region model. E.g. precipitation,
temperature, radiation, wind-speed, relative humidity and even
measured run-off, and other time-series that can be utilized
by the region-model. Notice that this repository can serve
most type of region-models.
* configuration - helps *orchestration* to assemble data (region, model,
sources etc) and repository impl.
We try to design the interfaces, input types, return types, so that the number
of lines needed in the orchestration part of the code is kept to a minimum.
This implies that the input arguments to the repositories are types that goes
easily with the shyft.api. The returned types should also be shyft.api
compatible types, - thus the orchestrator can just pass on values returned into
the shyft.api.
"""
class RegionModelRepository(object):
    """Abstract provider of shyft.api RegionModel objects.

    Hides all storage details (mock model, netcdf file, GIS system,
    ...) from the orchestrator, which only asks for a fully assembled
    region model by id.
    """

    __metaclass__ = ABCMeta  # Python 2 style ABC declaration

    @abstractmethod
    def get_region_model(self, region_id, catchments=None):
        """Assemble and return a fully specified shyft.api region model.

        A concrete implementation constructs the cells, the regional
        parameter set and any per-catchment parameter overrides, then
        builds the region model from those, e.g.::

            cells = [region_model.cell_type(...) for cell in region]
            region_parameter = region_model.parameter_type(...)
            catchment_parameters = {c: region_model.parameter_type(...)
                                    for c in catchments}
            return region_model(cells, region_parameter,
                                catchment_parameters)

        Parameters
        ----------
        region_id: string
            Unique identifier of the region; tells the repository which
            model (*including model type*) to assemble.
        catchments: list of unique integers, optional
            Catchment indices used to extract only a subset of the
            region's catchments.

        Returns
        -------
        region_model: shyft.api type
        """
        pass
class StateInfo(object):
    """Metadata describing one persisted region-model state.

    Used by StateRepository to describe stored states.  Note that there
    may be any number of states for a given region-model/time.

    Attributes
    ----------
    state_id:
        Unique identifier of this particular stored state.
    region_model_id:
        Uniquely (within context) identifies the model the state
        originated from.
    utc_timestamp:
        Point in time at which the state was sampled.
    tags:
        Optional list of free-text labels describing the state.
    """

    def __init__(self, state_id=None, region_model_id=None,
                 utc_timestamp=None, tags=None):
        # plain value object: simply record the caller's values
        self.state_id = state_id
        self.region_model_id = region_model_id
        self.utc_timestamp = utc_timestamp
        self.tags = tags
class StateRepository(object):
    """Persistence interface for region-model states.

    Lets the orchestrator search for states (via StateInfo metadata),
    fetch ready-to-use shyft.api state objects, stash away new states
    and delete old ones.
    """

    __metaclass__ = ABCMeta  # Python 2 style ABC declaration

    @abstractmethod
    def find_state(self, region_model_id_criteria=None,
                   utc_period_criteria=None, tag_criteria=None):
        """Search for states matching all of the supplied criteria.

        (Note: if match-lambdas are supplied, a db-backed
        implementation cannot push the matching into the db.)

        Parameters
        ----------
        region_model_id_criteria:
            match-lambda, or specific string, list of strings
        utc_period_criteria:
            match-lambda, or period
        tag_criteria:
            match-lambda, or list of strings

        Returns
        -------
        List of StateInfo objects that matches the specified criteria
        """
        pass

    @abstractmethod
    def get_state(self, state_id):
        """Fetch one stored state.

        Parameters
        ----------
        state_id: string
            Unique identifier of the state.

        Returns
        -------
        The state for the given state_id; the returned object/type can
        be passed directly to the region-model.
        """
        pass

    @abstractmethod
    def put_state(self, region_model_id, utc_timestamp,
                  region_model_state, tags=None):
        """Persist a state, assigning and returning a new unique id.

        Parameters
        ----------
        region_model_id: string
            Name of the model the state belongs to.
        utc_timestamp: utctime
            Time for which the state is (considered) valid.
        region_model_state: string
            Something that can be interpreted as state elsewhere.
        tags: list of string, optional
            Labels for later filtering (usefulness uncertain, hence
            optional).

        Returns
        -------
        state_id: immutable id
            Identifier that can be used as argument to get_state.
        """
        pass

    @abstractmethod
    def delete_state(self, state_id):
        """Delete the state associated with state_id.

        Throws
        ------
        StateRepositoryError: if invalid state_id, or not able to
        delete the state (access rights)

        Parameters
        ----------
        state_id: immutable id
            Identifier that uniquely identifies the state.
        """
        pass
class GeoTsRepository(object):
    """Provider of geo-located time-series for region-model input.

    Serves every hydrology-relevant series type used as model input,
    forecast or calibration target: precipitation, temperature, wind
    (speed/direction), radiation, relative humidity, snow quantities
    and observed runoff/discharge.

    A *geo-located* time-series is one whose geographic point/area of
    validity is well defined.  Historical observations are usually
    points (xyz + coordinate-system id); forecasts may be grid-shaped
    but have so far been treated as points (grid-shape centres).
    """

    __metaclass__ = ABCMeta  # Python 2 style ABC declaration

    @abstractmethod
    def get_timeseries(self, input_source_types, utc_period, geo_location_criteria=None):
        """Fetch observed geo-located series of the requested types.

        Parameters
        ----------
        input_source_types: list
            Source types to retrieve (precipitation, temperature, ...).
        utc_period: api.UtcPeriod
            The utc time period that should (as a minimum) be covered.
        geo_location_criteria: object, optional
            Some type (to be decided), extent (bbox + coord.ref).

        Returns
        -------
        geo_loc_ts: dictionary
            Keyed by ts type; values are api vectors of geo located
            timeseries.

        The result must at least cover the requested period (more data
        may be returned) so that f(t) can be evaluated over it.
        """
        pass

    @abstractmethod
    def get_forecast(self, input_source_types, utc_period, t_c, geo_location_criteria=None):
        """Fetch the newest forecast older than t_c.

        Parameters
        ----------
        input_source_types: list
            Source types to retrieve (precipitation, temperature, ...).
        utc_period: api.UtcPeriod
            The utc time period that should (as a minimum) be covered.
        t_c: long
            Forecast specification; return newest forecast older than
            t_c.
        geo_location_criteria: object, optional
            Some type (to be decided), extent (bbox + coord.ref).

        Returns
        -------
        geo_loc_ts: dictionary
            Keyed by ts type; values are api vectors of geo located
            timeseries, covering at least the requested period.
        """
        pass

    @abstractmethod
    def get_forecast_ensemble(self, input_source_types, utc_period,
                              t_c, geo_location_criteria=None):
        """Fetch an ensemble of forecasts, newest older than t_c.

        Parameters
        ----------
        input_source_types: list
            Source types to retrieve (precipitation, temperature, ...).
        utc_period: api.UtcPeriod
            The utc time period that should (as a minimum) be covered.
        t_c: long
            Forecast specification; return newest forecast older than
            t_c.
        geo_location_criteria: object, optional
            Some type (to be decided), extent (bbox + coord.ref).

        Returns
        -------
        ensemble: list of same type as get_timeseries
            Each member covering at least the requested period.
        """
        pass
class InterpolationParameterRepository(object):
    """Provider of interpolation-parameter sets, looked up by id."""

    __metaclass__ = ABCMeta  # Python 2 style ABC declaration

    @abstractmethod
    def get_parameters(self, interpolation_id):
        """Return one set of interpolation parameters.

        Parameters
        ----------
        interpolation_id: identifier (int | string)
            Unique identifier within this repository for one set of
            interpolation parameters.

        Returns
        -------
        parameter: shyft.api type
            Interpolation parameter object.
        """
        pass
class BoundingRegion(object):
    """Geometric description of a region's extent in a chosen CRS."""

    __metaclass__ = ABCMeta  # Python 2 style ABC declaration

    @abstractmethod
    def bounding_box(self, epsg):
        """Return the region's axis-aligned bounding box.

        Parameters
        ----------
        epsg: string
            epsg id of the resulting coordinates.

        Returns
        -------
        x: np.ndarray
            x coordinates of the four corners, numbered clockwise from
            the upper left corner.
        y: np.ndarray
            y coordinates of the four corners, numbered clockwise from
            the upper left corner.
        """
        pass

    @abstractmethod
    def bounding_polygon(self, epsg):
        """Return the smallest polygon enclosing the region.

        Parameters
        ----------
        epsg: string
            epsg id of the resulting coordinates.

        Returns
        -------
        x: np.ndarray
            x coordinates of the smallest bounding polygon.
        y: np.ndarray
            y coordinates of the smallest bounding polygon.
        """
        pass

    @abstractmethod
    def epsg(self):
        """Return the epsg id (string) of this region's coordinate
        system."""
        pass
class InterfaceError(Exception):
    """Generic error type raised by repository interface implementations."""
    pass
class TsStoreItem(object):
    """Maps one ts-store destination id to a model-extraction callable.

    Represents the minimal mapping between the destination_id in the
    ts-store (like SmG, ts-name) and the function that, given a model,
    extracts a time-series possibly transformed/clipped to the wanted
    resolution and range.  See TimeseriesStore for usage.
    """

    def __init__(self, destination_id, extract_method):
        """
        Parameters
        ----------
        destination_id: string or id
            Meaningful for the time-series store; identifies a unique
            time-series.
        extract_method: callable
            Takes a shyft.api model as input and returns a shyft.api
            time-series.
        """
        self.destination_id = destination_id
        self.extract_method = extract_method
class TimeseriesStore(object):
    """Writes extracted model time-series to a backing ts-repository.

    Represents a repository capable of storing time-series (with almost
    no metadata, or metadata kept elsewhere), e.g. Powel SmG or a
    netcdf-file based store.  Typically used as the final orchestration
    step that persists simulation results.
    """

    def __init__(self, tss, ts_item_list):
        """
        Parameters
        ----------
        tss: TsRepository
            Must provide tss.store({ts_id: shyft.api.TimeSeries}, ...).
        ts_item_list: TsStoreItem
            List of mappings between ts_id and model extract function;
            see the TsStoreItem class for details.
        """
        self.tss = tss
        self.ts_item_list = ts_item_list

    def store_ts(self, region_model, is_forecast=False):
        """Extract every configured series from the model and store it.

        Each item's extract_method is applied to region_model; the
        resulting {destination_id: time-series} mapping is handed to
        self.tss.store.

        Parameters
        ----------
        region_model: shyft.api model type, like PTGSKModel
            Passed to the extract-methods so the correct ts is fetched.

        Returns
        -------
        True if storing all ts went well, otherwise False
        """
        extracted = {}
        for item in self.ts_item_list:
            extracted[item.destination_id] = item.extract_method(region_model)
        return self.tss.store(extracted, is_forecast)
| felixmatt/shyft | shyft/repository/interfaces.py | Python | lgpl-3.0 | 15,871 | [
"NetCDF"
] | edc598814ef2e7413d2f701bc1a2043860a64454845a1130a86d60ad530ba260 |
import sys
import tempfile
import shutil
import time
import os
import pandas
sys.path.insert(1, "../../")
import h2o
import h2o.utils.shared_utils as h2o_utils
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
genmodel_name = "h2o-genmodel.jar"
def download_mojo(model, mojo_zip_path, genmodel_path=None):
    """Download `model` as a MOJO zip plus the h2o-genmodel jar.

    Prints progress/timing and asserts that the downloaded artifacts
    exist on disk.

    :param model: trained h2o model to export
    :param mojo_zip_path: destination path of the MOJO zip
    :param genmodel_path: destination of the genmodel jar; defaults to
        <dir of mojo_zip_path>/h2o-genmodel.jar
    """
    mojo_zip_path = os.path.abspath(mojo_zip_path)
    parent_dir = os.path.dirname(mojo_zip_path)
    print("\nDownloading MOJO @... " + parent_dir)
    t_start = time.time()
    if genmodel_path is None:
        genmodel_path = os.path.join(parent_dir, genmodel_name)
    mojo_file = model.download_mojo(path=mojo_zip_path, get_genmodel_jar=True, genmodel_name=genmodel_path)
    print(" => %s (%d bytes)" % (mojo_file, os.stat(mojo_file).st_size))
    assert os.path.exists(mojo_file)
    print(" Time taken = %.3fs" % (time.time() - t_start))
    # both the zip and the jar must have been written
    for artifact in (mojo_zip_path, genmodel_path):
        assert os.path.exists(artifact)
        print(" => %s (%d bytes)" % (artifact, os.stat(artifact).st_size))
def mojo_predict_api_test(sandbox_dir):
    """Exercise h2o_utils.mojo_predict_csv with default and custom
    genmodel jar paths, checking the expected files appear/disappear."""
    data = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
    input_csv = "%s/in.csv" % sandbox_dir
    output_csv = "%s/prediction.csv" % sandbox_dir
    h2o.export_file(data[1, 2:], input_csv)
    data[1] = data[1].asfactor()
    model = H2OGradientBoostingEstimator(distribution="bernoulli")
    model.train(x=[2, 3, 4, 5, 6, 7, 8], y=1, training_frame=data)
    # download mojo
    model_zip_path = os.path.join(sandbox_dir, 'model.zip')
    genmodel_path = os.path.join(sandbox_dir, 'h2o-genmodel.jar')
    download_mojo(model, model_zip_path)
    assert os.path.isfile(model_zip_path)
    assert os.path.isfile(genmodel_path)
    # test that we can predict using default paths
    h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=model_zip_path, verbose=True)
    h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=model_zip_path, genmodel_jar_path=genmodel_path,
                               verbose=True)
    # default output path is <dir of mojo>/prediction.csv
    assert os.path.isfile(output_csv)
    os.remove(model_zip_path)
    os.remove(genmodel_path)
    os.remove(output_csv)
    # test that we can predict using custom genmodel path
    other_sandbox_dir = tempfile.mkdtemp()
    try:
        genmodel_path = os.path.join(other_sandbox_dir, 'h2o-genmodel-custom.jar')
        download_mojo(model, model_zip_path, genmodel_path)
        assert os.path.isfile(model_zip_path)
        assert os.path.isfile(genmodel_path)
        # without an explicit jar path, prediction must fail: the
        # default h2o-genmodel.jar was removed above
        try:
            h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=model_zip_path, verbose=True)
            assert False, "There should be no h2o-genmodel.jar at %s" % sandbox_dir
        except RuntimeError:
            pass
        assert not os.path.isfile(output_csv)
        # explicit custom jar path -> prediction succeeds
        h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=model_zip_path,
                                   genmodel_jar_path=genmodel_path, verbose=True)
        assert os.path.isfile(output_csv)
        os.remove(output_csv)
        output_csv = "%s/out.prediction" % other_sandbox_dir
        # test that we can predict using default paths
        h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=model_zip_path,
                                   genmodel_jar_path=genmodel_path, verbose=True, output_csv_path=output_csv)
        assert os.path.isfile(output_csv)
        os.remove(model_zip_path)
        os.remove(genmodel_path)
        os.remove(output_csv)
    finally:
        # always clean up the temporary sandbox
        shutil.rmtree(other_sandbox_dir)
def mojo_predict_csv_test(target_dir):
    """Check that MOJO CSV predictions match in-memory model predictions
    for regression, binomial and multinomial GBM models."""
    mojo_file_name = "prostate_gbm_model.zip"
    mojo_zip_path = os.path.join(target_dir, mojo_file_name)
    prostate = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
    r = prostate[0].runif()
    train = prostate[r < 0.70]
    test = prostate[r >= 0.70]
    # Getting first row from test data frame
    pdf = test[1, 2:]
    input_csv = "%s/in.csv" % target_dir
    output_csv = "%s/output.csv" % target_dir
    h2o.export_file(pdf, input_csv)
    # =================================================================
    # Regression
    # =================================================================
    regression_gbm1 = H2OGradientBoostingEstimator(distribution="gaussian")
    regression_gbm1.train(x=[2, 3, 4, 5, 6, 7, 8], y=1, training_frame=train)
    pred_reg = regression_gbm1.predict(pdf)
    p1 = pred_reg[0, 0]
    print("Regression prediction: " + str(p1))
    download_mojo(regression_gbm1, mojo_zip_path)
    print("\nPerforming Regression Prediction using MOJO @... " + target_dir)
    prediction_result = h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=mojo_zip_path,
                                                   output_csv_path=output_csv)
    print("Prediction result: " + str(prediction_result))
    assert p1 == float(prediction_result[0]['predict']), "expected predictions to be the same for binary and MOJO model for regression"
    # =================================================================
    # Binomial
    # =================================================================
    train[1] = train[1].asfactor()
    bernoulli_gbm1 = H2OGradientBoostingEstimator(distribution="bernoulli")
    bernoulli_gbm1.train(x=[2, 3, 4, 5, 6, 7, 8], y=1, training_frame=train)
    pred_bin = bernoulli_gbm1.predict(pdf)
    # columns 1 and 2 of the prediction frame are the class probabilities
    binary_prediction_0 = pred_bin[0, 1]
    binary_prediction_1 = pred_bin[0, 2]
    print("Binomial prediction: p0: " + str(binary_prediction_0))
    print("Binomial prediction: p1: " + str(binary_prediction_1))
    download_mojo(bernoulli_gbm1, mojo_zip_path)
    print("\nPerforming Binomial Prediction using MOJO @... " + target_dir)
    prediction_result = h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=mojo_zip_path,
                                                   output_csv_path=output_csv)
    mojo_prediction_0 = float(prediction_result[0]['0'])
    mojo_prediction_1 = float(prediction_result[0]['1'])
    print("Binomial prediction: p0: " + str(mojo_prediction_0))
    print("Binomial prediction: p1: " + str(mojo_prediction_1))
    assert binary_prediction_0 == mojo_prediction_0, "expected predictions to be the same for binary and MOJO model for Binomial - p0"
    assert binary_prediction_1 == mojo_prediction_1, "expected predictions to be the same for binary and MOJO model for Binomial - p1"
    # =================================================================
    # Multinomial
    # =================================================================
    iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"))
    r = iris[0].runif()
    train = iris[r < 0.90]
    # NOTE(review): r >= 0.10 overlaps with the train split above;
    # probably intended to be r >= 0.90 — harmless here since only one
    # row of `test` is used, but worth confirming.
    test = iris[r >= 0.10]
    # Getting first row from test data frame
    pdf = test[1, 0:4]
    input_csv = "%s/in-multi.csv" % target_dir
    output_csv = "%s/output.csv" % target_dir
    h2o.export_file(pdf, input_csv)
    multi_gbm = H2OGradientBoostingEstimator()
    multi_gbm.train(x=['C1', 'C2', 'C3', 'C4'], y='C5', training_frame=train)
    pred_multi = multi_gbm.predict(pdf)
    multinomial_prediction_1 = pred_multi[0, 1]
    multinomial_prediction_2 = pred_multi[0, 2]
    multinomial_prediction_3 = pred_multi[0, 3]
    print("Multinomial prediction (Binary): p0: " + str(multinomial_prediction_1))
    print("Multinomial prediction (Binary): p1: " + str(multinomial_prediction_2))
    print("Multinomial prediction (Binary): p2: " + str(multinomial_prediction_3))
    download_mojo(multi_gbm, mojo_zip_path)
    print("\nPerforming Binomial Prediction using MOJO @... " + target_dir)
    prediction_result = h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=mojo_zip_path,
                                                   output_csv_path=output_csv)
    mojo_prediction_1 = float(prediction_result[0]['Iris-setosa'])
    mojo_prediction_2 = float(prediction_result[0]['Iris-versicolor'])
    mojo_prediction_3 = float(prediction_result[0]['Iris-virginica'])
    print("Multinomial prediction (MOJO): p0: " + str(mojo_prediction_1))
    print("Multinomial prediction (MOJO): p1: " + str(mojo_prediction_2))
    print("Multinomial prediction (MOJO): p2: " + str(mojo_prediction_3))
    assert multinomial_prediction_1 == mojo_prediction_1, "expected predictions to be the same for binary and MOJO model for Multinomial - p0"
    assert multinomial_prediction_2 == mojo_prediction_2, "expected predictions to be the same for binary and MOJO model for Multinomial - p1"
    assert multinomial_prediction_3 == mojo_prediction_3, "expected predictions to be the same for binary and MOJO model for Multinomial - p2"
def mojo_predict_pandas_test(sandbox_dir):
    """Check mojo_predict_pandas agrees with the in-memory binomial
    model's predicted class probabilities."""
    data = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
    input_csv = "%s/in.csv" % sandbox_dir
    pdf = data[1, 2:]
    h2o.export_file(pdf, input_csv)
    data[1] = data[1].asfactor()
    model = H2OGradientBoostingEstimator(distribution="bernoulli")
    model.train(x=[2, 3, 4, 5, 6, 7, 8], y=1, training_frame=data)
    h2o_prediction = model.predict(pdf)
    # download mojo
    model_zip_path = os.path.join(sandbox_dir, 'model.zip')
    genmodel_path = os.path.join(sandbox_dir, 'h2o-genmodel.jar')
    download_mojo(model, model_zip_path)
    assert os.path.isfile(model_zip_path)
    assert os.path.isfile(genmodel_path)
    # round-trip the single row through a pandas DataFrame
    pandas_frame = pandas.read_csv(input_csv)
    mojo_prediction = h2o_utils.mojo_predict_pandas(dataframe=pandas_frame, mojo_zip_path=model_zip_path, genmodel_jar_path=genmodel_path)
    print("Binomial Prediction (Binary) - p0: %f" % h2o_prediction[0,1])
    print("Binomial Prediction (Binary) - p1: %f" % h2o_prediction[0,2])
    print("Binomial Prediction (MOJO) - p0: %f" % mojo_prediction['0'].iloc[0])
    print("Binomial Prediction (MOJO) - p1: %f" % mojo_prediction['1'].iloc[0])
    assert h2o_prediction[0,1] == mojo_prediction['0'].iloc[0], "expected predictions to be the same for binary and MOJO model - p0"
    # NOTE(review): message below says "- p0" but this asserts p1
    assert h2o_prediction[0,2] == mojo_prediction['1'].iloc[0], "expected predictions to be the same for binary and MOJO model - p0"
# Create one temporary sandbox per sub-test; always cleaned up below.
csv_test_dir = tempfile.mkdtemp()
api_test_dir = tempfile.mkdtemp()
pandas_test_dir = tempfile.mkdtemp()
try:
    if __name__ == "__main__":
        # run via the h2o standalone-test harness when executed directly
        pyunit_utils.standalone_test(lambda: mojo_predict_api_test(api_test_dir))
        pyunit_utils.standalone_test(lambda: mojo_predict_csv_test(csv_test_dir))
        pyunit_utils.standalone_test(lambda: mojo_predict_pandas_test(pandas_test_dir))
    else:
        # imported (e.g. by the test runner): call the tests directly
        mojo_predict_api_test(api_test_dir)
        mojo_predict_csv_test(csv_test_dir)
        mojo_predict_pandas_test(pandas_test_dir)
finally:
    shutil.rmtree(csv_test_dir)
    shutil.rmtree(api_test_dir)
    shutil.rmtree(pandas_test_dir)
| spennihana/h2o-3 | h2o-py/tests/testdir_misc/pyunit_mojo_predict.py | Python | apache-2.0 | 10,966 | [
"Gaussian"
] | 7cfb2c5541ba1fb025821919f447ae540c21c31f924af1f0cd600961765e2c51 |
from __future__ import absolute_import
import numpy as np
import matplotlib.pyplot as plt
from .. import utils
from ..config import DEFAULT_POPULATION_COLORS
class NeuroVis(object):
    '''This class is used to visualize firing activity of single neurons.
    This class implements several conveniences for visualizing firing
    activity of single neurons.
    Args:
        spiketimes (Numpy array): Array of spike times.
        name (str): The name of the visualization.
    '''
    def __init__(self, spiketimes, name='neuron'):
        self.name = name
        # Sort once up front: get_raster() relies on sorted times for
        # np.searchsorted-based windowing below.
        self.spiketimes = np.squeeze(np.sort(spiketimes))
        # Overall mean firing rate across the full recording span.
        # NOTE(review): assumes spiketimes are in seconds and contain at
        # least two distinct spikes (else n_seconds is 0) -- confirm callers.
        n_seconds = (self.spiketimes[-1] - self.spiketimes[0])
        n_spikes = np.size(spiketimes)
        self.firingrate = (n_spikes / n_seconds)
    def get_raster(self, event=None, conditions=None, df=None,
                   window=[-100, 500], binsize=10, plot=True,
                   sortby=None, sortorder='descend'):
        '''Computes the raster and plots it.
        Args:
            event (str): Column/key name of DataFrame/dictionary "data" which
                contains event times in milliseconds (e.g.
                stimulus/trial/fixation onset, etc.)
            conditions (str): Column/key name of DataFrame/dictionary
                :data:`data` which contains the conditions by which the trials
                must be grouped.
            df (DataFrame or dictionary): The dataframe containing the data,
                or a dictionary with the equivalent structure.
            window (list of 2 elements): Time interval to consider, in
                milliseconds.
            binsize (int): Bin size in milliseconds
            plot (bool): If True then plot
            sortby (str or list): If :data:`rate`, sort by firing rate. If
                :data:`latency`, sort by peak latency. If a list, integers to
                be used as sorting indices.
            sortorder (str): Direction to sort, either :data:`descend` or
                :data:`ascend`.
        Returns:
            dict: :data:`rasters` with keys :data:`event`, :data:`conditions`,
            :data:`binsize`, :data:`window`, and :data:`data`.
            :data:`rasters['data']` is a dictionary where each value is a
            raster for each unique entry of :data:`df['conditions']`.
        '''
        # NOTE(review): `window` is a mutable default argument; it is rebound
        # (not mutated) below, but a None-default would be safer.
        if not type(df) is dict:
            df = df.reset_index()
        # Snap the window edges outward to whole multiples of binsize.
        window = [np.floor(window[0] / binsize) * binsize,
                  np.ceil(window[1] / binsize) * binsize]
        # Get a set of binary indicators for trials of interest
        if conditions:
            trials = dict()
            for cond_id in np.sort(df[conditions].unique()):
                # Row indices of the trials belonging to this condition value.
                trials[cond_id] = \
                    np.where((df[conditions] == cond_id).apply(
                        lambda x: (0, 1)[x]).values)[0]
        else:
            # No grouping requested: a single pseudo-condition 0 covering all trials.
            trials = dict()
            trials[0] = np.where(np.ones(np.size(df[event])))[0]
        # Initialize rasters
        rasters = {
            'event': event,
            'conditions': conditions,
            'window': window,
            'binsize': binsize,
            'data': {},
        }
        # Loop over each raster
        for cond_id in trials:
            # Select events relevant to this raster
            selected_events = df[event][trials[cond_id]]
            raster = []
            # Bin edges relative to the event, converted from ms to seconds.
            bin_template = 1e-3 * \
                np.arange(window[0], window[1] + binsize, binsize)
            for event_time in selected_events:
                bins = event_time + bin_template
                # consider only spikes within window
                searchsorted_idx = np.squeeze(np.searchsorted(self.spiketimes,
                                                              [event_time + 1e-3 *
                                                               window[0],
                                                               event_time + 1e-3 *
                                                               window[1]]))
                # bin the spikes into time bins
                spike_counts = np.histogram(
                    self.spiketimes[searchsorted_idx[0]:searchsorted_idx[1]],
                    bins)[0]
                raster.append(spike_counts)
            # Shape: (n_trials_in_condition, n_bins).
            rasters['data'][cond_id] = np.array(raster)
        # Show the raster
        if plot is True:
            self.plot_raster(rasters, cond_id=None, sortby=sortby,
                             sortorder=sortorder)
        # Return all the rasters
        return rasters
    def plot_raster(self, rasters, cond_id=None, cond_name=None, sortby=None,
                    sortorder='descend', cmap='Greys', has_title=True):
        '''Plot a single raster.
        Args:
            rasters (dict): Output of get_raster method
            cond_id (str): Which raster to plot indicated by the key in
                :data:`rasters['data']`. If None then all are plotted.
            cond_name (str): Name to appear in the title.
            sortby (str or list): If :data:`rate`, sort by firing rate. If
                :data:`latency`, sort by peak latency. If a list, integers to
                be used as sorting indices.
            sortorder (str): Direction to sort in, either :data:`descend` or
                :data:`ascend`.
            cmap (str): Colormap for raster.
            has_title (bool): If True then adds title.
        '''
        window = rasters['window']
        binsize = rasters['binsize']
        # Tick labels at window start, event onset (0) and window end; the
        # -0.5 offsets align labels to imshow pixel edges rather than centers.
        xtics = [window[0], 0, window[1]]
        xtics = [str(i) for i in xtics]
        xtics_loc = [-0.5, (-window[0]) / binsize - 0.5,
                     (window[1] - window[0]) / binsize - 0.5]
        if cond_id is None:
            # Recurse once per condition, showing each raster in its own figure.
            for cond in list(rasters['data']):
                self.plot_raster(rasters, cond_id=cond, cond_name=cond_name,
                                 sortby=sortby, sortorder=sortorder, cmap=cmap,
                                 has_title=has_title)
                plt.show()
        else:
            raster = rasters['data'][cond_id]
            if len(raster) > 0:
                # Reorder trials (rows) per the requested sorting strategy.
                sort_idx = utils.get_sort_indices(
                    data=raster,
                    by=sortby,
                    order=sortorder,
                )
                raster_sorted = raster[sort_idx]
                plt.imshow(raster_sorted, aspect='auto',
                           interpolation='none', cmap=plt.get_cmap(cmap))
                # Dashed red line marks the event onset (t = 0).
                plt.axvline(
                    (-window[0]) / binsize - 0.5, color='r', linestyle='--')
                plt.ylabel('trials')
                plt.xlabel('time [ms]')
                plt.xticks(xtics_loc, xtics)
                if has_title:
                    if cond_id:
                        if cond_name:
                            plt.title('neuron %s. %s' %
                                      (self.name, cond_name))
                        else:
                            plt.title('neuron %s. %s: %s' %
                                      (self.name, rasters['conditions'],
                                       cond_id))
                    else:
                        plt.title('neuron %s' % self.name)
                ax = plt.gca()
                ax.spines['top'].set_visible(False)
                ax.spines['right'].set_visible(False)
                ax.spines['bottom'].set_visible(False)
                ax.spines['left'].set_visible(False)
                # NOTE(review): string 'off' for tick_params is deprecated in
                # newer matplotlib (use top=False/right=False) -- confirm the
                # supported matplotlib range before changing.
                plt.tick_params(axis='x', which='both', top='off')
                plt.tick_params(axis='y', which='both', right='off')
            else:
                print('No trials for this condition!')
    def get_psth(self, event=None, df=None, conditions=None, cond_id=None,
                 window=[-100, 500], binsize=10, plot=True, event_name=None,
                 conditions_names=None, ylim=None,
                 colors=DEFAULT_POPULATION_COLORS):
        '''Compute the PSTH and plot it.
        Args:
            event (str): Column/key name of DataFrame/dictionary :data:`data`
                which contains event times in milliseconds (e.g.
                stimulus/trial/fixation onset, etc.)
            conditions (str): Column/key name of DataFrame/dictionary
                :data:`data` which contains the conditions by which the trials
                must be grouped.
            cond_id (list): Which psth to plot indicated by the key in
                :data:`all_psth['data']``. If None then all are plotted.
            df (DataFrame or dictionary): The dataframe containing the data.
            window (list of 2 elements): Time interval to consider, in
                milliseconds.
            binsize (int): Bin size in milliseconds.
            plot (bool): If True then plot.
            event_name (string): Legend name for event. Default is the actual
                event name
            conditions_names (TODO): Legend names for conditions. Default are
                the unique values in :data:`df['conditions']`.
            ylim (list): The lower and upper limits for Y.
            colors (list): The colors for the plot.
        Returns:
            dict: :data:`rasters` with keys :data:`event`, :data:`conditions`,
            :data:`binsize`, :data:`window`, and :data:`data`.
            :data:`rasters['data']` is a dictionary where each value is a
            raster for each unique entry of :data:`df['conditions']`.
        '''
        window = [np.floor(window[0] / binsize) * binsize,
                  np.ceil(window[1] / binsize) * binsize]
        # Get all the rasters first
        rasters = self.get_raster(event=event, df=df,
                                  conditions=conditions,
                                  window=window, binsize=binsize, plot=False)
        # Initialize PSTH
        psth = dict()
        psth['window'] = window
        psth['binsize'] = binsize
        psth['event'] = event
        psth['conditions'] = conditions
        psth['data'] = dict()
        # Compute the PSTH
        for cond_id in np.sort(list(rasters['data'])):
            psth['data'][cond_id] = dict()
            raster = rasters['data'][cond_id]
            # Convert per-bin spike counts to spikes/second (binsize is in ms).
            mean_psth = np.mean(raster, axis=0) / (1e-3 * binsize)
            std_psth = np.sqrt(np.var(raster, axis=0)) / (1e-3 * binsize)
            # Standard error of the mean across trials.
            sem_psth = std_psth / np.sqrt(float(np.shape(raster)[0]))
            psth['data'][cond_id]['mean'] = mean_psth
            psth['data'][cond_id]['sem'] = sem_psth
        if plot is True:
            if not event_name:
                event_name = event
            # NOTE(review): any caller-supplied conditions_names is overwritten
            # here with the condition keys -- confirm this is intentional.
            conditions_names = list(psth['data'])
            self.plot_psth(psth, ylim=ylim, event_name=event_name,
                           conditions_names=conditions_names,
                           colors=colors)
        return psth
    def plot_psth(self, psth, event_name='event_onset', conditions_names=None,
                  cond_id=None, ylim=None, colors=DEFAULT_POPULATION_COLORS):
        '''Plots PSTH.
        Args:
            psth (dict): Output of :meth:`get_psth`.
            event_name (string): Legend name for event. Default is the actual
                event name.
            conditions_names (list of str): Legend names for conditions.
                Default are the keys in :data:`psth['data']`.
            cond_id (list): Which psth to plot indicated by the key in
                :data:`all_psth['data']``. If None then all are plotted.
            ylim (list): The lower and upper limits for Y.
            colors (list): The colors for the plot.
        '''
        window = psth['window']
        binsize = psth['binsize']
        conditions = psth['conditions']
        if cond_id is None:
            keys = np.sort(list(psth['data'].keys()))
        else:
            keys = cond_id
        if conditions_names is None:
            conditions_names = keys
        # Pad the auto-computed y-range by 10% on each side.
        scale = 0.1
        y_min = (1.0 - scale) * np.nanmin([np.min(
            psth['data'][psth_idx]['mean'])
            for psth_idx in psth['data']])
        y_max = (1.0 + scale) * np.nanmax([np.max(
            psth['data'][psth_idx]['mean'])
            for psth_idx in psth['data']])
        legend = [event_name]
        # Bin centers in ms (shift by half a bin from the left edges).
        time_bins = np.arange(window[0], window[1], binsize) + binsize / 2.0
        # Vertical dashed line at the event onset.
        if ylim:
            plt.plot([0, 0], ylim, color='k', ls='--')
        else:
            plt.plot([0, 0], [y_min, y_max], color='k', ls='--')
        # First pass: mean traces (an invisible dummy point keeps legend/color
        # alignment for all-NaN conditions).
        for i, cond_id in enumerate(keys):
            if np.all(np.isnan(psth['data'][cond_id]['mean'])):
                plt.plot(0, 0, alpha=1.0, color=colors[i % len(colors)])
            else:
                plt.plot(time_bins, psth['data'][cond_id]['mean'],
                         color=colors[i % len(colors)], lw=1.5)
        # Second pass: legend entries and +/- SEM shading.
        for i, cond_id in enumerate(keys):
            if conditions is not None:
                if conditions_names is not None:
                    legend.append('%s' % conditions_names[i])
                else:
                    legend.append('%s' % str(cond_id))
            else:
                legend.append('all')
            if not np.all(np.isnan(psth['data'][cond_id]['mean'])):
                plt.fill_between(time_bins, psth['data'][cond_id]['mean'] -
                                 psth['data'][cond_id]['sem'],
                                 psth['data'][cond_id]['mean'] +
                                 psth['data'][cond_id]['sem'],
                                 color=colors[i % len(colors)],
                                 alpha=0.2)
        if conditions:
            plt.title('neuron %s: %s' % (self.name, conditions))
        else:
            plt.title('neuron %s' % self.name)
        plt.xlabel('time [ms]')
        plt.ylabel('spikes per second [spks/s]')
        if ylim:
            plt.ylim(ylim)
        else:
            plt.ylim([y_min, y_max])
        ax = plt.gca()
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        # NOTE(review): 'off' strings deprecated in newer matplotlib.
        plt.tick_params(axis='y', right='off')
        plt.tick_params(axis='x', top='off')
        plt.legend(legend, frameon=False)
    def get_spikecounts(self, event=None, df=None,
                        window=np.array([50.0, 100.0])):
        '''Counts spikes in the dataframe.
        Args:
            event (str): Column/key name of DataFrame/dictionary :data:`data`
                which contains event times in milliseconds (e.g.
                stimulus/trial/fixation onset, etc.)
            window (list of 2 elements): Time interval to consider, in
                milliseconds.
        Return:
            array: An :data:`n x 1` array of spike counts.
        '''
        events = df[event].values
        spiketimes = self.spiketimes
        # For each event, count spikes inside [event + window[0], event + window[1]]
        # (window is ms, spiketimes presumably seconds -- hence the 1e-3 factor).
        spikecounts = np.asarray([
            np.sum(np.all((
                spiketimes >= e + 1e-3 * window[0],
                spiketimes <= e + 1e-3 * window[1],
            ), axis=0))
            for e in events
        ])
        return spikecounts
| codekansas/spykes | spykes/plot/neurovis.py | Python | mit | 14,947 | [
"NEURON"
] | 3a879f5c2e0285639585bd54b0c36b4c3ee49a94ddcfaad32930202b8925848f |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the save_trajs function of the coordinates API by comparing
the direct, sequential retrieval of frames via mdtraj.load_frame() vs
the retrival via save_traj
@author: gph82, clonker
"""
import unittest
import os
import shutil
import tempfile
import pkg_resources
import numpy as np
import pyemma
import pyemma.coordinates as coor
import mdtraj as md
from pyemma.coordinates.data.util.reader_utils import single_traj_from_n_files, save_traj_w_md_load_frame, \
compare_coords_md_trajectory_objects
from pyemma.coordinates.api import save_traj
from pyemma.coordinates.tests.util import create_traj, get_top
class TestSaveTraj(unittest.TestCase):
    """Compare frames written by pyemma's save_traj against frames loaded
    directly, one by one, with mdtraj.load_frame (the reference path)."""
    def setUp(self):
        self.eps = 1e-10
        # Bundled BPTI test data: one topology and three xtc pieces.
        path = pkg_resources.resource_filename(__name__, 'data') + os.path.sep
        self.pdbfile = os.path.join(path, 'bpti_ca.pdb')
        self.trajfiles = [os.path.join(path, 'bpti_001-033.xtc'),
                          os.path.join(path, 'bpti_034-066.xtc'),
                          os.path.join(path, 'bpti_067-100.xtc')
                          ]
        # Create random sets of files and frames to be retrieved from trajfiles
        # Each row is a (traj_index, frame_index) pair; seeded for reproducibility.
        n_members_set1 = 10
        n_members_set2 = 20
        from pyemma.util.contexts import numpy_random_seed
        with numpy_random_seed(34):
            set_1 = np.vstack((np.random.permutation([0, 2] * n_members_set1)[:n_members_set1],
                               np.random.randint(32, size=n_members_set1))).T
            set_2 = np.vstack((np.random.permutation([0, 2] * n_members_set2)[:n_members_set2],
                               np.random.randint(32, size=n_members_set2))).T
        self.sets = [set_1, set_2]
        self.subdir = tempfile.mkdtemp(suffix='save_trajs_test/')
        self.outfile = os.path.join(self.subdir, 'save_traj_test.xtc')
        # Instantiate the reader
        self.reader = coor.source(self.trajfiles, top=self.pdbfile)
        self.reader.chunksize = 30
        self.n_pass_files = [self.subdir + 'n_pass.set_%06u.xtc' % ii for ii in range(len(self.sets))]
        self.one_pass_files = [self.subdir + '1_pass.set_%06u.xtc' % ii for ii in range(len(self.sets))]
        # Reference trajectory built frame-by-frame with mdtraj.load_frame.
        self.traj_ref = save_traj_w_md_load_frame(self.reader, self.sets)
        self.strides = [2, 3, 5]
    def tearDown(self):
        # Remove all files written during the test run.
        shutil.rmtree(self.subdir, ignore_errors=True)
    def test_reader_input_save_IO(self):
        # Test that we're saving to disk alright
        save_traj(self.reader, self.sets, self.outfile)
        exist = os.stat(self.outfile)
        self.assertTrue(exist, "Could not write to disk")
    def test_reader_input_returns_trajectory(self):
        # outfile=None makes save_traj return an in-memory md.Trajectory.
        self.assertTrue(isinstance(save_traj(self.reader, self.sets, None),
                                   md.Trajectory))
    @unittest.skip("broken mdtraj-1.8")
    def test_reader_input_returns_trajectory_w_image_molecules(self):
        self.assertTrue(isinstance(save_traj(self.reader, self.sets, None, image_molecules=True),
                                   md.Trajectory))
    def test_list_input_save_IO(self):
        # Test that we're saving to disk alright
        save_traj(self.trajfiles, self.sets, self.outfile, top=self.pdbfile)
        exist = os.stat(self.outfile)
        self.assertTrue(exist, "Could not write to disk")
    def test_list_input_returns_trajectory(self):
        self.assertTrue(isinstance(save_traj(self.trajfiles, self.sets, None, top=self.pdbfile),
                                   md.Trajectory))
    def test_reader_input_save_correct_frames_disk(self):
        save_traj(self.reader, self.sets, self.outfile)
        # Reload the object to memory
        traj = md.load(self.outfile, top=self.pdbfile)
        # Check for diffs
        (found_diff, errmsg) = compare_coords_md_trajectory_objects(traj, self.traj_ref, atom=0)
        self.assertFalse(found_diff, errmsg)
    def test_reader_input_save_correct_frames_mem(self):
        # Keep object in memory
        traj = save_traj(self.reader, self.sets, None)
        # Check for diffs
        (found_diff, errmsg) = compare_coords_md_trajectory_objects(traj, self.traj_ref, atom=0)
        self.assertFalse(found_diff, errmsg)
    def test_list_input_save_correct_frames_disk(self):
        save_traj(self.trajfiles, self.sets, self.outfile, top=self.pdbfile)
        # Reload the object to memory
        traj = md.load(self.outfile, top=self.pdbfile)
        # Check for diffs
        (found_diff, errmsg) = compare_coords_md_trajectory_objects(traj, self.traj_ref, atom=0)
        self.assertFalse(found_diff, errmsg)
    def test_list_input_save_correct_frames_mem(self):
        # Keep object in memory
        traj = save_traj(self.trajfiles, self.sets, None, top=self.pdbfile)
        # Check for diffs
        (found_diff, errmsg) = compare_coords_md_trajectory_objects(traj, self.traj_ref, atom=0)
        self.assertFalse(found_diff, errmsg)
    def test_reader_input_save_correct_frames_with_stride_in_memory(self):
        # With the inmemory option = True
        for stride in self.strides[:]:
            # Since none of the trajfiles have more than 30 frames, the frames have to be re-drawn for every stride
            sets = np.copy(self.sets)
            sets[0][:, 1] = np.random.randint(0, high=30 / stride, size=np.shape(sets[0])[0])
            sets[1][:, 1] = np.random.randint(0, high=30 / stride, size=np.shape(sets[1])[0])
            traj = save_traj(self.reader, sets, None,
                             stride=stride, verbose=False)
            # Also the reference has to be re-drawn using the stride. For this, we re-scale the strided
            # frame-indexes to the unstrided value
            sets[0][:, 1] *= stride
            sets[1][:, 1] *= stride
            traj_ref = save_traj_w_md_load_frame(self.reader, sets)
            # Check for diffs
            (found_diff, errmsg) = compare_coords_md_trajectory_objects(traj, traj_ref, atom=0)
            self.assertFalse(found_diff, errmsg)
    def test_with_fragmented_reader(self):
        from pyemma.util.files import TemporaryDirectory
        trajlen = 35
        # trajectory 0 (first trajectory, is trajfiles[2])
        # -> skipped
        # trajectory 1 (second trajectory, is {trajfiles[0], trajfiles[1]})
        #   fragment 1:
        #     -> frames 0,1,2,10
        #   fragment 2:
        #     -> frames 1 (i.e., 36) and 34 (i.e., 69)
        # trajectory 2 (third trajectory, is trajfiles[2])
        # -> frame 5
        ra_indices = np.array([[1, 0], [1, 1], [1,2], [1, 10], [1, trajlen+1], [1, 2*trajlen-1], [2, 5]], dtype=int)
        with TemporaryDirectory() as td:
            # Generate three synthetic trajectories with known coordinates.
            trajfiles = []
            xyzs = []
            for i in range(3):
                tf, xyz, _ = create_traj(start=i * 10, dir=td, length=trajlen)
                trajfiles.append(tf)
                xyzs.append(xyz)
            topfile = get_top()
            frag_traj = [trajfiles[2], [trajfiles[0], trajfiles[1]], trajfiles[2]]
            expected = xyzs[0][np.array([0, 1, 2, 10]), :], xyzs[1][np.array([1, 34])], np.array([(xyzs[2][5, :])])
            expected = np.vstack(expected)
            reader = coor.source(frag_traj, top=topfile)
            # The result must be independent of the chunk size used to stream.
            for cs in range(1,10):
                traj = save_traj(reader, ra_indices, None, chunksize=cs)
                np.testing.assert_almost_equal(traj.xyz, expected)
    def test_with_fragmented_reader_chunksize_0(self):
        # intentionally group bpti dataset to a fake fragmented traj
        frag_traj = [[self.trajfiles[0], self.trajfiles[1]], self.trajfiles[2], self.trajfiles[2]]
        reader = coor.source(frag_traj, top=self.pdbfile, chunksize=0)
        assert reader.chunksize == 0
        traj = save_traj(reader, self.sets, None)
        traj_ref = save_traj_w_md_load_frame(self.reader, self.sets)
        # Check for diffs
        (found_diff, errmsg) = compare_coords_md_trajectory_objects(traj, traj_ref, atom=0)
        np.testing.assert_equal(traj.xyz, traj_ref.xyz)
        self.assertFalse(found_diff, errmsg)
    def test_invalid_maximum_traj_index(self):
        frag_traj = [[self.trajfiles[0], self.trajfiles[1]], self.trajfiles[2], self.trajfiles[2]]
        # Frame 42 does not exist in trajectory 2 -> must raise.
        # NOTE(review): `set` shadows the builtin; harmless here but worth renaming.
        set = [[0,2], [0,1], [2,42]]
        from pyemma.coordinates.data.fragmented_trajectory_reader import FragmentedTrajectoryReader
        reader = FragmentedTrajectoryReader(frag_traj, topologyfile=self.pdbfile)
        with self.assertRaises(ValueError) as cm:
            save_traj(reader, set, None)
        self.assertIn("larger than", cm.exception.args[0])
    def test_invalid_readers_in_frag_traj(self):
        # In-memory arrays are not FeatureReaders, so save_traj must reject them.
        data = [np.array([[[1,2], [3,4]],[0,1]])]
        from pyemma.coordinates.data.fragmented_trajectory_reader import FragmentedTrajectoryReader
        reader = FragmentedTrajectoryReader(data)
        with self.assertRaises(ValueError) as cm:
            save_traj(reader, self.sets, None)
        self.assertIn("FeatureReader", cm.exception.args[0])
if __name__ == "__main__":
    # Allow running this test module directly via the unittest runner.
    unittest.main()
| markovmodel/PyEMMA | pyemma/coordinates/tests/test_save_traj.py | Python | lgpl-3.0 | 9,821 | [
"MDTraj"
] | 66c823ea5e23bf7f9b2303b59c8c21245b5ab7fd0b5f53742a7104f4513649f8 |
#
# Copyright (c) 2016 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, division, print_function
from licensedcode.match import get_texts
from licensedcode.match import LicenseMatch
from licensedcode.seq import match_blocks
from licensedcode.spans import Span
# Debug tracing flags: TRACE enables logger-based tracing of matches,
# TRACE2 additionally dumps matched token/rule texts in match_sequence().
TRACE = False
TRACE2 = False
def logger_debug(*args):
    """No-op tracing hook used when TRACE is disabled; accepts and ignores any arguments."""
if TRACE:
    # When tracing is on, replace the no-op logger_debug with a real logger
    # writing DEBUG output to stdout.
    import logging
    import sys
    logger = logging.getLogger(__name__)
    # NOTE: `basestring` makes this tracing branch Python 2-only.
    def logger_debug(*args):
        return logger.debug(' '.join(isinstance(a, basestring) and a or repr(a) for a in args))
    logging.basicConfig(stream=sys.stdout)
    logger.setLevel(logging.DEBUG)
"""
Matching strategy using pair-wise multiple local sequences alignment and diff-
like approaches.
"""
# Matcher identifier stamped on every LicenseMatch produced by this strategy.
MATCH_SEQ = '3-seq'
def match_sequence(idx, candidate, query_run, start_offset=0):
    """
    Return a list of LicenseMatch by matching the `query_run` tokens sequence
    against the `idx` index for the `candidate` rule tuple (rid, rule,
    intersection).
    `start_offset` shifts the starting query position within the run.
    """
    if not candidate:
        return []
    rid, rule, _intersection = candidate
    # Per-rule data from the index: postings of high (non-junk) tokens and
    # the rule's token-id sequence.
    high_postings = idx.high_postings_by_rid[rid]
    itokens = idx.tids_by_rid[rid]
    len_junk = idx.len_junk
    qbegin = query_run.start + start_offset
    qfinish = query_run.end
    qtokens = query_run.query.tokens
    matches = []
    qstart = qbegin
    qlen = len(query_run)
    # match as long as we find alignments and have high matchable tokens;
    # this allows finding repeated instances of the same rule in the query run
    query_run_matchables = query_run.matchables
    while qstart <= qfinish:
        if not query_run_matchables:
            break
        block_matches = match_blocks(qtokens, itokens, qstart, qlen, high_postings, len_junk, query_run_matchables)
        if not block_matches:
            break
        if TRACE2:
            logger_debug('block_matches:')
            for m in block_matches:
                i, j, k = m
                print(m)
                print('qtokens:', ' '.join(idx.tokens_by_tid[t] for t in qtokens[i:i + k]))
                print('itokens:', ' '.join(idx.tokens_by_tid[t] for t in itokens[j:j + k]))
        # create one match for each matching block: this is not entirely correct
        # but will be sorted out at LicenseMatch merging and filtering time
        for qpos, ipos, mlen in block_matches:
            qspan = Span(range(qpos, qpos + mlen))
            iposses = range(ipos, ipos + mlen)
            # hispan keeps only the "high" (non-junk) index positions;
            # token ids >= len_junk are high by construction of the index.
            hispan = Span(p for p in iposses if itokens[p] >= len_junk)
            ispan = Span(iposses)
            match = LicenseMatch(rule, qspan, ispan, hispan, qbegin, MATCH_SEQ)
            if TRACE2:
                qt, it = get_texts(match, location=query_run.query.location, query_string=query_run.query.query_string, idx=idx)
                print('###########################')
                print(match)
                print('###########################')
                print(qt)
                print('###########################')
                print(it)
                print('###########################')
            matches.append(match)
            # Resume scanning after the last matched query position so the
            # next iteration can find another occurrence of the rule.
            qstart = max([qstart, qspan.end + 1])
    if TRACE: map(logger_debug, matches)
    return matches
| yasharmaster/scancode-toolkit | src/licensedcode/match_seq.py | Python | apache-2.0 | 4,556 | [
"VisIt"
] | 4edac3589510be7ca1d16ac5a5b7067a07abf847f7d4b1e232c3b0e53b298b90 |
#!/usr/bin/env python
import logging
from argparse import ArgumentParser
import theano
from theano import tensor
import theano.tensor as T
import blocks
from blocks.algorithms import GradientDescent, Adam
from blocks.bricks import MLP, Tanh, WEIGHT, Rectifier
from blocks.initialization import Constant, NdarrayInitialization, Sparse, Orthogonal
from fuel.streams import DataStream
from fuel.datasets import MNIST
from fuel.schemes import SequentialScheme
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph, apply_dropout
from blocks.model import Model
from blocks.monitoring import aggregation
from blocks.extensions import FinishAfter, Timing, Printing
from blocks.extensions.saveload import Dump, LoadFromDump
from blocks.extensions.monitoring import (DataStreamMonitoring,
TrainingDataMonitoring)
from blocks.extensions.plot import Plot
from blocks.main_loop import MainLoop
from blocks.bricks.cost import BinaryCrossEntropy
from blocks.bricks import Sigmoid
import fuel
import os
from fuel.datasets.hdf5 import H5PYDataset
floatX = theano.config.floatX
import numpy as np
import cPickle as pickle
#-----------------------------------------------------------------------------
from blocks.bricks import Initializable, Random, Linear
from blocks.bricks.base import application
class Qlinear(Initializable):
    """
    brick to handle the intermediate layer of an Autoencoder.
    In this brick a simple linear mix is performed (a kind of PCA.)
    """
    def __init__(self, input_dim, output_dim, **kwargs):
        super(Qlinear, self).__init__(**kwargs)
        # Deterministic linear map used as the code layer.
        self.mean_transform = Linear(
                name=self.name+'_mean',
                input_dim=input_dim, output_dim=output_dim,
                weights_init=self.weights_init, biases_init=self.biases_init,
                use_bias=True)
        self.children = [self.mean_transform]
    def get_dim(self, name):
        # Dimensions are delegated to the wrapped Linear transform.
        # NOTE(review): raises the bare ValueError class (no message) for
        # unknown names.
        if name == 'input':
            return self.mean_transform.get_dim('input')
        elif name == 'output':
            return self.mean_transform.get_dim('output')
        else:
            raise ValueError
    @application(inputs=['x'], outputs=['z', 'kl_term'])
    def sample(self, x):
        """Sampling is trivial in this case: the code is the deterministic
        linear projection, and the KL term is identically zero.
        """
        mean = self.mean_transform.apply(x)
        z = mean
        # Calculate KL: zero per batch element for this deterministic code.
        batch_size = x.shape[0]
        kl = T.zeros((batch_size,),dtype=floatX)
        return z, kl
    @application(inputs=['x'], outputs=['z'])
    def mean_z(self, x):
        # Deterministic code: identical to the "sample" without the KL term.
        return self.mean_transform.apply(x)
class Qsampler(Qlinear, Random):
    """
    Brick to handle the intermediate layer of a Variational Autoencoder.
    The intermediate layer predicts the mean and log-std of each dimension
    of the code and then samples from the resulting normal distribution.
    """
    def __init__(self, input_dim, output_dim, **kwargs):
        super(Qsampler, self).__init__(input_dim, output_dim, **kwargs)
        # Standard-normal prior over z: N(0, 1) per dimension.
        self.prior_mean = 0.
        self.prior_log_sigma = 0.
        # Second head predicting log sigma (mean head comes from Qlinear).
        self.log_sigma_transform = Linear(
                name=self.name+'_log_sigma',
                input_dim=input_dim, output_dim=output_dim,
                weights_init=self.weights_init, biases_init=self.biases_init,
                use_bias=True)
        self.children.append(self.log_sigma_transform)
    @application(inputs=['x'], outputs=['z', 'kl_term'])
    def sample(self, x):
        """Return a sample and the corresponding KL term
        Parameters
        ----------
        x :
        Returns
        -------
        z : tensor.matrix
            Samples drawn from Q(z|x)
        kl : tensor.vector
            KL(Q(z|x) || P_z)
        """
        mean = self.mean_transform.apply(x)
        log_sigma = self.log_sigma_transform.apply(x)
        batch_size = x.shape[0]
        dim_z = self.get_dim('output')
        # Sample from mean-zeros std.-one Gaussian
        u = self.theano_rng.normal(
                    size=(batch_size, dim_z),
                    avg=0., std=1.)
        # Reparameterization trick: z = mean + sigma * u keeps the graph
        # differentiable w.r.t. mean and log_sigma.
        z = mean + tensor.exp(log_sigma) * u
        # Calculate KL between the diagonal Gaussian Q(z|x) and the
        # (prior_mean, prior_log_sigma) Gaussian prior, summed over dims.
        kl = (
            self.prior_log_sigma - log_sigma
            + 0.5 * (
                tensor.exp(2 * log_sigma) + (mean - self.prior_mean) ** 2
            ) / tensor.exp(2 * self.prior_log_sigma)
            - 0.5
        ).sum(axis=-1)
        return z, kl
#-----------------------------------------------------------------------------
class VAEModel(Initializable):
    """
    A brick to perform the entire auto-encoding process:
    encoder MLP -> sampler (code layer) -> decoder MLP.
    """
    def __init__(self,
                 encoder_mlp, sampler,
                 decoder_mlp, **kwargs):
        super(VAEModel, self).__init__(**kwargs)
        self.encoder_mlp = encoder_mlp
        self.sampler = sampler
        self.decoder_mlp = decoder_mlp
        self.children = [self.encoder_mlp, self.sampler, self.decoder_mlp]
    def get_dim(self, name):
        # All z-related quantities share the sampler's output dimension;
        # the KL term is a scalar per example.
        if name in ['z', 'z_mean', 'z_log_sigma']:
            return self.sampler.get_dim('output')
        elif name == 'kl':
            return 0
        else:
            # Fix: the base-class result was computed but silently dropped
            # (missing `return`), making unknown names yield None.
            return super(VAEModel, self).get_dim(name)
    @application(inputs=['features'], outputs=['reconstruction', 'kl_term'])
    def reconstruct(self, features):
        """Encode, sample a code and decode; also return the KL term."""
        enc = self.encoder_mlp.apply(features)
        z, kl = self.sampler.sample(enc)
        x_recons = self.decoder_mlp.apply(z)
        x_recons.name = "reconstruction"
        kl.name = "kl"
        return x_recons, kl
    @application(inputs=['features'], outputs=['z', 'enc'])
    def mean_z(self, features):
        """Deterministic forward pass: mean code and last encoder layer."""
        enc = self.encoder_mlp.apply(features)
        z = self.sampler.mean_z(enc)
        return z, enc
#-----------------------------------------------------------------------------
def shnum(value):
    """ Convert a float into a short tag-usable string representation. E.g.:
        0     -> 0
        0.1   -> 11
        0.01  -> 12
        0.001 -> 13
        0.005 -> 53
    The first character is the leading mantissa digit, the second the negated
    decimal exponent; non-positive values collapse to '0'.
    """
    if value <= 0.:
        return '0'
    # Leading digit of the scientific-notation mantissa, e.g. '5' for 5e-3.
    leading_digit = ("%e" % value)[0]
    # Negated decimal exponent, e.g. 3 for 5e-3.
    neg_exp = -np.floor(np.log10(value))
    return "%s%d" % (leading_digit, neg_exp)
def main(name, model, epochs, batch_size, learning_rate, bokeh, layers, gamma,
         rectifier, predict, dropout, qlinear, sparse):
    """Train a VAE (or, with predict=True, export its codes) on `name` dataset.
    NOTE: this function uses Python 2-only constructs (print statements,
    dict.iteritems) and Theano/Blocks/Fuel APIs.
    """
    # Encode the hyper-parameter configuration into the run/dump name.
    runname = "vae%s-L%s%s%s%s-l%s-g%s-b%d" % (name, layers,
                                              'r' if rectifier else '',
                                              'd' if dropout else '',
                                              'l' if qlinear else '',
                                              shnum(learning_rate), shnum(gamma), batch_size//100)
    if rectifier:
        activation = Rectifier()
        full_weights_init = Orthogonal()
    else:
        activation = Tanh()
        full_weights_init = Orthogonal()
    # Optionally use sparse initialization for the MLP weights.
    if sparse:
        runname += '-s%d'%sparse
        weights_init = Sparse(num_init=sparse, weights_init=full_weights_init)
    else:
        weights_init = full_weights_init
    # `layers` is a comma-separated list of sizes; last entry is the code dim.
    layers = map(int,layers.split(','))
    encoder_layers = layers[:-1]
    encoder_mlp = MLP([activation] * (len(encoder_layers)-1),
                      encoder_layers,
                      name="MLP_enc", biases_init=Constant(0.), weights_init=weights_init)
    enc_dim = encoder_layers[-1]
    z_dim = layers[-1]
    # Deterministic (Qlinear) or stochastic (Qsampler) code layer.
    if qlinear:
        sampler = Qlinear(input_dim=enc_dim, output_dim=z_dim, biases_init=Constant(0.), weights_init=full_weights_init)
    else:
        sampler = Qsampler(input_dim=enc_dim, output_dim=z_dim, biases_init=Constant(0.), weights_init=full_weights_init)
    decoder_layers = layers[:] ## includes z_dim as first layer
    decoder_layers.reverse()
    # Decoder mirrors the encoder; final Sigmoid maps back to pixel range.
    decoder_mlp = MLP([activation] * (len(decoder_layers)-2) + [Sigmoid()],
                      decoder_layers,
                      name="MLP_dec", biases_init=Constant(0.), weights_init=weights_init)
    vae = VAEModel(encoder_mlp, sampler, decoder_mlp)
    vae.initialize()
    x = tensor.matrix('features')
    if predict:
        # Prediction mode: build a graph computing the mean code and the last
        # encoder activation for every example.
        mean_z, enc = vae.mean_z(x)
        # cg = ComputationGraph([mean_z, enc])
        newmodel = Model([mean_z,enc])
    else:
        # Training mode: negative ELBO = reconstruction cross-entropy + KL.
        x_recons, kl_terms = vae.reconstruct(x)
        recons_term = BinaryCrossEntropy().apply(x, x_recons)
        recons_term.name = "recons_term"
        cost = recons_term + kl_terms.mean()
        cg = ComputationGraph([cost])
        if gamma > 0:
            # L2 weight decay on all weight matrices.
            weights = VariableFilter(roles=[WEIGHT])(cg.variables)
            cost += gamma * blocks.theano_expressions.l2_norm(weights)
        cost.name = "nll_bound"
        newmodel = Model(cost)
        if dropout:
            # Apply dropout to all MLP weights except the first encoder layer.
            weights = [v for k,v in newmodel.get_params().iteritems()
                       if k.find('MLP')>=0 and k.endswith('.W') and not k.endswith('MLP_enc/linear_0.W')]
            cg = apply_dropout(cg,weights,0.5)
            target_cost = cg.outputs[0]
        else:
            target_cost = cost
    # Dataset selection: built-in MNIST or an HDF5 file under fuel's data path.
    if name == 'mnist':
        if predict:
            train_ds = MNIST("train")
        else:
            train_ds = MNIST("train", sources=['features'])
        test_ds = MNIST("test")
    else:
        datasource_dir = os.path.join(fuel.config.data_path, name)
        datasource_fname = os.path.join(datasource_dir , name+'.hdf5')
        if predict:
            train_ds = H5PYDataset(datasource_fname, which_set='train')
        else:
            train_ds = H5PYDataset(datasource_fname, which_set='train', sources=['features'])
        test_ds = H5PYDataset(datasource_fname, which_set='test')
    train_s = DataStream(train_ds,
                 iteration_scheme=SequentialScheme(
                     train_ds.num_examples, batch_size))
    test_s = DataStream(test_ds,
                 iteration_scheme=SequentialScheme(
                     test_ds.num_examples, batch_size))
    if predict:
        # Run the compiled function over train + test and collect codes,
        # encoder activations and any non-feature labels.
        from itertools import chain
        fprop = newmodel.get_theano_function()
        allpdata = None
        alledata = None
        f = train_s.sources.index('features')
        assert f == test_s.sources.index('features')
        sources = test_s.sources
        alllabels = dict((s,[]) for s in sources if s != 'features')
        for data in chain(train_s.get_epoch_iterator(), test_s.get_epoch_iterator()):
            for s,d in zip(sources,data):
                if s != 'features':
                    alllabels[s].extend(list(d))
            pdata, edata = fprop(data[f])
            if allpdata is None:
                allpdata = pdata
            else:
                allpdata = np.vstack((allpdata, pdata))
            if alledata is None:
                alledata = edata
            else:
                alledata = np.vstack((alledata, edata))
        print 'Saving',allpdata.shape,'intermidiate layer, for all training and test examples, to',name+'_z.npy'
        np.save(name+'_z', allpdata)
        print 'Saving',alledata.shape,'last encoder layer to',name+'_e.npy'
        np.save(name+'_e', alledata)
        print 'Saving additional labels/targets:',','.join(alllabels.keys()),
        print ' of size',','.join(map(lambda x: str(len(x)),alllabels.values())),
        print 'to',name+'_labels.pkl'
        with open(name+'_labels.pkl','wb') as fp:
            pickle.dump(alllabels, fp, -1)
    else:
        # Training: Adam on the (possibly dropout-modified) cost.
        algorithm = GradientDescent(
            cost=target_cost, params=cg.parameters,
            step_rule=Adam(learning_rate) # Scale(learning_rate=learning_rate)
        )
        extensions = []
        if model:
            # Resume from a previous dump.
            extensions.append(LoadFromDump(model))
        extensions += [Timing(),
                       FinishAfter(after_n_epochs=epochs),
                       DataStreamMonitoring(
                           [recons_term, cost],
                           test_s,
                           prefix="test"),
                       TrainingDataMonitoring(
                           [cost,
                            aggregation.mean(algorithm.total_gradient_norm)],
                           prefix="train",
                           after_epoch=True),
                       Dump(runname, every_n_epochs=10),
                       Printing()]
        if bokeh:
            # Optional live plotting of the monitored channels.
            extensions.append(Plot(
                'Auto',
                channels=[
                    ['test_recons_term','test_nll_bound','train_nll_bound'
                     ],
                    ['train_total_gradient_norm']]))
        main_loop = MainLoop(
            algorithm,
            train_s,
            model=newmodel,
            extensions=extensions)
        main_loop.run()
if __name__ == "__main__":
    # Command-line entry point: parse hyper-parameters and forward them
    # to main() (argument dest names must match main()'s keyword params).
    logging.basicConfig(level=logging.INFO)

    parser = ArgumentParser("An example of training a Variational-Autoencoder.")
    # Data set / model persistence options.
    parser.add_argument("--name", default="mnist",
                        help="name of hdf5 data set")
    parser.add_argument("--model",
                        help="start model to read")
    # Optimisation options.
    parser.add_argument("--epochs", type=int, default=1000,
                        help="Number of training epochs to do.")
    parser.add_argument("--bs", "--batch-size", type=int, dest="batch_size",
                        default=500, help="Size of each mini-batch")
    parser.add_argument("--lr", "--learning-rate", type=float, dest="learning_rate",
                        default=1e-3, help="Learning rate")
    parser.add_argument("--bokeh", action='store_true', default=False,
                        help="Set if you want to use Bokeh ")
    # Architecture / regularisation options.
    parser.add_argument("--layers",
                        default="784,100,20", help="number of units in each layer of the encoder"
                        " (use 784, on first layer, for mnist.)"
                        " The last number (e.g. 20) is the dimension of the intermidiate layer."
                        " The decoder has the same layers as the encoder but in reverse"
                        " (e.g. 100, 784)")
    parser.add_argument("--gamma", type=float,
                        default=3e-4, help="L2 weight")
    parser.add_argument("-r","--rectifier",action='store_true',default=False,
                        help="Use RELU activation on hidden (default Tanh)")
    parser.add_argument("-p","--predict",action='store_true',default=False,
                        help="Generate prediction of the intermidate layer and last layer of the encoder"
                        " instead of training."
                        " You must supply a pre-trained model and define all parameters to be the same"
                        " as in training. ")
    parser.add_argument("-d","--dropout",action='store_true',default=False,
                        help="Use dropout")
    parser.add_argument("-l","--qlinear",action='store_true',default=False,
                        help="Perform a deterministic linear transformation instead of sampling"
                        " on the intermidiate layer")
    parser.add_argument("-s","--sparse",type=int,
                        help="Use sparse weight initialization. Give the number of non zero weights")
    args = parser.parse_args()
    main(**vars(args))
| codeaudit/VAE | VAE.py | Python | mit | 15,189 | [
"Gaussian"
] | 4ae8b6004149d6d4c9ce97ea32c71364d7d6e6e8b64ac19d83daf88916a9b959 |
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.GoRequirement import GoRequirement
@linter(executable='gofmt',
        use_stdin=True,
        output_format='corrected',
        result_message='Formatting can be improved.')
class GofmtBear:
    """
    Check Go code for formatting problems such as alignment, indentation
    and redundant parentheses, and propose the corrected layout.

    The actual work is delegated to the ``gofmt`` utility that ships
    with the Go toolchain; see <https://golang.org/cmd/gofmt/>.
    """
    LANGUAGES = {'Go'}
    REQUIREMENTS = {GoRequirement(package='golang.org/cmd/gofmt', flag='-u')}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    CAN_FIX = {'Formatting', 'Code Simplification'}
    ASCIINEMA_URL = 'https://asciinema.org/a/94812'

    @staticmethod
    def create_arguments(filename, file, config_file,
                         simplify: bool = False,
                         ):
        """
        :param simplify: Tries to simplify code
        """
        # ``-s`` asks gofmt to apply code simplification as well.
        return ('-s',) if simplify else ()
| coala-analyzer/coala-bears | bears/go/GofmtBear.py | Python | agpl-3.0 | 1,184 | [
"VisIt"
] | fd92fb709cb53da5b75ea3423fbb1b6fc9b168b07ff436c2004861f131c3fad3 |
# Author: Samuel Genheden samuel.genheden@gmail.com
"""
Program to analyze the extent of alcohols
"""
import numpy as np
import MDAnalysis.core.distances as mddist
from sgenlib import moldyn
from sgenlib import mdactions
from sgenlib import pbc
class AlcoholLenAnalysis(mdactions.TrajectoryAction) :
    """Trajectory action that measures the extent of alcohol molecules.

    Given two atom selections (paired element-wise, e.g. the first and
    last heavy atom of each molecule), it records per frame the mean
    total distance between the paired atoms and their mean separation
    along the z axis, then writes all records on finalize().
    """
    def add_arguments(self, parser):
        # Register the command-line options consumed by setup().
        parser.add_argument('--sel',nargs=2,help="the selectiom mask for the atoms")
        parser.add_argument('-o','--out',help="the output",default="alcohol_len.txt")
    def setup(self, args):
        # The two selections are paired element-wise, so they are
        # expected to contain the same number of atoms.
        self.sel1 = self.processor.universe.select_atoms(args.sel[0])
        self.sel2 = self.processor.universe.select_atoms(args.sel[1])
        # Parenthesised print: the original py2-only print statement is a
        # SyntaxError on Python 3; this form behaves identically on both.
        print("Group selection contains %d and %d atoms"%(len(self.sel1), len(self.sel2)))
        self.out = args.out
        self.records = []
    def process(self):
        # Mean |z1 - z2| separation of the paired atoms.
        zlen = np.mean(np.abs(self.sel1.positions[:, 2] -
                              self.sel2.positions[:, 2]))
        # Mean Euclidean distance between the paired atoms.
        diff2 = np.power(self.sel1.positions - self.sel2.positions, 2)
        totlen = np.mean(np.sqrt(np.sum(diff2, axis=1)))
        self.records.append(mdactions.MDRecord(self.processor.currtime,[totlen, zlen]))
    def finalize(self):
        # Dump the accumulated (time, [tot_len, z_len]) records.
        self._write_records(headers="Tot_len Z_len".split())
if __name__ == '__main__' :
    # Build the generic trajectory processor, attach this analysis,
    # parse the command line and iterate over the trajectory frames.
    processor = moldyn.TrajectoryProcessor("Measure length of alcohol")
    analysis = AlcoholLenAnalysis(processor)
    processor.setup(printargs=True)
    processor.process()
| SGenheden/Scripts | Projects/Yeast/md_alcohol_len.py | Python | mit | 1,477 | [
"MDAnalysis"
] | 072a061d7939dbbc93078def0800b522bda6cee294a9a53ff3495be5ce98a862 |
"""
This module deals with the wannier functions.
utils for the wannier90.x input and outputs.
"""
#from .wannier_utils import *
from .wannier import *
from .wann_ham import *
| mailhexu/pyDFTutils | pyDFTutils/wannier90/__init__.py | Python | lgpl-3.0 | 176 | [
"Wannier90"
] | 3c83348fcf2bd2e6234990ea4d6bac7fe70cd470d739c42dd5563ae4bafd9fe1 |
from __future__ import print_function
import moose
import pylab
import numpy as np
import sys
import os
import matplotlib.pyplot as plt
import rdesigneur as rd
from scipy import stats
import time
baseFname = 'Fig6_data'
displayMoogli = False
displayScatter = False
displayRuns = False
dumpSpatialPlotsFlag = False
scatterParamToUseInStats = 0
realStartTime = time.time()
rdes = 0
# All of the spine indices are on path /model/elec/head<idx>
spineStimIndices = [[753, 771], [2475, 2482], [1765, 1778], [3551, 3561]]
# Below are corresponding indices for basal and apical dend trees.
dendBasalRange = [[16, 8] ] # Here the second idx in each pair is # of compt
dendApicalIndices = [[36,2], [70,4], [52, 13] ] # Same indexing as above.
# Stim amplitude is unitless, defined in units of A mol conc.
# Stim Width is unitless, defined as multiple of diffusion length.
# Stim Vel is unitless, defined in terms of diffusion length by time units of diff constt.
# diffConst is defined in terms of square of diffusion length by unit time.
# diffLength here is in SI units: m^2/s
#
params = {
'diffusionLength':1.0e-6, # Diffusion characteristic length, used as voxel length too.
'dendDiameter': 1e-6, # Diameter of section of dendrite in model
'dendLength': 100e-6, # Length of section of dendrite in model
'spineSizeScale': 1.0, # Length scaling for spines. Vol wil be x^3.
'diffConstCa':100.0e-12, # Diffusion constant of Ca
'diffConstMAPK': 5e-12, # Diffusion constant for MAPK
'diffConstPP': 2e-12, # Diff constant for MAPK-activated phosphatase
'CaActivateRafKf': 6e6, # 1/sec.mM: rate for activation of Raf by Ca
'blankVoxelsAtEnd':10, # of voxels to leave blank at end of cylinder
'preStimTime':1.0, # Time to run before turning on stimulus.
'stimBurstTime':2.0, # Time for a stimulus burst
'postStimTime':10.0, # Time to run after stimulus. ~3x decay time
'runtime':20.0, # Simulation run time
'checkPoint':1.0, # How often to do a checkpoint.
'chemPlotDt':0.05, # Plotting timestep for chemical signaling.
'elecPlotDt':0.1e-3, # Plotting timestep for electrical signaling.
'spineSpacing':4.0e-6, # Spacing between spines.
'stimSpacing':4, # Stimulus spacing, in terms of # of spines.
'meanSpikeRate':0.000001, # Basal mean rate for all synapses.
'activeSpikeRate':20.0, # Active input rate on specified synapses.
'baseGabaSpikeRate':1.0, # 1 Hz.
'thetaGabaSpikeRate':0.5, # This is the peak, not the average theta.
'thetaFreq':8.0, #
'amparSynapseWeight': 30.0, #
'nmdarSynapseWeight': 30.0, #
'gabarSynapseWeight': 30.0, #
'LCaDensity': 0.0, # Channel density for LCa channels.
'adaptorScale':60.0e3, # Adaptor scale factor from conc to Na density in Seimens/m^2.
'CaPsdScale': 0.08, # Adaptor scale from psd elec Ca to chem conc.
'Em': -60.0e-3, # Resting potential of neuron
'refractoryPeriod':0.010, # 10 ms refractory time.
'cellModel': 'VHC-neuron.CNG.swc', # Cell morphology file
'chemModel': 'NN_mapk15.g', # Chemical model file.
'fnumber': 0, # Output file index
'seed': 1234, # Seeds random numbers
'seqDx': 4.0e-6, # Sequence spatial interval
'seqDt': 3.0, # Sequence time interval
}
# Here we define a string for each of the 5 stimulus timings in sequence:
# Comment in the seq order you want.
seq = [0, 1, 2, 3, 4] # ordered
#seq = [4, 2, 0, 3, 1] #scrambled
stimStrList = ["{0} + (t>{1}) * (t<{2}) * {3}".format( 0, params['preStimTime'] + params['seqDt']*seq[idx], params['preStimTime'] + params['stimBurstTime']+ params['seqDt']*seq[idx], params['activeSpikeRate'] ) for idx in range(5) ]
gabaRateExpression = "{} + 2*{} * cos(3.14159*t*{})^2".format( params['baseGabaSpikeRate'], params['thetaGabaSpikeRate'], params['thetaFreq'] )
print( "GRE = ", gabaRateExpression )
plotlist = [
['soma', '1', '.', 'Vm', 'Soma Vm'],
['head12', '1', 'Ca_conc', 'Ca', 'head2 eCa'],
['dend36', '1', 'dend/DEND/P_MAPK', 'conc', 'dend36_P_MAPK'],
]
def main():
    """Build the multiscale (electrical + chemical) model and run it.

    Cell, channel, spine and chemistry prototypes are assembled by
    rdesigneur according to the module-level ``params`` dict; after
    construction a few diffusion constants, the resting potential and
    the Ca->Raf activation rate are patched directly on the library
    prototypes, then the model is built and displayed with Moogli for
    ``params['runtime']`` seconds.
    """
    global rdes
    global params
    diffusionLength = params['diffusionLength']
    # Side effect: creates the /library container that prototypes go in.
    library = moose.Neutral( '/library' )
    chanpath = os.path.dirname( os.path.realpath(__file__)) + '/proto21.'
    moose.seed( params['seed'] )
    rdes = rd.rdesigneur(
        useGssa = False,
        turnOffElec = False,
        chemPlotDt = params['chemPlotDt'],
        diffusionLength = diffusionLength,
        spineProto = [['makeExcSpine()', 'spine']],
        chanProto = [
            [ chanpath + 'make_K_AHP()', 'K_AHP' ],
            [ chanpath + 'make_K_A()', 'K_A' ],
            [ chanpath + 'make_K_C()', 'K_C' ],
            [ chanpath + 'make_K_DR()', 'K_DR' ],
            [ chanpath + 'make_Na()', 'Na' ],
            [ chanpath + 'make_Ca_conc()', 'Ca_conc' ],
            [ chanpath + 'make_Ca()', 'Ca' ],
            [ chanpath + 'make_NMDA()', 'NMDA' ],
            [ chanpath + 'make_glu()', 'glu' ],
            [ chanpath + 'make_GABA()', 'GABA' ],
        ],
        chemProto = [[params['chemModel'], 'chem']],
        # branchedCell, name, somaDia, somaLen, dendDia, dendLen, dendNumSeg, branchDia, branchLen, branchNumSeg
        cellProto = [['branchedCell', 'soma', 10e-6, 20e-6, 1.2e-6, 60e-6, 5, 0.6e-6, 100e-6, 5]],
        chanDistrib = [
            ["Ca_conc", "#", "tau", "0.0133" ],
            ["Ca", "#dend#,#basal#,#apical#,#branch#", "Gbar", str( params["LCaDensity"] ) ],
            ["Ca", "#soma#", "Gbar", "40" ],
            ["Na", "#dend#,#basal#", "Gbar", "60" ],
            ["Na", "#soma#", "Gbar", "600" ],
            ["Na", "#apical#,#branch#", "Gbar", "40+40*exp(-p/200e-6)" ],
            ["K_DR", "#dend#,#basal#", "Gbar", "(p < 400e-6)*200" ],
            ["K_DR", "#soma#", "Gbar", "250" ],
            ["K_DR", "#apical#,#branch#", "Gbar", "60+40*(p < 125e-6)" ],
            ["K_AHP", "#", "Gbar", "8" ],
            ["K_C", "#basal#,#dend#,#apical#,#branch#", "Gbar", "50+150*exp(-p/200e-6)" ],
            ["K_C", "#soma#", "Gbar", "100" ],
            ["K_A", "#soma#", "Gbar", "50" ],
            ["K_A", "#dend#,#apical#,#branch#", "Gbar", "50*(1 + 2.0e-6/(dia + 0.1e-6))" ],
            ["GABA", "#apical#,#branch#,#dend#,#basal#", "Gbar", "10 + 30*(p < 125e-6)" ],
        ],
        spineDistrib = [['spine','#dend#,#apical#,#branch#', str(params['spineSpacing']),'-1e-7', str( params['spineSizeScale'] ), '0.0', '0', '0' ]],
        chemDistrib = [
            ['DEND', '#', 'dend', '1', diffusionLength ],
            ['SPINE', '#', 'spine', '1', 'DEND' ],
            ['PSD', '#', 'psd', '1', 'DEND' ]
        ],
        # Ideally should be synced. There should be a way to do this.
        stimList = [
            [ 'head#', str( params['amparSynapseWeight'] ), 'glu', 'randsyn', str( params['meanSpikeRate'] ) ],
            [ 'head#', str( params['nmdarSynapseWeight'] ), 'NMDA', 'randsyn', str( params['meanSpikeRate'] )],
            [ '#', str( params['gabarSynapseWeight'] ), 'GABA', 'randsyn', gabaRateExpression ],
            [ 'head7,head18,head32,head44,head53', '30', 'glu', 'periodicsyn', stimStrList[0]],
            [ 'head8,head20,head30,head45,head54', '30', 'glu', 'periodicsyn', stimStrList[1]],
            [ 'head9,head22,head33,head46,head55', '30', 'glu', 'periodicsyn', stimStrList[2]],
            [ 'head10,head24,head29,head47,head56', '30', 'glu', 'periodicsyn', stimStrList[3]],
            [ 'head11,head26,head31,head48,head57', '30', 'glu', 'periodicsyn', stimStrList[4]],
            [ 'head7,head18,head32,head44,head53', '30', 'NMDA', 'periodicsyn', stimStrList[0]],
            [ 'head8,head20,head30,head45,head54', '30', 'NMDA', 'periodicsyn', stimStrList[1]],
            [ 'head9,head22,head33,head46,head55', '30', 'NMDA', 'periodicsyn', stimStrList[2]],
            [ 'head10,head24,head29,head47,head56', '30', 'NMDA', 'periodicsyn', stimStrList[3]],
            [ 'head11,head26,head31,head48,head57', '30', 'NMDA', 'periodicsyn', stimStrList[4]],
        ],
        adaptorList = [
            [ 'Ca_conc', 'Ca', 'PSD/Ca_input', 'concInit', 2e-6, params['CaPsdScale'] ],
            ['Ca_conc','Ca','DEND/Ca_input','concInit',2.0e-6, 0.0001],
            [ 'DEND/channel_p', 'conc', 'Na', 'modulation', 1.0, params['adaptorScale']],
        ],
        plotList = [
            ['soma', '1', '.', 'Vm', 'Soma Vm'],
            ['#', '1', 'SPINE/Ca', 'conc', 'Chem Ca conc'],
            ['#', '1', 'DEND/P_MAPK', 'conc', 'P_MAPK conc'],
        ],
        moogList = [
            #['#', '1', '.', 'Vm', 'Memb potential'],
            ['#', '1', 'DEND/P_MAPK', 'conc', '[P_MAPK] (uM)',0, 0.15],
            ['#', '1', 'SPINE/Ca', 'conc', '[Ca] (uM)', 0, 0.5, True, 2],
        ]
    )
    ############## Set Ca diffusion const ##########################
    for ca in moose.wildcardFind( '/library/##/Ca[ISA=PoolBase]' ):
        ca.diffConst = params['diffConstCa']
    ############## Set MAPK diffusion const ##########################
    temp = params['diffConstMAPK']
    moose.element( '/library/chem/kinetics/DEND/P_MAPK' ).diffConst = temp
    moose.element( '/library/chem/kinetics/DEND/MAPK' ).diffConst = temp
    ############## Set PP diffusion const ##########################
    temp = params['diffConstPP']
    moose.element( '/library/chem/kinetics/DEND/reg_phosphatase' ).diffConst = temp
    moose.element( '/library/chem/kinetics/DEND/inact_phosphatase' ).diffConst = temp
    ############## Set resting potential ##########################
    for i in moose.wildcardFind( "/library/##[][ISA=CompartmentBase]" ):
        i.Em = params[ 'Em' ]
        i.initVm = params[ 'Em' ]
    ############## Set sensitivity to Ca ##########################
    # Use the configured rate instead of a duplicated magic number
    # (params['CaActivateRafKf'] is 6e6, identical to the old literal).
    moose.element( '/library/chem/kinetics/DEND/Ca_activate_Raf' ).Kf = params['CaActivateRafKf']
    #################### Build the model ##########################
    rdes.buildModel()
    moose.reinit()
    moose.seed( 1 )
    rdes.displayMoogli( 0.01, params['runtime'], 0.0, colormap = 'plasma', mergeDisplays = True, bg = 'default' )
if __name__ == '__main__':
    # Run the simulation when invoked as a script.
    main()
| BhallaLab/moose-examples | tutorials/Rdesigneur/ex12.0_multiscale_seq_selectivity.py | Python | gpl-2.0 | 10,548 | [
"MOOSE",
"NEURON"
] | 0c229555f89a2fe49f6e1a8424e6ec6b72e58e457ed1bf991f7c65626db763d0 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2005-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Handling of loading new/existing databases.
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import os
import logging
#-------------------------------------------------------------------------
#
# Set up logging
#
#-------------------------------------------------------------------------
_LOG = logging.getLogger(".")
#-------------------------------------------------------------------------
#
# GTK+ modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.cli.grampscli import CLIDbLoader
from gramps.gen.config import config
from gramps.gen.db import DbBsddb
from gramps.gen.db.exceptions import (DbUpgradeRequiredError,
BsddbDowngradeError,
DbVersionError,
DbEnvironmentError,
BsddbUpgradeRequiredError,
BsddbDowngradeRequiredError,
PythonUpgradeRequiredError,
PythonDowngradeError)
from gramps.gen.constfunc import STRTYPE, UNITYPE, conv_to_unicode
from .pluginmanager import GuiPluginManager
from .dialog import (DBErrorDialog, ErrorDialog, QuestionDialog2,
WarningDialog)
from .user import User
from gramps.gen.errors import DbError
#-------------------------------------------------------------------------
#
# DbLoader class
#
#-------------------------------------------------------------------------
class DbLoader(CLIDbLoader):
    def __init__(self, dbstate, uistate):
        """Bind the loader to the database state and the GUI state."""
        CLIDbLoader.__init__(self, dbstate)
        self.uistate = uistate
        # Info object produced by the most recent import, if any.
        self.import_info = None
    def _warn(self, title, warnmessage):
        # GUI override of the CLI hook: show a warning dialog instead of
        # printing to the console.
        WarningDialog(title, warnmessage)
    def _errordialog(self, title, errormessage):
        """
        Show the error.

        In the GUI, the error is shown, and a return happens
        """
        ErrorDialog(title, errormessage)
        # Non-zero return mirrors the CLI loader's error convention.
        return 1
def _dberrordialog(self, msg):
import traceback
exc = traceback.format_exc()
try:
DBErrorDialog(str(msg.value))
_LOG.error(str(msg.value))
except:
DBErrorDialog(str(msg))
_LOG.error(str(msg) +"\n" + exc)
    def _begin_progress(self):
        # Busy cursor plus a visible, zeroed progress bar.
        self.uistate.set_busy_cursor(True)
        self.uistate.progress.show()
        self.uistate.pulse_progressbar(0)
    def _pulse_progress(self, value):
        # Forward progress updates to the GUI progress bar.
        self.uistate.pulse_progressbar(value)
    def _end_progress(self):
        # Restore the normal cursor and hide the progress bar.
        self.uistate.set_busy_cursor(False)
        self.uistate.progress.hide()
def import_file(self):
self.import_info = None
# First thing first: import is a batch transaction
# so we will lose the undo history. Warn the user.
if self.dbstate.db.get_number_of_people() > 0:
warn_dialog = QuestionDialog2(
_('Undo history warning'),
_('Proceeding with import will erase the undo history '
'for this session. In particular, you will not be able '
'to revert the import or any changes made prior to it.\n\n'
'If you think you may want to revert the import, '
'please stop here and backup your database.'),
_('_Proceed with import'), _('_Stop'),
self.uistate.window)
if not warn_dialog.run():
return False
pmgr = GuiPluginManager.get_instance()
import_dialog = Gtk.FileChooserDialog(_('Gramps: Import Family Tree'),
self.uistate.window,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
_('Import'),
Gtk.ResponseType.OK))
import_dialog.set_local_only(False)
# Always add automatic (match all files) filter
add_all_files_filter(import_dialog) # *
# Add more file type selections for available importers
for plugin in pmgr.get_import_plugins():
file_filter = Gtk.FileFilter()
name = "%s (.%s)" % (plugin.get_name(), plugin.get_extension())
file_filter.set_name(name)
file_filter.add_pattern("*.%s" % plugin.get_extension())
file_filter.add_pattern(plugin.get_extension().capitalize())
import_dialog.add_filter(file_filter)
(box, type_selector) = format_maker()
import_dialog.set_extra_widget(box)
# Suggested folder: try last open file, import, then last export,
# then home.
default_dir = config.get('paths.recent-import-dir')
if len(default_dir)<=1:
default_dir = get_default_dir()
import_dialog.set_current_folder(default_dir)
while True:
response = import_dialog.run()
if response == Gtk.ResponseType.CANCEL:
break
elif response == Gtk.ResponseType.OK:
filename = conv_to_unicode(import_dialog.get_filename())
if self.check_errors(filename):
# displays errors if any
continue
(the_path, the_file) = os.path.split(filename)
config.set('paths.recent-import-dir', the_path)
extension = type_selector.get_value()
if extension == 'auto':
# Guess the file format based on the file extension.
# This will get the lower case extension without a period,
# or an empty string.
extension = os.path.splitext(filename)[-1][1:].lower()
for plugin in pmgr.get_import_plugins():
if extension == plugin.get_extension():
self.do_import(import_dialog,
plugin.get_import_function(),
filename)
return True
# Finally, we give up and declare this an unknown format
ErrorDialog(
_("Could not open file: %s") % filename,
_('File type "%s" is unknown to Gramps.\n\n'
'Valid types are: Gramps database, Gramps XML, '
'Gramps package, GEDCOM, and others.') % extension)
import_dialog.destroy()
return False
def check_errors(self, filename):
"""
Run common error checks and return True if any found.
In this process, a warning dialog can pop up.
"""
if not isinstance(filename, (STRTYPE, UNITYPE)):
return True
filename = os.path.normpath(os.path.abspath(filename))
if len(filename) == 0:
return True
elif os.path.isdir(filename):
ErrorDialog(
_('Cannot open file'),
_('The selected file is a directory, not a file.\n'))
return True
elif os.path.exists(filename):
if not os.access(filename, os.R_OK):
ErrorDialog(
_('Cannot open file'),
_('You do not have read access to the selected file.'))
return True
else:
try:
f = file(filename,'w')
f.close()
os.remove(filename)
except IOError:
ErrorDialog(
_('Cannot create file'),
_('You do not have write access to the selected file.'))
return True
return False
    def do_import(self, dialog, importer, filename):
        """Run *importer* on *filename* with GUI progress feedback.

        On success the importer's optional info object is stored in
        self.import_info for later display via import_info_text(), and
        the folder is remembered as the recent import directory.
        """
        self.import_info = None
        dialog.destroy()
        self._begin_progress()

        try:
            #an importer can return an object with info, object.info_text()
            #returns that info. Otherwise None is set to import_info
            self.import_info = importer(self.dbstate.db, filename,
                            User(callback=self._pulse_progress,
                                    uistate=self.uistate))
            dirname = os.path.dirname(filename) + os.path.sep
            config.set('paths.recent-import-dir', dirname)
        except UnicodeError as msg:
            ErrorDialog(
                _("Could not import file: %s") % filename,
                _("This file incorrectly identifies its character "
                  "set, so it cannot be accurately imported. Please fix the "
                  "encoding, and import again") + "\n\n %s" % msg)
        except Exception:
            # Deliberate catch-all: an import failure must not crash the
            # GUI; the traceback is preserved in the log.
            _LOG.error("Failed to import database.", exc_info=True)
        self._end_progress()
def import_info_text(self):
"""
On import the importer can construct an info object about the import.
If so, this method will return this text, otherwise the empty string
is returned
"""
if self.import_info is None:
return ""
return self.import_info.info_text()
def read_file(self, filename):
"""
This method takes care of changing database, and loading the data.
In 3.0 we only allow reading of real databases of filetype
'x-directory/normal'
This method should only return on success.
Returning on failure makes no sense, because we cannot recover,
since database has already been changed.
Therefore, any errors should raise exceptions.
On success, return with the disabled signals. The post-load routine
should enable signals, as well as finish up with other UI goodies.
"""
if os.path.exists(filename):
if not os.access(filename, os.W_OK):
mode = "r"
self._warn(_('Read only database'),
_('You do not have write access '
'to the selected file.'))
else:
mode = "w"
else:
mode = 'w'
db = DbBsddb()
db.disable_signals()
self.dbstate.no_database()
self._begin_progress()
force_schema_upgrade = False
force_bsddb_upgrade = False
force_bsddb_downgrade = False
force_python_upgrade = False
try:
while True:
try:
db.load(filename, self._pulse_progress,
mode, force_schema_upgrade,
force_bsddb_upgrade,
force_bsddb_downgrade,
force_python_upgrade)
db.set_save_path(filename)
self.dbstate.change_database(db)
break
except DbUpgradeRequiredError as msg:
if QuestionDialog2(_("Are you sure you want to upgrade "
"this Family Tree?"),
str(msg),
_("I have made a backup,\n"
"please upgrade my Family Tree"),
_("Cancel"), self.uistate.window).run():
force_schema_upgrade = True
force_bsddb_upgrade = False
force_bsddb_downgrade = False
force_python_upgrade = False
else:
self.dbstate.no_database()
break
except BsddbUpgradeRequiredError as msg:
if QuestionDialog2(_("Are you sure you want to upgrade "
"this Family Tree?"),
str(msg),
_("I have made a backup,\n"
"please upgrade my tree"),
_("Cancel"), self.uistate.window).run():
force_schema_upgrade = False
force_bsddb_upgrade = True
force_bsddb_downgrade = False
force_python_upgrade = False
else:
self.dbstate.no_database()
break
except BsddbDowngradeRequiredError as msg:
if QuestionDialog2(_("Are you sure you want to downgrade "
"this Family Tree?"),
str(msg),
_("I have made a backup,\n"
"please downgrade my Family Tree"),
_("Cancel"), self.uistate.window).run():
force_schema_upgrade = False
force_bsddb_upgrade = False
force_bsddb_downgrade = True
force_python_upgrade = False
else:
self.dbstate.no_database()
break
except PythonUpgradeRequiredError as msg:
if QuestionDialog2(_("Are you sure you want to upgrade "
"this Family Tree?"),
str(msg),
_("I have made a backup,\n"
"please upgrade my Family Tree"),
_("Cancel"), self.uistate.window).run():
force_schema_upgrade = False
force_bsddb_upgrade = False
force_bsddb_downgrade = False
force_python_upgrade = True
else:
self.dbstate.no_database()
break
# Get here is there is an exception the while loop does not handle
except BsddbDowngradeError as msg:
self.dbstate.no_database()
self._warn( _("Cannot open database"), str(msg))
except DbVersionError as msg:
self.dbstate.no_database()
self._errordialog( _("Cannot open database"), str(msg))
except DbEnvironmentError as msg:
self.dbstate.no_database()
self._errordialog( _("Cannot open database"), str(msg))
except PythonDowngradeError as msg:
self.dbstate.no_database()
self._warn( _("Cannot open database"), str(msg))
except OSError as msg:
self.dbstate.no_database()
self._errordialog(
_("Could not open file: %s") % filename, str(msg))
except DbError as msg:
self.dbstate.no_database()
self._dberrordialog(msg)
except Exception as newerror:
self.dbstate.no_database()
self._dberrordialog(str(newerror))
self._end_progress()
return True
#-------------------------------------------------------------------------
#
# default dir selection
#
#-------------------------------------------------------------------------
def get_default_dir():
    """Return a sensible starting folder for file-chooser dialogs.

    Preference order: folder of the most recently opened file, then the
    last import folder, then the last export folder, then home.
    """
    default_dir = os.path.dirname(config.get('paths.recent-file'))
    if not default_dir:
        return "~/"
    default_dir += os.path.sep
    if len(default_dir) <= 1:
        default_dir = config.get('paths.recent-import-dir')
    if len(default_dir) <= 1:
        default_dir = config.get('paths.recent-export-dir')
    if len(default_dir) <= 1:
        default_dir = '~/'
    return default_dir
#-------------------------------------------------------------------------
#
# FileChooser filters: what to show in the file chooser
#
#-------------------------------------------------------------------------
def add_all_files_filter(chooser):
    """
    Add an all-permitting filter to the file chooser dialog.
    """
    catch_all = Gtk.FileFilter()
    catch_all.set_name(_('All files'))
    catch_all.add_pattern('*')
    chooser.add_filter(catch_all)
#-------------------------------------------------------------------------
#
# Format selectors: explictly set the format of the file
#
#-------------------------------------------------------------------------
class GrampsFormatWidget(Gtk.ComboBox):
    """Combo box listing the available import file formats."""

    def __init__(self):
        GObject.GObject.__init__(self, model=None)

    def set(self, format_list):
        """Populate the selector from *format_list*.

        :param format_list: sequence of ``(format, label)`` pairs; the
            label is shown to the user, the format key is what
            get_value() returns.
        """
        self.store = Gtk.ListStore(GObject.TYPE_STRING)
        self.set_model(self.store)
        cell = Gtk.CellRendererText()
        self.pack_start(cell, True)
        self.add_attribute(cell, 'text', 0)
        self.format_list = format_list

        for format, label in format_list:
            self.store.append(row=[label])
        # Pre-select the first entry; set_active() expects an integer
        # row index, so pass 0 rather than the boolean False.
        self.set_active(0)

    def get_value(self):
        """Return the format key of the selected row, or None."""
        active = self.get_active()
        if active < 0:
            return None
        return self.format_list[active][0]
def format_maker():
    """
    A factory function making format selection widgets.

    Accepts a list of formats to include into selector.
    The auto selection is always added as the first one.
    The returned box contains both the label and the selector.
    """
    pmgr = GuiPluginManager.get_instance()
    format_list = [ ('auto', _('Automatically detected')) ]
    # One entry per registered import plugin, keyed by file extension.
    for plugin in pmgr.get_import_plugins():
        format_list.append( (plugin.get_extension(), plugin.get_name()) )

    type_selector = GrampsFormatWidget()
    type_selector.set(format_list)

    box = Gtk.HBox()
    label = Gtk.Label(label=_('Select file _type:'))
    label.set_use_underline(True)
    # The mnemonic (Alt+T) focuses the selector, not the label itself.
    label.set_mnemonic_widget(type_selector)
    box.pack_start(label, expand=False, fill=False, padding=6)
    box.add(type_selector)
    box.show_all()
    return (box, type_selector)
| pmghalvorsen/gramps_branch | gramps/gui/dbloader.py | Python | gpl-2.0 | 19,621 | [
"Brian"
] | af551a7e1ca20449ebac96e35be58b79d902e3e6cfbc209ae36582620d46ca67 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Command-line launcher for HaSAPPy.

Reads the LoadModule file given on the command line, builds the
analysis description, and runs each requested pipeline stage in order.

Created on Mon Mar 21 19:26:59 2016

@author: GDM
"""
import HaSAPPy.INFOloads as INFOloads
import argparse

parser = argparse.ArgumentParser(description='Launching command of HaSAPPy program')

# Parenthesised print calls behave identically on Python 2 and 3.
print("")
print("***********************************************************")
print("***HaSAPPy: Haploid Screening Analysis Package in Python***")
print('***********************************************************\n\n')

parser.add_argument('path', help = 'Provide PATH of LoadModule file to start analysis. For more details visit HaSAPPy webpage on "https://github.com/gdiminin/HaSAPPy" ', action="store")

text = parser.parse_args()

# NOTE(review): 'path' is a required positional argument, so argparse
# exits before this branch can trigger; kept for safety. Comparison to
# None uses 'is' per PEP 8.
if text.path is None:
    print('\nWARNING: informations provided are not sufficent.\nCheck -h option to have more details on requested parameters')

# Build the analysis description from the LoadModule file.
informations = {}
informations = INFOloads.read_txt(informations,text.path)
analysis = INFOloads.Info(informations)
analysis.upload_informations(informations)
analysis.starting()
analysis.to_do()
analysis.make_folders()
analysis.fill_up()
analysis.print_info()

# Each pipeline stage is imported lazily and run only when requested.
if analysis.Type_analysis.Trim:
    import HaSAPPy.Trim as Trim
    Trim.load(analysis)
if analysis.Type_analysis.AlignPhix or analysis.Type_analysis.AlignGenome:
    import HaSAPPy.Align as Align
    Align.load(analysis)
if analysis.Type_analysis.IIDefinition:
    import HaSAPPy.IIDefinition as IIDefinition
    IIDefinition.load(analysis)
if analysis.Type_analysis.GeneDefinition:
    import HaSAPPy.GeneDefinition as GeneDefinition
    GeneDefinition.library_analysis(analysis)
if analysis.Type_analysis.GroupAnalysis:
    import HaSAPPy.GroupAnalysis as GroupAnalysis
    GroupAnalysis.performing_analysis(analysis)
if analysis.Type_analysis.Tables:
    import HaSAPPy.Tables as Tables
    Tables.main(analysis)
if analysis.Type_analysis.Design:
    import HaSAPPy.DesignGeneInsertion as Design
    Design.start(analysis)
| gdiminin/HaSAPPy | HaSAPPy/HaSAPPY.py | Python | mit | 1,938 | [
"VisIt"
] | 6c3d816b64b648e08453c595f8fadce266d53fbc5c81b6c630750986d19ad7de |
from distutils.core import setup

# Packaging metadata for tovtk, a meshtal-to-VTK converter.
# NOTE(review): 'entry_points' is a setuptools feature; plain distutils
# warns on unknown keywords -- confirm setuptools is the intended backend.
setup(
    name='tovtk',
    version='1.4.2',
    description='Python meshtal to vtk converter',
    author='A. Travleev, INR-KIT',
    author_email='anton.travleev@kit.edu',
    packages=['tovtk', ],
    entry_points={'console_scripts': ['tovtk = tovtk.main:main']},
)
| inr-kit/tovtk | setup.py | Python | gpl-3.0 | 333 | [
"VTK"
] | d1f7f58f84e38f540c993cb99f875708f0dad326889ea03d558f04ee146e6653 |
# -*- coding: utf-8 -*-
try:
import numpy as np
except ImportError:
# required due to ase/test/eoswoase.py
pass
class EquationOfStateSJEOS:
    """Fit equation of state for bulk systems.

    Reference: 10.1103/PhysRevB.67.026103

    The following equation is used::

        A third order inverse polynomial fit

            2      3        -1/3
        E(V) = c + c t + c t + c t ,  t = V
            0   1      2     3

    More methods available in ase.utils.eosase2

    Use::

        eos = EquationOfState(volumes, energies)
        v0, e0, B = eos.fit()
        eos.plot()
    """

    def __init__(self, volumes, energies, eos='sjeos'):
        # only the scipy-free 'sjeos' backend is implemented here; the
        # other backends live in ase.utils.eosase2 and need scipy
        assert eos == 'sjeos', eos + ' eos not available. Probably scipy missing.'
        self.v = np.array(volumes)
        self.e = np.array(energies)
        self.eos_string = 'sjeos'
        self.v0 = None  # set by fit()

    def fit(self):
        """Calculate volume, energy, and bulk modulus.

        Returns the optimal volume, the minumum energy, and the bulk
        modulus. Notice that the ASE units for the bulk modulus is
        eV/Angstrom^3 - to get the value in GPa, do this::

            v0, e0, B = eos.fit()
            print B / kJ * 1.0e24, 'GPa'

        Raises ValueError when the fitted curve has no physical minimum.
        """
        fit0 = np.poly1d(np.polyfit(self.v**-(1.0 / 3), self.e, 3))
        fit1 = np.polyder(fit0, 1)
        fit2 = np.polyder(fit1, 1)
        self.v0 = None
        t0 = None
        for t in np.roots(fit1):
            # np.roots may return complex roots; only a real, positive t
            # with positive curvature corresponds to an energy minimum
            # (comparing a complex root with '> 0' would raise on py3)
            if np.isreal(t) and t.real > 0 and fit2(t.real) > 0:
                t0 = t.real
                self.v0 = t0**-3
                break
        if self.v0 is None:
            raise ValueError('No minimum!')
        # use the explicitly chosen root t0 (the original relied on the
        # loop variable surviving past the loop)
        self.e0 = fit0(t0)
        self.B = t0**5 * fit2(t0) / 9
        self.fit0 = fit0
        return self.v0, self.e0, self.B

    def plot(self, filename=None, show=None):
        """Plot fitted energy curve.

        Uses Matplotlib to plot the energy curve. Use *show=True* to
        show the figure and *filename='abc.png'* or
        *filename='abc.eps'* to save the figure to a file.
        Returns the figure object."""
        #import matplotlib.pyplot as plt
        import pylab as plt
        if self.v0 is None:
            # make sure fit parameters (v0, e0, B, fit0) exist
            self.fit()
        if filename is None and show is None:
            show = True
        x = 3.95
        f = plt.figure(figsize=(x * 2.5**0.5, x))
        f.subplots_adjust(left=0.12, right=0.9, top=0.9, bottom=0.15)
        plt.plot(self.v, self.e, 'o')
        x = np.linspace(min(self.v), max(self.v), 100)
        y = self.fit0(x**-(1.0 / 3))
        plt.plot(x, y, '-r')
        try:
            # with ASE available, label the axes in real units
            from ase.units import kJ
            plt.xlabel(u'volume [Å^3]')
            plt.ylabel(u'energy [eV]')
            plt.title(u'%s: E: %.3f eV, V: %.3f Å^3, B: %.3f GPa' %
                      (self.eos_string, self.e0, self.v0, self.B / kJ * 1.e24))
        except ImportError:
            # ASE missing: fall back to generic unit labels
            plt.xlabel(u'volume [L(length)^3]')
            plt.ylabel(u'energy [E(energy)]')
            plt.title(u'%s: E: %.3f E, V: %.3f L^3, B: %.3e E/L^3' %
                      (self.eos_string, self.e0, self.v0, self.B))
        if show:
            plt.show()
        if filename is not None:
            f.savefig(filename)
        return f
if __name__ == '__main__':
    # self-test against reference values taken from ase/test/eos.py;
    # silently skipped when numpy is unavailable
    try:
        import numpy as np
        sample_volumes = [29.205536, 30.581492, 32.000000, 33.461708, 34.967264]
        sample_energies = [0.0190898, -0.0031172, -0.0096925, -0.0004014, 0.0235753]
        expected = (31.867118229937798, -0.0096410046694188622, 0.23984474782755572)
        fitted_v0, fitted_e0, fitted_B = EquationOfStateSJEOS(sample_volumes,
                                                             sample_energies).fit()
        assert abs(fitted_v0 - expected[0]) < 5.e-6
        assert abs(fitted_B - expected[2]) < 5.e-6
    except ImportError:
        pass
"ASE"
] | af4eb41e3e01c8188a2255f1550ab017425462c4a9edd261a52abebf39a20bb2 |
#
# Copyright 2001 - 2011 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from ige import *
from xml.dom.minidom import Node
from ige.IObject import IObject
from ige.IDataHolder import IDataHolder
import Rules, Utils, ShipUtils
from Const import *
import math, random, copy
from ige import log
class IPlanet(IObject):
typeID = T_PLANET
    def init(self, obj):
        """Initialise all persistent attributes of a freshly created planet.

        *obj* is the bare data holder that becomes the planet record; every
        gameplay attribute is given an explicit neutral default here.
        """
        IObject.init(self, obj)
        # position and physical characteristics
        obj.x = 0.0
        obj.y = 0.0
        obj.plDiameter = 0
        obj.plType = u'-'
        obj.plMin = 0
        obj.plBio = 0
        obj.plEn = 0
        obj.plEnv = 0
        obj.plSlots = 0
        obj.plMaxSlots = 0
        #obj.plMoonsSlots = 0
        #obj.plMaxMoonsSlots = 0
        obj.plStratRes = 0
        obj.plDisease = 0
        obj.plStarting = 0
        obj.orbit = 0
        obj.storPop = 0
        obj.slots = []
        obj.lastPirCapture = 0
        # storage
        obj.storBio = 0
        obj.storEn = 0
        obj.minBio = Rules.colonyMinBio
        obj.minEn = Rules.colonyMinEn
        obj.maxBio = 0
        obj.maxEn = 0
        # changes/prod
        obj.prodQueue = []
        obj.globalQueue = 0
        obj.changeBio = 0
        obj.changeEn = 0
        obj.changePop = 0
        obj.changeEnv = 0
        obj.prodProd = 0
        obj.effProdProd = 0
        obj.prodSci = 0
        obj.effProdSci = 0
        obj.unemployedPop = 0
        # eating / housing
        obj.popEatBio = 10
        obj.popEatEn = 0
        obj.maxPop = 0
        # extra goodies (scanner, morale, military support)
        obj.solarmod = 0
        obj.scannerPwr = 0
        obj.signature = 75
        obj.autoMinStor = 1
        obj.morale = Rules.maxMorale
        obj.changeMorale = 0.0
        obj.moraleTrgt = 0.0
        # moraleModifiers [ base morale by distance from homeworld, from buildings, from population, sumary 1+2+3 ]
        obj.moraleModifiers = [ 0.0 , 0.0 , 0.0 , 0.0 ]
        obj.revoltLen = 0
        obj.combatExp = 0
        obj.isMilitary = 0
        obj.refuelMax = 0
        obj.refuelInc = 0
        obj.repairShip = 0.0
        obj.upgradeShip = 0.0
        obj.trainShipInc = 0
        obj.trainShipMax = 0
        obj.fleetSpeedBoost = 1.0
        obj.ownerSince = 0
        obj.shield = 0 #current planetary shield level
        obj.maxShield = 0 #structural max sheild (best structure method)
        obj.prevShield = 0 #previous turn's shield level (for client growth calculation)
    def startConstruction(self, tran, obj, techID, quantity, targetID, isShip, reportFinished,
                          demolishStruct):
        """Queue a new construction task on this planet.

        techID         -- technology or ship-design id to build
        quantity       -- number of items to build (must be >= 1)
        targetID       -- planet receiving the result (must be in the same system)
        isShip         -- 1 when techID refers to a ship design, else 0
        reportFinished -- send a message to the owner on completion
        demolishStruct -- tech id of a structure to tear down first (OID_NONE for none)

        Validates ownership/targets, reserves strategic resources up front
        and appends the task.  Returns (prodQueue, owner's stratRes).
        Raises GameException on any failed validation.
        """
        if len(obj.prodQueue) > Rules.maxProdQueueLen:
            raise GameException('Queue is full.')
        if quantity < 1:
            raise GameException("Quantity must be greater than 0")
        player = tran.db[obj.owner]
        if not player.techs.has_key(techID) and isShip == 0:
            raise GameException('You do not own this kind of technology.')
        if not player.shipDesigns.has_key(techID) and isShip == 1:
            raise GameException('You do not own this ship design.')
        if targetID not in tran.db[obj.compOf].planets:
            raise GameException('You can build only in the same system.')
        if isShip:
            tech = player.shipDesigns[techID]
            if tech.upgradeTo:
                raise GameException("You cannot build obsolete ship design.")
        else:
            tech = Rules.techs[techID]
            if not (tech.isStructure or tech.isProject):
                raise GameException('You cannot construct this technology.')
            if not tech.validateConstrHandler(tran, obj, tran.db[targetID], tech):
                raise GameException('Conditions for construction are not satisfied.')
        # verify strategic resource availability for the whole quantity
        # before consuming anything (all-or-nothing reservation)
        neededSR = {}
        for sr in tech.buildSRes:
            if player.stratRes.get(sr, 0) < neededSR.get(sr, 0) + quantity:
                raise GameException("You do not own required strategic resource(s)")
            neededSR[sr] = neededSR.get(sr, 0) + quantity
        # consume strategic resources
        for sr in neededSR:
            player.stratRes[sr] -= neededSR[sr]
        # start construction
        item = IDataHolder()
        item.techID = techID
        item.currProd = 0
        item.currTurn = 0
        item.quantity = int(quantity)
        item.targetID = targetID
        item.changePerc = 0
        item.isShip = bool(isShip)
        item.reportFin = bool(reportFinished)
        item.demolishStruct = demolishStruct
        item.type = T_TASK
        obj.prodQueue.append(item)
        return obj.prodQueue, player.stratRes
    startConstruction.public = 1
    startConstruction.accLevel = AL_FULL
def changeConstruction(self, tran, obj, index, quantity):
if index < 0 or index >= len(obj.prodQueue):
raise GameException("No such item in the construction queue.")
if quantity < 1:
raise GameException("Quantity must be greater than 0")
player = tran.db[obj.owner]
item = obj.prodQueue[index]
if item.isShip:
tech = player.shipDesigns[item.techID]
else:
tech = Rules.techs[item.techID]
quantityChange = quantity - obj.prodQueue[index].quantity
neededSR = {}
for sr in tech.buildSRes:
if player.stratRes.get(sr, 0) < neededSR.get(sr, 0) + quantityChange:
raise GameException("You do not own required strategic resource(s)")
neededSR[sr] = neededSR.get(sr, 0) + quantityChange
# consume strategic resources
for sr in neededSR:
player.stratRes[sr] += (-1 * neededSR[sr])
obj.prodQueue[index].quantity = quantity
return obj.prodQueue, player.stratRes
changeConstruction.public = 1
changeConstruction.accLevel = AL_FULL
def abortConstruction(self, tran, obj, index):
if index >= len(obj.prodQueue):
raise GameException('No such item in the construction queue.')
# Free strategic resources
player = tran.db[obj.owner]
item = obj.prodQueue[index]
if item.isShip:
tech = player.shipDesigns[item.techID]
else:
tech = Rules.techs[item.techID]
for sr in tech.buildSRes:
player.stratRes[sr] = player.stratRes.get(sr, 0) + item.quantity
# delete task
del obj.prodQueue[index]
return obj.prodQueue, player.stratRes
abortConstruction.public = 1
abortConstruction.accLevel = AL_FULL
def moveConstrItem(self, tran, obj, index, rel):
if index >= len(obj.prodQueue):
raise GameException('No such item in the construction queue.')
if index + rel < 0 or index + rel >= len(obj.prodQueue):
raise GameException('Cannot move.')
item = obj.prodQueue[index]
del obj.prodQueue[index]
obj.prodQueue.insert(index + rel, item)
return obj.prodQueue
moveConstrItem.public = 1
moveConstrItem.accLevel = AL_FULL
    def changeOwner(self, tran, obj, ownerID, force = 0):
        """Transfer the planet to *ownerID*.

        With force=0 the call fails when the planet is already owned;
        with force=1 the old owner is stripped first (and notified).
        Ownership change resets the revolt state, clears the production
        queue, re-enables the default global queue and automatic storage
        regulation, and restarts the ownership timer.

        Raises GameException when the planet is owned and force is 0.
        """
        # NOTE(review): oldOwnerID is computed/updated but never used below
        oldOwnerID = obj.owner
        if obj.owner == ownerID:
            # the owner is the same
            return
        elif obj.owner != OID_NONE and force == 0:
            # this planet is already owned!
            # TODO resolve conflict (based on player relations)
            raise GameException('Planet is already owned by another commander.')
        elif obj.owner != OID_NONE and force == 1:
            # remove planet from old owner; failures are logged but do not
            # block the transfer
            try:
                oldOwner = tran.db[obj.owner]
                oldOwner.planets.remove(obj.oid)
                if tran.db.has_key(obj.owner):
                    Utils.sendMessage(tran, obj, MSG_LOST_PLANET, obj.oid, None)
            except Exception:
                log.warning("Cannot remove planet from owner", obj.oid, obj.owner)
            oldOwnerID = OID_NONE
        # reset timer
        obj.ownerSince = tran.db[OID_UNIVERSE].turn
        # add planet to new owner's empire
        if ownerID != OID_NONE:
            newOwner = tran.db[ownerID]
            newOwner.planets.append(obj.oid)
        # reset some attributes
        obj.owner = ownerID
        obj.revoltLen = 0 # no revolt
        obj.prodQueue = [] # clear production queue
        obj.globalQueue = 0 # default global queue
        obj.autoMinStor = 1 # storage is set to auto
        if ownerID != OID_NONE:
            # notify player
            Utils.sendMessage(tran, obj, MSG_GAINED_PLANET, obj.oid, None)
    changeOwner.public = 1
    changeOwner.accLevel = AL_ADMIN
def setMinStorage(self, tran, obj, bio, en):
if bio < 0 or en < 0:
raise GameException('Values must be equal or greater than zero.')
obj.minBio = bio
obj.minEn = en
setMinStorage.public = 1
setMinStorage.accLevel = AL_FULL
def setAutoMinStorage(self, tran, obj, on):
if on != 0 and on != 1:
raise GameException('Must be 0 or 1')
obj.autoMinStor = on
return obj.autoMinStor
setAutoMinStorage.public = 1
setAutoMinStorage.accLevel = AL_FULL
def setStructOn(self, tran, obj, slotIdx, on):
if slotIdx >= len(obj.slots) or slotIdx < 0:
raise GameException('No such structure.')
if on:
obj.slots[slotIdx][STRUCT_IDX_STATUS] |= STRUCT_STATUS_ON
else:
obj.slots[slotIdx][STRUCT_IDX_STATUS] &= ~STRUCT_STATUS_ON
return obj.slots[slotIdx]
setStructOn.public = 1
setStructOn.accLevel = AL_FULL
def demolishStruct(self, tran, obj, slotIdx):
# TODO implement special button for demolishing structures when
# planet surrenders
#isCombat = tran.db[obj.compOf].combatCounter > 0
#if isCombat and len(obj.slots) < obj.plSlots:
# raise GameException("You cannot destroy this structure under fire - at least one slot is free.")
if slotIdx >= len(obj.slots) or slotIdx < 0:
raise GameException('No such structure.')
del obj.slots[slotIdx]
return obj.slots
demolishStruct.public = 1
demolishStruct.accLevel = AL_FULL
def moveStruct(self, tran, obj, slotIdx, rel):
if slotIdx >= len(obj.slots) or slotIdx < 0:
raise GameException('No such structure.')
if slotIdx + rel < 0 or slotIdx + rel >= len(obj.slots):
raise GameException('Cannot move.')
struct = obj.slots[slotIdx]
del obj.slots[slotIdx]
obj.slots.insert(slotIdx + rel, struct)
return obj.slots
moveStruct.public = 1
moveStruct.accLevel = AL_FULL
    def processPRODPhase(self, tran, obj, data):
        """Run the production (PROD) phase of a turn for this planet.

        In order: recompute storage limits, operate every structure
        (resource production/consumption, repair and decay), regenerate
        the planetary shield, feed and grow/starve the population, advance
        the construction queue, apply environment changes (terraforming,
        class upgrades/downgrades) and finally strip ownership from a
        depopulated planet.
        """
        if obj.plType == "A":
            self.cmd(obj).generateAsteroid(tran, obj)
        # max storage
        obj.maxPop = obj.plSlots * Rules.popPerSlot + Rules.popBaseStor
        obj.maxBio = obj.plSlots * Rules.bioPerSlot + Rules.bioBaseStor
        obj.maxEn = obj.plSlots * Rules.enPerSlot + Rules.enBaseStor
        # refuel & repair
        obj.refuelMax = 0
        obj.refuelInc = 0
        obj.repairShip = 0.0
        obj.upgradeShip = 0.0
        # train
        obj.trainShipInc = 0
        obj.trainShipMax = 0
        obj.fleetSpeedBoost = 1.0
        #
        if obj.storPop <= 0 and not obj.slots and obj.owner == OID_NONE:
            # do not process this planet
            return
        obj.scannerPwr = Rules.scannerMinPwr
        obj.prodProd = obj.prodSci = 0
        # the change* attributes track this turn's deltas: start from the
        # negated old values; the new values are added back at the end
        obj.changeBio = - obj.storBio
        obj.changeEn = - obj.storEn
        obj.changePop = - obj.storPop
        obj.changeEnv = - obj.plEnv
        obj.changeMorale = - obj.morale
        # parent objects
        system = tran.db[obj.compOf]
        galaxy = tran.db[system.compOf]
        # env. conditions
        emrLevel = galaxy.emrLevel
        # collect strategic resources
        owner = tran.db.get(obj.owner, None)
        if owner and obj.plStratRes != SR_NONE:
            turn = tran.db[OID_UNIVERSE].turn
            if turn % Rules.stratResRate == 0:
                owner.stratRes[obj.plStratRes] = owner.stratRes.get(obj.plStratRes, 0) + 1
                Utils.sendMessage(tran, obj, MSG_EXTRACTED_STRATRES, obj.oid, obj.plStratRes)
        # compute base morale (falls off with distance from the homeworld)
        if owner:
            homePlanet = tran.db[owner.planets[0]]
            dist = int(math.sqrt((homePlanet.x - obj.x) ** 2 + (homePlanet.y - obj.y) ** 2))
            moraleTrgt = -37.5 * dist / owner.govPwrCtrlRange + 107.5
            obj.moraleModifiers[0] = max(Rules.minMoraleTrgt, min(moraleTrgt, Rules.maxMorale))
            #@log.debug(obj.oid, "Morale target", obj.moraleTrgt, "dist", dist, owner.govPwrCtrlRange)
        # auto regulation of min resources
        if obj.autoMinStor:
            obj.minBio = obj.minEn = 0
        # combat?
        isCombat = system.combatCounter > 0
        obj.unemployedPop = obj.storPop
        # ok, reset max pop
        obj.maxPop = 0
        # process all structures
        destroyed = []
        obj.maxShield = 0
        obj.solarmod = 0
        #@log.debug("Morale bonus/penalty for planet", obj.oid, moraleBonus)
        # reset of "morale modifier by buildings" value
        obj.moraleModifiers[1] = 0
        for struct in obj.slots:
            tech = Rules.techs[struct[STRUCT_IDX_TECHID]]
            # compute struct effectivity
            techEff = Utils.getTechEff(tran, struct[STRUCT_IDX_TECHID], obj.owner)
            # morale does not affect hit points of structures
            maxHP = int(tech.maxHP * techEff)
            if maxHP < struct[STRUCT_IDX_HP]:
                # damage structure
                struct[STRUCT_IDX_HP] = max(maxHP, struct[STRUCT_IDX_HP] - int(maxHP * Rules.decayRatio))
            # auto regulation of min resources
            if obj.autoMinStor:
                obj.minBio += tech.operBio * Rules.autoMinStorTurns
                obj.minEn += tech.operEn * Rules.autoMinStorTurns
            struct[STRUCT_IDX_STATUS] &= STRUCT_STATUS_RESETFLGS
            # each structure accomodate it's workers
            obj.maxPop += tech.operWorkers
            # produce/consume resources
            # find most limitating condition
            try:
                opStatus = min(1.0, float(struct[STRUCT_IDX_HP]) / maxHP)
            except:
                opStatus = 0.0
                log.warning('Invalid max HP of structure', STRUCT_IDX_TECHID)
            if tech.operBio > 0:
                opStatus = min(opStatus, float(obj.storBio) / tech.operBio)
            if tech.operEn > 0:
                opStatus = min(opStatus, float(obj.storEn) / tech.operEn)
            if tech.operWorkers > 0:
                opStatus = min(opStatus, float(obj.unemployedPop) / tech.operWorkers)
            if not struct[STRUCT_IDX_STATUS] & STRUCT_STATUS_ON:
                opStatus = 0.0
            struct[STRUCT_IDX_OPSTATUS] = int(100 * opStatus)
            # solarmod effects ENV change and terraforming only if benificial
            if tech.solarMod * opStatus > 0:
                obj.solarmod = max(obj.solarmod,tech.solarMod * techEff * opStatus)
            elif tech.solarMod * opStatus < 0:
                obj.solarmod = min(obj.solarmod,tech.solarMod * techEff * opStatus)
            #@log.debug("IPlanet - oper status", obj.oid, struct, opStatus)
            # set status bits
            if tech.operBio > obj.storBio: struct[STRUCT_IDX_STATUS] |= STRUCT_STATUS_NOBIO
            if tech.operEn > obj.storEn: struct[STRUCT_IDX_STATUS] |= STRUCT_STATUS_NOEN
            if tech.operWorkers > obj.unemployedPop: struct[STRUCT_IDX_STATUS] |= STRUCT_STATUS_NOPOP
            # produce/consume
            #@log.debug("Active structure", obj.oid, struct)
            # bio
            b, m, e, d = tech.prodBioMod
            prodMod = (b * obj.plBio + m * obj.plMin + e * obj.plEn + d * 100) / 100
            obj.storBio += int(tech.prodBio * prodMod * techEff * opStatus) - int(tech.operBio * opStatus)
            # en
            b, m, e, d = tech.prodEnMod
            prodMod = (b * obj.plBio + m * obj.plMin + e * obj.plEn + d * 100) / 100
            obj.storEn += int(tech.prodEn * prodMod * techEff * opStatus) - int(tech.operEn * opStatus)
            obj.unemployedPop -= min(obj.unemployedPop, int(tech.operWorkers * opStatus))
            obj.storPop += int(tech.prodPop * techEff * opStatus)
            obj.scannerPwr = max(int(tech.scannerPwr * techEff * (2.0 - emrLevel) * opStatus), obj.scannerPwr)
            # rebellion and combat has common penalty
            b, m, e, d = tech.prodProdMod
            prodMod = (b * obj.plBio + m * obj.plMin + e * obj.plEn + d * 100) / 100
            obj.prodProd += int(tech.prodProd * prodMod * techEff * opStatus)
            # science
            b, m, e, d = tech.prodSciMod
            prodMod = (b * obj.plBio + m * obj.plMin + e * obj.plEn + d * 100) / 100
            obj.prodSci += int(tech.prodSci * prodMod * techEff * opStatus)
            # refuelling & repairing
            obj.refuelMax = max(obj.refuelMax, int(tech.refuelMax * techEff * opStatus))
            # fleet support is suspended while revolting or under combat
            if obj.revoltLen == 0 and not isCombat:
                # refuelling
                obj.refuelInc = max(obj.refuelInc, int(tech.refuelInc * techEff * opStatus))
                # repair
                obj.repairShip += tech.repairShip * techEff * opStatus
                obj.upgradeShip += tech.upgradeShip * techEff * opStatus
                # train
                obj.trainShipMax = max(obj.trainShipMax, tech.trainShipMax)
                obj.trainShipInc = max(obj.trainShipInc, tech.trainShipInc * techEff * opStatus)
            # shielding
            obj.maxShield = max(tech.planetShield * techEff * opStatus, obj.maxShield)
            # stargates
            obj.fleetSpeedBoost = max(obj.fleetSpeedBoost, tech.fleetSpeedBoost * techEff * opStatus)
            # storage
            obj.maxBio += int(tech.storBio * techEff)
            obj.maxEn += int(tech.storEn * techEff)
            obj.maxPop += int(tech.storPop * techEff)
            obj.plEnv += int(tech.prodEnv * techEff * opStatus)
            # morale modifier of the building
            obj.moraleModifiers[1] += tech.moraleTrgt * techEff * opStatus
            # auto repair/damage
            # also damage structures on not owned planets
            if struct[STRUCT_IDX_HP] < maxHP and opStatus > 0.0:
                struct[STRUCT_IDX_HP] = min(maxHP, struct[STRUCT_IDX_HP] + max(int(maxHP * Rules.repairRunningRatio), 1))
                struct[STRUCT_IDX_STATUS] |= STRUCT_STATUS_REPAIRING
            elif struct[STRUCT_IDX_HP] > maxHP or opStatus <= 0.0:
                # flag only for non functional structure
                if opStatus <= 0.0:
                    struct[STRUCT_IDX_STATUS] |= STRUCT_STATUS_DETER
                # damage it a bit
                struct[STRUCT_IDX_HP] -= max(1, int(maxHP * Rules.decayRatio))
                if obj.storPop > 0:
                    # do not fall below 1% of HP for populated planets
                    struct[STRUCT_IDX_HP] = max(struct[STRUCT_IDX_HP], maxHP / 100)
                if struct[STRUCT_IDX_HP] <= 0:
                    # destroy building only if there is no population
                    destroyed.append(struct)
        # do shield self generation
        obj.prevShield = obj.shield #for planet display of shield growth
        if obj.maxShield < obj.shield:
            obj.shield = obj.maxShield
        if obj.maxShield > obj.shield and not isCombat:
            regenTemp = max(1, Rules.plShieldRegen* obj.maxShield) #always regen at at least 1
            obj.shield = min(obj.shield + regenTemp, obj.maxShield) #don't let it regen over shieldMax
        # pass scanner/... to the system
        #@log.debug(obj.oid, "IPlanet scanner", obj.scannerPwr)
        system.scannerPwrs[obj.owner] = max(obj.scannerPwr, system.scannerPwrs.get(obj.owner, 0))
        # destroy destroyed buildings
        for struct in destroyed:
            obj.slots.remove(struct)
        # process population
        if obj.storPop > 0:
            # the reserve is needed
            #obj.maxPop = int((obj.maxPop + getattr(owner, "techLevel", 1) * Rules.tlPopReserve) * Rules.maxPopReserve)
            obj.maxPop = int(obj.maxPop * Rules.maxPopReserve)
            obj.maxPop += int((obj.plSlots - len(obj.slots)) * getattr(owner, "techLevel", 1) * Rules.tlPopReserve)
            # max pop
            maxPop = obj.maxPop
            if obj.popEatBio: maxPop = min(maxPop, 1000.0 * obj.storBio / obj.popEatBio)
            if obj.popEatEn: maxPop = min(maxPop, 1000.0 * obj.storEn / obj.popEatEn)
            maxPop = int(maxPop)
            # eat
            pop = obj.storPop / 1000.0
            wantBio = int(math.ceil(pop * obj.popEatBio))
            wantEn = int(math.ceil(pop * obj.popEatEn))
            # auto regulation of min resources
            if obj.autoMinStor:
                obj.minBio += wantBio * Rules.autoMinStorTurns
                obj.minEn += wantEn * Rules.autoMinStorTurns
            # consume resources
            obj.storBio -= min(obj.storBio, wantBio)
            obj.storEn -= min(obj.storEn, wantEn)
            # modify pop
            if obj.storPop > maxPop:
                # die
                obj.storPop -= max(int((obj.storPop - maxPop) * Rules.popDieRate), Rules.popMinDieRate)
                #if obj.storPop < maxPop: obj.storPop = maxPop
                # do not generate this message when construction has been destroyed
                # and do not lower morale too
                if obj.storPop < obj.maxPop:
                    #@Utils.sendMessage(tran, obj, MSG_NOSUPPORT_POP, obj.oid, None)
                    obj.morale = max(obj.morale - Rules.moraleLostNoFood,0)
            elif obj.storPop < maxPop:
                # born
                obj.storPop += max(min(int(obj.storPop * Rules.popGrowthRate), maxPop - obj.storPop), Rules.popMinGrowthRate)
        # produce items in construction queue
        if owner:
            moraleBonus = Rules.moraleProdBonus[int(obj.morale / Rules.moraleProdStep)]
            prod = obj.effProdProd = max(0, int(obj.prodProd * (owner.prodEff + moraleBonus)))
            if (obj.morale > 15 and prod == 0 and obj.prodProd > 0 and owner.prodEff > 0): #added for super-low moral bonus issues
                prod = obj.effProdProd = 1
        else:
            prod = obj.prodProd
        index = 0
        # NOTE(review): 'missing' is assigned but never used in this method
        missing = [0, 0, 0, 0, 0]
        idleProd = 0.0
        # empty queue should be filled by global queue
        if len(obj.prodQueue) == 0 and prod:
            task = self.cmd(obj).popGlobalQueue(tran, obj)
            if task:
                obj.prodQueue.append(task)
        while prod > 0 and index < len(obj.prodQueue):
            item = obj.prodQueue[index]
            # check if owner has this tech
            if not item.isShip and item.techID not in owner.techs:
                # bad tech
                del obj.prodQueue[index]
                # TODO send message
                # NOTE(review): there is no 'continue' after the deletion, so
                # the removed item is still processed below -- confirm intended
            # set target
            target = tran.db[item.targetID]
            # set tech and build conditions
            if item.isShip:
                tech = tran.db[obj.owner].shipDesigns[item.techID]
                mod = Rules.buildOnSamePlanetMod
            else:
                tech = Rules.techs[item.techID]
                # check validity of the project
                if not tech.validateConstrHandler(tran, obj, target, tech):
                    index += 1
                    # message to player
                    Utils.sendMessage(tran, obj, MSG_INVALID_TASK, obj.oid, item.techID)
                    continue
            # building on other planet is more expensive
            if item.targetID == obj.oid:
                mod = Rules.buildOnSamePlanetMod
            else:
                mod = Rules.buildOnAnotherPlanetMod
            # compute needs (do not consume resources under minimal storage)
            wantProd = min(int(tech.buildProd * mod / tech.buildTurns - item.currProd), prod)
            # production
            item.changePerc = wantProd * 10000 / (tech.buildProd * mod)
            # consume / produce
            if item.techID == Rules.Tech.IDLETASK and item.isShip == 0:
                idleProd += wantProd
            prod -= wantProd
            item.currProd += wantProd
            # check, if production is complete
            if item.currProd >= tech.buildProd * mod:
                # item is complete
                if item.isShip:
                    # find commander's fleet
                    fleet = None
                    # check if current system has any redirection
                    hasRedirection = obj.compOf in owner.shipRedirections
                    for fleetID in system.fleets:
                        tmpFleet = tran.db[fleetID]
                        if tmpFleet.owner == obj.owner and Utils.isIdleFleet(tmpFleet):
                            fleet = tmpFleet
                            break
                    if not fleet or hasRedirection:
                        # no idle fleet (or a redirection exists): create a
                        # fresh fleet and send it on its way
                        fleet = self.new(T_FLEET)
                        tran.db.create(fleet)
                        self.cmd(fleet).create(tran, fleet, system, obj.owner)
                        self.cmd(fleet).addAction(tran, fleet, 0, FLACTION_REDIRECT, OID_NONE, None)
                    # add ships to the fleet
                    self.cmd(fleet).addNewShip(tran, fleet, item.techID)
                    if item.reportFin and item.quantity == 1:
                        Utils.sendMessage(tran, obj, MSG_COMPLETED_SHIP, obj.oid, item.techID)
                elif tech.isStructure:
                    # if there is struct to demolish, find it and delete it
                    if item.demolishStruct != OID_NONE:
                        structToDemolish = None
                        for struct in target.slots:
                            if struct[STRUCT_IDX_TECHID] == item.demolishStruct:
                                structToDemolish = struct
                                break
                        if structToDemolish:
                            # struct found -- delete it
                            target.slots.remove(structToDemolish)
                        else:
                            # well, this can be a problem?
                            # shall we report it? (TODO: decide)
                            pass
                    if len(target.slots) < target.plSlots:
                        target.slots.append(Utils.newStructure(tran, item.techID, obj.owner))
                        try:
                            tech.finishConstrHandler(tran, obj, target, tech)
                        except Exception:
                            log.warning("Cannot execute finish constr handler")
                        if item.reportFin and item.quantity == 1:
                            Utils.sendMessage(tran, obj, MSG_COMPLETED_STRUCTURE, target.oid, item.techID)
                    else:
                        # no free slot!
                        Utils.sendMessage(tran, obj, MSG_CANNOTBUILD_NOSLOT, target.oid, None)
                elif tech.isProject:
                    tech.finishConstrHandler(tran, obj, target, tech)
                    if item.reportFin and item.quantity == 1:
                        Utils.sendMessage(tran, obj, MSG_COMPLETED_PROJECT, target.oid, item.techID)
                else:
                    raise GameException('Unsupported type of technology %d ' % item.techID)
                # remove item from prod queue
                item.quantity -= 1
                if item.quantity == 0:
                    # remove item from the queue
                    del obj.prodQueue[index]
                    # was it last item in the queue? pop the global one!
                    if index == len(obj.prodQueue):
                        task = self.cmd(obj).popGlobalQueue(tran, obj)
                        if task:
                            obj.prodQueue.append(task)
                else:
                    # try to produce another item
                    item.currProd = 0
            else:
                # item is not complete stop production
                index += 1
                break
        # decay items not currently produced
        while index < len(obj.prodQueue):
            item = obj.prodQueue[index]
            item.currProd = max(0, int(item.currProd - max(item.currProd * Rules.decayRatio, 1)))
            index += 1
        # use excess raw CP to increase production elsewhere
        prod += idleProd
        #if obj.effProdProd > 0 and owner:
        #owner.prodIncreasePool += float(prod) / obj.effProdProd * obj.prodProd
        if prod > 0.0:
            owner.prodIncreasePool += prod
        #if prod > 1: # ignore rounding error
        #    # report wasting production points
        #    Utils.sendMessage(tran, obj, MSG_WASTED_PRODPTS, obj.oid, (prod,))
        # auto environment changes
        downgradeTo = Rules.planetSpec[obj.plType].downgradeTo
        solarminus = 0
        solarplus = 0
        if obj.solarmod > 0:
            solarplus = obj.solarmod
        if obj.solarmod < 0:
            solarminus = obj.solarmod
        if downgradeTo is not None:
            if (Rules.planetSpec[downgradeTo].upgradeEnReqs[0] > obj.plEn + solarplus) or (Rules.planetSpec[downgradeTo].upgradeEnReqs[1] < obj.plEn + solarminus):
                # auto damage on plEn outside downgrade's upgrade range
                obj.plEnv -= Rules.envAutoMod
        if obj.plBio > Rules.planetSpec[obj.plType].maxBio:
            # auto damage on plBio > maxBio of class
            dEnv = int((obj.plBio - Rules.planetSpec[obj.plType].maxBio) * Rules.envAutoMod)
            if obj.plEnv > 0:
                obj.plEnv -= min(obj.plEnv, dEnv)
            else:
                obj.plEnv -= dEnv
        # small chance of self-upgrading
        spec = Rules.planetSpec[obj.plType]
        if owner:
            chance = int((obj.plBio - spec.maxBio) * Rules.envSelfUpgradeChance[owner.race])
        else:
            chance = int((obj.plBio - spec.maxBio) * Rules.envSelfUpgradeChance["H"])
        if Utils.rand(0, 10001) < chance and spec.upgradeTo and \
           obj.plEn + solarplus >= spec.upgradeEnReqs[0] and obj.plEn + solarminus <= spec.upgradeEnReqs[1]:
            log.debug('IPlanet', obj.oid, 'Upgraded to', spec.upgradeTo)
            obj.plType = spec.upgradeTo
            Utils.sendMessage(tran, obj, MSG_UPGRADED_PLANET_ECO, obj.oid, spec.upgradeTo)
        # convert accumulated environment points into plBio changes
        while obj.plEnv >= Rules.envInterval:
            #@log.debug('IPlanet', obj.oid, 'Env improved')
            obj.plEnv -= Rules.envInterval
            obj.changeEnv += Rules.envInterval
            if obj.plBio < 200: obj.plBio += 1
        while obj.plEnv < 0:
            if obj.plBio > 0:
                obj.plBio -= 1
                obj.plEnv += Rules.envInterval
                obj.changeEnv -= Rules.envInterval
            else:
                obj.changeEnv += obj.plEnv
                obj.plEnv = 0
        # downgrade planet if necessary
        if obj.plBio < Rules.planetSpec[obj.plType].minBio:
            downgradeTo = Rules.planetSpec[obj.plType].downgradeTo
            if downgradeTo:
                log.debug('IPlanet', obj.oid, 'Downgraded to', downgradeTo)
                obj.plType = downgradeTo
                Utils.sendMessage(tran, obj, MSG_DOWNGRADED_PLANET_ECO, obj.oid, downgradeTo)
        # record changes
        obj.changeBio += obj.storBio
        obj.changeEn += obj.storEn
        obj.changePop += obj.storPop
        obj.changeEnv += obj.plEnv
        # auto regulation of min resources
        if obj.autoMinStor:
            obj.minBio = min(obj.minBio, obj.maxBio / 2)
            obj.minEn = min(obj.minEn, obj.maxEn / 2)
        # science
        if owner:
            moraleBonus = Rules.moraleProdBonus[int(obj.morale / Rules.moraleProdStep)]
            obj.effProdSci = max(0, int(obj.prodSci * (owner.sciEff + moraleBonus)))
            owner.sciPoints += obj.effProdSci
        # planet with no population cannot have an owner
        # and planet with no owner cannot have population
        if (obj.storPop <= 0 and obj.owner != OID_NONE) or obj.owner == OID_NONE:
            # TODO: remove
            #if obj.owner != OID_NONE:
            #    # send message
            #    Utils.sendMessage(tran, obj, MSG_LOST_PLANET, obj.oid, None)
            # remove this planet from owner's planets
            self.cmd(obj).changeOwner(tran, obj, OID_NONE, force = 1)
            obj.storPop = 0
    processPRODPhase.public = 1
    processPRODPhase.accLevel = AL_ADMIN
    def processACTIONPhase(self, tran, obj, data):
        """Turn-processing hook for the ACTION phase.

        Intentionally a no-op for planets; presumably present to satisfy
        the common turn-processing interface shared with other objects.
        """
        return
    processACTIONPhase.public = 1
    processACTIONPhase.accLevel = AL_ADMIN
    def processFINALPhase(self, tran, obj, data):
        """Run the FINAL phase of a turn for this planet.

        Updates morale (population modifier, drift toward the target,
        revolt start/end rolls), applies revolt resource destruction,
        clamps storage to capacity and accumulates per-player statistics.
        """
        if obj.storPop <= 0 and not obj.slots and obj.owner == OID_NONE:
            # do not process this planet
            return
        # reset of "morale modifier by population" value
        obj.moraleModifiers[2] = 0
        system = tran.db[obj.compOf]
        galaxy = tran.db[system.compOf]
        # morale only evolves while galaxy time is actually running
        if galaxy.timeEnabled and not galaxy.timeStopped:
            # too much population affects morale (if there is more than base population)
            if obj.storPop > Rules.moraleBasePop:
                obj.moraleModifiers[2] -= Rules.moraleHighPopPenalty * obj.storPop / Rules.moraleBasePop
            elif obj.storPop <= Rules.moraleLowPop:
                obj.moraleModifiers[2] += Rules.moraleLowPopBonus
            # count final morale values
            obj.moraleModifiers[3] = obj.moraleModifiers[0] +obj.moraleModifiers[1] + obj.moraleModifiers[2]
            obj.moraleTrgt = obj.moraleModifiers[3]
            obj.moraleTrgt = max(0.0, min(obj.moraleTrgt, Rules.maxMorale))
            # drift current morale toward the target by at least 1 per turn;
            # morale does not improve while the system is in combat
            if obj.morale > int(obj.moraleTrgt):
                obj.morale -= max(1.0, (obj.morale - obj.moraleTrgt) * Rules.moraleChngPerc)
            elif obj.morale < int(obj.moraleTrgt) and system.combatCounter == 0:
                obj.morale += max(1.0, (obj.moraleTrgt - obj.morale) * Rules.moraleChngPerc)
            #@log.debug('IPlanet', 'Mor Mor trgt/reb thr', obj.morale, obj.moraleTrgt)
            # revolt?
            if obj.revoltLen > 0:
                obj.revoltLen += 1
            if obj.morale < Rules.revoltThr and obj.owner != OID_NONE and obj.revoltLen == 0:
                chance = (Rules.revoltThr - obj.morale) * Rules.moralePerPointChance
                #@log.debug('IPlanet', 'Start revolt? mor, mor trgt, reb thr, chance', obj.morale, obj.moraleTrgt, chance)
                if Utils.rand(0, 101) <= chance:
                    # rebelion starts
                    #@log.debug('IPlanet', 'Revolt on', obj.oid)
                    obj.revoltLen = 1
                    Utils.sendMessage(tran, obj, MSG_REVOLT_STARTED, obj.oid, None)
            elif obj.revoltLen > 0 and obj.morale > Rules.revoltThr:
                chance = (obj.morale - Rules.revoltThr) * Rules.moralePerPointChance
                #@log.debug('IPlanet', 'Stop revolt? mor, mor trgt, reb thr, chance', obj.morale, obj.moraleTrgt, chance)
                if Utils.rand(0, 101) <= chance:
                    # revolt ends
                    obj.revoltLen = 0
                    Utils.sendMessage(tran, obj, MSG_REVOLT_ENDED, obj.oid, None)
        obj.morale = max(0.0, min(Rules.maxMorale, obj.morale))
        obj.changeMorale += obj.morale
        # when rebelling destroy some resources
        if obj.revoltLen > 0:
            obj.storBio -= int(obj.storBio * Rules.revoltDestrBio)
            obj.storEn -= int(obj.storEn * Rules.revoltDestrEn)
        # storage
        obj.storBio = min(obj.storBio, obj.maxBio)
        obj.storEn = min(obj.storEn, obj.maxEn)
        #obj.storPop = min(obj.storPop, obj.maxPop) TODO remove
        # collect stats
        if obj.owner != OID_NONE:
            player = tran.db[obj.owner]
            player.stats.storPop += obj.storPop
            player.stats.prodProd += obj.prodProd
            player.stats.effProdProd += obj.effProdProd
            player.stats.prodSci += obj.prodSci
            player.stats.effProdSci += obj.effProdSci
            player.stats.structs += len(obj.slots)
            player.stats.slots += obj.plSlots
            galaxyID = tran.db[obj.compOf].compOf
            if galaxyID not in player.galaxies:
                player.galaxies.append(galaxyID)
            # morale computation
            homePlanet = tran.db[player.planets[0]]
            dist = int(math.sqrt((homePlanet.x - obj.x) ** 2 + (homePlanet.y - obj.y) ** 2))
            player.tmpPopDistr[dist] = player.tmpPopDistr.get(dist, 0) + obj.storPop
    processFINALPhase.public = 1
    processFINALPhase.accLevel = AL_ADMIN
def getScanInfos(self, tran, obj, scanPwr, player):
    """Return a list with one scan report (IDataHolder) for this planet.

    The amount of detail grows with scanPwr: each Rules.levelNInfoScanPwr
    threshold unlocks more attributes, and Rules.partnerScanPwr unlocks
    everything (full partner visibility). Implicitly returns None when
    scanPwr is below level 1.
    """
    if scanPwr >= Rules.level1InfoScanPwr:
        # level 1: position and basic identification only
        result = IDataHolder()
        result._type = T_SCAN
        result.scanPwr = scanPwr
        result.oid = obj.oid
        result.signature = obj.signature
        result.type = obj.type
        result.orbit = obj.orbit
        result.compOf = obj.compOf
        result.x = obj.x
        result.y = obj.y
        result.plType = obj.plType
        if scanPwr >= Rules.level2InfoScanPwr:
            # level 2: physical properties of the planet
            result.plDiameter = obj.plDiameter
            if getattr(obj, "plType", 'X') != 'G':
                # gas giants ('G') carry no mineral abundance attribute
                result.plMin = obj.plMin
            result.plBio = obj.plBio
            result.plEn = obj.plEn
            result.plSlots = obj.plSlots
            result.plStratRes = obj.plStratRes
            result.plMaxSlots = obj.plMaxSlots
        if scanPwr >= Rules.level3InfoScanPwr:
            # level 3: name, ownership and population
            result.name = obj.name
            result.storPop = obj.storPop
            result.owner = obj.owner
            #XXX result.plMaxMoonsSlots = obj.plMaxMoonsSlots
        if scanPwr >= Rules.level4InfoScanPwr:
            # TODO provide less information
            result.hasRefuel = (obj.refuelInc > 0) #simple detect if docks exist for problems dialog
            result.slots = obj.slots
            #XXX result.plMoonsSlots = obj.plMoonsSlots
            result.shield = obj.shield
            # shield history is hidden below partner level
            result.prevShield = -1
            result.maxShield = -1
        if scanPwr >= Rules.partnerScanPwr:
            # partner level: full military / logistics details
            result.maxShield = obj.maxShield
            result.prevShield = obj.prevShield
            result.refuelMax = obj.refuelMax
            result.refuelInc = obj.refuelInc
            result.scannerPwr = obj.scannerPwr
            result.trainShipInc = obj.trainShipInc
            result.trainShipMax = obj.trainShipMax
            result.upgradeShip = obj.upgradeShip
            result.repairShip = obj.repairShip
            result.fleetSpeedBoost = obj.fleetSpeedBoost
        return [result]
def loadDOMNode(self, tran, obj, xoff, yoff, orbit, node):
    """Initialize planet position/orbit and apply its XML definition node.

    Child elements handled: 'properties' (bulk attribute load) and
    'startingpoint' (registers this planet as a galaxy starting position).
    Raises GameException on any other element tag.
    """
    obj.x = xoff
    obj.y = yoff
    obj.orbit = orbit
    for child in node.childNodes:
        # skip text/comment nodes
        if child.nodeType != Node.ELEMENT_NODE:
            continue
        tag = child.tagName
        if tag == 'properties':
            self.loadDOMAttrs(obj, child)
        elif tag == 'startingpoint':
            # register this planet as a starting position in its galaxy
            system = tran.db[obj.compOf]
            galaxy = tran.db[system.compOf]
            galaxy.startingPos.append(obj.oid)
            galaxy.numOfStartPos += 1
        else:
            raise GameException('Unknown element %s' % tag)
    return SUCC
def update(self, tran, obj):
    """Check and repair this planet's data structures.

    Removes corrupted (negative quantity) production queue items while
    refunding their strategic resources, upgrades legacy structure/queue
    records, resets invalid owners, and backfills attributes introduced
    in newer versions.
    """
    # clean up negative build queues and fix missing demolishStruct keys
    loopAgain = True
    while loopAgain:
        deletedKey = False
        for key in range(0, len(obj.prodQueue)):
            item = obj.prodQueue[key]
            if not hasattr(item, "demolishStruct"):
                item.demolishStruct = OID_NONE
            if item.quantity < 0:
                log.warning("Deleting negative item queue on", obj.oid, "for player", obj.owner)
                # BUGFIX: resolve the player BEFORE using it -- previously
                # 'player' was referenced for ship designs prior to its
                # assignment, raising NameError for the first bad ship item
                player = tran.db[obj.owner]
                if item.isShip:
                    tech = player.shipDesigns[item.techID]
                else:
                    tech = Rules.techs[item.techID]
                for sr in tech.buildSRes:
                    # quantity is negative, so this subtracts strat resources
                    player.stratRes[sr] = player.stratRes.get(sr, 0) + item.quantity
                # del the bad item. Since this changes indicies, start the check over again on remaining items
                deletedKey = True
                del obj.prodQueue[key]
                break
        # no more bad entries found; break the while loop
        if not deletedKey:
            loopAgain = False
    # TODO: remove in 0.5.34
    for struct in obj.slots:
        if len(struct) < 4:
            # add oper status
            struct.append(100)
    # change owner to OID_NONE when owner is invalid
    if obj.owner != OID_NONE:
        player = tran.db.get(obj.owner, None)
        if not player or player.type not in PLAYER_TYPES or obj.oid not in player.planets:
            # TODO this can be a probem - this planet cannot be attacked!
            log.warning("Changing owner to OID_NONE - invalid owner", obj)
            self.cmd(obj).changeOwner(tran, obj, OID_NONE, force = 1)
            # kill all population
            obj.storPop = 0
            return
    # TODO: remove in 0.5.65
    obj.storBio = int(obj.storBio)
    obj.storEn = int(obj.storEn)
    # TODO: remove in 0.5.69
    if not hasattr(obj, "globalQueue"):
        obj.globalQueue = 0
    # check compOf
    if not tran.db.has_key(obj.compOf) or tran.db[obj.compOf].type != T_SYSTEM:
        log.debug("CONSISTENCY invalid compOf for planet", obj.oid)
    # fix signature
    obj.signature = 75
    if not hasattr(obj, 'moraleModifiers'):
        obj.moraleModifiers = [0.0, 0.0, 0.0, 0.0]

update.public = 0
def changePlanetsGlobalQueue(self, tran, obj, newQueue):
    """Attach this planet to another of its owner's global production queues.

    Raises GameException when newQueue is not a valid queue index.
    Returns the queue index now in effect.
    """
    owner = tran.db[obj.owner]
    if not 0 <= newQueue < len(owner.prodQueues):
        raise GameException("Invalid queue")
    obj.globalQueue = newQueue
    return obj.globalQueue

changePlanetsGlobalQueue.public = 1
changePlanetsGlobalQueue.accLevel = AL_FULL
def popGlobalQueue(self, tran, obj):
    """Take one unit of work for this planet from the owner's global queue.

    Returns a normalized single-unit copy of the head task, or None when
    the planet's global queue is empty. Decrements the shared queue entry
    (or removes it entirely when the last unit is taken).
    """
    owner = tran.db[obj.owner]
    queueIdx = obj.globalQueue
    queue = owner.prodQueues[queueIdx]
    if not len(queue):
        # nothing queued for this planet's global queue
        return None
    task = copy.copy(queue[0])
    if task.quantity > 1:
        queue[0].quantity -= 1
    else:
        # last unit - the whole item leaves the global queue
        if task.reportFin:
            Utils.sendMessage(tran, obj, MSG_QUEUE_TASK_ALLOTED, OID_NONE, (queueIdx, task.techID))
        del queue[0]
    # add other demanded values, report finalization was used to report allot (to prevent reporting every unit)
    task.reportFin = 0
    task.quantity = 1
    task.isShip = task.techID < 1000
    task.targetID = obj.oid
    task.currProd = 0
    task.demolishStruct = OID_NONE
    return task

popGlobalQueue.public = 0
def deleteDesign(self, tran, obj, designID, keepWIP = 0):
    """Remove all queued production tasks for a ship design on this planet.

    keepWIP -- when true, a task with construction points already invested
    is kept but reduced to quantity 1 instead of being aborted outright.
    """
    # TODO: handle strategic resources
    for task in obj.prodQueue[:]:
        if task.isShip and task.techID == designID:
            if task.currProd > 0 and keepWIP:
                # BUGFIX: was obj.procQueue (nonexistent attribute ->
                # AttributeError); the production queue is obj.prodQueue
                self.cmd(obj).changeConstruction(tran, obj, obj.prodQueue.index(task), 1)
            else:
                self.cmd(obj).abortConstruction(tran, obj, obj.prodQueue.index(task))

deleteDesign.public = 0
def changeShipDesign(self, tran, obj, oldDesignID, newDesignID):
    """Re-point queued ship production from one design to another.

    Already-invested construction points are scaled down by
    Rules.shipUpgradeMod (upgrading is not free).
    """
    # TODO: handle strategic resources
    for queuedTask in obj.prodQueue[:]:
        if not (queuedTask.isShip and queuedTask.techID == oldDesignID):
            continue
        queuedTask.techID = newDesignID
        # keep only part of the invested production after the upgrade
        queuedTask.currProd = int(queuedTask.currProd / Rules.shipUpgradeMod)

changeShipDesign.public = 0
##
## Asteroids
##
def generateAsteroid(self, tran, obj):
    """Randomly spawn an asteroid originating from this asteroid-type planet.

    NOTE(review): the bare 'return' on the first line disables this feature
    entirely -- everything below it is currently dead code kept for reference.
    """
    return
    assert obj.plType == "A"
    #
    # spawn probability/size modifier derived from mineral abundance
    modifier = pow(
        max(Rules.asteroidMinPlMinAbund / 100.0, obj.plMin / 100.0),
        Rules.asteroidModPwr,
    )
    # get probability
    prob = Rules.asteroidGenerPerc * modifier
    #@log.debug("Asteroids ?", prob, modifier, int(Rules.asteroidMinHP * modifier), int(Rules.asteroidMaxHP * modifier))
    if prob < random.random():
        # bad luck
        return
    # new asteroid - gener hit points and speed
    hp = random.randrange(
        int(Rules.asteroidMinHP * modifier),
        int(Rules.asteroidMaxHP * modifier)
    )
    speed = Rules.asteroidMinSpeed + random.random() * \
        (Rules.asteroidMaxSpeed - Rules.asteroidMinSpeed)
    # position
    system = tran.db[obj.compOf]
    # select target
    if Rules.asteroidTargetInSystem < random.random():
        # TODO: target nearby system
        objIDs = []
        # pick one target (except this system)
        # NOTE(review): objIDs is never populated, so random.choice would
        # raise IndexError if this branch ever ran -- dead code either way
        while True:
            systemID = random.choice(objIDs)
            tmpSystem = tran.db[systemID]
            if tmpSystem.type == T_SYSTEM and systemID != system.oid:
                break
        # select planet
        targetID = random.choice(tmpSystem.planets)
    else:
        # select planet in this system
        while True:
            targetID = random.choice(system.planets)
            if targetID != obj.oid:
                # don't target yourself
                break
    # create asteroid
    asteroid = self.new(T_ASTEROID)
    tran.db.create(asteroid)
    self.cmd(asteroid).create(tran, asteroid, system.x, system.y, targetID, speed, hp)
##
## Combat related functions
##
def getPreCombatData(self, tran, obj):
    """Collect the shots fired by this planet's structures before combat.

    Returns (shots, [0, 0, 0, 8], firing) where shots maps weapon class
    (0-3) to a list of (attack, weaponID) tuples and firing tells whether
    any armed structure exists. Also resets the per-combat hit-limit
    bookkeeping attributes (maxHits, hitCounter, lastHitClass, hitMod)
    consumed later by applyShot.
    """
    # scan buildings and fire their weapons
    shots = {0: [], 1: [], 2: [], 3: []}
    if obj.owner == OID_NONE:
        # unowned planet cannot fight back
        return shots, [0, 0, 0, 8], False
    player = tran.db[obj.owner]
    system = tran.db[obj.compOf]
    desCount = {}
    firing = False
    systemAtt = 0;
    systemDef = 0;
    for struct in obj.slots:
        structTechID = struct[STRUCT_IDX_TECHID]
        # operational status scales the number of usable weapon mounts
        opStatus = struct[STRUCT_IDX_OPSTATUS] / 100.0
        tech = Rules.techs[structTechID]
        desCount[structTechID] = desCount.get(structTechID, 0) + 1
        wpnCount = {}
        if not tech.structWeapons:
            continue
        firing = True
        for cClass in range(0, 4):
            # the owner chooses one weapon tech per combat class
            weaponID = player.planetWeapons[cClass]
            if weaponID is None:
                continue
            weapon = Rules.techs[weaponID]
            maxWeaponCount = int(tech.structWeapons[cClass] * opStatus)
            for weaponIdx in range(0, maxWeaponCount):
                #@log.debug(obj.oid, "FIRING PLANET WEAPON", weapon.name)
                wpnCount[weaponID] = wpnCount.get(weaponID, 0) + 1
                #
                weaponEff = Rules.techImprEff[player.techs.get(weaponID, Rules.techBaseImprovement)]
                # base attack
                attack = tech.combatAtt + int(weapon.weaponAtt * weaponEff)
                # because ALL counters starts at 1, subtract 3
                count = system.combatCounter + desCount[structTechID] + wpnCount[weaponID] - 2
                # add to attacks
                #@log.debug('IPlanet', obj.oid, structTechID, "Count", count, 'Shots', weapon.name, ShipUtils.getRounds(weapon.weaponROF, count))
                for round in xrange(0, ShipUtils.getRounds(weapon.weaponROF, count)):
                    shots[weapon.weaponClass].append((attack, weaponID))
    # hit limit
    obj.maxHits = len(obj.slots)
    obj.hitCounter = 0
    obj.lastHitClass = 3
    obj.hitMod = 1.0
    log.debug(obj.oid, "Combat settings", obj.maxHits)
    # +1 means population only hit
    return shots, [0, 0, 0, 8], firing

getPreCombatData.public = 0
def applyShot(self, tran, obj, defense, attack, weaponID, cClass, count):
    """Apply one incoming weapon shot to this planet during combat.

    count selects the target: 7 (or no structures left) hits population,
    6 hits a random structure, otherwise the most damaged structure.
    Returns (damage dealt including shield absorption, structuresDestroyed,
    hitClass) -- hitClass is always 3 (structure) for planets.
    """
    #@log.debug('IPlanet', 'Apply shot', weaponID, attack, cClass, count)
    # compute chance to hit
    weapon = Rules.techs[weaponID]
    #system defense bonus is dropped for planets...structures can't move; just calculate defense off structure defense
    defense = Rules.combatStructDefense
    destroyed = 0
    dmg = 0
    # limit number of shots: when a lower weapon class starts firing,
    # transfer the hit bookkeeping proportionally to the class difference
    if weapon.weaponClass < obj.lastHitClass:
        #@log.debug(obj.oid, "Different class", obj.lastHitClass, weapon.weaponClass, obj.maxHits)
        obj.maxHits = int(Rules.combatHitXferMod * obj.maxHits * (obj.lastHitClass - weapon.weaponClass))
        obj.hitCounter = int(Rules.combatHitXferMod * obj.hitCounter * (obj.lastHitClass - weapon.weaponClass))
        obj.lastHitClass = weapon.weaponClass
    # rapid-fire weapons consume a fraction of a hit per shot
    if weapon.weaponROF > 1:
        obj.hitCounter += 1.0 / weapon.weaponROF
    else:
        obj.hitCounter += 1
    if obj.hitCounter > obj.maxHits:
        # too many hits absorbed this round -> escalating accuracy penalty
        obj.hitCounter = 0
        obj.hitMod *= Rules.combatStructureHitMod
    attackChance = obj.hitMod * attack / (attack + defense)
    #@log.debug(obj.oid, "Chance to attack", attackChance, obj.hitMod, obj.hitCounter, obj.maxHits)
    absorb = 0 #for when it doesn't hit
    if random.random() <= attackChance:
        # hit
        player = tran.db[obj.owner]
        weaponEff = Rules.techImprEff[player.techs.get(weaponID, Rules.techBaseImprovement)]
        dmg = ShipUtils.computeDamage(weapon.weaponClass, 3, weapon.weaponDmgMin, weapon.weaponDmgMax, weaponEff)
        #shield strike
        if obj.shield > 0:
            absorb = min(dmg, obj.shield)
            obj.shield -= absorb
            dmg -= absorb
            if dmg == 0:
                # shield soaked the whole shot
                return 0 + absorb, 0, 3
        # select slot
        if count == 7 or not obj.slots:
            #@log.debug('IPlanet', 'Population hit')
            # population hit
            if obj.storPop == 0:
                dmg = 0
            else:
                # free slot hit -> dmg population
                # OLD dmgPop = int(Rules.popPerSlot * float(dmg) / Rules.popSlotHP * Rules.popKillMod)
                dmgPop = int(dmg * Rules.popSlotKillMod)
                obj.storPop = max(obj.storPop - dmgPop, 0)
                obj.changePop -= dmgPop
                if obj.storPop > 0:
                    # morale penalty proportional to the share of killed pop
                    obj.morale -= Rules.moraleModPlHit * float(dmgPop) / float(obj.storPop)
        elif count < 0:
            # TODO can be count negative?
            log.warning('IPlanet', 'applyShot: count is negative')
        else:
            if count == 6:
                # random structure hit
                struct = obj.slots[Utils.rand(0, len(obj.slots))]
            else:
                # most damaged structure hit
                struct = obj.slots[-1]
                for tmpStruct in obj.slots:
                    if tmpStruct[STRUCT_IDX_HP] <= struct[STRUCT_IDX_HP]:
                        struct = tmpStruct
            # compute sum hp of all buildings
            sumHP = 0
            for tmpStruct in obj.slots:
                sumHP += tmpStruct[STRUCT_IDX_HP]
            # damage building
            struct[STRUCT_IDX_HP] -= dmg
            # "damage" population
            tech = Rules.techs[struct[STRUCT_IDX_TECHID]]
            # compute struct effectivity
            techEff = Utils.getTechEff(tran, struct[STRUCT_IDX_TECHID], obj.owner)
            maxHP = int(tech.maxHP * techEff)
            # workers die in proportion to the fraction of the building destroyed
            dmgPop = int(tech.operWorkers * float(dmg) / maxHP * Rules.popKillMod)
            obj.storPop = max(obj.storPop - dmgPop, 0)
            obj.changePop -= dmgPop
            # destroy building
            if struct[STRUCT_IDX_HP] <= 0:
                destroyed = 1
                # HP is <= 0 here, so this subtracts the overkill from dmg
                dmg += struct[STRUCT_IDX_HP]
                obj.slots.remove(struct)
            # compute morale penalty
            if dmg:
                obj.morale -= Rules.moraleModPlHit * float(dmg) / float(sumHP)
    # when destroyed, only class 3 (structure) i valid
    return dmg + absorb, destroyed, 3

applyShot.public = 0
def distributeExp(self, tran, obj):
    """Post-combat cleanup: drop the temporary hit-tracking attributes.

    Buildings gain no combat experience, so nothing is distributed; this
    only removes maxHits/hitCounter/lastHitClass/hitMod if present.
    """
    if not hasattr(obj, "maxHits"):
        return
    for attrName in ("maxHits", "hitCounter", "lastHitClass", "hitMod"):
        delattr(obj, attrName)

distributeExp.public = 0
def surrenderTo(self, tran, obj, newOwnerID):
    """Decide whether this planet surrenders to the given attacker.

    Morale drops first; if it falls below the revolt threshold there is a
    morale-dependent random chance of surrender. On surrender, pirates get
    special handling (fame/tech bonuses with anti-abuse timers, population
    wiped, planet left unowned); other players simply take ownership.
    Military buildings are demolished either way.
    Returns 1 when ownership changed, 0 otherwise.
    """
    # morale is lost when this is called
    obj.morale -= Rules.moraleLostWhenSurrender
    if obj.morale >= Rules.revoltThr:
        #@log.debug('IPlanet', 'Surrender - revolt thr not reached', obj.morale)
        return 0
    chance = (Rules.revoltThr - obj.morale) * Rules.moralePerPointChance
    if Utils.rand(0, 101) > chance:
        # do not surrender!
        #@log.debug('IPlanet', 'Surrender - pure luck', obj.morale, obj.revoltLen)
        return 0
    # we've lost the battle - we have a new owner
    newOwner = tran.db[newOwnerID]
    if newOwner.type == T_PIRPLAYER or newOwner.type == T_AIPIRPLAYER:
        # special handling for pirates
        currentTurn = tran.db[OID_UNIVERSE].turn
        # prevent abuse - require 8 turns between capturing the same planet and require the owner to control the planet at least 2 turns if you want to gain fame & tech (two turns prevents orbiting pirate fleet from immediately bombing)
        if (currentTurn - obj.lastPirCapture) > 8 and (currentTurn - obj.ownerSince) > 2:
            # gain/lose fame
            self.cmd(newOwner).capturePlanet(tran, newOwner, obj)
            # steal ship techs
            self.cmd(newOwner).stealTechs(tran, newOwner, obj.owner, obj.oid)
        else:
            log.debug(obj.oid, "Pirate captured planet too soon after previous capture or colonization to gain bonuses", obj.oid)
        # pirates raze rather than occupy: wipe pop, leave planet unowned
        obj.storPop = 0
        obj.lastPirCapture = currentTurn
        self.cmd(obj).changeOwner(tran, obj, OID_NONE, force = 1)
    else:
        # change owner
        self.cmd(obj).changeOwner(tran, obj, newOwnerID, force = 1)
    # blow up all military buildings
    for struct in obj.slots[:]:
        tech = Rules.techs[struct[STRUCT_IDX_TECHID]]
        if tech.isMilitary:
            obj.slots.remove(struct)
    return 1

surrenderTo.public = 0
| Lukc/ospace-lukc | server/lib/ige/ospace/IPlanet.py | Python | gpl-2.0 | 47,036 | [
"Galaxy"
] | 1f82f519c2e464ed60585151e7b5c5366dba2b8c43e0667dda452b4fd248269e |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# PCR-GLOBWB (PCRaster Global Water Balance) Global Hydrological Model
#
# Copyright (C) 2016, Ludovicus P. H. (Rens) van Beek, Edwin H. Sutanudjaja, Yoshihide Wada,
# Joyce H. C. Bosmans, Niels Drost, Inge E. M. de Graaf, Kor de Jong, Patricia Lopez Lopez,
# Stefanie Pessenteiner, Oliver Schmitz, Menno W. Straatsma, Niko Wanders, Dominik Wisser,
# and Marc F. P. Bierkens,
# Faculty of Geosciences, Utrecht University, Utrecht, The Netherlands
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import types
import math
import types
from pcraster.framework import *
import pcraster as pcr
import logging
logger = logging.getLogger(__name__)
import virtualOS as vos
from ncConverter import *
import waterBodies
class Routing(object):
#TODO: remove
def getPseudoState(self):
    """Legacy stub kept for interface compatibility; reports no state."""
    return {}
#TODO: remove
def getVariables(self, names):
    """Legacy stub kept for interface compatibility; ignores *names*."""
    return {}
def getState(self):
    """Return the routing module's prognostic (state) variables as a dict.

    Keys match the names used for state reporting / initial conditions.
    """
    return {
        'timestepsToAvgDischarge':     self.timestepsToAvgDischarge,  # day
        'channelStorage':              self.channelStorage,           # m3; includes lake and reservoir storage
        'readAvlChannelStorage':       self.readAvlChannelStorage,    # m3; readily available to satisfy water demand
        'avgDischargeLong':            self.avgDischarge,             # m3/s; long term average discharge
        'm2tDischargeLong':            self.m2tDischarge,             # (m3/s)^2
        'avgBaseflowLong':             self.avgBaseflow,              # m3/s; long term average baseflow
        'riverbedExchange':            self.riverbedExchange,         # m3/day; riverbed infiltration to groundwater
        'waterBodyStorage':            self.waterBodyStorage,         # m3; per water body id (not per cell)
        'avgLakeReservoirOutflowLong': self.avgOutflow,               # m3/s; per water body id (not per cell)
        'avgLakeReservoirInflowShort': self.avgInflow,                # m3/s; per water body id (not per cell)
        'avgDischargeShort':           self.avgDischargeShort,        # m3/s; short term average discharge
        'subDischarge':                self.subDischarge,             # m3/s; sub-time step discharge (kinematic wave)
    }
def __init__(self, iniItems, initialConditions, lddMap):
    """Set up the routing module from the configuration object.

    Reads routingOptions/globalOptions from *iniItems*, loads static maps
    (ldd, landmask, cell area, channel geometry), derives numerical
    parameters for the kinematic wave scheme, creates the WaterBodies
    helper, loads initial conditions, and configures the optional dynamic
    floodplain scheme.
    """
    object.__init__(self)
    self.lddMap = lddMap
    self.cloneMap = iniItems.cloneMap
    self.tmpDir = iniItems.tmpDir
    self.inputDir = iniItems.globalOptions['inputDir']
    # option to activate water balance check
    self.debugWaterBalance = True
    if iniItems.routingOptions['debugWaterBalance'] == "False":
        self.debugWaterBalance = False
    # routing scheme, e.g. accuTravelTime or kinematicWave
    self.method = iniItems.routingOptions['routingMethod']
    # option to include lakes and reservoirs
    self.includeWaterBodies = True
    if 'includeWaterBodies' in iniItems.routingOptions.keys():
        if iniItems.routingOptions['includeWaterBodies'] == "False" or \
           iniItems.routingOptions['includeWaterBodies'] == "None":
            self.includeWaterBodies = False
    # local drainage direction:
    self.lddMap = vos.readPCRmapClone(iniItems.routingOptions['lddMap'],
                                      self.cloneMap, self.tmpDir, self.inputDir, True)
    self.lddMap = pcr.lddrepair(pcr.ldd(self.lddMap))
    self.lddMap = pcr.lddrepair(self.lddMap)
    # landmask:
    if iniItems.globalOptions['landmask'] != "None":
        self.landmask = vos.readPCRmapClone(
            iniItems.globalOptions['landmask'],
            self.cloneMap, self.tmpDir, self.inputDir)
    else:
        # fall back to wherever the ldd is defined
        self.landmask = pcr.defined(self.lddMap)
    self.landmask = pcr.ifthen(pcr.defined(self.lddMap), self.landmask)
    self.landmask = pcr.cover(self.landmask, pcr.boolean(0))
    # ldd mask
    self.lddMap = pcr.lddmask(self.lddMap, self.landmask)
    # cell area (unit: m2)
    self.cellArea = vos.readPCRmapClone(
        iniItems.routingOptions['cellAreaMap'],
        self.cloneMap, self.tmpDir, self.inputDir)
    # model resolution in arc-degree unit
    self.cellSizeInArcDeg = vos.getMapAttributes(self.cloneMap, "cellsize")
    # maximum number of days (timesteps) to calculate long term average flow values (default: 5 years = 5 * 365 days = 1825)
    self.maxTimestepsToAvgDischargeLong = 1825.
    # maximum number of days (timesteps) to calculate short term average values (default: 1 month = 1 * 30 days = 30)
    self.maxTimestepsToAvgDischargeShort = 30.
    routingParameters = ['gradient', 'manningsN']
    for var in routingParameters:
        # NOTE(review): 'input' shadows the builtin of the same name;
        # harmless here but worth renaming at the next refactor
        input = iniItems.routingOptions[str(var)]
        vars(self)[var] = vos.readPCRmapClone(input,
                                              self.cloneMap, self.tmpDir, self.inputDir)
    # parameters needed to estimate channel dimensions/parameters
    # - used in the method/function 'getRoutingParamAvgDischarge'
    self.eta = 0.25
    self.nu = 0.40
    self.tau = 8.00
    self.phi = 0.58
    # option to use minimum channel width (m)
    self.minChannelWidth = pcr.scalar(0.0)
    if "minimumChannelWidth" in iniItems.routingOptions.keys():
        if iniItems.routingOptions['minimumChannelWidth'] != "None":
            self.minChannelWidth = pcr.cover(vos.readPCRmapClone(
                iniItems.routingOptions['minimumChannelWidth'],
                self.cloneMap, self.tmpDir, self.inputDir), 0.0)
    # option to use constant/pre-defined channel width (m)
    self.predefinedChannelWidth = None
    if "constantChannelWidth" in iniItems.routingOptions.keys():
        if iniItems.routingOptions['constantChannelWidth'] != "None":
            self.predefinedChannelWidth = pcr.cover(vos.readPCRmapClone(
                iniItems.routingOptions['constantChannelWidth'],
                self.cloneMap, self.tmpDir, self.inputDir), 0.0)
    # option to use constant/pre-defined channel depth (m)
    self.predefinedChannelDepth = None
    if "constantChannelDepth" in iniItems.routingOptions.keys():
        if iniItems.routingOptions['constantChannelDepth'] != "None":
            self.predefinedChannelDepth = pcr.cover(vos.readPCRmapClone(
                iniItems.routingOptions['constantChannelDepth'],
                self.cloneMap, self.tmpDir, self.inputDir), 0.0)
    # an assumption for broad sheet flow in kinematic wave methods/approaches
    self.beta = 0.6
    # channelLength = approximation of channel length (unit: m)
    # This is approximated by cell diagonal.
    cellSizeInArcMin = self.cellSizeInArcDeg * 60.
    verticalSizeInMeter = cellSizeInArcMin * 1852.
    # horizontal size = cellArea / verticalSize; diagonal via Pythagoras
    self.cellLengthFD = ((self.cellArea / verticalSizeInMeter) ** (2) +
                         (verticalSizeInMeter) ** (2)) ** (0.5)
    self.channelLength = self.cellLengthFD
    #
    # channel length (unit: m) -- may be overridden from the ini file
    if "channelLength" in iniItems.routingOptions.keys():
        if iniItems.routingOptions['channelLength'] != "None":
            self.channelLength = pcr.cover(
                vos.readPCRmapClone(
                    iniItems.routingOptions['channelLength'],
                    self.cloneMap, self.tmpDir, self.inputDir), self.channelLength)
    # dist2celllength in m/arcDegree (needed in the accuTravelTime function):
    nrCellsDownstream = pcr.ldddist(self.lddMap,
                                    self.lddMap == 5, 1.)
    distanceDownstream = pcr.ldddist(self.lddMap,
                                     self.lddMap == 5,
                                     self.channelLength)
    channelLengthDownstream = \
        (self.channelLength + distanceDownstream) / \
        (nrCellsDownstream + 1)                       # unit: m
    self.dist2celllength = channelLengthDownstream / \
        self.cellSizeInArcDeg                         # unit: m/arcDegree
    # the channel gradient must be >= minGradient
    minGradient = 0.00005  # 0.000005
    self.gradient = pcr.max(minGradient,
                            pcr.cover(self.gradient, minGradient))
    # initiate/create WaterBody class
    self.WaterBodies = waterBodies.WaterBodies(iniItems, self.landmask)
    # crop evaporation coefficient for surface water bodies
    self.no_zero_crop_water_coefficient = True
    if iniItems.routingOptions['cropCoefficientWaterNC'] == "None":
        self.no_zero_crop_water_coefficient = False
    else:
        self.fileCropKC = vos.getFullPath(
            iniItems.routingOptions['cropCoefficientWaterNC'],
            self.inputDir)
    # courantNumber criteria for numerical stability in kinematic wave methods/approaches
    self.courantNumber = 0.50
    # empirical values for minimum number of sub-time steps:
    design_flood_speed = 5.00  # m/s
    design_length_of_sub_time_step = pcr.cellvalue(
        pcr.mapminimum(
            self.courantNumber * self.channelLength / design_flood_speed), 1)[0]
    self.limit_num_of_sub_time_steps = np.ceil(
        vos.secondsPerDay() / design_length_of_sub_time_step)
    #
    # minimum number of sub-time steps: 24 ; hourly resolution as used in Van Beek et al. (2011)
    self.limit_num_of_sub_time_steps = max(24.0, self.limit_num_of_sub_time_steps)
    # minimum number of a sub time step based on the configuration/ini file:
    if 'maxiumLengthOfSubTimeStep' in iniItems.routingOptions.keys():
        maxiumLengthOfSubTimeStep = float(iniItems.routingOptions['maxiumLengthOfSubTimeStep'])
        minimum_number_of_sub_time_step = np.ceil(
            vos.secondsPerDay() / maxiumLengthOfSubTimeStep)
        self.limit_num_of_sub_time_steps = max(
            minimum_number_of_sub_time_step,
            self.limit_num_of_sub_time_steps)
    #
    self.limit_num_of_sub_time_steps = np.int(self.limit_num_of_sub_time_steps)
    # critical water height (m) used to select stable length of sub time step in kinematic wave methods/approaches
    self.critical_water_height = 0.25;  # used in Van Beek et al. (2011)
    # assumption for the minimum fracwat value used for calculating water height
    self.min_fracwat_for_water_height = 0.001  # dimensionless
    # assumption for minimum crop coefficient for surface water bodies
    self.minCropWaterKC = 0.00
    if 'minCropWaterKC' in iniItems.routingOptions.keys():
        self.minCropWaterKC = float(iniItems.routingOptions['minCropWaterKC'])
    # get the initialConditions
    self.getICs(iniItems, initialConditions)
    # flood plain options:
    #################################################################################
    self.floodPlain = iniItems.routingOptions['dynamicFloodPlain'] == "True"
    if self.floodPlain:
        logger.info("Flood plain extents can vary during the simulation.")
        # get ManningsN for the flood plain areas
        input = iniItems.routingOptions['floodplainManningsN']
        self.floodplainManN = vos.readPCRmapClone(input,
                                                  self.cloneMap, self.tmpDir, self.inputDir)
        # reduction parameter of smoothing interval and error threshold
        self.reductionKK = 0.5
        if 'reductionKK' in iniItems.routingOptions.keys():
            self.reductionKK = float(iniItems.routingOptions['reductionKK'])
        self.criterionKK = 40.0
        if 'criterionKK' in iniItems.routingOptions.keys():
            self.criterionKK = float(iniItems.routingOptions['criterionKK'])
        # get relative elevation (above floodplain) profile per grid cell (including smoothing parameters)
        self.nrZLevels, self.areaFractions, self.relZ, self.floodVolume, self.kSlope, self.mInterval = \
            self.getElevationProfile(iniItems)
        # get bankfull capacity (unit: m3)
        self.predefinedBankfullCapacity = None
        self.usingFixedBankfullCapacity = False
        if iniItems.routingOptions['bankfullCapacity'] != "None":
            self.usingFixedBankfullCapacity = True
            self.predefinedBankfullCapacity = vos.readPCRmapClone(iniItems.routingOptions['bankfullCapacity'],
                                                                  self.cloneMap, self.tmpDir, self.inputDir)
        else:
            msg = "The bankfull channel storage capacity is NOT defined in the configuration file. "
            if isinstance(self.predefinedChannelWidth, types.NoneType) or \
               isinstance(self.predefinedChannelDepth, types.NoneType):
                # left as None here; estimated later from average discharge
                msg += "The bankfull capacity is estimated from average discharge (5 year long term average)."
            else:
                msg += "The bankfull capacity is estimated from the given channel depth and channel width."
                self.usingFixedBankfullCapacity = True
                self.predefinedBankfullCapacity = self.estimateBankfullCapacity(self.predefinedChannelWidth,
                                                                                self.predefinedChannelDepth)
            logger.info(msg)
        # covering the value
        self.predefinedBankfullCapacity = pcr.cover(self.predefinedBankfullCapacity, 0.0)
    # zero fracwat assumption (used for debugging to the version 1)
    self.zeroFracWatAllAndAlways = False
    if iniItems.debug_to_version_one: self.zeroFracWatAllAndAlways = True
    # initiate old style reporting # This is still very useful during the 'debugging' process.
    self.initiate_old_style_routing_reporting(iniItems)
def getICs(self,iniItems,iniConditions = None):
if iniConditions == None:
# read initial conditions from pcraster maps listed in the ini file (for the first time step of the model; when the model just starts)
self.timestepsToAvgDischarge = vos.readPCRmapClone(iniItems.routingOptions['timestepsToAvgDischargeIni'] ,self.cloneMap,self.tmpDir,self.inputDir)
self.channelStorage = vos.readPCRmapClone(iniItems.routingOptions['channelStorageIni'] ,self.cloneMap,self.tmpDir,self.inputDir)
self.readAvlChannelStorage = vos.readPCRmapClone(iniItems.routingOptions['readAvlChannelStorageIni'] ,self.cloneMap,self.tmpDir,self.inputDir)
self.avgDischarge = vos.readPCRmapClone(iniItems.routingOptions['avgDischargeLongIni'] ,self.cloneMap,self.tmpDir,self.inputDir)
self.m2tDischarge = vos.readPCRmapClone(iniItems.routingOptions['m2tDischargeLongIni'] ,self.cloneMap,self.tmpDir,self.inputDir)
self.avgBaseflow = vos.readPCRmapClone(iniItems.routingOptions['avgBaseflowLongIni'] ,self.cloneMap,self.tmpDir,self.inputDir)
self.riverbedExchange = vos.readPCRmapClone(iniItems.routingOptions['riverbedExchangeIni'] ,self.cloneMap,self.tmpDir,self.inputDir)
# New initial condition variable introduced in the version 2.0.2: avgDischargeShort
self.avgDischargeShort = vos.readPCRmapClone(iniItems.routingOptions['avgDischargeShortIni'] ,self.cloneMap,self.tmpDir,self.inputDir)
# Initial conditions needed for kinematic wave methods
self.subDischarge = vos.readPCRmapClone(iniItems.routingOptions['subDischargeIni'],self.cloneMap,self.tmpDir,self.inputDir)
else:
# read initial conditions from the memory
self.timestepsToAvgDischarge = iniConditions['routing']['timestepsToAvgDischarge']
self.channelStorage = iniConditions['routing']['channelStorage']
self.readAvlChannelStorage = iniConditions['routing']['readAvlChannelStorage']
self.avgDischarge = iniConditions['routing']['avgDischargeLong']
self.m2tDischarge = iniConditions['routing']['m2tDischargeLong']
self.avgBaseflow = iniConditions['routing']['avgBaseflowLong']
self.riverbedExchange = iniConditions['routing']['riverbedExchange']
self.avgDischargeShort = iniConditions['routing']['avgDischargeShort']
self.subDischarge = iniConditions['routing']['subDischarge']
self.channelStorage = pcr.ifthen(self.landmask, pcr.cover(self.channelStorage, 0.0))
self.readAvlChannelStorage = pcr.ifthen(self.landmask, pcr.cover(self.readAvlChannelStorage, 0.0))
self.avgDischarge = pcr.ifthen(self.landmask, pcr.cover(self.avgDischarge, 0.0))
self.m2tDischarge = pcr.ifthen(self.landmask, pcr.cover(self.m2tDischarge, 0.0))
self.avgDischargeShort = pcr.ifthen(self.landmask, pcr.cover(self.avgDischargeShort, 0.0))
self.avgBaseflow = pcr.ifthen(self.landmask, pcr.cover(self.avgBaseflow, 0.0))
self.riverbedExchange = pcr.ifthen(self.landmask, pcr.cover(self.riverbedExchange, 0.0))
self.subDischarge = pcr.ifthen(self.landmask, pcr.cover(self.subDischarge , 0.0))
self.readAvlChannelStorage = pcr.min(self.readAvlChannelStorage, self.channelStorage)
self.readAvlChannelStorage = pcr.max(self.readAvlChannelStorage, 0.0)
# make sure that timestepsToAvgDischarge is consistent (or the same) for the entire map:
try:
self.timestepsToAvgDischarge = pcr.mapmaximum(self.timestepsToAvgDischarge)
except:
pass # We have to use 'try/except' because 'pcr.mapmaximum' cannot handle scalar value
# for netcdf reporting, we have to make sure that timestepsToAvgDischarge is spatial and scalar (especially while performing pcr2numpy operations)
self.timestepsToAvgDischarge = pcr.spatial(pcr.scalar(self.timestepsToAvgDischarge))
self.timestepsToAvgDischarge = pcr.ifthen(self.landmask, self.timestepsToAvgDischarge)
# Initial conditions needed for water bodies:
# - initial short term average inflow (m3/s) and
# long term average outflow (m3/s)
if iniConditions == None:
# read initial conditions from pcraster maps listed in the ini file (for the first time step of the model; when the model just starts)
self.avgInflow = vos.readPCRmapClone(iniItems.routingOptions['avgLakeReservoirInflowShortIni'],self.cloneMap,self.tmpDir,self.inputDir)
self.avgOutflow = vos.readPCRmapClone(iniItems.routingOptions['avgLakeReservoirOutflowLongIni'],self.cloneMap,self.tmpDir,self.inputDir)
if not isinstance(iniItems.routingOptions['waterBodyStorageIni'],types.NoneType):
self.waterBodyStorage = vos.readPCRmapClone(iniItems.routingOptions['waterBodyStorageIni'], self.cloneMap,self.tmpDir,self.inputDir)
self.waterBodyStorage = pcr.ifthen(self.landmask, pcr.cover(self.waterBodyStorage, 0.0))
else:
self.waterBodyStorage = None
else:
# read initial conditions from the memory
self.avgInflow = iniConditions['routing']['avgLakeReservoirInflowShort']
self.avgOutflow = iniConditions['routing']['avgLakeReservoirOutflowLong']
self.waterBodyStorage = iniConditions['routing']['waterBodyStorage']
self.avgInflow = pcr.ifthen(self.landmask, pcr.cover(self.avgInflow , 0.0))
self.avgOutflow = pcr.ifthen(self.landmask, pcr.cover(self.avgOutflow, 0.0))
if not isinstance(self.waterBodyStorage, types.NoneType):
self.waterBodyStorage = pcr.ifthen(self.landmask, pcr.cover(self.waterBodyStorage, 0.0))
def estimateBankfullDischarge(self, bankfullWidth, factor = 4.8):
# bankfull discharge (unit: m3/s)
# - from Lacey formula: P = B = 4.8 * (Qbf)**0.5
bankfullDischarge = (bankfullWidth / factor ) ** (2.0)
return bankfullDischarge
def estimateBankfullDepth(self, bankfullDischarge):
# bankfull depth (unit: m)
# - from the Manning formula
# - assuming rectangular channel
bankfullDepth = self.manningsN * ((bankfullDischarge)**(0.50))
bankfullDepth = bankfullDepth / (4.8 * ((self.gradient)**(0.50)))
bankfullDepth = bankfullDepth**(3.0/5.0)
return bankfullDepth
def estimateBankfullCapacity(self, width, depth, minWidth = 5.0, minDepth = 1.0):
# bankfull capacity (unit: m3)
bankfullCapacity = pcr.max(minWidth, width) * \
pcr.max(minDepth, depth) * \
self.channelLength
return bankfullCapacity
    def getElevationProfile(self, iniItems):
        # Build the sub-grid elevation profile used for floodplain inundation:
        # for every cell a piecewise relation between flood volume, flooded
        # area fraction and relative elevation, plus the smoothing parameters
        # used later when interpolating along that relation.
        #
        # get the profile of relative elevation above the floodplain (per grid cell)
        # output: lists kSlope, mInterval, relZ and floodVolume indexed by iCnt (index, dimensionless)
        # (NOTE: despite the original wording these are Python lists, not dictionaries)
        # - nrZLevels                     : number of intervals/levels
        # - areaFractions (dimensionless) : percentage/fraction of flooded/innundated area
        # - relZ (m)                      : relative elevation above floodplain
        # - floodVolume (m3)              : flood volume above the channel bankfull capacity
        # - kSlope (dimensionless)        : slope used during the interpolation
        # - mInterval (m3)                : smoothing interval (used in the interpolation)
        msg = 'Get the profile of relative elevation (relZ, unit: m) !!!'
        logger.info(msg)
        # netCDF input for the elevation profile is not implemented yet;
        # PCRaster maps (one per area-fraction level) are read instead
        relativeElevationFileNC = None # TODO define relative elevation files in a netdf file.
        if relativeElevationFileNC != None:
            pass # TODO: using a netcdf file
        if relativeElevationFileNC == None:
            # pattern for the per-level map file names; filled in with the
            # area-fraction percentage via the '%' operator below
            relZFileName = vos.getFullPath(iniItems.routingOptions['relativeElevationFiles'],\
                                           iniItems.globalOptions['inputDir'])
        # areaFractions (dimensionless): fractions of flooded/innundated areas
        # NOTE(review): map() yields a list on Python 2 only; under Python 3
        # this would need list(map(...)) because len() is taken below —
        # confirm the targeted interpreter version.
        areaFractions = map(float, iniItems.routingOptions['relativeElevationLevels'].split(','))
        # number of levels/intervals
        nrZLevels = len(areaFractions)
        # - TODO: Read areaFractions and nrZLevels automatically.
        ########################################################################################################
        #
        # patch elevations: those that are part of sills are updated on the basis of the floodplain gradient
        # using local distances deltaX per increment upto z[N] and the sum over sills
        # - fill all lists (including smoothing interval and slopes)
        relZ = [0.] * nrZLevels
        for iCnt in range(0, nrZLevels):
            if relativeElevationFileNC == None:
                # e.g. pattern 'elev%04.2f.map' filled with the percentage
                inputName = relZFileName %(areaFractions[iCnt] * 100)
                relZ[iCnt] = vos.readPCRmapClone(inputName,
                                                 self.cloneMap, self.tmpDir, self.inputDir)
            if relativeElevationFileNC != None:
                pass # TODO: using a netcdf file
            # covering elevation values (missing values become 0.0 inside the landmask)
            relZ[iCnt] = pcr.ifthen(self.landmask, pcr.cover(relZ[iCnt], 0.0))
            # make sure that relZ[iCnt] >= relZ[iCnt-1], i.e. monotonically
            # non-decreasing elevations (added by Edwin)
            if iCnt > 0: relZ[iCnt] = pcr.max(relZ[iCnt], relZ[iCnt-1])
        # - minimum slope of floodplain
        #   being defined as the longest sill,
        #   first used to retrieve longest cumulative distance
        deltaX = [self.cellArea**0.5] * nrZLevels
        deltaX[0] = 0.0
        sumX = deltaX[:]
        minSlope = 0.0
        for iCnt in range(nrZLevels):
            if iCnt < nrZLevels-1:
                deltaX[iCnt] = (areaFractions[iCnt+1]**0.5 - areaFractions[iCnt]**0.5) * deltaX[iCnt]
            else:
                deltaX[iCnt] = (1.0 - areaFractions[iCnt-1]**0.5)*deltaX[iCnt]
            if iCnt > 0:
                # accumulate distance only while consecutive levels share the
                # same elevation (a "sill"); reset to zero otherwise
                sumX[iCnt] = pcr.ifthenelse(relZ[iCnt] == relZ[iCnt-1], sumX[iCnt-1] + deltaX[iCnt], 0.0)
                minSlope = pcr.ifthenelse(relZ[iCnt] == relZ[iCnt-1], pcr.max( sumX[iCnt], minSlope), minSlope)
        # - the maximum value for the floodplain slope is channel gradient (flow velocity is slower in the floodplain)
        minSlope = pcr.min(self.gradient, 0.5* pcr.max(deltaX[1], minSlope)**-1.)
        # - add small increment to elevations to each sill (except in the case of lakes, #TODO: verify this)
        for iCnt in range(nrZLevels):
            relZ[iCnt] = relZ[iCnt] + sumX[iCnt] * pcr.ifthenelse(relZ[nrZLevels-1] > 0., minSlope, 0.0)
            # make sure that relZ[iCnt] >= relZ[iCnt-1] (added by Edwin)
            if iCnt > 0: relZ[iCnt] = pcr.max(relZ[iCnt], relZ[iCnt-1])
        #
        ########################################################################################################
        ########################################################################################################
        # - set slope and smoothing interval between dy= y(i+1)-y(i) and dx= x(i+1)-x(i)
        #   on the basis of volume
        #
        floodVolume = [0.] * (nrZLevels) # volume (unit: m3)
        mInterval = [0.] * (nrZLevels)   # smoothing interval (unit: m3)
        kSlope = [0.] * (nrZLevels)      # slope (dimensionless)
        #
        # trapezoidal integration of area fraction over elevation gives the
        # cumulative flood volume per level
        for iCnt in range(1, nrZLevels):
            floodVolume[iCnt] = floodVolume[iCnt-1] + \
                                0.5 * (areaFractions[iCnt] + areaFractions[iCnt-1]) * \
                                (relZ[iCnt] - relZ[iCnt-1]) * self.cellArea
            kSlope[iCnt-1] = (areaFractions[iCnt] - areaFractions[iCnt-1])/\
                             pcr.max(0.001, floodVolume[iCnt] - floodVolume[iCnt-1])
        for iCnt in range(1, nrZLevels):
            if iCnt < (nrZLevels-1):
                mInterval[iCnt] = 0.5 * self.reductionKK * pcr.min(floodVolume[iCnt+1] - floodVolume[iCnt], \
                                                                   floodVolume[iCnt] - floodVolume[iCnt-1])
            else:
                mInterval[iCnt] = 0.5 * self.reductionKK *(floodVolume[iCnt] - floodVolume[iCnt-1])
        #
        ########################################################################################################
        return nrZLevels, areaFractions, relZ, floodVolume, kSlope, mInterval
def getRoutingParamAvgDischarge(self, avgDischarge, dist2celllength = None):
# obtain routing parameters based on average (longterm) discharge
# output: channel dimensions and
# characteristicDistance (for accuTravelTime input)
yMean = self.eta * pow (avgDischarge, self.nu ) # avgDischarge in m3/s
wMean = self.tau * pow (avgDischarge, self.phi)
wMean = pcr.max(wMean,0.01) # average flow width (m) - this could be used as an estimate of channel width (assuming rectangular channels)
wMean = pcr.cover(wMean,0.01)
yMean = pcr.max(yMean,0.01) # average flow depth (m) - this should NOT be used as an estimate of channel depth
yMean = pcr.cover(yMean,0.01)
# option to use constant channel width (m)
if not isinstance(self.predefinedChannelWidth,types.NoneType):\
wMean = pcr.cover(self.predefinedChannelWidth, wMean)
#
# minimum channel width (m)
wMean = pcr.max(self.minChannelWidth, wMean)
return (yMean, wMean)
    def getCharacteristicDistance(self, yMean, wMean):
        # Compute the daily travel distance (in units of cell sizes, i.e.
        # arcDeg/day) used by the accutraveltime routing functions, based on
        # Manning flow velocity for a rectangular channel of depth yMean (m)
        # and width wMean (m); lakes/reservoirs get an area-wide value so all
        # of their outflow is released beyond the water-body outlet.
        #
        # Manning's coefficient:
        usedManningsN = self.manningsN
        # corrected Manning's coefficient (perimeter-weighted composite of the
        # channel and floodplain roughness), only if floodplain is simulated:
        if self.floodPlain:
            # wetted perimeter
            # NOTE(review): this duplicates the perimeter/roughness code in
            # calculate_alpha_and_initial_discharge_for_kinematic_wave —
            # consider a shared helper.
            flood_only_wetted_perimeter = self.floodDepth * (2.0) + \
                                          pcr.max(0.0, self.innundatedFraction*self.cellArea/self.channelLength - self.channelWidth)
            channel_only_wetted_perimeter = \
                  pcr.min(self.channelDepth, vos.getValDivZero(self.channelStorage, self.channelLength*self.channelWidth, 0.0)) * 2.0 + \
                  self.channelWidth
            # total channel wetted perimeter (unit: m)
            channel_wetted_perimeter = channel_only_wetted_perimeter + \
                                       flood_only_wetted_perimeter
            # minimum channel wetted perimeter = 10 cm
            channel_wetted_perimeter = pcr.max(0.1, channel_wetted_perimeter)
            usedManningsN = ((channel_only_wetted_perimeter/channel_wetted_perimeter) * self.manningsN**(1.5) + \
                             ( flood_only_wetted_perimeter/channel_wetted_perimeter) * self.floodplainManN**(1.5))**(2./3.)
        # characteristicDistance
        # - This will be used for accutraveltimeflux & accutraveltimestate
        # - discharge & storage = accutraveltimeflux & accutraveltimestate
        # - discharge = the total amount of material flowing through the cell (m3/s)
        # - storage = the amount of material which is deposited in the cell (m3)
        #
        # Manning velocity (hydraulic radius of a rectangle to the 2/3 power
        # times sqrt(slope) over n) converted to a distance per day:
        characteristicDistance = \
             ( (yMean * wMean)/ \
             (wMean + 2*yMean) )**(2./3.) * \
             ((self.gradient)**(0.5))/ \
             usedManningsN * \
             vos.secondsPerDay() # meter/day
        # convert meter/day to cell lengths per day (arcDeg/day), with a tiny
        # positive floor to avoid zero travel distances
        characteristicDistance = \
        pcr.max((self.cellSizeInArcDeg)*0.000000001,\
                characteristicDistance/self.dist2celllength) # arcDeg/day
        # charateristicDistance for each lake/reservoir:
        lakeReservoirCharacteristicDistance = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.,
                                              pcr.areaaverage(characteristicDistance, self.WaterBodies.waterBodyIds))
        #
        # - make sure that all outflow will be released outside lakes and reservoirs
        outlets = pcr.cover(pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyOut) > 0, pcr.boolean(1)), pcr.boolean(0))
        distance_to_outlets = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.,
                              pcr.ldddist(self.lddMap, outlets, pcr.scalar(1.0)))
        #~ lakeReservoirCharacteristicDistance = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.,
        #~                                       pcr.max(distance_to_outlets + pcr.downstreamdist(self.lddMap)*1.50, lakeReservoirCharacteristicDistance))
        lakeReservoirCharacteristicDistance = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.,
                                              pcr.max(distance_to_outlets + pcr.downstreamdist(self.lddMap)*2.50, lakeReservoirCharacteristicDistance))
        lakeReservoirCharacteristicDistance = pcr.areamaximum(lakeReservoirCharacteristicDistance, self.WaterBodies.waterBodyIds)
        #
        # TODO: calculate lakeReservoirCharacteristicDistance while obtaining lake & reservoir parameters
        characteristicDistance = pcr.cover(lakeReservoirCharacteristicDistance, characteristicDistance)
        # PS: In accutraveltime function:
        #     If characteristicDistance (velocity) = 0 then:
        #     - accutraveltimestate will give zero
        #     - accutraveltimeflux will be very high
        # TODO: Consider to use downstreamdist function.
        # current solution: using the function "roundup" to ignore
        # zero and very small values
        characteristicDistance = \
        pcr.roundup(characteristicDistance*100.)/100. # arcDeg/day
        # and set minimum value of characteristicDistance:
        characteristicDistance = pcr.cover(characteristicDistance, 0.1*self.cellSizeInArcDeg)
        characteristicDistance = pcr.max(0.100*self.cellSizeInArcDeg, characteristicDistance) # TODO: check what the minimum distance for accutraveltime function
        return characteristicDistance
    def accuTravelTime(self):
        # Route self.channelStorage downstream with PCRaster's accutraveltime
        # functions; sets self.Q (m3/day), self.subDischarge (m3/s) and
        # updates self.channelStorage (m3) in place. Negative storage and the
        # sub-1 m3 fractional part are withheld from routing and added back
        # afterwards to limit float_32 round-off artefacts.
        #
        # accuTravelTime ROUTING OPERATIONS
        ##############n############################################################################################################
        # route only non negative channelStorage (otherwise stay):
        channelStorageThatWillNotMove = pcr.ifthenelse(self.channelStorage < 0.0, self.channelStorage, 0.0)
        self.channelStorage = pcr.max(0.0, self.channelStorage)
        # also at least 1.0 m3 of water will stay - this is to minimize numerical errors due to float_32 pcraster implementations
        channelStorageThatWillNotMove += self.channelStorage - pcr.rounddown(self.channelStorage)
        self.channelStorage = pcr.rounddown(self.channelStorage)
        # channelStorage that will be given to the ROUTING operation:
        channelStorageForAccuTravelTime = pcr.max(0.0, self.channelStorage)
        channelStorageForAccuTravelTime = pcr.cover(channelStorageForAccuTravelTime,0.0) # TODO: check why do we have to use the "cover" operation?
        # daily travel distance per cell (arcDeg/day)
        characteristicDistance = self.getCharacteristicDistance(self.yMean, self.wMean)
        # estimating channel discharge (m3/day)
        self.Q = pcr.accutraveltimeflux(self.lddMap,\
                                        channelStorageForAccuTravelTime,\
                                        pcr.max(0.0, characteristicDistance))
        self.Q = pcr.cover(self.Q, 0.0)
        # for very small velocity (i.e. characteristicDistanceForAccuTravelTime), discharge can be missing value.
        # see: http://sourceforge.net/p/pcraster/bugs-and-feature-requests/543/
        #      http://karssenberg.geo.uu.nl/tt/TravelTimeSpecification.htm
        #
        # and make sure that no negative discharge
        self.Q = pcr.max(0.0, self.Q) # unit: m3/day
        # updating channelStorage (after routing)
        self.channelStorage = pcr.accutraveltimestate(self.lddMap,\
                                                      channelStorageForAccuTravelTime,\
                                                      pcr.max(0.0, characteristicDistance)) # unit: m3
        # return channelStorageThatWillNotMove to channelStorage:
        self.channelStorage += channelStorageThatWillNotMove # unit: m3
        # for non kinematic wave approaches, set subDishcarge Q in m3/s
        self.subDischarge = self.Q / vos.secondsPerDay()
        self.subDischarge = pcr.ifthen(self.landmask, self.subDischarge)
    def estimate_length_of_sub_time_step(self):
        # Estimate the sub-daily time-step length (unit: s) and the number of
        # sub-steps needed for a stable kinematic-wave update, based on the
        # latest sub-step discharge: roughly the time needed to drain the
        # current surface-water volume at the current discharge.
        # Returns (length_of_sub_time_step, number_of_loops).
        #
        # estimate the length of sub-time step (unit: s):
        # - the shorter is the better
        # - estimated based on the initial or latest sub-time step discharge (unit: m3/s)
        #
        length_of_sub_time_step = pcr.ifthenelse(self.subDischarge > 0.0,
                                  self.water_height * self.dynamicFracWat * self.cellArea / \
                                  self.subDischarge, vos.secondsPerDay())
        # TODO: Check this logic with Rens!
        # determine the number of sub time steps (based on Rens van Beek's method)
        #
        # only cells that genuinely constrain the step: shorter than a day,
        # above the critical water height, and not a pit cell (ldd == 5)
        critical_condition = (length_of_sub_time_step < vos.secondsPerDay()) & \
                             (self.water_height > self.critical_water_height) & \
                             (self.lddMap != pcr.ldd(5))
        #
        number_of_sub_time_steps = vos.secondsPerDay() /\
                                   pcr.cover(
                                   pcr.areaminimum(\
                                   pcr.ifthen(critical_condition, \
                                              length_of_sub_time_step),self.landmask),\
                                              vos.secondsPerDay()/self.limit_num_of_sub_time_steps)
        # 25 % safety margin plus one extra step
        number_of_sub_time_steps = 1.25 * number_of_sub_time_steps + 1
        number_of_sub_time_steps = pcr.roundup(number_of_sub_time_steps)
        #
        number_of_loops = max(1.0, pcr.cellvalue(pcr.mapmaximum(number_of_sub_time_steps),1)[1])     # minimum number of sub_time_steps = 1
        # NOTE(review): 'max' here makes limit_num_of_sub_time_steps a LOWER
        # bound on the loop count (at least that many sub-steps are always
        # run); if the name is meant as an upper cap, this should be 'min' —
        # confirm against the intended behavior before changing.
        number_of_loops = int(max(self.limit_num_of_sub_time_steps, number_of_loops))
        # actual length of sub-time step (s)
        length_of_sub_time_step = vos.secondsPerDay() / number_of_loops
        return (length_of_sub_time_step, number_of_loops)
def simplifiedKinematicWave(self):
"""
The 'simplifiedKinematicWave':
1. First, assume that all local fluxes has been added to 'channelStorage'. This is done outside of this function/method.
2. Then, the 'channelStorage' is routed by using 'pcr.kinematic function' with 'lateral_inflow' = 0.0.
"""
##########################################################################################################################
# TODO: REMOVE THIS METHOD AS THIS IS IRRELEVANT.
logger.info("Using the simplifiedKinematicWave method ! ")
# route only non negative channelStorage (otherwise stay):
channelStorageThatWillNotMove = pcr.ifthenelse(self.channelStorage < 0.0, self.channelStorage, 0.0)
# channelStorage that will be given to the ROUTING operation:
channelStorageForRouting = pcr.max(0.0, self.channelStorage) # unit: m3
# estimate of water height (m)
# - needed to estimate the length of sub-time step and
# also to estimate the channel wetted area (for the calculation of alpha and dischargeInitial)
self.water_height = channelStorageForRouting /\
(pcr.max(self.min_fracwat_for_water_height, self.dynamicFracWat) * self.cellArea)
# estimate the length of sub-time step (unit: s):
length_of_sub_time_step, number_of_loops = self.estimate_length_of_sub_time_step()
for i_loop in range(number_of_loops):
msg = "sub-daily time step "+str(i_loop+1)+" from "+str(number_of_loops)
logger.info(msg)
# alpha parameter and initial discharge variable needed for kinematic wave
alpha, dischargeInitial = \
self.calculate_alpha_and_initial_discharge_for_kinematic_wave(channelStorageForRouting, \
self.water_height, \
self.innundatedFraction, self.floodDepth)
# at the lake/reservoir outlets, use the discharge of water bofy outflow
waterBodyOutflowInM3PerSec = pcr.cover(
pcr.ifthen(\
self.WaterBodies.waterBodyOut,\
self.WaterBodies.waterBodyOutflow), 0.0) / vos.secondsPerDay()
waterBodyOutflowInM3PerSec = pcr.ifthen(\
pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, \
waterBodyOutflowInM3PerSec)
dischargeInitial = pcr.cover(waterBodyOutflowInM3PerSec, dischargeInitial)
# discharge (m3/s) based on kinematic wave approximation
#~ logger.debug('start pcr.kinematic')
self.subDischarge = pcr.kinematic(self.lddMap, dischargeInitial, 0.0,
alpha, self.beta, \
1, length_of_sub_time_step, self.channelLength)
self.subDischarge = pcr.cover(self.subDischarge, 0.0)
self.subDischarge = pcr.max(0.0, pcr.cover(self.subDischarge, 0.0))
#~ logger.debug('done')
# make sure that we do not get negative channel storage
self.subDischarge = pcr.min(self.subDischarge * length_of_sub_time_step, \
pcr.max(0.0, channelStorageForRouting + pcr.upstream(self.lddMap, self.subDischarge * length_of_sub_time_step)))/length_of_sub_time_step
# update channelStorage (m3)
storage_change_in_volume = pcr.upstream(self.lddMap, self.subDischarge * length_of_sub_time_step) - \
self.subDischarge * length_of_sub_time_step
channelStorageForRouting += storage_change_in_volume
#
# route only non negative channelStorage (otherwise stay):
channelStorageThatWillNotMove += pcr.ifthenelse(channelStorageForRouting < 0.0, channelStorageForRouting, 0.0)
channelStorageForRouting = pcr.max(0.000, channelStorageForRouting)
# update flood fraction and flood depth
self.inundatedFraction, self.floodDepth = self.returnInundationFractionAndFloodDepth(channelStorageForRouting)
# update dynamicFracWat: fraction of surface water bodies (dimensionless) including lakes and reservoirs
# - lake and reservoir surface water fraction
self.dynamicFracWat = pcr.cover(\
pcr.min(1.0, self.WaterBodies.fracWat), 0.0)
# - fraction of channel (including its excess above bankfull capacity)
self.dynamicFracWat += pcr.max(0.0, 1.0 - self.dynamicFracWat) * pcr.max(self.channelFraction, self.innundatedFraction)
# - maximum value of dynamicFracWat is 1.0
self.dynamicFracWat = pcr.ifthen(self.landmask, pcr.min(1.0, self.dynamicFracWat))
# estimate water_height for the next loop
# - needed to estimate the channel wetted area (for the calculation of alpha and dischargeInitial)
self.water_height = channelStorageForRouting / (pcr.max(self.min_fracwat_for_water_height, self.dynamicFracWat) * self.cellArea)
# TODO: Check whether the usage of dynamicFracWat provides any problems?
# total discharge_volume (m3) until this present i_loop
if i_loop == 0: discharge_volume = pcr.scalar(0.0)
discharge_volume += self.subDischarge * length_of_sub_time_step
# channel discharge (m3/day) = self.Q
self.Q = discharge_volume
# updating channelStorage (after routing)
self.channelStorage = channelStorageForRouting
# return channelStorageThatWillNotMove to channelStorage:
self.channelStorage += channelStorageThatWillNotMove
    def update(self,landSurface,groundwater,currTimeStep,meteo):
        # Main daily entry point of the routing module: refresh lake/reservoir
        # parameters, derive channel geometry from the average discharge, run
        # the selected routing scheme, exchange water with groundwater and
        # produce the reporting variables for this time step.
        logger.info("routing in progress")
        # waterBodies:
        # - get parameters at the beginning of each year or simulation
        # - note that the following function should be called first, specifically because
        #   we have to define initial conditions at the beginning of simulaution,
        #
        if currTimeStep.timeStepPCR == 1:
            initial_conditions_for_water_bodies = self.getState()
            self.WaterBodies.getParameterFiles(currTimeStep,\
                                               self.cellArea,\
                                               self.lddMap,\
                                               initial_conditions_for_water_bodies) # the last line is for the initial conditions of lakes/reservoirs
        # refresh the water-body parameters at the start of every later year
        if (currTimeStep.doy == 1) and (currTimeStep.timeStepPCR > 1):
            self.WaterBodies.getParameterFiles(currTimeStep,\
                                               self.cellArea,\
                                               self.lddMap)
        #
        if self.includeWaterBodies == False:
            self.WaterBodies.waterBodyIds = pcr.ifthen(self.landmask, pcr.nominal(-1)) # ignoring all lakes and reservoirs
        # downstreamDemand (m3/s) for reservoirs
        # - this one must be called before updating timestepsToAvgDischarge
        # - estimated based on environmental flow discharge
        self.downstreamDemand = self.estimate_discharge_for_environmental_flow(self.channelStorage)
        # get routing/channel parameters/dimensions (based on avgDischarge)
        # and estimating water bodies fraction ; this is needed for calculating evaporation from water bodies
        self.yMean, self.wMean = \
                    self.getRoutingParamAvgDischarge(self.avgDischarge)
        # channel width (unit: m)
        self.channelWidth = self.wMean
        # channel depth (unit: m)
        self.channelDepth = pcr.max(0.0, self.yMean)
        #
        # option to use constant channel depth (m)
        # NOTE(review): isinstance(x, types.NoneType) is Python-2 specific;
        # types.NoneType is absent on Python 3 < 3.10
        if not isinstance(self.predefinedChannelDepth, types.NoneType):\
           self.channelDepth = pcr.cover(self.predefinedChannelDepth, self.channelDepth)
        # channel bankfull capacity (unit: m3)
        if self.floodPlain:
            if self.usingFixedBankfullCapacity:
                self.channelStorageCapacity = self.predefinedBankfullCapacity
            else:
                self.channelStorageCapacity = self.estimateBankfullCapacity(self.channelWidth, \
                                                                            self.channelDepth)
        # fraction of channel (dimensionless)
        # - mininum inundated fraction
        self.channelFraction = pcr.max(0.0, pcr.min(1.0,\
                               self.channelWidth * self.channelLength / (self.cellArea)))
        # fraction of innundation due to flood (dimensionless) and flood/innundation depth (m)
        self.innundatedFraction, self.floodDepth = self.returnInundationFractionAndFloodDepth(self.channelStorage)
        # fraction of surface water bodies (dimensionless) including lakes and reservoirs
        # - lake and reservoir surface water fraction
        self.dynamicFracWat = pcr.cover(\
                              pcr.min(1.0, self.WaterBodies.fracWat), 0.0)
        # - fraction of channel (including its excess above bankfull capacity)
        self.dynamicFracWat += pcr.max(0.0, 1.0 - self.dynamicFracWat) * pcr.max(self.channelFraction, self.innundatedFraction)
        # - maximum value of dynamicFracWat is 1.0
        self.dynamicFracWat = pcr.ifthen(self.landmask, pcr.min(1.0, self.dynamicFracWat))
        # routing methods
        if self.method == "accuTravelTime" or self.method == "simplifiedKinematicWave": \
           self.simple_update(landSurface, groundwater, currTimeStep, meteo)
        #
        if self.method == "kinematicWave": \
           self.kinematic_wave_update(landSurface, groundwater, currTimeStep, meteo)
        # NOTE that this method require abstraction from fossil groundwater.
        # infiltration from surface water bodies (rivers/channels, as well as lakes and/or reservoirs) to groundwater bodies
        # - this exchange fluxes will be handed in the next time step
        # - in the future, this will be the interface between PCR-GLOBWB & MODFLOW (based on the difference between surface water levels & groundwater heads)
        #
        self.calculate_exchange_to_groundwater(groundwater, currTimeStep)
        # volume water released in pits (losses: to the ocean / endorheic basin)
        self.outgoing_volume_at_pits = pcr.ifthen(self.landmask,
                                       pcr.cover(
                                       pcr.ifthen(self.lddMap == pcr.ldd(5), self.Q), 0.0))
        # TODO: accumulate water in endorheic basins that are considered as lakes/reservoirs
        # estimate volume of water that can be extracted for abstraction in the next time step
        self.readAvlChannelStorage = pcr.max(0.0, self.estimate_available_volume_for_abstraction(self.channelStorage))
        # old-style reporting
        self.old_style_routing_reporting(currTimeStep) # TODO: remove this one
def calculate_potential_evaporation(self,landSurface,currTimeStep,meteo,definedDynamicFracWat = None):
if self.no_zero_crop_water_coefficient == False: self.waterKC = 0.0
# potential evaporation from water bodies
# current principle:
# - if landSurface.actualET < waterKC * meteo.referencePotET * self.fracWat
# then, we add more evaporation
#
if (currTimeStep.day == 1) or (currTimeStep.timeStepPCR == 1) and self.no_zero_crop_water_coefficient:
waterKC = vos.netcdf2PCRobjClone(self.fileCropKC,'kc', \
currTimeStep.fulldate, useDoy = 'month',\
cloneMapFileName = self.cloneMap)
self.waterKC = pcr.ifthen(self.landmask,\
pcr.cover(waterKC, 0.0))
self.waterKC = pcr.max(self.minCropWaterKC, self.waterKC)
# potential evaporation from water bodies (m/day)) - reduced by evaporation that has been calculated in the landSurface module
waterBodyPotEvapOvesSurfaceWaterArea = pcr.ifthen(self.landmask, \
pcr.max(0.0,\
self.waterKC * meteo.referencePotET -\
landSurface.actualET )) # These values are NOT over the entire cell area.
# potential evaporation from water bodies over the entire cell area (m/day)
if definedDynamicFracWat == None: dynamicFracWat = self.dynamicFracWat
waterBodyPotEvap = waterBodyPotEvapOvesSurfaceWaterArea * dynamicFracWat
return waterBodyPotEvap
def calculate_evaporation(self,landSurface,groundwater,currTimeStep,meteo):
# calculate potential evaporation from water bodies OVER THE ENTIRE CELL AREA (m/day) ; not only over surface water bodies
self.waterBodyPotEvap = self.calculate_potential_evaporation(landSurface,currTimeStep,meteo)
# evaporation volume from water bodies (m3)
# - not limited to available channelStorage
volLocEvapWaterBody = self.waterBodyPotEvap * self.cellArea
# - limited to available channelStorage
volLocEvapWaterBody = pcr.min(\
pcr.max(0.0,self.channelStorage), volLocEvapWaterBody)
# update channelStorage (m3) after evaporation from water bodies
self.channelStorage = self.channelStorage -\
volLocEvapWaterBody
self.local_input_to_surface_water -= volLocEvapWaterBody
# evaporation (m) from water bodies
self.waterBodyEvaporation = volLocEvapWaterBody / self.cellArea
self.waterBodyEvaporation = pcr.ifthen(self.landmask, self.waterBodyEvaporation)
    def calculate_exchange_to_groundwater(self,groundwater,currTimeStep):
        # Estimate riverbed infiltration from surface water to groundwater
        # (self.riverbedExchange, unit: m3/day) and subtract it from the
        # channel storage; the flux itself is handed to the groundwater
        # module in the next time step.
        if self.debugWaterBalance:\
           preStorage = self.channelStorage # unit: m3
        # riverbed infiltration (m3/day):
        #
        # - current implementation based on Inge's principle (later, will be based on groundater head (MODFLOW) and can be negative)
        # - happening only if 0.0 < baseflow < total_groundwater_abstraction
        # - total_groundwater_abstraction: from fossil and non fossil
        # - infiltration rate will be based on aquifer saturated conductivity
        # - limited to fracWat
        # - limited to available channelStorage
        # - this infiltration will be handed to groundwater in the next time step
        # - References: de Graaf et al. (2014); Wada et al. (2012); Wada et al. (2010)
        # - TODO: This concept should be IMPROVED.
        #
        if groundwater.useMODFLOW:
            # river bed exchange have been calculated within the MODFLOW (via baseflow variable)
            self.riverbedExchange = pcr.scalar(0.0)
        else:
            riverbedConductivity  = groundwater.riverBedConductivity # unit: m/day
            riverbedConductivity  = pcr.min(0.1, riverbedConductivity) # maximum conductivity is 0.1 m/day (as recommended by Marc Bierkens: resistance = 1 day for 0.1 m river bed thickness)
            total_groundwater_abstraction = pcr.max(0.0, groundwater.nonFossilGroundwaterAbs + groundwater.fossilGroundwaterAbstr) # unit: m
            # infiltration only where baseflow is positive but smaller than
            # the total groundwater abstraction; rate = conductivity over the
            # surface-water area, capped by the available channel storage
            self.riverbedExchange = pcr.max(0.0,\
                                    pcr.min(pcr.max(0.0,self.channelStorage),\
                                    pcr.ifthenelse(groundwater.baseflow > 0.0, \
                                    pcr.ifthenelse(total_groundwater_abstraction > groundwater.baseflow, \
                                    riverbedConductivity * self.dynamicFracWat * self.cellArea, \
                                    0.0), 0.0)))
            self.riverbedExchange = pcr.cover(self.riverbedExchange, 0.0)
            factor = 0.25 # to avoid flip flop
            self.riverbedExchange = pcr.min(self.riverbedExchange, (1.0-factor)*pcr.max(0.0,self.channelStorage))
            self.riverbedExchange = pcr.ifthenelse(self.channelStorage < 0.0, 0.0, self.riverbedExchange)
            self.riverbedExchange = pcr.cover(self.riverbedExchange, 0.0)
            self.riverbedExchange = pcr.ifthen(self.landmask, self.riverbedExchange)
        # update channelStorage (m3) after riverbedExchange (m3)
        self.channelStorage -= self.riverbedExchange
        self.local_input_to_surface_water -= self.riverbedExchange
        if self.debugWaterBalance:\
            vos.waterBalanceCheck([pcr.scalar(0.0)],\
                                  [self.riverbedExchange/self.cellArea],\
                                  [ preStorage/self.cellArea],\
                                  [ self.channelStorage/self.cellArea],\
                                  'channelStorage after surface water infiltration',\
                                  True,\
                                  currTimeStep.fulldate,threshold=1e-4)
    def simple_update(self,landSurface,groundwater,currTimeStep,meteo):
        # Daily routing driver for the accuTravelTime / simplifiedKinematicWave
        # schemes: add local fluxes (runoff, abstraction, return flow,
        # evaporation) to the channel storage, run the lake/reservoir update,
        # route the storage, and derive the discharge reporting variables.
        #
        # updating timesteps to calculate long and short term statistics values of avgDischarge, avgInflow, avgOutflow, etc.
        self.timestepsToAvgDischarge += 1.
        if self.debugWaterBalance:\
           preStorage = self.channelStorage # unit: m3
        # the following variable defines total local change (input) to surface water storage bodies # unit: m3
        # - only local processes; therefore not considering any routing processes
        self.local_input_to_surface_water = pcr.scalar(0.0) # initiate the variable, start from zero
        # runoff from landSurface cells (unit: m/day)
        self.runoff = landSurface.landSurfaceRunoff +\
                      groundwater.baseflow
        # update channelStorage (unit: m3) after runoff
        self.channelStorage += self.runoff * self.cellArea
        self.local_input_to_surface_water += self.runoff * self.cellArea
        # update channelStorage (unit: m3) after actSurfaceWaterAbstraction
        self.channelStorage -= landSurface.actSurfaceWaterAbstract * self.cellArea
        self.local_input_to_surface_water -= landSurface.actSurfaceWaterAbstract * self.cellArea
        # reporting channelStorage after surface water abstraction (unit: m3)
        self.channelStorageAfterAbstraction = pcr.ifthen(self.landmask, self.channelStorage)
        # return flow from (m) non irrigation water demand
        # - calculated in the landSurface.py module
        nonIrrReturnFlowVol = landSurface.nonIrrReturnFlow*self.cellArea
        self.channelStorage += nonIrrReturnFlowVol
        self.local_input_to_surface_water += nonIrrReturnFlowVol
        # water consumption for non irrigation water demand (m) - this water is removed from the system/water balance
        self.nonIrrWaterConsumption = pcr.max(0.0,\
                                      landSurface.nonIrrGrossDemand - \
                                      landSurface.nonIrrReturnFlow)
        # calculate evaporation from water bodies - this will return self.waterBodyEvaporation (unit: m)
        self.calculate_evaporation(landSurface, groundwater, currTimeStep, meteo)
        if self.debugWaterBalance:\
           vos.waterBalanceCheck([self.runoff,\
                                  landSurface.nonIrrReturnFlow],\
                                 [landSurface.actSurfaceWaterAbstract,self.waterBodyEvaporation],\
                                 [ preStorage/self.cellArea],\
                                 [ self.channelStorage/self.cellArea],\
                                 'channelStorage (unit: m) before lake/reservoir outflow',\
                                 True,\
                                 currTimeStep.fulldate,threshold=5e-3)
        # LAKE AND RESERVOIR OPERATIONS
        ##########################################################################################################################
        if self.debugWaterBalance: \
            preStorage = self.channelStorage # unit: m3
        # at cells where lakes and/or reservoirs defined, move channelStorage to waterBodyStorage
        #
        storageAtLakeAndReservoirs = \
         pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.,
                    self.channelStorage)
        storageAtLakeAndReservoirs = pcr.cover(storageAtLakeAndReservoirs,0.0)
        #
        # - move only non negative values and use rounddown values
        storageAtLakeAndReservoirs = pcr.max(0.00, pcr.rounddown(storageAtLakeAndReservoirs))
        self.channelStorage -= storageAtLakeAndReservoirs # unit: m3
        # update waterBodyStorage (inflow, storage and outflow)
        self.WaterBodies.update(storageAtLakeAndReservoirs,\
                                self.timestepsToAvgDischarge,\
                                self.maxTimestepsToAvgDischargeShort,\
                                self.maxTimestepsToAvgDischargeLong,\
                                currTimeStep,\
                                self.avgDischarge,\
                                vos.secondsPerDay(),\
                                self.downstreamDemand)
        # waterBodyStorage (m3) after outflow:                               # values given are per water body id (not per cell)
        self.waterBodyStorage = pcr.ifthen(self.landmask,
                                self.WaterBodies.waterBodyStorage)
        # transfer outflow from lakes and/or reservoirs to channelStorages
        waterBodyOutflow = pcr.cover(\
                           pcr.ifthen(\
                           self.WaterBodies.waterBodyOut,
                           self.WaterBodies.waterBodyOutflow), 0.0) # unit: m3/day
        if self.method == "accuTravelTime":
            # distribute outflow to water body storage
            # - this is to avoid 'waterBodyOutflow' skipping cells
            # - this is done by distributing waterBodyOutflow within lake/reservoir cells
            #
            waterBodyOutflow = pcr.areaaverage(waterBodyOutflow, self.WaterBodies.waterBodyIds)
            waterBodyOutflow = pcr.ifthen(\
                               pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0,
                               waterBodyOutflow)
        self.waterBodyOutflow = pcr.cover(waterBodyOutflow, 0.0) # unit: m3/day
        # update channelStorage (m3) after waterBodyOutflow (m3)
        self.channelStorage += self.waterBodyOutflow
        # Note that local_input_to_surface_water does not include waterBodyOutflow
        if self.debugWaterBalance:\
           vos.waterBalanceCheck([self.waterBodyOutflow/self.cellArea],\
                                 [storageAtLakeAndReservoirs/self.cellArea],\
                                 [ preStorage/self.cellArea],\
                                 [ self.channelStorage/self.cellArea],\
                                 'channelStorage (unit: m) after lake reservoir/outflow fluxes (errors here are most likely due to pcraster implementation in float_32)',\
                                 True,\
                                 currTimeStep.fulldate,threshold=1e-3)
        # ROUTING OPERATION:
        ##########################################################################################################################
        # - this will return new self.channelStorage (but still without waterBodyStorage)
        # - also, this will return self.Q which is channel discharge in m3/day
        #
        if self.method == "accuTravelTime":          self.accuTravelTime()
        if self.method == "simplifiedKinematicWave": self.simplifiedKinematicWave()
        #
        #
        # channel discharge (m3/s): for current time step
        #
        self.discharge = self.Q / vos.secondsPerDay()
        self.discharge = pcr.max(0., self.discharge)                   # reported channel discharge cannot be negative
        self.discharge = pcr.ifthen(self.landmask, self.discharge)
        #
        # discharge at lake/reservoir cells: take the maximum within each
        # water body so the reported value is the body-wide outflow
        self.disChanWaterBody = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.,\
                                pcr.areamaximum(self.discharge,self.WaterBodies.waterBodyIds))
        self.disChanWaterBody = pcr.cover(self.disChanWaterBody, self.discharge)
        self.disChanWaterBody = pcr.ifthen(self.landmask, self.disChanWaterBody)
        #
        self.disChanWaterBody = pcr.max(0.,self.disChanWaterBody)      # reported channel discharge cannot be negative
        #
        #
        ##########################################################################################################################
        # calculate the statistics of long and short term flow values
        self.calculate_statistics(groundwater)
        # return waterBodyStorage to channelStorage
        self.channelStorage = self.return_water_body_storage_to_channel(self.channelStorage)
def calculate_alpha_and_initial_discharge_for_kinematic_wave(self, channelStorage, water_height, innundatedFraction, floodDepth):
    """Return (alpha, dischargeInitial) for the kinematic wave routing.

    alpha (dimensionless) is the roughness/geometry coefficient used by
    pcr.kinematic (see: http://pcraster.geo.uu.nl/pcraster/4.0.0/doc/manual/op_kinematic.html);
    it is derived from the channel wetted area (m2) and wetted perimeter (m),
    together with self.beta (dimensionless), assuming a rectangular channel,
    optionally extended with flood-inundated area next to the channel.

    Parameters:
        channelStorage      -- water volume stored in the channel (m3)
        water_height        -- estimated water depth in the channel (m)
        innundatedFraction  -- flooded fraction of the cell (dimensionless)
        floodDepth          -- inundation depth above the floodplain (m)
    Returns:
        (alpha, dischargeInitial) -- dischargeInitial is the initial channel
        discharge estimate fed to the kinematic wave solver.
    """
    # channel wetted area (m2)
    # - the minimum wetted area is: water height x channel width (Edwin introduce this)
    # - channel wetted area is mainly based on channelStorage and channelLength (Rens's approach)
    channel_wetted_area = water_height * self.channelWidth
    channel_wetted_area = pcr.max(channel_wetted_area, \
                          channelStorage / self.channelLength)

    # wetted perimeter (m)
    # - flood part: two vertical walls of height floodDepth plus the flooded
    #   width that extends beyond the channel width
    flood_only_wetted_perimeter = floodDepth * (2.0) + \
                                  pcr.max(0.0, innundatedFraction*self.cellArea/self.channelLength - self.channelWidth)
    # - channel part: two side walls (wetted height capped at channelDepth)
    #   plus the channel bed width
    channel_only_wetted_perimeter = \
        pcr.min(self.channelDepth, vos.getValDivZero(channelStorage, self.channelLength*self.channelWidth, 0.0)) * 2.0 + \
        self.channelWidth
    # total channel wetted perimeter (unit: m)
    channel_wetted_perimeter = channel_only_wetted_perimeter + \
                               flood_only_wetted_perimeter
    # minimum channel wetted perimeter = 10 cm
    channel_wetted_perimeter = pcr.max(0.1, channel_wetted_perimeter)

    # corrected Manning's coefficient:
    # - perimeter-weighted composite of channel and floodplain roughness
    if self.floodPlain:
        usedManningsN = ((channel_only_wetted_perimeter/channel_wetted_perimeter) * self.manningsN**(1.5) + \
                         ( flood_only_wetted_perimeter/channel_wetted_perimeter) * self.floodplainManN**(1.5))**(2./3.)
    else:
        usedManningsN = self.manningsN

    # alpha (dimensionless) and initial estimate of channel discharge (m3/s)
    alpha = (usedManningsN*channel_wetted_perimeter**(2./3.)*self.gradient**(-0.5))**self.beta   # dimensionless
    # - discharge from Q = (A / alpha)**(1/beta); zero where alpha is zero
    #   to avoid division by zero
    dischargeInitial = pcr.ifthenelse(alpha > 0.0,\
                       (channel_wetted_area / alpha)**(1.0/self.beta), 0.0)   # unit: m3
    return (alpha, dischargeInitial)
def calculate_alpha_and_initial_discharge_for_kinematic_wave_OLD(self, channelStorage = None):
    """Legacy variant: return (alpha, dischargeInitial, floodFraction, floodDepth).

    Unlike the non-OLD method, this one computes the flood fraction/depth
    itself (via returnInundationFractionAndFloodDepth) and reads the water
    height from self.water_height.

    NOTE(review): despite the `channelStorage = None` default, the argument
    is required -- it is used in arithmetic below, so calling without it
    would raise a TypeError. Confirm whether the default can be removed.
    """
    # Manning's coefficient:
    usedManningsN = self.manningsN

    # channel wetted area (m2)
    # - alternative 1: based on channelStorage and channelLength (Rens's approach)
    channel_wetted_area = self.water_height * self.channelWidth
    # - alternative 2: the minimum wetted area is: water height x channel width (Edwin introduce this)
    channel_wetted_area = pcr.max(channel_wetted_area,\
                          channelStorage / self.channelLength)   # unit: m2

    # channel wetted perimeter (m), rectangular channel
    channel_wetted_perimeter = 2.0*channel_wetted_area/self.channelWidth + self.channelWidth   # unit: m

    # flood fraction (dimensionless) and flood depth (unit: m)
    floodFraction = pcr.scalar(0.0)
    floodDepth = pcr.scalar(0.0)
    if self.floodPlain:
        # return flood fraction and flood/innundation depth above the flood plain
        floodFraction, floodDepth = self.returnInundationFractionAndFloodDepth(channelStorage)
        # wetted perimeter including the flooded area next to the channel
        flood_only_wetted_perimeter = pcr.max(0.0, floodFraction*self.cellArea/self.channelLength - self.channelWidth) + \
                                      floodDepth * (2.0)
        channel_only_wetted_perimeter = \
            self.channelWidth + \
            2.0 * pcr.min(self.channelDepth, channelStorage/(self.channelLength*self.channelWidth))
        channel_wetted_perimeter = channel_only_wetted_perimeter + flood_only_wetted_perimeter   # unit: m
        # corrected Manning's coefficient: perimeter-weighted composite of
        # channel and floodplain roughness
        usedManningsN = ((channel_only_wetted_perimeter/channel_wetted_perimeter) * self.manningsN**(1.5) + \
                         ( flood_only_wetted_perimeter/channel_wetted_perimeter) * self.floodplainManN**(1.5))**(2./3.)

    # alpha (dimensionless) and estimate of channel discharge (m3/s)
    alpha = (usedManningsN*channel_wetted_perimeter**(2./3.)*self.gradient**(-0.5))**self.beta   # dimensionless
    dischargeInitial = pcr.ifthenelse(alpha > 0.0,\
                       (channel_wetted_area / alpha)**(1.0/self.beta), 0.0)   # unit: m3
    return (alpha, dischargeInitial, floodFraction, floodDepth)
def integralLogisticFunction(self, x):
    """Return a tuple with the integrals of the logistic function of x and of -x.

    Uses integral PHI(x) = ln(1 + exp(-x)); the companion value for (-x)
    follows as x + ln(1 + exp(-x)).
    """
    integral_at_x = pcr.ln(pcr.exp(-x) + 1)
    integral_at_minus_x = x + integral_at_x
    return integral_at_x, integral_at_minus_x
def returnInundationFractionAndFloodDepth(self, channelStorage):
    """Return (inundatedFraction, floodDepth) for the given channelStorage (m3).

    inundatedFraction (dimensionless, between channelFraction and 1.0) is the
    cell fraction covered by channel plus floodplain water; floodDepth (m) is
    the water depth above the floodplain. When the floodplain scheme is
    disabled, returns (self.channelFraction, 0.0).
    """
    # flood/innundation depth above the flood plain (unit: m)
    floodDepth = 0.0
    # channel and flood innundated fraction (dimensionless, the minimum value is channelFraction)
    inundatedFraction = self.channelFraction

    if self.floodPlain:

        msg = 'Calculate channel inundated fraction and flood inundation depth above the floodplain.'
        logger.info(msg)

        # given the flood channel volume: channelStorage
        # - return the flooded fraction and the associated water height
        # - using a logistic smoother near intersections (K&K, 2007)

        # flood/innundation/excess volume (excess above the bankfull capacity, unit: m3)
        excessVolume = pcr.max(0.0, channelStorage - self.channelStorageCapacity)

        # find the match on the basis of the shortest distance
        # to the available intersections or steps
        deltaXMin = self.floodVolume[self.nrZLevels-1]
        y_i = pcr.scalar(1.0)
        k = [pcr.scalar(0.0)]*2
        mInt = pcr.scalar(0.0)
        # scan the pre-computed flood-volume steps from the top level down,
        # keeping per-cell the step closest to the excess volume
        for iCnt in range(self.nrZLevels-1, 0, -1):
            # - find x_i for current volume and update match if applicable
            #   also update slope and intercept
            deltaX = excessVolume - self.floodVolume[iCnt]
            mask = pcr.abs(deltaX) < pcr.abs(deltaXMin)
            deltaXMin = pcr.ifthenelse(mask, deltaX, deltaXMin)
            y_i = pcr.ifthenelse(mask, self.areaFractions[iCnt], y_i)
            k[0] = pcr.ifthenelse(mask, self.kSlope[iCnt-1], k[0])
            k[1] = pcr.ifthenelse(mask, self.kSlope[iCnt], k[1])
            mInt = pcr.ifthenelse(mask, self.mInterval[iCnt], mInt)

        # all values returned, process data: calculate scaled deltaX and smoothed function
        # on the basis of the integrated logistic functions PHI(x) and 1-PHI(x)
        deltaX = deltaXMin
        deltaXScaled = pcr.ifthenelse(deltaX < 0., pcr.scalar(-1.), 1.)*\
                       pcr.min(self.criterionKK, pcr.abs(deltaX/pcr.max(1., mInt)))
        logInt = self.integralLogisticFunction(deltaXScaled)

        # compute fractional inundated/flooded area
        # - logistic-smoothed interpolation inside the |deltaXScaled| <
        #   criterionKK window; linear extrapolation outside it
        inundatedFraction = pcr.ifthenelse(excessVolume > 0.0,\
                            pcr.ifthenelse(pcr.abs(deltaXScaled) < self.criterionKK,\
                            y_i-k[0]*mInt*logInt[0]+k[1]*mInt*logInt[1],\
                            y_i+pcr.ifthenelse(deltaX < 0., k[0], k[1])*deltaX), 0.0)
        # - minimum value is channelFraction
        inundatedFraction = pcr.max(self.channelFraction, inundatedFraction)
        # - maximum value is 1.0
        inundatedFraction = pcr.max(0., pcr.min(1.0, inundatedFraction))   # dimensionless

        # calculate flooded/inundated depth (unit: m) above the floodplain
        # - it will be zero if excessVolume == 0
        floodDepth = pcr.ifthenelse(inundatedFraction > 0., \
                     excessVolume/(pcr.max(self.min_fracwat_for_water_height, inundatedFraction)*self.cellArea), 0.)   # unit: m
        # - maximum flood depth
        max_flood_depth = 25.0
        floodDepth = pcr.max(0.0, pcr.min(max_flood_depth, floodDepth))

    return inundatedFraction, floodDepth
def return_water_body_storage_to_channel(self, channelStorage):
    """Redistribute lake/reservoir storage back into channelStorage (m3).

    For every cell belonging to a water body (waterBodyIds > 0),
    channelStorage is replaced by the cell's area-proportional share of the
    body's total volume; cells outside water bodies keep their value.
    """
    # total water volume per water body: the area-averaged waterBodyStorage
    # plus the channel storage summed over all cells of the body
    waterBodyStorageTotal = \
        pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.,
                   pcr.areaaverage(\
                   pcr.ifthen(self.landmask,self.WaterBodies.waterBodyStorage),\
                   pcr.ifthen(self.landmask,self.WaterBodies.waterBodyIds)) + \
                   pcr.areatotal(pcr.cover(\
                   pcr.ifthen(self.landmask,channelStorage), 0.0),\
                   pcr.ifthen(self.landmask,self.WaterBodies.waterBodyIds)))
    # distribute the total volume over the body's cells proportionally
    # to the cell area
    waterBodyStoragePerCell = \
        waterBodyStorageTotal*\
        self.cellArea/\
        pcr.areatotal(pcr.cover(\
        self.cellArea, 0.0),\
        pcr.ifthen(self.landmask,self.WaterBodies.waterBodyIds))
    # keep the per-cell share only at water body cells
    waterBodyStoragePerCell = \
        pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.,
                   waterBodyStoragePerCell)   # unit: m3

    # water body cells get their share; all other cells keep channelStorage
    channelStorage = pcr.cover(waterBodyStoragePerCell, channelStorage)   # unit: m3
    channelStorage = pcr.ifthen(self.landmask, channelStorage)
    return channelStorage
def kinematic_wave_update(self, landSurface,groundwater,currTimeStep,meteo):
    """Route channel water over one daily time step with the fully kinematic
    wave method, using internally estimated sub-daily time steps.

    Per sub-step: add runoff/return flow, subtract open-water evaporation,
    move lake/reservoir cells' storage to the water body module, run
    pcr.kinematic on the remaining channel storage, apply the water body
    update, and restore water body storage to the channel. Updates (among
    others) self.channelStorage, self.Q, self.discharge, self.disChanWaterBody
    and the flow statistics.
    """
    logger.info("Using the fully kinematic wave method! ")

    # updating timesteps to calculate long and short term statistics
    # values of avgDischarge, avgInflow, avgOutflow, etc.
    self.timestepsToAvgDischarge += 1.

    # the following variable defines total local change (input) to surface water storage bodies   # unit: m3
    # - only local processes; therefore not considering any routing processes
    self.local_input_to_surface_water = pcr.scalar(0.0)   # initiate the variable, start from zero

    # For simplification, surface water abstraction
    # is done outside the sub daily time steps.
    #
    # update channelStorage (unit: m3) after actSurfaceWaterAbstraction
    self.channelStorage -= landSurface.actSurfaceWaterAbstract * self.cellArea
    self.local_input_to_surface_water -= landSurface.actSurfaceWaterAbstract * self.cellArea

    # reporting channelStorage after surface water abstraction (unit: m3)
    self.channelStorageAfterAbstraction = pcr.ifthen(self.landmask, self.channelStorage)

    # return flow from (m) non irrigation water demand
    # - calculated in the landSurface.py module: landSurface.nonIrrReturnFlow
    # water consumption for non irrigation water demand (m) - this water is removed from the system/water balance
    self.nonIrrWaterConsumption = pcr.max(0.0,\
                                  landSurface.nonIrrGrossDemand - \
                                  landSurface.nonIrrReturnFlow)

    # runoff from landSurface cells (unit: m/day)
    self.runoff = landSurface.landSurfaceRunoff +\
                  groundwater.baseflow   # values are over the entire cell area

    # route only non negative channelStorage (otherwise stay):
    channelStorageThatWillNotMove = pcr.ifthenelse(self.channelStorage < 0.0, self.channelStorage, 0.0)
    # channelStorage that will be given to the ROUTING operation:
    channelStorageForRouting = pcr.max(0.0, self.channelStorage)   # unit: m3

    # estimate of water height (m)
    # - needed to estimate the length of sub-time step and
    #   also to estimate the channel wetted area (for the calculation of alpha and dischargeInitial)
    self.water_height = channelStorageForRouting /\
                        (pcr.max(self.min_fracwat_for_water_height, self.dynamicFracWat) * self.cellArea)

    # estimate the length of sub-time step (unit: s):
    length_of_sub_time_step, number_of_loops = self.estimate_length_of_sub_time_step()

    for i_loop in range(number_of_loops):

        msg = "sub-daily time step "+str(i_loop+1)+" from "+str(number_of_loops)
        logger.info(msg)

        # initiating accumulated values:
        if i_loop == 0:
            acc_local_input_to_surface_water = pcr.scalar(0.0)    # unit: m3
            acc_water_body_evaporation_volume = pcr.scalar(0.0)   # unit: m3
            acc_discharge_volume = pcr.scalar(0.0)                # unit: m3

        if self.debugWaterBalance:
            preStorage = pcr.ifthen(self.landmask, channelStorageForRouting)

        # update channelStorageForRouting after runoff and return flow from non irrigation demand
        channelStorageForRouting += (self.runoff + landSurface.nonIrrReturnFlow) * \
                                    self.cellArea * length_of_sub_time_step/vos.secondsPerDay()   # unit: m3
        acc_local_input_to_surface_water += (self.runoff + landSurface.nonIrrReturnFlow) * \
                                    self.cellArea * length_of_sub_time_step/vos.secondsPerDay()   # unit: m3

        # potential evaporation within the sub-time step ; unit: m, values are over the entire cell area
        water_body_potential_evaporation = self.calculate_potential_evaporation(landSurface,currTimeStep,meteo) *\
                                           length_of_sub_time_step/vos.secondsPerDay()
        # - accumulating potential evaporation
        if i_loop == 0:
            self.waterBodyPotEvap = pcr.scalar(0.0)
        self.waterBodyPotEvap += water_body_potential_evaporation

        # update channelStorageForRouting after evaporation
        # NOTE(review): water_body_potential_evaporation was already scaled by
        # length_of_sub_time_step/secondsPerDay above; the same factor is
        # applied again here -- confirm this double scaling is intended.
        water_body_evaporation_volume = pcr.min(channelStorageForRouting, \
                                        water_body_potential_evaporation * self.cellArea * length_of_sub_time_step/vos.secondsPerDay())
        channelStorageForRouting -= water_body_evaporation_volume
        acc_local_input_to_surface_water -= water_body_evaporation_volume
        acc_water_body_evaporation_volume += water_body_evaporation_volume

        if self.debugWaterBalance:
            vos.waterBalanceCheck([self.runoff * length_of_sub_time_step/vos.secondsPerDay(), \
                                   landSurface.nonIrrReturnFlow * length_of_sub_time_step/vos.secondsPerDay()],\
                                  [water_body_evaporation_volume/self.cellArea],\
                                  [preStorage/self.cellArea],\
                                  [channelStorageForRouting/self.cellArea],\
                                  'channelStorageForRouting',\
                                  True,\
                                  currTimeStep.fulldate,threshold=5e-5)

        # the kinematic wave is implemented only for channels (not to lakes and reservoirs)
        # at cells where lakes and/or reservoirs defined, move channelStorage to waterBodyStorage
        storageAtLakeAndReservoirs = \
            pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.,
                       channelStorageForRouting)
        storageAtLakeAndReservoirs = pcr.cover(storageAtLakeAndReservoirs,0.0)
        # - move only non negative values and use rounddown values
        storageAtLakeAndReservoirs = pcr.max(0.00, pcr.rounddown(storageAtLakeAndReservoirs))
        channelStorageForRouting -= storageAtLakeAndReservoirs   # unit: m3

        # alpha parameter and initial discharge variable needed for kinematic wave
        # NOTE(review): this reads self.innundatedFraction (double 'n') while
        # the end of this loop assigns self.inundatedFraction (single 'n');
        # the freshly updated fraction therefore never feeds back here --
        # confirm which attribute is intended.
        alpha, dischargeInitial = \
            self.calculate_alpha_and_initial_discharge_for_kinematic_wave(channelStorageForRouting, \
                                                                          self.water_height, \
                                                                          self.innundatedFraction, self.floodDepth)

        # discharge (m3/s) based on kinematic wave approximation
        #~ logger.debug('start pcr.kinematic')
        self.subDischarge = pcr.kinematic(self.lddMap, dischargeInitial, 0.0,
                                          alpha, self.beta, \
                                          1, length_of_sub_time_step, self.channelLength)
        self.subDischarge = pcr.max(0.0, pcr.cover(self.subDischarge, 0.0))
        #~ logger.debug('done')

        # set discharge to zero for lakes and reservoirs:
        self.subDischarge = pcr.cover(\
                            pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0., pcr.scalar(0.0)), self.subDischarge)

        # make sure that we do not get negative channel storage
        # - cap the outflow volume at local storage plus upstream inflow
        self.subDischarge = pcr.min(self.subDischarge * length_of_sub_time_step, \
                            pcr.max(0.0, channelStorageForRouting + pcr.upstream(self.lddMap, self.subDischarge * length_of_sub_time_step)))/length_of_sub_time_step

        # update channelStorage (m3): upstream inflow minus own outflow
        storage_change_in_volume = pcr.upstream(self.lddMap, self.subDischarge * length_of_sub_time_step) - self.subDischarge * length_of_sub_time_step
        channelStorageForRouting += storage_change_in_volume

        if self.debugWaterBalance:
            vos.waterBalanceCheck([self.runoff * length_of_sub_time_step/vos.secondsPerDay(), \
                                   landSurface.nonIrrReturnFlow * length_of_sub_time_step/vos.secondsPerDay(),\
                                   storage_change_in_volume/self.cellArea],\
                                  [water_body_evaporation_volume/self.cellArea],\
                                  [preStorage/self.cellArea - storageAtLakeAndReservoirs/self.cellArea],\
                                  [channelStorageForRouting/self.cellArea],\
                                  'channelStorageForRouting (after routing, without lakes/reservoirs)',\
                                  True,\
                                  currTimeStep.fulldate,threshold=5e-4)

        # lakes and reservoirs: update waterBodyStorage (inflow, storage and outflow)
        self.WaterBodies.update(storageAtLakeAndReservoirs,\
                                self.timestepsToAvgDischarge,\
                                self.maxTimestepsToAvgDischargeShort,\
                                self.maxTimestepsToAvgDischargeLong,\
                                currTimeStep,\
                                self.avgDischarge,\
                                length_of_sub_time_step,\
                                self.downstreamDemand)

        # waterBodyStorage (m3) after outflow:   # values given are per water body id (not per cell)
        self.waterBodyStorage = pcr.ifthen(self.landmask,
                                self.WaterBodies.waterBodyStorage)

        # transfer outflow from lakes and/or reservoirs to channelStorages
        waterBodyOutflow = pcr.cover(\
                           pcr.ifthen(\
                           self.WaterBodies.waterBodyOut,
                           self.WaterBodies.waterBodyOutflow), 0.0)   # unit: m3

        # update channelStorage (m3) after waterBodyOutflow (m3):
        # outflow enters the cell(s) downstream of the outlet
        channelStorageForRouting += pcr.upstream(self.lddMap, waterBodyOutflow)
        # Note that local_input_to_surface_water does not include waterBodyOutflow

        # at the lake/reservoir outlets, add the discharge of water body outflow
        waterBodyOutflowInM3PerSec = pcr.ifthen(\
                                     self.WaterBodies.waterBodyOut,
                                     self.WaterBodies.waterBodyOutflow) / length_of_sub_time_step
        self.subDischarge = self.subDischarge + \
                            pcr.cover(waterBodyOutflowInM3PerSec, 0.0)
        self.subDischarge = pcr.ifthen(self.landmask, self.subDischarge)

        # total discharge_volume (m3) until this present i_loop
        acc_discharge_volume += self.subDischarge * length_of_sub_time_step

        # return waterBodyStorage to channelStorage
        channelStorageForRouting = self.return_water_body_storage_to_channel(channelStorageForRouting)

        # route only non negative channelStorage (otherwise stay):
        channelStorageThatWillNotMove += pcr.ifthenelse(channelStorageForRouting < 0.0, channelStorageForRouting, 0.0)
        channelStorageForRouting = pcr.max(0.000, channelStorageForRouting)

        # update flood fraction and flood depth
        self.inundatedFraction, self.floodDepth = self.returnInundationFractionAndFloodDepth(channelStorageForRouting)

        # update dynamicFracWat: fraction of surface water bodies (dimensionless) including lakes and reservoirs
        # - lake and reservoir surface water fraction
        self.dynamicFracWat = pcr.cover(\
                              pcr.min(1.0, self.WaterBodies.fracWat), 0.0)
        # - fraction of channel (including its excess above bankfull capacity)
        self.dynamicFracWat += pcr.max(0.0, 1.0 - self.dynamicFracWat) * pcr.max(self.channelFraction, self.innundatedFraction)
        # - maximum value of dynamicFracWat is 1.0
        self.dynamicFracWat = pcr.ifthen(self.landmask, pcr.min(1.0, self.dynamicFracWat))

        # estimate water_height for the next loop
        # - needed to estimate the channel wetted area (for the calculation of alpha and dischargeInitial)
        self.water_height = channelStorageForRouting / (pcr.max(self.min_fracwat_for_water_height, self.dynamicFracWat) * self.cellArea)
        # TODO: Check whether the usage of dynamicFracWat provides any problems?

    # evaporation (m/day)
    # NOTE(review): acc_water_body_evaporation_volume is accumulated over the
    # sub-steps but never used; the line below reports only the LAST
    # sub-step's volume -- confirm whether the accumulator was intended here.
    self.waterBodyEvaporation = water_body_evaporation_volume / self.cellArea

    # local input to surface water (m3)
    self.local_input_to_surface_water += acc_local_input_to_surface_water

    # channel discharge (m3/day) = self.Q
    self.Q = acc_discharge_volume

    # updating channelStorage (after routing)
    self.channelStorage = channelStorageForRouting
    # return channelStorageThatWillNotMove to channelStorage:
    self.channelStorage += channelStorageThatWillNotMove

    # channel discharge (m3/s): for current time step
    self.discharge = self.Q / vos.secondsPerDay()
    self.discharge = pcr.max(0., self.discharge)   # reported channel discharge cannot be negative
    self.discharge = pcr.ifthen(self.landmask, self.discharge)

    # discharge reported at lake/reservoir cells: the maximum discharge
    # within each water body; elsewhere the plain channel discharge
    self.disChanWaterBody = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.,\
                            pcr.areamaximum(self.discharge,self.WaterBodies.waterBodyIds))
    self.disChanWaterBody = pcr.cover(self.disChanWaterBody, self.discharge)
    self.disChanWaterBody = pcr.ifthen(self.landmask, self.disChanWaterBody)
    self.disChanWaterBody = pcr.max(0.,self.disChanWaterBody)   # reported channel discharge cannot be negative

    # calculate the statistics of long and short term flow values
    self.calculate_statistics(groundwater)
def calculate_statistics(self, groundwater):
    """Update the running short/long term statistics of inflow, outflow,
    discharge and baseflow using online (incremental) mean updates.

    See: http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
    """
    # short term average inflow (m3/s) and long term average outflow (m3/s)
    # from lakes and reservoirs
    self.avgInflow = pcr.ifthen(self.landmask, pcr.cover(self.WaterBodies.avgInflow , 0.0))
    self.avgOutflow = pcr.ifthen(self.landmask, pcr.cover(self.WaterBodies.avgOutflow, 0.0))

    # discharge value used for the statistics: non-negative, and at least
    # the water-body discharge at lake/reservoir cells
    discharge_used = pcr.max(pcr.max(0.0, self.discharge), self.disChanWaterBody)

    # effective sample sizes (capped running windows)
    window_long = pcr.min(self.maxTimestepsToAvgDischargeLong, self.timestepsToAvgDischarge)
    window_short = pcr.min(self.maxTimestepsToAvgDischargeShort, self.timestepsToAvgDischarge)

    # long term average discharge and its accumulated squared anomaly
    delta_long = discharge_used - self.avgDischarge
    self.avgDischarge = pcr.max(0.0, self.avgDischarge + delta_long / window_long)
    self.m2tDischarge = self.m2tDischarge + pcr.abs(delta_long * (discharge_used - self.avgDischarge))

    # short term average discharge
    delta_short = discharge_used - self.avgDischargeShort
    self.avgDischargeShort = pcr.max(0.0, self.avgDischargeShort + delta_short / window_short)

    # long term average baseflow (m3/s); used as a proxy for partitioning
    # groundwater and surface water abstractions
    baseflow_m3_per_sec = groundwater.baseflow * self.cellArea / vos.secondsPerDay()
    delta_baseflow = baseflow_m3_per_sec - self.avgBaseflow
    self.avgBaseflow = pcr.max(0.0, self.avgBaseflow + delta_baseflow / window_long)
def estimate_discharge_for_environmental_flow(self, channelStorage):
    """Estimate the minimum discharge (m3/s) reserved as environmental flow.

    The threshold is the lower bound of a one-sided confidence interval of
    the long term discharge, but never less than 10% of the long term
    average (to avoid flip-flop) and never negative.
    """
    # statistical assumption: z-score of the 90th percentile
    Z_SCORE_P90 = 1.2816
    #~ # z-score of the 95th percentile would be 1.645

    # long term variance and standard deviation of the discharge, derived
    # from the online accumulator m2tDischarge (see
    # http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance)
    sample_size = pcr.min(self.maxTimestepsToAvgDischargeLong, self.timestepsToAvgDischarge)
    variance = self.m2tDischarge / pcr.max(1., sample_size - 1.)
    std_deviation = pcr.max(variance ** 0.5, 0.0)

    # minimum discharge for the environmental flow (m3/s)
    environmental_flow = pcr.max(0.0, self.avgDischarge - Z_SCORE_P90 * std_deviation)

    # keep at least 10% of the long term average discharge, to avoid flip-flop
    MINIMUM_FRACTION = 0.10
    environmental_flow = pcr.max(MINIMUM_FRACTION * self.avgDischarge, environmental_flow)
    return pcr.max(0.0, environmental_flow)
def estimate_available_volume_for_abstraction(self, channelStorage, length_of_time_step = vos.secondsPerDay()):
    """Return the channel storage volume (m3) available for surface water
    abstraction, while reserving an environmental flow.

    Parameters:
        channelStorage      -- channel storage (unit: m3)
        length_of_time_step -- time step length in seconds; NOTE the default
            is evaluated once at import time (vos.secondsPerDay()), which is
            only safe if that function returns a constant -- presumably it
            does; verify.
    """
    # estimate minimum discharge for environmental flow (m3/s)
    minDischargeForEnvironmentalFlow = self.estimate_discharge_for_environmental_flow(channelStorage)

    # available channelStorage that can be extracted for surface water abstraction
    readAvlChannelStorage = pcr.max(0.0,channelStorage)

    # reduce readAvlChannelStorage if the average discharge < minDischargeForEnvironmentalFlow
    readAvlChannelStorage *= pcr.min(1.0,\
        vos.getValDivZero(pcr.max(0.0, pcr.min(self.avgDischargeShort, self.avgDischarge)), \
                          minDischargeForEnvironmentalFlow, vos.smallNumber))

    # maintaining environmental flow if average discharge > minDischargeForEnvironmentalFlow   # TODO: Check why do we need this?
    readAvlChannelStorage = pcr.ifthenelse(self.avgDischargeShort < minDischargeForEnvironmentalFlow,
                            readAvlChannelStorage,
                            pcr.max(readAvlChannelStorage, \
                            pcr.max(0.0,\
                            self.avgDischargeShort - minDischargeForEnvironmentalFlow)*length_of_time_step))

    # maximum (precentage) of water can be abstracted from the channel - to avoid flip-flop
    maximum_percentage = 0.90
    readAvlChannelStorage = pcr.min(readAvlChannelStorage, \
                            maximum_percentage*channelStorage)
    readAvlChannelStorage = pcr.max(0.0,\
                            readAvlChannelStorage)

    # ignore small volume values - less than 0.1 m3
    readAvlChannelStorage = pcr.rounddown(readAvlChannelStorage*10.)/10.
    readAvlChannelStorage = pcr.ifthen(self.landmask, readAvlChannelStorage)
    return readAvlChannelStorage   # unit: m3
def initiate_old_style_routing_reporting(self, iniItems):
    """Read the old-style reporting options from iniItems.routingOptions and
    create the corresponding netCDF output files.

    Sets self.report. When the configuration is missing or incomplete,
    reporting is disabled (self.report = False) and no files are created.

    Parameters:
        iniItems -- configuration object providing .routingOptions (a mapping
            with the out*NC keys, each a comma-separated list of variable
            names or the string "None") and .outNCDir (output directory).
    """
    self.report = True
    try:
        # each option is a comma-separated list of variable names;
        # the single entry "None" disables the corresponding output group
        self.outDailyTotNC = iniItems.routingOptions['outDailyTotNC'].split(",")
        self.outMonthTotNC = iniItems.routingOptions['outMonthTotNC'].split(",")
        self.outMonthAvgNC = iniItems.routingOptions['outMonthAvgNC'].split(",")
        self.outMonthEndNC = iniItems.routingOptions['outMonthEndNC'].split(",")
        self.outAnnuaTotNC = iniItems.routingOptions['outAnnuaTotNC'].split(",")
        self.outAnnuaAvgNC = iniItems.routingOptions['outAnnuaAvgNC'].split(",")
        self.outAnnuaEndNC = iniItems.routingOptions['outAnnuaEndNC'].split(",")
    except (AttributeError, KeyError, TypeError):
        # was a bare "except:"; narrowed to the errors that an absent or
        # incomplete configuration can actually raise, so real bugs and
        # KeyboardInterrupt are no longer swallowed
        self.report = False

    if self.report:
        self.outNCDir = iniItems.outNCDir
        self.netcdfObj = PCR2netCDF(iniItems)

        def create_netcdf_files(variables, suffix, accumulators=()):
            # Create one "<var>_<suffix>.nc" file per requested variable and
            # initialize the requested accumulator attributes (e.g.
            # "<var>MonthTot") to None, mirroring the original per-group code.
            if variables[0] != "None":
                for var in variables:
                    for acc in accumulators:
                        vars(self)[var + acc] = None
                    self.netcdfObj.createNetCDF(str(self.outNCDir) + "/" + \
                                                str(var) + "_" + suffix + ".nc", \
                                                var, "undefined")

        # daily output in netCDF files
        create_netcdf_files(self.outDailyTotNC, "dailyTot")
        # MONTHly output: cumulative, average, last day of the month
        create_netcdf_files(self.outMonthTotNC, "monthTot", ('MonthTot',))
        create_netcdf_files(self.outMonthAvgNC, "monthAvg", ('MonthTot', 'MonthAvg'))
        create_netcdf_files(self.outMonthEndNC, "monthEnd")
        # YEARly output: cumulative, average, last day of the year
        create_netcdf_files(self.outAnnuaTotNC, "annuaTot", ('AnnuaTot',))
        create_netcdf_files(self.outAnnuaAvgNC, "annuaAvg", ('AnnuaAvg', 'AnnuaTot'))
        create_netcdf_files(self.outAnnuaEndNC, "annuaEnd")
def old_style_routing_reporting(self,currTimeStep):
    """Write the old-style routing output (as configured by
    initiate_old_style_routing_reporting) to netCDF files: daily values,
    monthly/yearly totals, averages and end-of-period states.

    Accumulator attributes ("<var>MonthTot", "<var>AnnuaTot", ...) are
    (re)initialized at the start of each period and written out at its end.
    Does nothing when reporting is disabled (self.report is False).
    """
    if self.report == True:
        timeStamp = datetime.datetime(currTimeStep.year,\
                                      currTimeStep.month,\
                                      currTimeStep.day,\
                                      0)
        # writing daily output to netcdf files
        timestepPCR = currTimeStep.timeStepPCR
        if self.outDailyTotNC[0] != "None":
            for var in self.outDailyTotNC:
                self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
                                           str(var)+"_dailyTot.nc",\
                                           var,\
                                           pcr2numpy(self.__getattribute__(var),vos.MV),\
                                           timeStamp,timestepPCR-1)

        # writing monthly output to netcdf files
        # -cummulative
        if self.outMonthTotNC[0] != "None":
            for var in self.outMonthTotNC:
                # introduce variables at the beginning of simulation or
                # reset variables at the beginning of the month
                if currTimeStep.timeStepPCR == 1 or \
                   currTimeStep.day == 1:
                    vars(self)[var+'MonthTot'] = pcr.scalar(0.0)
                # accumulating
                vars(self)[var+'MonthTot'] += vars(self)[var]
                # reporting at the end of the month:
                if currTimeStep.endMonth == True:
                    self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
                                               str(var)+"_monthTot.nc",\
                                               var,\
                                               pcr2numpy(self.__getattribute__(var+'MonthTot'),\
                                               vos.MV),timeStamp,currTimeStep.monthIdx-1)
        # -average
        if self.outMonthAvgNC[0] != "None":
            for var in self.outMonthAvgNC:
                # only if a accumulator variable has not been defined
                # (i.e. the variable is not already handled by outMonthTotNC):
                if var not in self.outMonthTotNC:
                    # introduce accumulator at the beginning of simulation or
                    # reset accumulator at the beginning of the month
                    if currTimeStep.timeStepPCR == 1 or \
                       currTimeStep.day == 1:
                        vars(self)[var+'MonthTot'] = pcr.scalar(0.0)
                    # accumulating
                    vars(self)[var+'MonthTot'] += vars(self)[var]
                # calculating average & reporting at the end of the month:
                if currTimeStep.endMonth == True:
                    vars(self)[var+'MonthAvg'] = vars(self)[var+'MonthTot']/\
                                                 currTimeStep.day
                    self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
                                               str(var)+"_monthAvg.nc",\
                                               var,\
                                               pcr2numpy(self.__getattribute__(var+'MonthAvg'),\
                                               vos.MV),timeStamp,currTimeStep.monthIdx-1)
        # -last day of the month
        if self.outMonthEndNC[0] != "None":
            for var in self.outMonthEndNC:
                # reporting at the end of the month:
                if currTimeStep.endMonth == True:
                    self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
                                               str(var)+"_monthEnd.nc",\
                                               var,\
                                               pcr2numpy(self.__getattribute__(var),vos.MV),\
                                               timeStamp,currTimeStep.monthIdx-1)

        # writing yearly output to netcdf files
        # -cummulative
        if self.outAnnuaTotNC[0] != "None":
            for var in self.outAnnuaTotNC:
                # introduce variables at the beginning of simulation or
                # reset variables at the beginning of the year
                if currTimeStep.timeStepPCR == 1 or \
                   currTimeStep.doy == 1:
                    vars(self)[var+'AnnuaTot'] = pcr.scalar(0.0)
                # accumulating
                vars(self)[var+'AnnuaTot'] += vars(self)[var]
                # reporting at the end of the year:
                if currTimeStep.endYear == True:
                    self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
                                               str(var)+"_annuaTot.nc",\
                                               var,\
                                               pcr2numpy(self.__getattribute__(var+'AnnuaTot'),\
                                               vos.MV),timeStamp,currTimeStep.annuaIdx-1)
        # -average
        if self.outAnnuaAvgNC[0] != "None":
            for var in self.outAnnuaAvgNC:
                # only if a accumulator variable has not been defined
                # (i.e. the variable is not already handled by outAnnuaTotNC):
                if var not in self.outAnnuaTotNC:
                    # introduce accumulator at the beginning of simulation or
                    # reset accumulator at the beginning of the year
                    if currTimeStep.timeStepPCR == 1 or \
                       currTimeStep.doy == 1:
                        vars(self)[var+'AnnuaTot'] = pcr.scalar(0.0)
                    # accumulating
                    vars(self)[var+'AnnuaTot'] += vars(self)[var]
                # calculating average & reporting at the end of the year:
                if currTimeStep.endYear == True:
                    vars(self)[var+'AnnuaAvg'] = vars(self)[var+'AnnuaTot']/\
                                                 currTimeStep.doy
                    self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
                                               str(var)+"_annuaAvg.nc",\
                                               var,\
                                               pcr2numpy(self.__getattribute__(var+'AnnuaAvg'),\
                                               vos.MV),timeStamp,currTimeStep.annuaIdx-1)
        # -last day of the year
        if self.outAnnuaEndNC[0] != "None":
            for var in self.outAnnuaEndNC:
                # reporting at the end of the year:
                if currTimeStep.endYear == True:
                    self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
                                               str(var)+"_annuaEnd.nc",\
                                               var,\
                                               pcr2numpy(self.__getattribute__(var),vos.MV),\
                                               timeStamp,currTimeStep.annuaIdx-1)
| UU-Hydro/PCR-GLOBWB_model | modflow/scripts/routing.py | Python | gpl-3.0 | 107,603 | [
"NetCDF"
] | c64413b678551cc73fe5c7d86bdf638ace4fe62cf80609cc084c46c3f102ce97 |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
from pyscf import symm
from pyscf import lib
from pyscf.lib import logger
from pyscf.mcscf import mc1step
from pyscf.mcscf import newton_casscf
from pyscf.mcscf import casci_symm
from pyscf import fci
class CASSCF(newton_casscf.CASSCF):
    # Second-order (Newton) CASSCF specialized for molecules with point group
    # symmetry: orbitals are labeled by irreducible representation, and
    # symmetry-forbidden orbital rotations are masked out of the optimization.
    __doc__ = newton_casscf.CASSCF.__doc__

    def __init__(self, mf_or_mol, ncas, nelecas, ncore=None, frozen=None):
        newton_casscf.CASSCF.__init__(self, mf_or_mol, ncas, nelecas, ncore, frozen)
        # symmetry must be enabled on the molecule for this solver
        assert(self.mol.symmetry)
        # FCI solver chosen for the symmetric molecule; see pyscf.fci.solver
        # for the meaning of the two boolean arguments
        self.fcisolver = fci.solver(self.mol, False, True)
        self.fcisolver.max_cycle = 25
        #self.fcisolver.max_space = 25

    def kernel(self, mo_coeff=None, ci0=None, callback=None, _kern=None):
        """Run the CASSCF optimization.

        Orbitals are labeled with their irreps (casci_symm.label_symmetry_)
        before the optimization so that symmetry-forbidden rotation elements
        can be removed in uniq_var_indices.
        Returns (e_tot, e_cas, ci, mo_coeff, mo_energy).
        """
        if mo_coeff is None:
            mo_coeff = self.mo_coeff
        if callback is None: callback = self.callback
        if _kern is None: _kern = newton_casscf.kernel
        if self.verbose >= logger.WARN:
            self.check_sanity()
        self.dump_flags()
        log = logger.Logger(self.stdout, self.verbose)

        # attach irrep labels to the MO coefficients (also stored on self)
        mo_coeff = self.mo_coeff = casci_symm.label_symmetry_(self, mo_coeff)

        # (kept for reference) disabled wavefunction-symmetry guessing:
        # if (getattr(self.fcisolver, 'wfnsym', None) and
        #     self.fcisolver.wfnsym is None and
        #     getattr(self.fcisolver, 'guess_wfnsym', None)):
        #     wfnsym = self.fcisolver.guess_wfnsym(self.ncas, self.nelecas, ci0,
        #                                          verbose=log)
        #     wfnsym = symm.irrep_id2name(self.mol.groupname, wfnsym)
        #     log.info('Active space CI wfn symmetry = %s', wfnsym)

        self.converged, self.e_tot, self.e_cas, self.ci, \
                self.mo_coeff, self.mo_energy = \
                _kern(self, mo_coeff,
                      tol=self.conv_tol, conv_tol_grad=self.conv_tol_grad,
                      ci0=ci0, callback=callback, verbose=self.verbose)
        log.note('CASSCF energy = %.15g', self.e_tot)
        self._finalize()
        return self.e_tot, self.e_cas, self.ci, self.mo_coeff, self.mo_energy

    def uniq_var_indices(self, nmo, ncore, ncas, frozen):
        mask = mc1step.CASSCF.uniq_var_indices(self, nmo, ncore, ncas, frozen)
        # Call _symmetrize function to remove the symmetry forbidden matrix elements
        # (by setting their mask value to 0 in _symmetrize). Then pack_uniq_var and
        # unpack_uniq_var function only operates on those symmetry allowed matrix
        # elements.
        # self.mo_coeff.orbsym is initialized in kernel function
        return _symmetrize(mask, self.mo_coeff.orbsym, self.mol.groupname)

    def _eig(self, mat, b0, b1, orbsym=None):
        # symmetry-adapted diagonalization for the orbital sub-block [b0:b1]
        # self.mo_coeff.orbsym is initialized in kernel function
        if orbsym is None:
            orbsym = self.mo_coeff.orbsym[b0:b1]
        return casci_symm.eig(mat, orbsym)

    def rotate_mo(self, mo, u, log=None):
        '''Rotate orbitals with the given unitary matrix'''
        mo = newton_casscf.CASSCF.rotate_mo(self, mo, u, log)
        # keep the irrep labels attached to the rotated orbitals
        mo = lib.tag_array(mo, orbsym=self.mo_coeff.orbsym)
        return mo
def _symmetrize(mat, orbsym, groupname):
mat1 = numpy.zeros_like(mat)
orbsym = numpy.asarray(orbsym)
allowed = orbsym.reshape(-1,1) == orbsym
mat1[allowed] = mat[allowed]
return mat1
if __name__ == '__main__':
    # Self-test: symmetry-adapted CASSCF on water (cc-pVDZ), comparing against
    # reference energies printed at the end of each run.
    from pyscf import gto
    from pyscf import scf
    import pyscf.fci
    from pyscf.mcscf import addons
    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None
    mol.atom = [
        ['O', ( 0., 0.    , 0.   )],
        ['H', ( 0., -0.757, 0.587)],
        ['H', ( 0., 0.757 , 0.587)],]
    mol.basis = {'H': 'cc-pvdz',
                 'O': 'cc-pvdz',}
    mol.symmetry = 1
    mol.build()
    m = scf.RHF(mol)
    ehf = m.scf()
    # CAS(4e, 6o) singlet; pick the active orbitals by hand via sort_mo.
    mc = CASSCF(m, 6, 4)
    mc.fcisolver = pyscf.fci.solver(mol)
    mc.verbose = 4
    mo = addons.sort_mo(mc, m.mo_coeff, (3,4,6,7,8,9), 1)
    emc = mc.kernel(mo)[0]
    print(ehf, emc, emc-ehf)
    #-76.0267656731 -76.0873922924 -0.0606266193028
    print(emc - -76.0873923174, emc - -76.0926176464)
    # Triplet-like case: (3 alpha, 1 beta) electrons in the active space.
    mc = CASSCF(m, 6, (3,1))
    #mc.fcisolver = pyscf.fci.direct_spin1
    mc.fcisolver = pyscf.fci.solver(mol, False)
    mc.verbose = 4
    emc = mc.kernel(mo)[0]
    print(emc - -75.7155632535814)
    # Same open-shell case but with the CI wavefunction symmetry fixed to B1.
    mc = CASSCF(m, 6, (3,1))
    mc.fcisolver.wfnsym = 'B1'
    mc.verbose = 4
    emc = mc.kernel(mo)[0]
    print(emc - -75.6406597705231)
| gkc1000/pyscf | pyscf/mcscf/newton_casscf_symm.py | Python | apache-2.0 | 5,008 | [
"PySCF"
] | 2cb2f2758bb18238ead8c52aa1e501510dd299a61a14a21fa2e0786828f6e052 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
##
## S E R I A L B O X
##
## This file is distributed under terms of BSD license.
## See LICENSE.txt for more information.
##
##===------------------------------------------------------------------------------------------===##
##
## This file contains the Python Interface to the ArchiveFactory.
##
##===------------------------------------------------------------------------------------------===##
from ctypes import POINTER, c_char_p
from .common import get_library, to_c_string
from .error import invoke
from .metainfomap import ArrayOfStringImpl
lib = get_library()
def register_library(library):
    """Declare ctypes signatures for the archive-related C-API symbols.

    :param library: the loaded Serialbox shared library (ctypes CDLL)
    """
    library.serialboxArchiveGetRegisteredArchives.argtypes = None
    library.serialboxArchiveGetRegisteredArchives.restype = POINTER(ArrayOfStringImpl)
    library.serialboxArchiveGetArchiveFromExtension.argtypes = [c_char_p]
    library.serialboxArchiveGetArchiveFromExtension.restype = c_char_p
class Archive(object):
    """Expose information about the archives registered in Serialbox."""

    @staticmethod
    def registered_archives():
        """Get a list of strings of the registered archives.

        :return: Registered archives
        :rtype: :class:`list` [:class:`str`]
        """
        raw = invoke(lib.serialboxArchiveGetRegisteredArchives)
        names = [raw.contents.data[idx].decode()
                 for idx in range(raw.contents.len)]
        invoke(lib.serialboxArrayOfStringDestroy, raw)
        return names

    @staticmethod
    def archive_from_extension(filename):
        """ Deduce the name of the `archive` according to the extension of the `filename`.

        Only the registered archives are taken into account!

        =========== ========
        Extensions  Archives
        =========== ========
        .dat, .bin  Binary
        .nc         NetCDF
        =========== ========

        :param filename: Name of the file
        :type filename: str
        :return: Name of the registered archive matching the file extension
        :rtype: str
        :raises serialbox.SerialboxError: if extensions is invalid or no registered archive
                                          supports it.
        """
        c_filename = to_c_string(filename)[0]
        archive_name = invoke(lib.serialboxArchiveGetArchiveFromExtension, c_filename)
        return archive_name.decode()
register_library(lib)
| thfabian/serialbox2 | src/serialbox-python/serialbox/archive.py | Python | bsd-2-clause | 2,526 | [
"NetCDF"
] | 89af5db04ac6611985be0d0e3473b0a0380d6a82a79f3be7a00d95dc91db7bfd |
import Tools.HTML
if __name__ == "__main__":
    # When run as a script, put the package root (the parent of this file's
    # directory) on sys.path so the absolute imports below resolve.
    import sys,os
    selfname = sys.argv[0]
    full_path = os.path.abspath(selfname)[:]
    last_slash = full_path.rfind('/')
    dirpath = full_path[:last_slash] + '/..'
    print("Append to PYTHONPATH: %s" % (dirpath))
    sys.path.append(dirpath)
import logging
from Tools.IO import is_readable
from Tools.file2 import file2
from Interface.Gaussian import Gaussian
from Interface.ChkGaussian import ChkGaussian
from Top import Top
log = logging.getLogger(__name__)
class NBOinteractions(Top):
    """
    Shows orbital interactions. Two files are needed, Gaussian .log (with NBO output) and .chk (with NBO isosurfaces)
    """
    def __init__(self):
        # Paths to the Gaussian .log (L) and checkpoint .chk (C) files.
        self.fileL = ''
        self.fileC = ''
        # Parsers for the two files.
        self.L = Gaussian()
        self.C = ChkGaussian()
    def LookUpChkInLog(self):
        """Try to read the %chk file name out of the Gaussian log header.

        Returns the checkpoint path (relative to the log file's directory),
        or '' if no %chk line is found.
        """
        FL = file2(self.fileL)
        if not FL.skip_until(['^ #','(?i)%chk'],regexp=True):
            log.debug('Checkpoint file name not found in %s' % (self.fileL))
            return ''
        log.debug('Checkpoint file name found in %s' % (self.fileL))
        # Prepend the log file's directory to the name found after '%chk='.
        slash = self.fileL.rfind('/') + 1
        path = self.fileL[:slash]
        return path + FL.s.strip().split('=')[1]
    def LookUpByBasename(self):
        """Fallback guess: same basename as the log file with a .chk suffix."""
        lastDot = self.fileL.rfind('.')
        return self.fileL[:lastDot] + '.chk'
    def parse(self):
        """Locate and parse both input files.

        Expects self.file to be a mapping with keys 'l' (log, required) and
        optionally 'c' (chk) and 't' (threshold, used later in webdata).
        Returns silently if a required file is missing or unreadable.
        """
        #self.file = self.file[0]
        if not 'l' in self.file:
            log.error('NBOinteraction requires Gaussian .log file as input')
            return
        self.fileL = self.file['l']
        if not is_readable(self.fileL):
            return
        self.L.file = self.fileL
        if 'c' in self.file:
            self.fileC = self.file['c']
        else:
            # No explicit chk file: first look inside the log, then guess
            # from the log file's basename.
            log.debug('Gaussian checkpoint file name is not provided in the input; trying to guess...')
            self.fileC = self.LookUpChkInLog()
            if self.fileC:
                log.debug('Checkpoint file found: %s' % (self.fileC))
            else:
                self.fileC = self.LookUpByBasename()
        if not is_readable(self.fileC):
            return
        self.C.file = self.fileC
        self.L.parse()
        self.C.parse()
    def postprocess(self):
        """Run both parsers' postprocessing and grab the last NBO analysis found."""
        self.L.postprocess()
        self.C.postprocess()
        self.nbo = None
        for Lstep in self.L.steps:
            for g in Lstep.geoms:
                if hasattr(g,'nbo_analysis'):
                    self.nbo = g.nbo_analysis
    def webdata(self):
        """
        Returns 2 strings with HTML code
        """
        # Show weird bond angles topology
        we = self.settings.Engine3D()
        Lb1, Lb2 = self.L.webdata()
        Cb1, Cb2 = self.C.webdata()
        b1 = Lb1
        b2 = Tools.HTML.tag('Gaussian Log File', 'strong') + Tools.HTML.brn + Lb2
        b2 += Tools.HTML.tag('Gaussian Chk File', 'strong') + Tools.HTML.brn + Cb2
        #---
        def set_webData(nbo_result):
            # Render one NBO interaction set: list each donor->acceptor pair
            # above the E(2) threshold with a button showing both isosurfaces.
            s = ''
            any_shown = False
            script_off = 'isosurface off; '
            if 't' in self.file:
                threshold = float(self.file['t'])
            else:
                threshold = float(self.settings.inbo_threshold)
            # NOTE(review): cubes_done is consulted but never filled, so each
            # orbital cube appears to be regenerated on every hit — confirm.
            cubes_done = {}
            for sopta in nbo_result.sopta:
                if float(sopta.e2) > threshold:
                    any_shown = True
                    s += '%s -> %s: %s kcal/mol' % (sopta.donor.sorb, sopta.acceptor.sorb, sopta.e2)
                    i_donor = str(sopta.donor.chk_index)
                    if i_donor in cubes_done:
                        c_donor = cubes_done[i_donor]
                    else:
                        c_donor = self.C.fchk.makeCube('MO='+i_donor, name='mo'+i_donor)
                    i_acceptor = str(sopta.acceptor.chk_index)
                    if i_acceptor in cubes_done:
                        c_acceptor = cubes_done[i_acceptor]
                    else:
                        c_acceptor = self.C.fchk.makeCube('MO='+i_acceptor, name='mo'+i_acceptor,colors='phase green yellow')
                    script = "%s ; %s ; %s" % (script_off, c_donor.s_script, c_acceptor.s_script)
                    #script += we.jmol_isosurface(webpath = c_donor.wpcube, surftype='MO',name='mo'+i_donor)
                    #script += "; "
                    #script += we.jmol_isosurface(webpath = c_acceptor.wpcube, surftype='MO',name='mo'+i_acceptor,colors='phase green yellow')
                    s += we.html_button(action=script, label='Show')
                    s += Tools.HTML.brn
            if any_shown:
                s += we.html_button(action=script_off, label='Off')
            return s
        #---
        if self.nbo.options:
            b2 += Tools.HTML.tag('Options:', 'strong') + Tools.HTML.brn
            b2 += self.nbo.options + Tools.HTML.brn
        if self.nbo.comments:
            b2 += self.nbo.comments + Tools.HTML.brn
        if self.nbo.OpenShell:
            # Open-shell: alpha and beta interaction sets are listed separately.
            b2 += Tools.HTML.tag('Alpha spin NBO interactions', 'strong') + Tools.HTML.brn
            b2 += set_webData(self.nbo.setA)
            b2 += Tools.HTML.tag('Beta spin NBO interactions', 'strong') + Tools.HTML.brn
            b2 += set_webData(self.nbo.setB)
        else:
            b2 += Tools.HTML.tag('NBO interactions', 'strong') + Tools.HTML.brn
            b2 += set_webData(self.nbo.setAB)
        log.debug('webdata generated successfully')
        return b1, b2
#
#
#
#
#
if __name__ == "__main__":
    # Manual test entry point: parse the files given on the command line with
    # debug logging enabled.  NOTE(review): parse() indexes self.file with
    # string keys, while sys.argv[1:] is a list — verify the expected input
    # format against the project's dispatcher.
    DebugLevel = logging.DEBUG
    logging.basicConfig(level=DebugLevel)
    from Settings import Settings
    Top.settings = Settings(from_config_file= True)
    f = NBOinteractions()
    f.file = sys.argv[1:]
    f.parse()
| talipovm/terse | terse/Interface/NBOinteractions.py | Python | mit | 5,715 | [
"Gaussian"
] | 04bd62685fd10965063c4ca9794b3c0515ccd93cd77c73369a6c4b33e0c727dc |
from __future__ import division
import numpy as np
from functools import partial
from builtins import zip
from pybasicbayes.distributions import DiagonalRegression, Gaussian, Regression
import pyhsmm
from pyhsmm.util.general import list_split
from pyslds.states import HMMSLDSStatesPython, HMMSLDSStatesEigen, HSMMSLDSStatesPython, HSMMSLDSStatesEigen
from pyslds.states import HMMCountSLDSStatesPython, HMMCountSLDSStatesEigen, HSMMCountSLDSStatesPython, \
HSMMCountSLDSStatesEigen
from pyslds.util import gaussian_map_estimation, regression_map_estimation, gaussian_logprior, regression_logprior
class _SLDSMixin(object):
    """Shared plumbing for switching linear dynamical system (SLDS) models:
    holds per-state dynamics/emission distributions and generic helpers."""
    def __init__(self,dynamics_distns,emission_distns,init_dynamics_distns,**kwargs):
        self.init_dynamics_distns = init_dynamics_distns
        self.dynamics_distns = dynamics_distns
        # Allow for a single, shared emission distribution
        if not isinstance(emission_distns, list):
            self._single_emission = True
            self._emission_distn = emission_distns
            # Replicate the shared distribution so per-state code paths work.
            self.emission_distns = [emission_distns] * len(self.dynamics_distns)
        else:
            assert isinstance(emission_distns, list) and \
                   len(emission_distns) == len(dynamics_distns)
            self._single_emission = False
            self.emission_distns = emission_distns
        super(_SLDSMixin,self).__init__(
            obs_distns=self.dynamics_distns,**kwargs)
    def generate(self, T=100, keep=True, with_noise=True, initial_condition=None, stateseq=None, **kwargs):
        """Sample a length-T trajectory; returns (data, gaussian_states, stateseq)."""
        s = self._states_class(model=self, T=T, initialize_from_prior=True, **kwargs)
        s.generate_states(with_noise=with_noise, initial_condition=initial_condition, stateseq=stateseq)
        data = self._generate_obs(s)
        if keep:
            self.states_list.append(s)
        return data + (s.stateseq,)
    def _generate_obs(self,s):
        # Only generate observations if the states object has none yet.
        if s.data is None:
            s.data = s.generate_obs()
        else:
            # TODO: Handle missing data
            raise NotImplementedError
        return s.data, s.gaussian_states
    def smooth(self, data, inputs=None, mask=None):
        """Smooth the given data without permanently adding it to the model."""
        self.add_data(data, inputs=inputs, mask=mask)
        s = self.states_list.pop()
        return s.smooth()
    @property
    def diagonal_noise(self):
        # True when every emission distribution has diagonal observation noise.
        return all([isinstance(ed, DiagonalRegression) for ed in self.emission_distns])
    @property
    def has_missing_data(self):
        return any([s.mask is not None for s in self.states_list])
    def heldout_log_likelihood(self, test_masks=None):
        """Sum held-out log likelihoods over all states objects (one mask each)."""
        test_masks = [None] * len(self.states_list) if test_masks is None else test_masks
        assert len(test_masks) == len(self.states_list)
        hll = 0
        for mask, states in zip(test_masks, self.states_list):
            hll += states.heldout_log_likelihood(test_mask=mask)
        return hll
class _SLDSGibbsMixin(_SLDSMixin):
    """Gibbs-sampling updates for SLDS parameters: resample the LDS pieces
    (initial/dynamics/emission distributions), then the HMM pieces."""
    def resample_parameters(self):
        self.resample_lds_parameters()
        self.resample_hmm_parameters()
    def resample_lds_parameters(self):
        self.resample_init_dynamics_distns()
        self.resample_dynamics_distns()
        self.resample_emission_distns()
    def resample_hmm_parameters(self):
        # Delegate transition/initial-state resampling to the pyhsmm base class.
        super(_SLDSGibbsMixin,self).resample_parameters()
    def resample_init_dynamics_distns(self):
        # Each state's initial distribution sees only the first Gaussian state
        # of sequences that start in that discrete state.
        for state, d in enumerate(self.init_dynamics_distns):
            d.resample(
                [s.gaussian_states[0] for s in self.states_list
                 if s.stateseq[0] == state])
        self._clear_caches()
    def resample_dynamics_distns(self):
        # Regress x_{t+1} on (x_t, u_t), restricted to steps in each state.
        zs = [s.stateseq[:-1] for s in self.states_list]
        xs = [np.hstack((s.gaussian_states[:-1], s.inputs[:-1]))
              for s in self.states_list]
        ys = [s.gaussian_states[1:] for s in self.states_list]
        for state, d in enumerate(self.dynamics_distns):
            d.resample(
                [(x[z == state], y[z == state])
                 for x, y, z in zip(xs, ys, zs)])
        self._clear_caches()
    def resample_emission_distns(self):
        """Resample emissions; either one shared distribution over all data
        or one per discrete state, optionally with missing-data masks."""
        if self._single_emission:
            data = [(np.hstack((s.gaussian_states, s.inputs)), s.data)
                    for s in self.states_list]
            mask = [s.mask for s in self.states_list] if self.has_missing_data else None
            if self.has_missing_data:
                self._emission_distn.resample(data=data, mask=mask)
            else:
                self._emission_distn.resample(data=data)
        else:
            for state, d in enumerate(self.emission_distns):
                data = [(np.hstack((s.gaussian_states[s.stateseq == state],
                                    s.inputs[s.stateseq == state])),
                         s.data[s.stateseq == state])
                        for s in self.states_list]
                mask = [s.mask[s.stateseq == state] for s in self.states_list] \
                    if self.has_missing_data else None
                if self.has_missing_data:
                    d.resample(data=data, mask=mask)
                else:
                    d.resample(data=data)
        self._clear_caches()
    def resample_obs_distns(self):
        pass  # handled in resample_parameters
    ### joblib parallel
    def _joblib_resample_states(self,states_list,num_procs):
        """Resample state sequences in parallel worker processes via joblib."""
        from joblib import Parallel, delayed
        import pyslds.parallel as parallel
        if len(states_list) > 0:
            joblib_args = list(map(self._get_joblib_pair, states_list))
            # Workers read the model and their argument chunk from module globals.
            parallel.model = self
            parallel.args = list_split(joblib_args, num_procs)
            idxs = range(len(parallel.args))
            raw_stateseqs = Parallel(n_jobs=num_procs,backend='multiprocessing')\
                    (list(map(delayed(parallel._get_sampled_stateseq), idxs)))
            flatten = lambda lst: [x for y in lst for x in y]
            raw_stateseqs = flatten(raw_stateseqs)
            # since list_split might reorder things, do the same to states_list
            states_list = flatten(list_split(states_list, num_procs))
            for s, tup in zip(states_list, raw_stateseqs):
                s.stateseq, s.gaussian_states, s._normalizer = tup
class _SLDSVBEMMixin(_SLDSMixin):
    """Variational Bayes EM for SLDS: E step updates the state posteriors,
    M step computes MAP estimates of the HMM and LDS parameters."""
    def _vb_E_step(self):
        # update the variational approximation for the states
        for state in self.states_list:
            state.vb_E_step()
    def _vb_M_step(self):
        # Update the HMM parameters
        self._M_step_init_state_distn()
        self._M_step_trans_distn()
        # Update the LDS parameters
        self._M_step_init_dynamics_distn()
        self._M_step_dynamics_distn()
        self._M_step_emission_distn()
    def _M_step_init_dynamics_distn(self):
        # Weight each sequence's initial-state sufficient statistics by the
        # posterior probability of starting in the given discrete state.
        sum_tuples = lambda lst: list(map(sum, zip(*lst)))
        E_init_stats = lambda i, s: \
            tuple(s.expected_states[0, i] * stat for stat in s.E_init_stats)
        for state, d in enumerate(self.init_dynamics_distns):
            gaussian_map_estimation(sum_tuples(E_init_stats(state, s) for s in self.states_list), d)
    def _M_step_dynamics_distn(self):
        # Contract expected discrete-state weights into the per-step
        # dynamics statistics before MAP estimation.
        contract = partial(np.tensordot, axes=1)
        sum_tuples = lambda lst: list(map(sum, zip(*lst)))
        E_dyn_stats = lambda i, s: \
            tuple(contract(s.expected_states[:-1, i], stat) for stat in s.E_dynamics_stats)
        for state, d in enumerate(self.dynamics_distns):
            regression_map_estimation(sum_tuples(E_dyn_stats(state, s) for s in self.states_list), d)
    def _M_step_emission_distn(self):
        contract = partial(np.tensordot, axes=1)
        sum_tuples = lambda lst: list(map(sum, zip(*lst)))
        if self._single_emission:
            # Shared emission: statistics are summed over all time steps.
            E_emi_stats = lambda s: \
                tuple(np.sum(stat, axis=0) for stat in s.E_emission_stats)
            stats = sum_tuples(E_emi_stats(s) for s in self.states_list)
            regression_map_estimation(stats, self._emission_distn)
        else:
            E_emi_stats = lambda i, s: \
                tuple(contract(s.expected_states[:, i], stat) for stat in s.E_emission_stats)
            for state, d in enumerate(self.emission_distns):
                regression_map_estimation(sum_tuples(E_emi_stats(state, s) for s in self.states_list), d)
    def VBEM_step(self, n_iter=1):
        """Run n_iter full (E, M) iterations."""
        for _ in range(n_iter):
            self._vb_E_step()
            self._vb_M_step()
    def VBEM_ELBO(self):
        """Evidence lower bound of the current variational approximation."""
        # log p(theta)
        # todo: include transition distribution and init state distribution!
        elbo = np.sum([gaussian_logprior(id) for id in self.init_dynamics_distns])
        elbo += np.sum([regression_logprior(dd) for dd in self.dynamics_distns])
        if self._single_emission:
            elbo += regression_logprior(self.emission_distns[0])
        else:
            elbo += np.sum([regression_logprior(ed) for ed in self.emission_distns])
        # E_q [log p(z, x, y, theta)]
        elbo += sum(s.vb_elbo() for s in self.states_list)
        return elbo
class _SLDSMeanFieldMixin(_SLDSMixin):
    """Mean-field variational inference for SLDS parameters: each factor is
    updated from expected sufficient statistics of the state posteriors."""
    def meanfield_update_parameters(self):
        self.meanfield_update_init_dynamics_distns()
        self.meanfield_update_dynamics_distns()
        self.meanfield_update_emission_distns()
        super(_SLDSMeanFieldMixin, self).meanfield_update_parameters()
    def meanfield_update_init_dynamics_distns(self):
        # Weight initial-state statistics by the posterior state responsibilities.
        sum_tuples = lambda lst: list(map(sum, zip(*lst)))
        E_stats = lambda i, s: \
            tuple(s.expected_states[0,i] * stat for stat in s.E_init_stats)
        for state, d in enumerate(self.init_dynamics_distns):
            d.meanfieldupdate(
                stats=sum_tuples(E_stats(state, s) for s in self.states_list))
    def meanfield_update_dynamics_distns(self):
        contract = partial(np.tensordot, axes=1)
        sum_tuples = lambda lst: list(map(sum, zip(*lst)))
        E_stats = lambda i, s: \
            tuple(contract(s.expected_states[1:,i], stat) for stat in s.E_dynamics_stats)
        for state, d in enumerate(self.dynamics_distns):
            d.meanfieldupdate(
                stats=sum_tuples(E_stats(state, s) for s in self.states_list))
    def meanfield_update_emission_distns(self):
        sum_tuples = lambda lst: list(map(sum, zip(*lst)))
        if self._single_emission:
            # Shared emission: sum statistics over all time steps and sequences.
            E_stats = lambda s: \
                tuple(np.sum(stat, axis=0) for stat in s.E_emission_stats)
            self._emission_distn.meanfieldupdate(
                stats=sum_tuples(E_stats(s) for s in self.states_list))
        else:
            contract = partial(np.tensordot, axes=1)
            E_stats = lambda i, s: \
                tuple(contract(s.expected_states[:, i], stat) for stat in s.E_emission_stats)
            for state, d in enumerate(self.emission_distns):
                d.meanfieldupdate(
                    stats=sum_tuples(E_stats(state, s) for s in self.states_list))
    def meanfield_update_obs_distns(self):
        pass # handled in meanfield_update_parameters
    ### init
    def _init_mf_from_gibbs(self):
        """Seed the mean-field factors from the current Gibbs sample."""
        # Now also update the emission and dynamics params
        for ed in self.emission_distns:
            if hasattr(ed, "_initialize_mean_field"):
                ed._initialize_mean_field()
        for dd in self.dynamics_distns:
            if hasattr(dd, "_initialize_mean_field"):
                dd._initialize_mean_field()
        for s in self.states_list:
            s._init_mf_from_gibbs()
    ### vlb
    def vlb(self, states_last_updated=False):
        """Variational lower bound: states terms plus every parameter factor."""
        vlb = 0.
        vlb += sum(s.get_vlb(states_last_updated) for s in self.states_list)
        vlb += self.trans_distn.get_vlb()
        vlb += self.init_state_distn.get_vlb()
        vlb += sum(d.get_vlb() for d in self.init_dynamics_distns)
        vlb += sum(d.get_vlb() for d in self.dynamics_distns)
        if self._single_emission:
            vlb += self._emission_distn.get_vlb()
        else:
            vlb += sum(d.get_vlb() for d in self.emission_distns)
        return vlb
# Concrete SLDS model classes: each combines the inference mixins above with a
# pyhsmm discrete-state model and picks the matching states implementation.
class HMMSLDSPython(_SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin, pyhsmm.models.HMMPython):
    _states_class = HMMSLDSStatesPython
class HMMSLDS(_SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin, pyhsmm.models.HMM):
    _states_class = HMMSLDSStatesEigen
class HSMMSLDSPython(_SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin, pyhsmm.models.HSMMPython):
    _states_class = HSMMSLDSStatesPython
class HSMMSLDS(_SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin, pyhsmm.models.HSMM):
    _states_class = HSMMSLDSStatesEigen
class WeakLimitHDPHMMSLDS(_SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin,
                          pyhsmm.models.WeakLimitHDPHMM):
    _states_class = HMMSLDSStatesEigen
class WeakLimitStickyHDPHMMSLDS(
        _SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin,
        pyhsmm.models.WeakLimitStickyHDPHMM):
    _states_class = HMMSLDSStatesEigen
class WeakLimitHDPHSMMSLDS(_SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin,
                           pyhsmm.models.WeakLimitHDPHSMM):
    _states_class = HSMMSLDSStatesEigen
## Default constructors
def _default_model(model_class, K, D_obs, D_latent, D_input=0,
                   mu_inits=None, sigma_inits=None,
                   As=None, Bs=None, sigma_statess=None,
                   Cs=None, Ds=None, sigma_obss=None,
                   alpha=3.0, init_state_distn='uniform',
                   **kwargs):
    """Build an SLDS of the given class with default priors.

    K is the number of discrete states, D_obs/D_latent/D_input the
    observation, latent-state and input dimensions.  Optional lists (one
    entry per state) override the default initial means/covariances
    (mu_inits/sigma_inits), dynamics (As, Bs, sigma_statess) and emissions
    (Cs, Ds, sigma_obss); passing a single (non-list) Cs yields one shared
    emission distribution.  Remaining kwargs go to the model constructor.
    """
    # Initialize init_dynamics_distns
    init_dynamics_distns = \
        [Gaussian(nu_0=D_latent+3,
                  sigma_0=3.*np.eye(D_latent),
                  mu_0=np.zeros(D_latent),
                  kappa_0=0.01)
         for _ in range(K)]
    if mu_inits is not None:
        assert isinstance(mu_inits, list) and len(mu_inits) == K
        for id, mu in zip(init_dynamics_distns, mu_inits):
            id.mu = mu
    if sigma_inits is not None:
        assert isinstance(sigma_inits, list) and len(sigma_inits) == K
        for id, sigma in zip(init_dynamics_distns, sigma_inits):
            id.sigma = sigma
    # Initialize dynamics distributions
    dynamics_distns = [Regression(
        nu_0=D_latent + 1,
        S_0=D_latent * np.eye(D_latent),
        M_0=np.hstack((.99 * np.eye(D_latent), np.zeros((D_latent, D_input)))),
        K_0=D_latent * np.eye(D_latent + D_input))
        for _ in range(K)]
    if As is not None:
        assert isinstance(As, list) and len(As) == K
        if D_input > 0:
            # With inputs, stack the input matrix B next to the dynamics A.
            assert isinstance(Bs, list) and len(Bs) == K
            As = [np.hstack((A, B)) for A,B in zip(As, Bs)]
    else:
        # As = [random_rotation(D_latent) for _ in range(K)]
        As = [np.eye(D_latent) for _ in range(K)]
        if D_input > 0:
            As = [np.hstack((A, np.zeros((D_latent, D_input))))
                  for A in As]
    for dd, A in zip(dynamics_distns, As):
        dd.A = A
    if sigma_statess is not None:
        assert isinstance(sigma_statess, list) and len(sigma_statess) == K
    else:
        sigma_statess = [np.eye(D_latent) for _ in range(K)]
    for dd, sigma in zip(dynamics_distns, sigma_statess):
        dd.sigma = sigma
    # Initialize emission distributions
    # A single (non-list) Cs means one emission distribution shared by all states.
    _single_emission = (Cs is not None) and (not isinstance(Cs, list))
    if _single_emission:
        if D_input > 0:
            assert Ds is not None and not isinstance(Ds, list)
            Cs = np.hstack((Cs, Ds))
        if sigma_obss is None:
            sigma_obss = np.eye(D_obs)
        emission_distns = Regression(
            nu_0=D_obs + 3,
            S_0=D_obs * np.eye(D_obs),
            M_0=np.zeros((D_obs, D_latent + D_input)),
            K_0=D_obs * np.eye(D_latent + D_input),
            A=Cs, sigma=sigma_obss)
    else:
        emission_distns = [Regression(
            nu_0=D_obs + 1,
            S_0=D_obs * np.eye(D_obs),
            M_0=np.zeros((D_obs, D_latent + D_input)),
            K_0=D_obs * np.eye(D_latent + D_input))
            for _ in range(K)]
        if Cs is not None and sigma_obss is not None:
            assert isinstance(Cs, list) and len(Cs) == K
            assert isinstance(sigma_obss, list) and len(sigma_obss) == K
            if D_input > 0:
                assert isinstance(Ds, list) and len(Ds) == K
                Cs = [np.hstack((C, D)) for C,D in zip(Cs, Ds)]
        else:
            Cs = [np.zeros((D_obs, D_latent + D_input)) for _ in range(K)]
            sigma_obss = [0.05 * np.eye(D_obs) for _ in range(K)]
        for ed, C, sigma in zip(emission_distns, Cs, sigma_obss):
            ed.A = C
            ed.sigma = sigma
    model = model_class(
        init_dynamics_distns=init_dynamics_distns,
        dynamics_distns=dynamics_distns,
        emission_distns=emission_distns,
        init_state_distn=init_state_distn,
        alpha=alpha,
        **kwargs)
    return model
def DefaultSLDS(K, D_obs, D_latent, D_input=0,
                mu_inits=None, sigma_inits=None,
                As=None, Bs=None, sigma_statess=None,
                Cs=None, Ds=None, sigma_obss=None,
                alpha=3.,
                **kwargs):
    """Construct an HMMSLDS with default priors.

    Thin convenience wrapper around _default_model; see that function for
    the meaning of the parameters.
    """
    options = dict(kwargs,
                   D_input=D_input,
                   mu_inits=mu_inits, sigma_inits=sigma_inits,
                   As=As, Bs=Bs, sigma_statess=sigma_statess,
                   Cs=Cs, Ds=Ds, sigma_obss=sigma_obss,
                   alpha=alpha)
    return _default_model(HMMSLDS, K, D_obs, D_latent, **options)
def DefaultWeakLimitHDPSLDS(K, D_obs, D_latent, D_input=0,
                            mu_inits=None, sigma_inits=None,
                            As=None, Bs=None, sigma_statess=None,
                            Cs=None, Ds=None, sigma_obss=None,
                            alpha=3., gamma=3.,
                            **kwargs):
    """Construct a WeakLimitHDPHMMSLDS with default priors; see _default_model."""
    return _default_model(WeakLimitHDPHMMSLDS, K, D_obs, D_latent, D_input=D_input,
                          mu_inits=mu_inits, sigma_inits=sigma_inits,
                          As=As, Bs=Bs, sigma_statess=sigma_statess,
                          Cs=Cs, Ds=Ds, sigma_obss=sigma_obss,
                          alpha=alpha, gamma=gamma,
                          **kwargs)
def DefaultWeakLimitStickyHDPSLDS(K, D_obs, D_latent, D_input=0,
                                  mu_inits=None, sigma_inits=None,
                                  As=None, Bs=None, sigma_statess=None,
                                  Cs=None, Ds=None, sigma_obss=None,
                                  alpha=3., gamma=3., kappa=10.,
                                  **kwargs):
    """Construct a sticky-HDP SLDS (kappa is the self-transition bias); see
    _default_model for the remaining parameters."""
    return _default_model(WeakLimitStickyHDPHMMSLDS, K, D_obs, D_latent, D_input=D_input,
                          mu_inits=mu_inits, sigma_inits=sigma_inits,
                          As=As, Bs=Bs, sigma_statess=sigma_statess,
                          Cs=Cs, Ds=Ds, sigma_obss=sigma_obss,
                          kappa=kappa, alpha=alpha, gamma=gamma,
                          **kwargs)
class _CountSLDSMixin(_SLDSGibbsMixin):
    """Gibbs mixin for count-observation SLDS models: emission resampling
    additionally passes the states' omega auxiliary variables."""
    def resample_emission_distns(self):
        if self._single_emission:
            data = [(np.hstack((s.gaussian_states, s.inputs)), s.data)
                    for s in self.states_list]
            mask = [s.mask for s in self.states_list] if self.has_missing_data else None
            omega = [s.omega for s in self.states_list]
            self._emission_distn.resample(data=data, mask=mask, omega=omega)
        else:
            # Per-state emissions: restrict data, mask, and omega to the
            # time steps assigned to each discrete state.
            for state, d in enumerate(self.emission_distns):
                data = [(np.hstack((s.gaussian_states[s.stateseq == state],
                                    s.inputs[s.stateseq == state])),
                         s.data[s.stateseq == state])
                        for s in self.states_list]
                mask = [s.mask[s.stateseq == state] for s in self.states_list] \
                    if self.has_missing_data else None
                omega = [s.omega[s.stateseq == state] for s in self.states_list]
                d.resample(data=data, mask=mask, omega=omega)
        self._clear_caches()
# Concrete count-observation SLDS models, mirroring the Gaussian variants above.
class HMMCountSLDSPython(_CountSLDSMixin, pyhsmm.models.HMMPython):
    _states_class = HMMCountSLDSStatesPython
class HMMCountSLDS(_CountSLDSMixin, pyhsmm.models.HMM):
    _states_class = HMMCountSLDSStatesEigen
class HSMMCountSLDSPython(_CountSLDSMixin, pyhsmm.models.HSMMPython):
    _states_class = HSMMCountSLDSStatesPython
class HSMMCountSLDS(_CountSLDSMixin, pyhsmm.models.HSMM):
    _states_class = HSMMCountSLDSStatesEigen
class WeakLimitHDPHMMCountSLDS(_CountSLDSMixin, pyhsmm.models.WeakLimitHDPHMM):
    _states_class = HMMCountSLDSStatesEigen
class WeakLimitStickyHDPHMMCountSLDS(
        _CountSLDSMixin, pyhsmm.models.WeakLimitStickyHDPHMM):
    _states_class = HMMCountSLDSStatesEigen
class WeakLimitHDPHSMMCountSLDS(
        _CountSLDSMixin, pyhsmm.models.WeakLimitHDPHSMM):
    _states_class = HSMMCountSLDSStatesEigen
| mattjj/pyhsmm-slds | pyslds/models.py | Python | mit | 20,741 | [
"Gaussian"
] | 1b8bca3aef19960c61ac5989bba2d08a0b154470ca7ee2886daae277df1d7179 |
#!/usr/bin/env python
from numpy import random,array,sin,round#,sqrt
#from pylab import plot, show, legend, fill_between
parameter_list=[[20,100,6,10,0.05,1, 1], [10,30,7,9,0.5,0.5, 2]]
def regression_gaussian_process_modular (n=100,n_test=100, \
        x_range=6,x_range_test=10,noise_var=0.5,width=1, seed=1):
    """GP regression demo on a noisy 1-D sine wave using shogun.

    Returns (alpha, diagonal, variance, mean, cholesky) from the trained GP,
    or None if shogun was built without Eigen3 support.
    """
    from modshogun import RealFeatures, RegressionLabels, GaussianKernel, Math
    try:
        from modshogun import GaussianLikelihood, ZeroMean, \
            ExactInferenceMethod, GaussianProcessRegression
    except ImportError:
        print("Eigen3 needed for Gaussian Processes")
        return
    # reproducible results
    random.seed(seed)
    Math.init_random(17)
    # easy regression data: one dimensional noisy sine wave
    X=random.rand(1,n)*x_range
    X_test=array([[float(i)/n_test*x_range_test for i in range(n_test)]])
    Y_test=sin(X_test)
    Y=sin(X)+random.randn(n)*noise_var
    # shogun representation
    labels=RegressionLabels(Y[0])
    feats_train=RealFeatures(X)
    feats_test=RealFeatures(X_test)
    # GP specification
    # shogun parametrizes the Gaussian kernel as 2*width^2.
    shogun_width=width*width*2
    kernel=GaussianKernel(10, shogun_width)
    zmean = ZeroMean()
    lik = GaussianLikelihood()
    lik.set_sigma(noise_var)
    inf = ExactInferenceMethod(kernel, feats_train, zmean, labels, lik)
    # train GP
    gp = GaussianProcessRegression(inf)
    gp.train()
    # some things we can do
    alpha = inf.get_alpha()
    diagonal = inf.get_diagonal_vector()
    cholesky = inf.get_cholesky()
    # get mean and variance vectors
    mean = gp.get_mean_vector(feats_test)
    variance = gp.get_variance_vector(feats_test)
    # plot results
    #plot(X[0],Y[0],'x') # training observations
    #plot(X_test[0],Y_test[0],'-') # ground truth of test
    #plot(X_test[0],mean, '-') # mean predictions of test
    #fill_between(X_test[0],mean-1.96*sqrt(variance),mean+1.96*sqrt(variance),color='grey') # 95% confidence interval
    #legend(["training", "ground truth", "mean predictions"])
    #show()
    # Round mean/variance so results are stable across platforms.
    return alpha, diagonal, round(variance,12), round(mean,12), cholesky
if __name__=='__main__':
    # Run the demo with the first parameter set when executed as a script.
    print('Gaussian Process Regression')
    regression_gaussian_process_modular(*parameter_list[0])
| elkingtonmcb/shogun | examples/undocumented/python_modular/regression_gaussian_process_modular.py | Python | gpl-3.0 | 2,062 | [
"Gaussian"
] | 2cad315605b6e4709615d5fd79b2032bf5e629120141154728c7ea85893a8ac0 |
# encoding: utf-8
"""
Enable pygtk to be used interacive by setting PyOS_InputHook.
Authors: Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import gtk, gobject
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def _main_quit(*args, **kwargs):
    """io_add_watch callback: stop the gtk main loop.

    Returning False removes the watch so it fires only once.
    """
    gtk.main_quit()
    return False
def create_inputhook_gtk(stdin_file):
    """Build a PyOS_InputHook callback bound to *stdin_file*.

    The returned zero-argument callable runs the gtk main loop until data
    arrives on *stdin_file*, then returns 0 as the input-hook protocol
    expects.
    """
    def _gtk_input_hook():
        # Quit the gtk loop as soon as stdin becomes readable so the
        # interactive prompt regains control.
        gobject.io_add_watch(stdin_file, gobject.IO_IN, _main_quit)
        gtk.main()
        return 0
    return _gtk_input_hook
| dannyperry571/theapprentice | script.module.pydevd/lib/pydev_ipython/inputhookgtk.py | Python | gpl-2.0 | 1,086 | [
"Brian"
] | 8956f645d6819f3d8da4528a2664e160b6015caa57aeb2e44c577a20814f5bc3 |
# Static in-memory product catalog, keyed by URL slug.
# Each entry carries the display name, category, and price (whole currency units).
PRODUCTS = {
    'iphone': {'name': 'iPhone 5S', 'category': 'Phones', 'price': 699},
    'galaxy': {'name': 'Samsung Galaxy 5', 'category': 'Phones', 'price': 649},
    'ipad-air': {'name': 'iPad Air', 'category': 'Tablets', 'price': 649},
    'ipad-mini': {'name': 'iPad Mini', 'category': 'Tablets', 'price': 549},
}
| enixdark/im-r-e-d-i-s | flask-cook/my_app/product/models.py | Python | mit | 350 | [
"Galaxy"
] | 528f5cf2665aea1c9112fca992fa48d18416fd733dd5d59b9d7ae336f3064071 |
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" ESPResSo 8Ball billard game.
"""
from __future__ import print_function
import numpy as np
import math
from threading import Thread
import espressomd
from espressomd import thermostat
from espressomd import analyze
from espressomd import integrate
from espressomd import electrostatics
from espressomd import minimize_energy
import espressomd.interactions
import espressomd.visualization_opengl
import espressomd.shapes
required_features = ["LENNARD_JONES", "MASS", "EXTERNAL_FORCES"]
espressomd.assert_features(required_features)
print(
    '8Ball BILLARD - An Espresso Visualizer Demo\nControls:\nNumpad 4/6: Adjust Angle\nNumpad 2/8: Adjust Impulse\nNumpad 5: Shoot')
#ESPRESSO
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
# One RNG seed per MPI node.
system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
# Billiard table footprint (x, z); y is the vertical direction.
table_dim = [2.24, 1.12]
system.box_l = [table_dim[0], 3, table_dim[1]]
# OpenGL live visualizer: external-force arrows show the aiming direction on
# the cue ball; per-type colors/materials distinguish cue ball, solids,
# stripes, and the 8-ball.
visualizer = espressomd.visualization_opengl.openGLLive(system,
                                                        ext_force_arrows=True,
                                                        ext_force_arrows_type_scale=[
                                                            0.02],
                                                        ext_force_arrows_type_radii=[
                                                            0.01],
                                                        background_color=[
                                                            0.5, 0.4, 0.5],
                                                        drag_enabled=False,
                                                        particle_type_materials=[
                                                            'medium', 'bright', 'bright', 'medium'],
                                                        particle_type_colors=[
                                                            [1, 1, 1], [0.5, 0.1, 0.1], [0.1, 0.2, 0.4], [0.2, 0.2, 0.2]],
                                                        constraint_type_materials=[
                                                            'dark'],
                                                        constraint_type_colors=[
                                                            [0.1, 0.424, 0.011], [0.1, 0.1, 0.1]],
                                                        camera_position=[
                                                            1.12, 2.8, 0.56],
                                                        window_size=[
                                                            1000, 600],
                                                        draw_axis=False,
                                                        light_pos=[
                                                            table_dim[
                                                                0] * 0.5, 1.0, table_dim[
                                                                1] * 0.5],
                                                        light_colors=[
                                                            [0.8, 0.8, 0.8], [0.9, 0.9, 0.9], [1.0, 1.0, 1.0]],
                                                        light_brightness=1.0)
# Shared game state mutated by the keyboard callbacks below:
# stopped -- True while the cue ball is waiting to be shot.
stopped = True
# angle -- aiming direction in the x-z plane (radians).
angle = np.pi * 0.5
# impulse -- magnitude of the velocity kick applied by fire().
impulse = 10.0
def decreaseAngle():
    """Numpad-4 handler: adjust the aiming angle while the ball is at rest.

    The updated aim is shown via the cue ball's external-force arrow.
    """
    global angle, impulse
    if stopped:
        angle += 0.01
        system.part[0].ext_force = impulse * \
            np.array([math.sin(angle), 0, math.cos(angle)])
def increaseAngle():
    """Rotate the aim one step the other way (Numpad 6); only while resting."""
    global angle, impulse
    if not stopped:
        return
    angle -= 0.01
    system.part[0].ext_force = impulse * np.array(
        [math.sin(angle), 0, math.cos(angle)])
def decreaseImpulse():
    """Reduce the shot strength (Numpad 2); only while balls rest."""
    global impulse, angle
    if not stopped:
        return
    impulse -= 0.5
    system.part[0].ext_force = impulse * np.array(
        [math.sin(angle), 0, math.cos(angle)])
def increaseImpulse():
    """Increase the shot strength (Numpad 8); only while balls rest."""
    global impulse, angle
    if not stopped:
        return
    impulse += 0.5
    system.part[0].ext_force = impulse * np.array(
        [math.sin(angle), 0, math.cos(angle)])
def fire():
    """Shoot the cue ball with the current angle and impulse (Numpad 5)."""
    global stopped
    if not stopped:
        return
    stopped = False
    kick = impulse * np.array([math.sin(angle), 0, math.cos(angle)])
    system.part[0].v = system.part[0].v + kick
    # Release the cue ball in the table plane and drop the aiming force.
    system.part[0].fix = [0, 1, 0]
    system.part[0].ext_force = [0, 0, 0]
# Numpad bindings: hold 4/6 to steer, hold 2/8 for impulse, press 5 to shoot.
for _key, _mode, _callback in (
        ('4', espressomd.visualization_opengl.KeyboardFireEvent.Hold,
         decreaseAngle),
        ('6', espressomd.visualization_opengl.KeyboardFireEvent.Hold,
         increaseAngle),
        ('2', espressomd.visualization_opengl.KeyboardFireEvent.Hold,
         decreaseImpulse),
        ('8', espressomd.visualization_opengl.KeyboardFireEvent.Hold,
         increaseImpulse),
        ('5', espressomd.visualization_opengl.KeyboardFireEvent.Pressed,
         fire)):
    visualizer.keyboardManager.register_button(
        espressomd.visualization_opengl.KeyboardButtonEvent(
            _key, _mode, _callback))
def main():
    """Build the table and balls, then run the integration/game loop.

    Runs on a background thread and communicates with the keyboard
    callbacks through the module-level ``stopped``/``angle``/``impulse``
    state.
    """
    global stopped
    system.time_step = 0.00008
    system.cell_system.skin = 0.4

    # Geometry (simulation length units)
    table_h = 0.5            # height of the table surface
    ball_diam = 0.0572
    hole_dist = 0.02         # hole center offset from the cushions
    hole_rad = 0.08
    hole_score_rad = 0.1     # distance at which a ball counts as pocketed
    hole_pos = [[hole_dist, table_h, hole_dist],
                [hole_dist, table_h, table_dim[1] - hole_dist],
                [table_dim[0] - hole_dist, table_h, hole_dist],
                [table_dim[0] - hole_dist, table_h, table_dim[1] - hole_dist],
                [table_dim[0] * 0.5, table_h, table_dim[1] - hole_dist],
                [table_dim[0] * 0.5, table_h, hole_dist]]
    types = {'cue_ball': 0, 'striped_ball': 1, 'solid_ball': 2,
             'black_ball': 3, 'table': 4, 'wall': 5, 'hole': 6}

    # Table surface and the four cushion walls
    system.constraints.add(
        shape=espressomd.shapes.Wall(dist=table_h, normal=[0.0, 1.0, 0.0]),
        particle_type=types['table'], penetrable=True)
    system.constraints.add(
        shape=espressomd.shapes.Wall(dist=0.01, normal=[1.0, 0.0, 0.0]),
        particle_type=types['wall'], penetrable=True)
    system.constraints.add(
        shape=espressomd.shapes.Wall(dist=-(table_dim[0] - 0.01),
                                     normal=[-1.0, 0.0, 0.0]),
        particle_type=types['wall'], penetrable=True)
    system.constraints.add(
        shape=espressomd.shapes.Wall(dist=0.01, normal=[0.0, 0.0, 1.0]),
        particle_type=types['wall'], penetrable=True)
    system.constraints.add(
        shape=espressomd.shapes.Wall(dist=-(table_dim[1] - 0.01),
                                     normal=[0.0, 0.0, -1.0]),
        particle_type=types['wall'], penetrable=True)
    # One cylinder per hole, sunk into the table
    for h in hole_pos:
        system.constraints.add(
            shape=espressomd.shapes.Cylinder(
                center=(np.array(h) -
                        np.array([0, table_h * 0.5, 0])).tolist(),
                axis=[0, 1, 0], radius=hole_rad, length=1.02 * table_h,
                direction=1),
            particle_type=types['hole'], penetrable=True)

    # Lennard-Jones parameters (all ball types are identical)
    lj_eps = np.array([1])
    lj_sig = np.array([ball_diam])
    lj_cut = lj_sig * 2.0**(1.0 / 6.0)
    mass = np.array([0.17])

    def mix_eps(eps1, eps2, rule='LB'):
        # Lorentz-Berthelot mixing: geometric mean of the epsilons
        return math.sqrt(eps1 * eps2)

    def mix_sig(sig1, sig2, rule='LB'):
        # Lorentz-Berthelot mixing: arithmetic mean of the sigmas
        return 0.5 * (sig1 + sig2)

    for t1 in range(4):
        for t2 in range(6):
            system.non_bonded_inter[t1, t2].lennard_jones.set_params(
                epsilon=mix_eps(lj_eps[0], lj_eps[0]),
                sigma=mix_sig(lj_sig[0], lj_sig[0]),
                cutoff=mix_sig(lj_cut[0], lj_cut[0]),
                shift="auto")

    ball_y = table_h + ball_diam * 1.5

    # Cue ball
    ball_start_pos = [table_dim[0] * 0.25, ball_y, table_dim[1] * 0.5]
    system.part.add(id=0, pos=ball_start_pos,
                    type=types['cue_ball'], mass=mass[0])
    spawnpos = [ball_start_pos]
    ball = system.part[0]

    # Rack the other 15 balls in a triangle spanned by a1/a2
    d = lj_sig[0] * 1.15
    a1 = np.array([d * math.sqrt(3) / 2.0, 0, -0.5 * d])
    a2 = np.array([d * math.sqrt(3) / 2.0, 0, 0.5 * d])
    sp = [system.box_l[0] * 0.7, ball_y,
          system.box_l[2] * 0.5 + lj_sig[0] * 0.5]
    pid = 1
    order = [
        types['solid_ball'],
        types['striped_ball'], types['solid_ball'],
        types['solid_ball'], types['black_ball'], types['striped_ball'],
        types['striped_ball'], types['solid_ball'], types['striped_ball'],
        types['solid_ball'],
        types['solid_ball'], types['striped_ball'], types['striped_ball'],
        types['solid_ball'], types['striped_ball']]
    for i in range(5):
        for j in range(i + 1):
            N = i + 1
            t = order[pid - 1]
            pos = sp + a1 * (N - j) + a2 * j
            system.part.add(
                id=pid, pos=pos, mass=mass[0], type=t, fix=[0, 1, 0])
            spawnpos.append(pos)
            pid += 1

    # Park the cue ball and show the aiming arrow
    ball.ext_force = impulse * np.array([math.sin(angle), 0, math.cos(angle)])
    ball.fix = [1, 1, 1]
    system.thermostat.set_langevin(kT=0, gamma=0.8)

    cleared_balls = [0, 0]
    while True:
        system.integrator.run(1)

        vsum = 0
        for p in system.part:
            vsum += np.linalg.norm(p.v)

            for h in hole_pos:
                d = ((p.pos_folded[0] - h[0])**2 +
                     (p.pos_folded[2] - h[2])**2)**0.5
                if d < hole_score_rad:
                    if p.id == 0:
                        # Cue ball pocketed: respawn it
                        p.pos = ball_start_pos
                        p.v = [0, 0, 0]
                    elif p.id == 5:
                        # Black ball pocketed: reset the whole game.  Use a
                        # separate loop variable so the outer loop's ``p``
                        # is not clobbered (the original reused ``p`` here).
                        for q in system.part:
                            q.pos = spawnpos[q.id]
                            q.v = [0, 0, 0]
                            q.fix = [0, 1, 0]
                        ball.fix = [1, 1, 1]
                        ball.ext_force = impulse * \
                            np.array([math.sin(angle), 0, math.cos(angle)])
                        # Bug fix: the original assigned ``stoppen = True``
                        # (typo), a dead local that never re-armed the cue
                        # controls after a game reset.
                        stopped = True
                    else:
                        # Colored ball pocketed: park it beside the table
                        t = p.type - 1
                        cleared_balls[t] += 1
                        if t == 0:
                            z = table_dim[1] - lj_sig[0] * 0.6
                        else:
                            z = lj_sig[0] * 0.6
                        p.pos = [cleared_balls[t] * lj_sig[0] * 1.5, 1.1, z]
                        p.fix = [1, 1, 1]
                        p.v = [0, 0, 0]

        # Once everything has (almost) stopped, hand control to the player
        if not stopped and vsum < 0.3:
            stopped = True
            ball.fix = [1, 1, 1]
            for p in system.part:
                p.v = [0, 0, 0]
            ball.ext_force = impulse * \
                np.array([math.sin(angle), 0, math.cos(angle)])

        visualizer.update()
# Run the simulation loop in a daemon thread; OpenGL owns the main thread.
main_thread = Thread(target=main)
main_thread.daemon = True
main_thread.start()
visualizer.start()
| hmenke/espresso | samples/billard.py | Python | gpl-3.0 | 11,587 | [
"ESPResSo"
] | 879db182472420f8778550ffdddd0067dc5d60f67b6ac044520137902e929ba3 |
import numpy, scipy, math, sys
import keparray
from math import modf, cos, sin, radians, exp
from scipy import ndimage, interpolate
from scipy.ndimage import interpolation
from scipy.ndimage.interpolation import shift, rotate
from scipy.interpolate import RectBivariateSpline, interp2d
from keparray import rebin2D
from numpy import square, nansum, shape, array, empty, zeros, absolute, size
from sys import stdout, exit
# -----------------------------------------------
# define functions
# Model factories: each returns a callable f(p, x) for least-squares fitting.
# Polynomial coefficients p are ordered from the constant term upward.
def poly0():
    # degree-0 polynomial; the "+ 0.0 * x" keeps the result shaped like x
    return lambda p, x: p[0] + 0.0 * x
def poly1():
    return lambda p, x: p[0] + p[1] * x
def poly2():
    return lambda p, x: p[0] + p[1] * x + p[2] * x * x
def poly3():
    return lambda p, x: p[0] + p[1] * x + p[2] * x**2 + p[3] * x**3
def poly4():
    return lambda p, x: p[0] + p[1] * x + p[2] * x**2 + p[3] * x**3 + p[4] * x**4
def poly5():
    return lambda p, x: p[0] + p[1] * x + p[2] * x**2 + p[3] * x**3 + p[4] * x**4 + p[5] * x**5
def poly6():
    return lambda p, x: p[0] + p[1] * x + p[2] * x**2 + p[3] * x**3 + p[4] * x**4 + p[5] * x**5 + p[6] * x**6
def poly7():
    return lambda p, x: p[0] + p[1] * x + p[2] * x**2 + p[3] * x**3 + p[4] * x**4 + p[5] * x**5 + p[6] * x**6 + p[7] * x**7
def poly8():
    return lambda p, x: p[0] + p[1] * x + p[2] * x**2 + p[3] * x**3 + p[4] * x**4 + p[5] * x**5 + p[6] * x**6 + p[7] * x**7 + p[8] * x**8
def poly9():
    return lambda p, x: p[0] + p[1] * x + p[2] * x**2 + p[3] * x**3 + p[4] * x**4 + p[5] * x**5 + p[6] * x**6 + p[7] * x**7 + p[8] * x**8 + p[9] * x**9
def poly10():
    return lambda p, x: p[0] + p[1] * x + p[2] * x**2 + p[3] * x**3 + p[4] * x**4 + p[5] * x**5 + p[6] * x**6 + p[7] * x**7 + p[8] * x**8 + p[9] * x**9 + p[10] * x**10
def poly1con():
    # unit-slope line with a free offset
    return lambda p, x: p[0] + x
def gauss():
    """Gaussian: p = [amplitude, center, sigma]."""
    # numpy.exp replaces the scipy.exp alias, which was just a re-export of
    # the NumPy ufunc and has been removed from modern SciPy
    return lambda p, x: p[0] * numpy.exp(-(x - p[1])**2 / (2.0 * p[2]**2))
def gauss0():
    """Zero-centered Gaussian: p = [amplitude, sigma]."""
    return lambda p, x: p[0] * numpy.exp(-x**2 / (2.0 * p[1]**2))
def congauss():
    """Constant plus Gaussian: p = [offset, amplitude, center, sigma]."""
    return lambda p, x: p[0] + p[1] * numpy.exp(-(x - p[2])**2 / (2.0 * p[3]**2))
def moffat0():
    """Zero-centered Moffat profile: p = [amplitude, core width, power]."""
    return lambda p, x: p[0] / (1.0 + (x / p[1])**2)**p[2]
def conmoffat():
    """Constant plus Moffat: p = [offset, amplitude, center, width, power]."""
    return lambda p, x: p[0] + p[1] / (1.0 + ((x - p[2]) / p[3])**2)**p[4]
def sine():
    """Sine model: p = [amplitude, period, phase]."""
    # bug fix: the original hard-coded 3.14129 -- a typo for pi (3.14159...)
    return lambda p, x: p[0] * numpy.sin(2.0 * numpy.pi * x / p[1] - p[2])
def powerlaw():
    # NOTE(review): despite the name this is a straight line p[0] + p[1]*x;
    # presumably it is applied to log-log data, where a power law is linear
    # -- confirm against callers before renaming or changing it
    return lambda p, x: p[0] + p[1] * x
# -----------------------------------------------
# smooth the data using a window with requested size
def smooth(x, window_len=10, window='hanning'):
    """Smooth a 1-d signal by convolving it with a scaled window.

    Reflected copies of the signal are prepended/appended so that
    transient artifacts at both ends are minimized.

    Parameters
    ----------
    x : 1-d numpy array
        The input signal.
    window_len : int
        Size of the smoothing window; values < 3 return ``x`` unchanged.
    window : str
        One of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman';
        'flat' yields a moving average.

    Returns
    -------
    The smoothed signal (same length as ``x``).

    Raises
    ------
    ValueError
        If ``x`` is not 1-d, shorter than ``window_len``, or ``window``
        is not a recognized name.
    """
    # bug fix: the Python-2-only `raise ValueError, "msg"` statements below
    # were converted to exception calls, valid on both Python 2 and 3
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # pad both ends with mirrored copies to suppress edge transients
    s = numpy.r_[2 * x[0] - x[window_len:1:-1], x,
                 2 * x[-1] - x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = numpy.ones(window_len, 'd')
    else:
        # look the window function up on the numpy module instead of eval()
        w = getattr(numpy, window)(window_len)
    y = numpy.convolve(w / w.sum(), s, mode='same')
    # strip the mirrored padding again
    return y[window_len - 1:-window_len + 1]
#-------------------------------
def pei(law, wave, ebmv, rv, a_i, lambda_i, b_i, n_i):
    """Redden a single wavelength using Pei Y.C., 1992 ApJ, 395, 130.

    ``law`` selects the row of the coefficient tables (1 = Milky Way,
    Rv = 3.08; 2 = LMC, Rv = 3.16; 3 = SMC, Rv = 2.93).  ``wave`` is in
    Angstroms; the return value is the linear extinction factor.
    """
    # extinction at B
    a_b = ebmv * (1. + rv)
    # convert Angstroms to microns
    wave = wave / 1e4
    # sum the six components of Pei's fitting function
    xi = 0.
    for comp in range(6):
        denom = (math.pow(wave / lambda_i[law, comp], n_i[law, comp]) +
                 math.pow(lambda_i[law, comp] / wave, n_i[law, comp]) +
                 b_i[law, comp])
        xi += a_i[law, comp] / denom
    # remove the a_b normalization on the extinction curve
    a_lambda = a_b * xi
    if wave < 0.08:
        a_lambda = 0.
    # linearize the extinction factor
    return 10. ** (-a_lambda / 2.512)
#-------------------------------
def pei_paramters():
    """Return coefficient tables from Pei Y.C., 1992 ApJ, 395, 130 (Table 4).

    Rows are indexed by extinction law (1 = Milky Way, Rv = 3.08; 2 = LMC,
    Rv = 3.16; 3 = SMC, Rv = 2.93); columns correspond to the BKG, FUV,
    2175 AA, 9.7 um, 18 um and FIR components.

    NOTE(review): only the Milky Way row (index 1) is filled in here; the
    LMC/SMC rows remain zero -- confirm whether that is intentional.
    """
    a_i = numpy.zeros([4, 6])
    lambda_i = numpy.zeros([4, 6])
    b_i = numpy.zeros([4, 6])
    n_i = numpy.zeros([4, 6])
    # Milky Way extinction law, columns: BKG, FUV, 2175 AA, 9.7 um, 18 um, FIR
    a_i[1] = [165., 14., 0.045, 0.002, 0.002, 0.012]
    lambda_i[1] = [0.047, 0.08, 0.22, 9.7, 18., 25.]
    b_i[1] = [90., 4., -1.95, -1.95, -1.8, 0.]
    n_i[1] = [2., 6.5, 2., 2., 2., 2.]
    return a_i, lambda_i, b_i, n_i
#------------------------------
# 1-d polynomial interpolation
def polyval(x, c, tensor=True):
    """Evaluate a polynomial at x via Horner's scheme.

    ``c`` holds the coefficients ordered from the constant term upward
    (same convention as numpy.polynomial.polynomial.polyval, with the
    argument order swapped).  When ``tensor`` is True and ``x`` is an
    ndarray, trailing axes are added to ``c`` so coefficient sets
    broadcast over ``x``.
    """
    # Always copy into an at-least-1-d array.  The original passed copy=0,
    # whose "copy only if needed" meaning was dropped in NumPy 2.0.
    c = numpy.array(c, ndmin=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # promote bool/integer coefficients to float
        c = c + 0.0
    if isinstance(x, (tuple, list)):
        # bug fix: the original called the un-imported name `asarray`,
        # raising NameError whenever x was a tuple or list
        x = numpy.asarray(x)
    if isinstance(x, numpy.ndarray) and tensor:
        c = c.reshape(c.shape + (1,) * x.ndim)
    # Horner evaluation from the highest-order coefficient down
    c0 = c[-1] + x * 0
    for i in range(2, len(c) + 1):
        c0 = c[-i] + c0 * x
    return c0
#------------------------------
# 2-d polynomial interpolation
def polyval2d(x, y, c):
    """Evaluate a 2-d polynomial with coefficient matrix c at (x, y).

    The first axis of ``c`` indexes powers of x, the second powers of y.
    """
    try:
        # the unconditional copy avoids the copy=0 keyword, whose semantics
        # changed in NumPy 2.0
        x, y = numpy.array((x, y))
    except Exception:
        # narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit
        raise ValueError('x, y are incompatible')
    c = polyval(x, c)
    c = polyval(y, c, tensor=False)
    return c
#------------------------------
# 2-d Gaussian interpolation
def PRFgauss2d(params, *args):
    """Squared residuals of an elliptical 2-d Gaussian (Vanderburg &
    Johnson 2014).

    params: [x center, y center, amplitude A, sigma x, sigma y,
    rotation amplitude B, background D].
    args: (posx, posy, flux) -- positions to evaluate at and the
    measured flux there.  Returns square(flux - model).
    """
    cen = [params[0], params[1]]
    A = params[2]
    sigma = [params[3], params[4]]
    B = params[5]
    D = params[6]
    posx = args[0]
    posy = args[1]
    flux = args[2]
    dx = posx - cen[0]
    dy = posy - cen[1]
    z = square(dx) / sigma[0]**2 + square(dy) / sigma[1]**2
    # numpy.exp replaces scipy.exp, a NumPy alias removed from modern SciPy
    g = A * numpy.exp(-z - B * dx * dy) + D
    return square(flux - g)
#------------------------------
# PRF interpolation function
def PRF2DET2(flux, OBJx, OBJy, DATx, DATy, splineInterpolation):
    """Accumulate a PRF model on the detector grid for a list of sources.

    flux/OBJx/OBJy describe the sources; DATx/DATy are the detector
    column/row coordinates; splineInterpolation maps (dy, dx) offsets to
    PRF values.  Returns the summed model image (len(DATy) x len(DATx)).
    """
    fit = zeros((size(DATy), size(DATx)))
    for src in range(len(flux)):
        # split the source position into integer pixel and sub-pixel parts,
        # snapping to the nearest pixel center
        frac_x, int_x = modf(OBJx[src])
        frac_y, int_y = modf(OBJy[src])
        if frac_x > 0.5:
            frac_x -= 1.0
            int_x += 1.0
        if frac_y > 0.5:
            frac_y -= 1.0
            int_y += 1.0
        frac_x = -frac_x
        frac_y = -frac_y
        # evaluate the PRF at every detector pixel relative to the source
        for (row, ypos) in enumerate(DATy):
            for (col, xpos) in enumerate(DATx):
                offset_y = ypos - int_y + frac_y
                offset_x = xpos - int_x + frac_x
                fit[row, col] += splineInterpolation(offset_y, offset_x) * flux[src]
    return fit
#------------------------------
# PRF interpolation function
def PRF2DET(flux, OBJx, OBJy, DATx, DATy, wx, wy, a, splineInterpolation):
    """Accumulate a PRF model on the detector grid with focus scaling
    (wx, wy) and a rotation of ``a`` degrees.

    Returns the summed model image (len(DATy) x len(DATx)).
    """
    # trigonometry for the rotation of the pixel offsets
    cosa = cos(radians(a))
    sina = sin(radians(a))
    PRFfit = zeros((size(DATy), size(DATx)))
    for i in range(len(flux)):
        # split the source position into integer pixel and sub-pixel parts,
        # snapping to the nearest pixel center
        FRCx, INTx = modf(OBJx[i])
        FRCy, INTy = modf(OBJy[i])
        if FRCx > 0.5:
            FRCx -= 1.0
            INTx += 1.0
        if FRCy > 0.5:
            FRCy -= 1.0
            INTy += 1.0
        FRCx = -FRCx
        FRCy = -FRCy
        # construct the model PRF in detector coordinates
        for (j, y) in enumerate(DATy):
            for (k, x) in enumerate(DATx):
                xx = x - INTx + FRCx
                yy = y - INTy + FRCy
                dx = xx * cosa - yy * sina
                dy = xx * sina + yy * cosa
                # bug fix: the original read
                #     PRFfit[j,k] += PRFfit[j,k] + spline(...) * flux[i]
                # which doubles the already-accumulated value whenever more
                # than one source contributes (compare PRF2DET2)
                PRFfit[j, k] += splineInterpolation(dy * wy, dx * wx) * flux[i]
    return PRFfit
#------------------------------
# PRF model
def PRF(params, *args):
    """Objective function for multi-source PRF fitting.

    ``params`` holds nsrc fluxes followed by nsrc x and nsrc y positions;
    ``args`` supplies (DATx, DATy, DATimg, DATerr, nsrc, spline, col, row).
    Returns the summed squared residual, or 1e300 when the first source
    drifts more than 10 pixels from the target pixel.
    """
    DATx = args[0]
    DATy = args[1]
    DATimg = args[2]
    DATerr = args[3]
    nsrc = args[4]
    splineInterpolation = args[5]
    col = args[6]
    row = args[7]
    # unpack the flat parameter vector into per-source arrays
    f = empty((nsrc))
    x = empty((nsrc))
    y = empty((nsrc))
    f[:] = params[0:nsrc]
    x[:] = params[nsrc:nsrc * 2]
    y[:] = params[nsrc * 2:nsrc * 3]
    # PRF model binned to the detector pixel size
    model = PRF2DET(f, x, y, DATx, DATy, 1.0, 1.0, 0.0, splineInterpolation)
    # unweighted least squares (the error-weighted variant was disabled
    # upstream and stays that way)
    score = nansum(square(DATimg - model))
    # keep the fit centered
    if max(abs(col - x[0]), abs(row - y[0])) > 10.0:
        score = 1.0e300
    return score
#------------------------------
# PRF model with variable background
def PRFwithBackground(params, *args):
    """Objective function: PRF model plus a polynomial background.

    ``params`` holds nsrc fluxes, nsrc x positions, nsrc y positions and
    then the background coefficients.  Returns the error-weighted sum of
    squared residuals, or 1e300 when the first source drifts more than
    5 pixels off target.
    """
    DATx = args[0]
    DATy = args[1]
    DATimg = args[2]
    DATerr = args[3]
    nsrc = args[4]
    bterms = args[5] + 1
    bx = args[6]
    by = args[7]
    splineInterpolation = args[8]
    col = args[9]
    row = args[10]
    # unpack the flat parameter vector into per-source arrays
    f = empty((nsrc))
    x = empty((nsrc))
    y = empty((nsrc))
    f[:] = params[0:nsrc]
    x[:] = params[nsrc:nsrc * 2]
    y[:] = params[nsrc * 2:nsrc * 3]
    b = array([params[nsrc * 3:nsrc * 3 + bterms],
               params[nsrc * 3 + bterms:nsrc * 3 + bterms * 2]])
    # PRF model binned to the detector pixel size
    model = PRF2DET(f, x, y, DATx, DATy, 1.0, 1.0, 0.0, splineInterpolation)
    # add a constant or 2-d polynomial background
    if bterms == 1:
        model += params[nsrc * 3]
    else:
        model += polyval2d(bx, by, b)
    chi = nansum(square(DATimg - model) / square(DATerr))
    # keep the fit centered
    if max(abs(col - x[0]), abs(row - y[0])) > 5.0:
        chi = 1.0e300
    return chi
#------------------------------
# PRF model with variable focus and background
def PRFwithFocusAndBackground(params, *args):
    """Objective function: PRF model with focus terms (wx, wy, rotation)
    and a constant or polynomial background.

    Returns the error-weighted squared residual; 1e30 when the model
    evaluation fails, 1e300 when the first source drifts more than 10
    pixels off target.
    """
    DATx = args[0]
    DATy = args[1]
    DATimg = args[2]
    DATerr = args[3]
    nsrc = args[4]
    bterms = args[5] + 1
    bx = args[6]
    by = args[7]
    splineInterpolation = args[8]
    col = args[9]
    row = args[10]
    # per-source fluxes and positions
    f = empty((nsrc))
    x = empty((nsrc))
    y = empty((nsrc))
    for i in range(nsrc):
        f[i] = params[i]
        x[i] = params[nsrc + i]
        y[i] = params[nsrc * 2 + i]
    # background: scalar or 2-d polynomial coefficient matrix
    if bterms == 1:
        b = params[nsrc * 3]
    else:
        b = array([params[nsrc * 3:nsrc * 3 + bterms],
                   params[nsrc * 3 + bterms:nsrc * 3 + bterms * 2]])
    # focus widths and rotation ride at the end of the parameter vector
    wx = params[-3]
    wy = params[-2]
    a = params[-1]
    try:
        PRFfit = PRF2DET(f, x, y, DATx, DATy, wx, wy, a, splineInterpolation)
        if bterms == 1:
            PRFfit = PRFfit + b
        else:
            PRFfit = PRFfit + polyval2d(bx, by, b)
        PRFres = nansum(square(DATimg - PRFfit) / square(DATerr))
    except Exception:
        # narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit
        PRFres = 1.0e30
    # keep the fit centered
    if max(abs(col - x[0]), abs(row - y[0])) > 10.0:
        PRFres = 1.0e300
    return PRFres
#------------------------------
# PRF model with variable focus
def PRFwithFocus(params, *args):
    """Objective function: PRF model with focus terms wx, wy (the rotation
    parameter is read but the rotation passed to PRF2DET is fixed at 0).

    Returns the error-weighted squared residual; 1e30 when the model
    evaluation fails, 1e300 when the first source drifts more than 10
    pixels off target.
    """
    DATx = args[0]
    DATy = args[1]
    DATimg = args[2]
    DATerr = args[3]
    nsrc = args[4]
    splineInterpolation = args[5]
    col = args[6]
    row = args[7]
    # per-source fluxes and positions
    f = empty((nsrc))
    x = empty((nsrc))
    y = empty((nsrc))
    for i in range(nsrc):
        f[i] = params[i]
        x[i] = params[nsrc + i]
        y[i] = params[nsrc * 2 + i]
    wx = params[-3]
    wy = params[-2]
    a = params[-1]  # read but unused: rotation is hard-coded to 0.0 below
    try:
        PRFfit = PRF2DET(f, x, y, DATx, DATy, wx, wy, 0.0, splineInterpolation)
        PRFres = nansum(square(DATimg - PRFfit) / square(DATerr))
    except Exception:
        # narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit
        PRFres = 1.0e30
    # keep the fit centered
    if max(abs(col - x[0]), abs(row - y[0])) > 10.0:
        PRFres = 1.0e300
    return PRFres
#-----------------------------------------------------
# the residual between pixel data and 2D Kepler PRF model
def kepler_prf_2d(params, *args):
    """Residual between pixel data and a single-source 2-d Kepler PRF model.

    ``params`` is (flux, y offset, x offset) in PRF pixel units; ``args``
    carries the data image, PRF image, PRF pixel sizes, crop geometry, the
    rebinning method and a verbosity flag.
    """
    (data, prf, prfDelY, prfDelX, prfDimY, prfDimX,
     prfY0, prfX0, interpolation, verbose) = args
    f, y, x = params
    # shift the PRF centroid to the trial position
    model = shift(prf, [y, x], order=1, mode='constant')
    # crop the PRF model to the data limits
    model = model[prfY0:prfY0 + prfDimY, prfX0:prfX0 + prfDimX]
    # rebin to the data image's sampling and normalize by PRF pixel area
    model = rebin2D(model, [shape(data)[0], shape(data)[1]],
                    interpolation, True, False)
    model = model / prfDelY / prfDelX
    residual = nansum(square(data - model * f))
    if verbose:
        txt = '\rPearson\'s chisq = %d for %d dof' % \
            (int(nansum(square(data - model * f) / absolute(data))),
             (shape(data)[0] * shape(data)[1] - len(params)))
        txt += ' ' * 5
        sys.stdout.write(txt)
        sys.stdout.flush()
    return residual
#-----------------------------------------------------------------
# the residual between pixel data and 2D Kepler multiple PRF model
def kepler_multi_prf_2d(params, *args):
    """Residual between pixel data and a multi-source 2-d Kepler PRF model.

    ``params`` holds nsrc fluxes, then nsrc y offsets, then nsrc x offsets
    (PRF pixel units); ``args`` carries the data image, PRF image, PRF
    pixel sizes, crop geometry, rebinning method and a verbosity flag.
    """
    data = args[0]
    prf = args[1]
    prfDelY = args[2]
    prfDelX = args[3]
    prfDimY = args[4]
    prfDimX = args[5]
    prfY0 = args[6]
    prfX0 = args[7]
    interpolation = args[8]
    verbose = args[9]
    # bug fix: floor division keeps nsrc an int on Python 3, where the
    # original `/ 3` yields a float and breaks range() and the indexing
    nsrc = len(params) // 3
    f = empty((nsrc))
    y = empty((nsrc))
    x = empty((nsrc))
    model = zeros((prfDimY + 1, prfDimX + 1))
    for i in range(nsrc):
        f[i] = params[i]
        y[i] = params[nsrc + i]
        x[i] = params[nsrc * 2 + i]
        # shift the PRF to the source position and add its contribution,
        # cropped to the data limits
        tmp = shift(prf, [y[i], x[i]], order=1, mode='constant')
        model = model + tmp[prfY0:prfY0 + prfDimY,
                            prfX0:prfX0 + prfDimX] * f[i]
    # rebin to the data image's sampling and normalize by PRF pixel area
    model = rebin2D(model, [shape(data)[0], shape(data)[1]],
                    interpolation, True, False)
    model = model / prfDelY / prfDelX
    residual = nansum(square(data - model))
    if verbose:
        txt = '\rPearson\'s chisq = %d for %d dof' % \
            (int(nansum(square(data - model) / absolute(data))),
             (shape(data)[0] * shape(data)[1] - len(params)))
        txt += ' ' * 5
        sys.stdout.write(txt)
        sys.stdout.flush()
    return residual
#---------------------------------------------------------------------------------
# the residual between pixel data and 2D Kepler multiple PRF model with background
def kepler_bkg_multi_prf_2d(params, *args):
    """Residual between pixel data and a multi-source Kepler PRF model
    plus a constant background.

    ``params`` holds nsrc fluxes, nsrc y offsets, nsrc x offsets and one
    trailing background level.
    """
    data = args[0]
    prf = args[1]
    prfDelY = args[2]
    prfDelX = args[3]
    prfDimY = args[4]
    prfDimX = args[5]
    prfY0 = args[6]
    prfX0 = args[7]
    interpolation = args[8]
    verbose = args[9]
    # bug fix: floor division keeps nsrc an int on Python 3 (the original
    # true division returned a float there, breaking range() and indexing)
    nsrc = (len(params) - 1) // 3
    f = empty((nsrc))
    y = empty((nsrc))
    x = empty((nsrc))
    b = params[nsrc * 3]
    model = zeros((prfDimY + 1, prfDimX + 1))
    for i in range(nsrc):
        f[i] = params[i]
        y[i] = params[nsrc + i]
        x[i] = params[nsrc * 2 + i]
        # shift the PRF to the source position and add its contribution,
        # cropped to the data limits
        tmp = shift(prf, [y[i], x[i]], order=1, mode='constant')
        model = model + tmp[prfY0:prfY0 + prfDimY,
                            prfX0:prfX0 + prfDimX] * f[i]
    # rebin to the data image's sampling, normalize, add the background
    model = rebin2D(model, [shape(data)[0], shape(data)[1]],
                    interpolation, True, False)
    model = model / prfDelY / prfDelX
    model = model + b
    residual = nansum(square(data - model))
    if verbose:
        txt = '\rPearson\'s chisq = %d for %d dof' % \
            (int(nansum(square(data - model) / data)),
             (shape(data)[0] * shape(data)[1] - len(params)))
        txt += ' ' * 5
        sys.stdout.write(txt)
        sys.stdout.flush()
    return residual
#-------------------------------------------------------------------------------------------
# the residual between pixel data and 2D Kepler multiple PRF model with background and focus
def kepler_focus_multi_prf_2d(params, *args):
    """Residual between pixel data and a multi-source Kepler PRF model
    with a constant background and a common focus scale w.

    ``params`` holds nsrc fluxes, nsrc y offsets, nsrc x offsets, one
    background level and one focus factor.
    """
    data = args[0]
    prf = args[1]
    prfDelY = args[2]
    prfDelX = args[3]
    datDimX = args[4]
    datDimY = args[5]
    interpolation = args[6]
    verbose = args[7]
    # bug fix: floor division keeps nsrc an int on Python 3
    nsrc = (len(params) - 2) // 3
    f = empty((nsrc))
    y = empty((nsrc))
    x = empty((nsrc))
    b = params[nsrc * 3]
    # clamp the focus factor to the allowed range
    w = params[nsrc * 3 + 1]
    if w > 1.5:
        w = 1.5
    elif w < 1.0:
        w = 1.0
    for i in range(nsrc):
        f[i] = params[i]
        y[i] = params[nsrc + i]
        x[i] = params[nsrc * 2 + i]
    # dimensions of the data image in (focus-scaled) PRF pixels; kept as
    # floats for the parity test, with int() truncation at the slice
    # boundaries to match the old-NumPy float-index behavior this code
    # originally relied on
    prfDimY = datDimY / prfDelY / w
    prfDimX = datDimX / prfDelX / w
    # location of the data image centered on the PRF image (PRF pixels)
    prfY0 = int((shape(prf)[0] - prfDimY) / 2)
    prfX0 = int((shape(prf)[1] - prfDimX) / 2)
    # pad even-sized crops by one PRF pixel
    DY = 0.0
    DX = 0.0
    if int(prfDimY) % 2 == 0:
        DY = 1.0
    if int(prfDimX) % 2 == 0:
        DX = 1.0
    # NOTE(review): a stray ungated debug statement (`print w, prfDimY,
    # prfDimX`, Python-2-only syntax) was removed here
    model = zeros((int(prfDimY + DY), int(prfDimX + DX)))
    for i in range(nsrc):
        # shift the PRF to the (focus-scaled) source position and add it
        tmp = shift(prf, [y[i] / w, x[i] / w], order=1, mode='constant')
        model = model + tmp[prfY0:prfY0 + int(prfDimY),
                            prfX0:prfX0 + int(prfDimX)] * f[i]
    # rebin to the data sampling, normalize by pixel area and focus scale
    model = rebin2D(model, [shape(data)[0], shape(data)[1]],
                    interpolation, True, False)
    model = model / prfDelY / prfDelX / w / w
    model = model + b
    residual = nansum(square(data - model))
    if verbose:
        txt = '\rPearson\'s chisq = %d for %d dof' % \
            (int(nansum(square(data - model) / data)),
             (shape(data)[0] * shape(data)[1] - len(params)))
        txt += ' ' * 5
        sys.stdout.write(txt)
        sys.stdout.flush()
    return residual
#--------------------------------------------------------------------------------
# the residual between pixel data and 2D Kepler PRF model integrated over x and y
def kepler_prf_1d(params, *args):
    """Residual between marginal (axis-summed) pixel data and the
    correspondingly marginalized Kepler PRF model.

    ``params`` is (flux y, flux x, y offset, x offset).
    """
    (data, prf, prfDelY, prfDelX, prfDimY, prfDimX,
     prfY0, prfX0, interpolation, verbose) = args
    fy, fx, y, x = params
    # collapse data and PRF onto each axis
    dataY = data.sum(axis=1)
    dataX = data.sum(axis=0)
    prfY = prf.sum(axis=1)
    prfX = prf.sum(axis=0)
    # shift the marginal PRFs to the trial centroid and crop to the data
    modelY = shift(prfY, [y], order=1, mode='constant')[prfY0:prfY0 + prfDimY]
    modelX = shift(prfX, [x], order=1, mode='constant')[prfX0:prfX0 + prfDimX]
    # rebin to the data sampling and normalize by the PRF pixel size
    modelY = rebin2D(modelY, [shape(data)[0]], interpolation, True, False)
    modelY = modelY / prfDelY
    modelX = rebin2D(modelX, [shape(data)[1]], interpolation, True, False)
    modelX = modelX / prfDelX
    # independent least squares on each axis
    residualY = nansum(square(dataY - modelY * fy))
    residualX = nansum(square(dataX - modelX * fx))
    return residualY + residualX
#--------------------------------------------------------------------------------
# convert BKJD to BJD
def BKJD2BJD(bkjd):
    """Convert Barycentric Kepler Julian Date to Barycentric Julian Date."""
    # BKJD is BJD offset by the Kepler epoch 2454833.0
    return 2454833.0 + bkjd
#--------------------------------------------------------------------------------
# convert BJD to BKJD
def BJD2BKJD(bjd):
    """Convert Barycentric Julian Date to Barycentric Kepler Julian Date."""
    # inverse of BKJD2BJD: subtract the Kepler epoch
    return bjd - 2454833.0
#------------------------------------------------------------------
# inverse normal cummulative function
def inv_normal_cummulative_function(p):
    """Inverse of the standard normal CDF (lower-tail quantile).

    Minimax rational approximation by Peter J. Acklam; the relative error
    is below 1.15e-9.  Given P, returns an approximation to the X with
    P = Pr{Z <= X} for a standard normal Z.
    """
    # coefficients of the rational approximations
    a = [-3.969683028665376e+01, 2.209460984245205e+02,
         -2.759285104469687e+02, 1.383577518672690e+02,
         -3.066479806614716e+01, 2.506628277459239e+00]
    b = [-5.447609879822406e+01, 1.615858368580409e+02,
         -1.556989798598866e+02, 6.680131188771972e+01,
         -1.328068155288572e+01]
    c = [-7.784894002430293e-03, -3.223964580411365e-01,
         -2.400758277161838e+00, -2.549732539343734e+00,
         4.374664141464968e+00, 2.938163982698783e+00]
    d = [7.784695709041462e-03, 3.224671290700398e-01,
         2.445134137142996e+00, 3.754408661907416e+00]
    # break-points between the three approximation regions
    plow = 0.02425
    phigh = 1.0 - plow
    if p < plow:
        # lower tail
        q = math.sqrt(-2.0 * math.log(p))
        num = (((((c[0] * q + c[1]) * q + c[2]) * q + c[3]) * q + c[4]) *
               q + c[5])
        den = (((d[0] * q + d[1]) * q + d[2]) * q + d[3]) * q + 1
        return num / den
    if p > phigh:
        # upper tail: mirror of the lower-tail formula
        q = math.sqrt(-2.0 * math.log(1.0 - p))
        num = (((((c[0] * q + c[1]) * q + c[2]) * q + c[3]) * q + c[4]) *
               q + c[5])
        den = (((d[0] * q + d[1]) * q + d[2]) * q + d[3]) * q + 1
        return -num / den
    # central region
    q = p - 0.5
    r = q * q
    num = (((((a[0] * r + a[1]) * r + a[2]) * r + a[3]) * r + a[4]) *
           r + a[5]) * q
    den = ((((b[0] * r + b[1]) * r + b[2]) * r + b[3]) * r + b[4]) * r + 1
    return num / den
| barentsen/dave | extractDetrend/K2photo/kepfunc.py | Python | mit | 25,086 | [
"Gaussian"
] | a25ddd303332e4fa8d681a432f16ecab3c904b13c0fe5cf7ba38dac8d3bd6e34 |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Mads Jensen <mje.mads@gmail.com>
#
# License: BSD (3-clause)
import copy
import os.path as op
from math import ceil
import warnings
import numpy as np
from scipy import linalg, sparse
from scipy.sparse import coo_matrix
from .filter import resample
from .evoked import _get_peak
from .parallel import parallel_func
from .surface import (read_surface, _get_ico_surface, read_morph_map,
_compute_nearest, mesh_edges)
from .source_space import (_ensure_src, _get_morph_src_reordering,
_ensure_src_subject, SourceSpaces)
from .utils import (get_subjects_dir, _check_subject, logger, verbose,
_time_mask, warn as warn_)
from .viz import plot_source_estimates
from .fixes import in1d, sparse_block_diag
from .io.base import ToDataFrameMixin, TimeMixin
from .externals.six import string_types
from .externals.six.moves import zip
from .externals.h5io import read_hdf5, write_hdf5
def _read_stc(filename):
""" Aux Function
"""
fid = open(filename, 'rb')
stc = dict()
fid.seek(0, 2) # go to end of file
file_length = fid.tell()
fid.seek(0, 0) # go to beginning of file
# read tmin in ms
stc['tmin'] = float(np.fromfile(fid, dtype=">f4", count=1))
stc['tmin'] /= 1000.0
# read sampling rate in ms
stc['tstep'] = float(np.fromfile(fid, dtype=">f4", count=1))
stc['tstep'] /= 1000.0
# read number of vertices/sources
vertices_n = int(np.fromfile(fid, dtype=">u4", count=1))
# read the source vector
stc['vertices'] = np.fromfile(fid, dtype=">u4", count=vertices_n)
# read the number of timepts
data_n = int(np.fromfile(fid, dtype=">u4", count=1))
if (vertices_n and # vertices_n can be 0 (empty stc)
((file_length / 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0):
raise ValueError('incorrect stc file size')
# read the data matrix
stc['data'] = np.fromfile(fid, dtype=">f4", count=vertices_n * data_n)
stc['data'] = stc['data'].reshape([data_n, vertices_n]).T
# close the file
fid.close()
return stc
def _write_stc(filename, tmin, tstep, vertices, data):
"""Write an STC file
Parameters
----------
filename : string
The name of the STC file.
tmin : float
The first time point of the data in seconds.
tstep : float
Time between frames in seconds.
vertices : array of integers
Vertex indices (0 based).
data : 2D array
The data matrix (nvert * ntime).
"""
fid = open(filename, 'wb')
# write start time in ms
fid.write(np.array(1000 * tmin, dtype='>f4').tostring())
# write sampling rate in ms
fid.write(np.array(1000 * tstep, dtype='>f4').tostring())
# write number of vertices
fid.write(np.array(vertices.shape[0], dtype='>u4').tostring())
# write the vertex indices
fid.write(np.array(vertices, dtype='>u4').tostring())
# write the number of timepts
fid.write(np.array(data.shape[1], dtype='>u4').tostring())
#
# write the data
#
fid.write(np.array(data.T, dtype='>f4').tostring())
# close the file
fid.close()
def _read_3(fid):
""" Read 3 byte integer from file
"""
data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)
out = np.left_shift(data[0], 16) + np.left_shift(data[1], 8) + data[2]
return out
def _read_w(filename):
    """Read a w file and return as dict

    w files contain activations or source reconstructions for a single
    time point.

    Parameters
    ----------
    filename : string
        The name of the w file.

    Returns
    -------
    data: dict
        The w structure, with keys 'vertices' (0-based indices) and
        'data' (the nvert-long data vector).
    """
    with open(filename, 'rb', buffering=0) as fid:  # buffering=0 for np bug
        # skip the first 2 bytes
        fid.read(2)
        # number of vertices/sources (3 byte integer)
        n_vertices = int(_read_3(fid))
        vertices = np.zeros((n_vertices), dtype=np.int32)
        data = np.zeros((n_vertices), dtype=np.float32)
        # interleaved vertex index / value pairs
        for k in range(n_vertices):
            vertices[k] = _read_3(fid)
            data[k] = np.fromfile(fid, dtype='>f4', count=1)[0]
    return dict(vertices=vertices, data=data)
def _write_3(fid, val):
""" Write 3 byte integer to file
"""
f_bytes = np.zeros((3), dtype=np.uint8)
f_bytes[0] = (val >> 16) & 255
f_bytes[1] = (val >> 8) & 255
f_bytes[2] = val & 255
fid.write(f_bytes.tostring())
def _write_w(filename, vertices, data):
    """Write a w file

    w files contain activations or source reconstructions for a single time
    point.

    Parameters
    ----------
    filename: string
        The name of the w file.
    vertices: array of int
        Vertex indices (0 based).
    data: 1D array
        The data array (nvert).
    """
    assert(len(vertices) == len(data))
    # context manager guarantees the file is closed even if a write fails;
    # tobytes() replaces the tostring() alias (deprecated, removed in
    # NumPy 2.0)
    with open(filename, 'wb') as fid:
        # write 2 zero bytes (header padding)
        fid.write(np.zeros((2), dtype=np.uint8).tobytes())
        # write number of vertices/sources (3 byte integer)
        vertices_n = len(vertices)
        _write_3(fid, vertices_n)
        # write the vertices and data
        for i in range(vertices_n):
            _write_3(fid, vertices[i])
            # XXX: without float() endianness is wrong, not sure why
            fid.write(np.array(float(data[i]), dtype='>f4').tobytes())
def read_source_estimate(fname, subject=None):
    """Read a source estimate object

    Parameters
    ----------
    fname : str
        Path to (a) source-estimate file(s).
    subject : str | None
        Name of the subject the source estimate(s) is (are) from.
        It is good practice to set this attribute to avoid combining
        incompatible labels and SourceEstimates (e.g., ones from other
        subjects). Note that due to file specification limitations, the
        subject name isn't saved to or loaded from files written to disk.

    Returns
    -------
    stc : SourceEstimate | VolSourceEstimate
        The source estimate object loaded from file.

    Notes
    -----
     - for volume source estimates, ``fname`` should provide the path to a
       single file named '*-vl.stc` or '*-vol.stc'
     - for surface source estimates, ``fname`` should either provide the
       path to the file corresponding to a single hemisphere ('*-lh.stc',
       '*-rh.stc') or only specify the asterisk part in these patterns. In any
       case, the function expects files for both hemisphere with names
       following this pattern.
     - for single time point .w files, ``fname`` should follow the same
       pattern as for surface estimates, except that files are named
       '*-lh.w' and '*-rh.w'.
    """
    fname_arg = fname
    # make sure corresponding file(s) can be found
    ftype = None
    if op.exists(fname):
        if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \
                fname.endswith('-vl.w') or fname.endswith('-vol.w'):
            ftype = 'volume'
        elif fname.endswith('.stc'):
            ftype = 'surface'
            if fname.endswith(('-lh.stc', '-rh.stc')):
                fname = fname[:-7]
            else:
                err = ("Invalid .stc filename: %r; needs to end with "
                       "hemisphere tag ('...-lh.stc' or '...-rh.stc')"
                       % fname)
                raise IOError(err)
        elif fname.endswith('.w'):
            ftype = 'w'
            if fname.endswith(('-lh.w', '-rh.w')):
                fname = fname[:-5]
            else:
                err = ("Invalid .w filename: %r; needs to end with "
                       "hemisphere tag ('...-lh.w' or '...-rh.w')"
                       % fname)
                raise IOError(err)
        elif fname.endswith('-stc.h5'):
            ftype = 'h5'
            fname = fname[:-7]
        else:
            raise RuntimeError('Unknown extension for file %s' % fname_arg)
    # bug fix: the original used "ftype is not 'volume'", comparing string
    # *identity*, which only worked due to CPython interning
    if ftype != 'volume':
        stc_exist = [op.exists(f)
                     for f in [fname + '-rh.stc', fname + '-lh.stc']]
        w_exist = [op.exists(f)
                   for f in [fname + '-rh.w', fname + '-lh.w']]
        h5_exist = op.exists(fname + '-stc.h5')
        if all(stc_exist) and (ftype != 'w'):
            ftype = 'surface'
        elif all(w_exist):
            ftype = 'w'
        elif h5_exist:
            ftype = 'h5'
        elif any(stc_exist) or any(w_exist):
            raise IOError("Hemisphere missing for %r" % fname_arg)
        else:
            raise IOError("SourceEstimate File(s) not found for: %r"
                          % fname_arg)
    # read the files
    if ftype == 'volume':  # volume source space
        if fname.endswith('.stc'):
            kwargs = _read_stc(fname)
        elif fname.endswith('.w'):
            kwargs = _read_w(fname)
            kwargs['data'] = kwargs['data'][:, np.newaxis]
            # a .w file holds a single time point
            kwargs['tmin'] = 0.0
            kwargs['tstep'] = 0.0
        else:
            raise IOError('Volume source estimate must end with .stc or .w')
    elif ftype == 'surface':  # stc file with surface source spaces
        lh = _read_stc(fname + '-lh.stc')
        rh = _read_stc(fname + '-rh.stc')
        assert lh['tmin'] == rh['tmin']
        assert lh['tstep'] == rh['tstep']
        kwargs = lh.copy()
        # stack left-hemisphere rows on top of right-hemisphere rows
        kwargs['data'] = np.r_[lh['data'], rh['data']]
        kwargs['vertices'] = [lh['vertices'], rh['vertices']]
    elif ftype == 'w':  # w file with surface source spaces
        lh = _read_w(fname + '-lh.w')
        rh = _read_w(fname + '-rh.w')
        kwargs = lh.copy()
        kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T
        kwargs['vertices'] = [lh['vertices'], rh['vertices']]
        # w files only have a single time point
        kwargs['tmin'] = 0.0
        kwargs['tstep'] = 1.0
    elif ftype == 'h5':
        kwargs = read_hdf5(fname + '-stc.h5', title='mnepython')
    if ftype != 'volume':
        # Make sure the vertices are ordered
        vertices = kwargs['vertices']
        if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
            sidx = [np.argsort(verts) for verts in vertices]
            vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]
            # reorder data rows to match the sorted vertex order
            data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]
            kwargs['vertices'] = vertices
            kwargs['data'] = data
    if 'subject' not in kwargs:
        kwargs['subject'] = subject
    if subject is not None and subject != kwargs['subject']:
        raise RuntimeError('provided subject name "%s" does not match '
                           'subject name from the file "%s'
                           % (subject, kwargs['subject']))
    if ftype == 'volume':
        stc = VolSourceEstimate(**kwargs)
    else:
        stc = SourceEstimate(**kwargs)
    return stc
def _make_stc(data, vertices, tmin=None, tstep=None, subject=None):
    """Helper function to generate a surface, volume or mixed source estimate

    The class is chosen from the shape of ``vertices``: two arrays means a
    surface estimate, a single array (or one-element list) a volume
    estimate, and more than two arrays a mixed estimate.
    """
    is_list = isinstance(vertices, list)
    if is_list and len(vertices) == 2:
        # make a surface source estimate
        return SourceEstimate(data, vertices=vertices, tmin=tmin,
                              tstep=tstep, subject=subject)
    if isinstance(vertices, np.ndarray) or (is_list and len(vertices) == 1):
        return VolSourceEstimate(data, vertices=vertices, tmin=tmin,
                                 tstep=tstep, subject=subject)
    if is_list and len(vertices) > 2:
        # make a mixed source estimate
        return MixedSourceEstimate(data, vertices=vertices, tmin=tmin,
                                   tstep=tstep, subject=subject)
    raise ValueError('vertices has to be either a list with one or more '
                     'arrays or an array')
def _verify_source_estimate_compat(a, b):
"""Make sure two SourceEstimates are compatible for arith. operations"""
compat = False
if len(a.vertices) == len(b.vertices):
if all(np.array_equal(av, vv)
for av, vv in zip(a.vertices, b.vertices)):
compat = True
if not compat:
raise ValueError('Cannot combine SourceEstimates that do not have the '
'same vertices. Consider using stc.expand().')
if a.subject != b.subject:
raise ValueError('source estimates do not have the same subject '
'names, %r and %r' % (a.subject, b.subject))
class _BaseSourceEstimate(ToDataFrameMixin, TimeMixin):
    """Abstract base class for source estimates

    Parameters
    ----------
    data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
        The data in source space. The data can either be a single array or
        a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
        "sens_data" shape (n_sensors, n_times). In this case, the source
        space data corresponds to "numpy.dot(kernel, sens_data)".
    vertices : array | list of two arrays
        Vertex numbers corresponding to the data.
    tmin : float
        Time point of the first sample in data.
    tstep : float
        Time step between successive samples in data.
    subject : str | None
        The subject name. While not necessary, it is safer to set the
        subject parameter to avoid analysis errors.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Attributes
    ----------
    subject : str | None
        The subject name.
    times : array of shape (n_times,)
        The time vector.
    vertices : array or list of arrays of shape (n_dipoles,)
        The indices of the dipoles in the different source spaces. Can
        be an array if there is only one source space (e.g., for volumes).
    data : array of shape (n_dipoles, n_times)
        The data in source space.
    shape : tuple
        The shape of the data. A tuple of int (n_dipoles, n_times).
    """
    @verbose
    def __init__(self, data, vertices=None, tmin=None, tstep=None,
                 subject=None, verbose=None):
        # data may arrive factored as (kernel, sens_data); the full
        # source-space matrix np.dot(kernel, sens_data) is then only
        # computed on demand (see _remove_kernel_sens_data_)
        kernel, sens_data = None, None
        if isinstance(data, tuple):
            if len(data) != 2:
                raise ValueError('If data is a tuple it has to be length 2')
            kernel, sens_data = data
            data = None
            if kernel.shape[1] != sens_data.shape[0]:
                raise ValueError('kernel and sens_data have invalid '
                                 'dimensions')
        if isinstance(vertices, list):
            if not all(isinstance(v, np.ndarray) for v in vertices):
                raise ValueError('Vertices, if a list, must contain numpy '
                                 'arrays')
            if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
                raise ValueError('Vertices must be ordered in increasing '
                                 'order.')
            n_src = sum([len(v) for v in vertices])
            # a single source space is stored as a plain array, not a list
            if len(vertices) == 1:
                vertices = vertices[0]
        elif isinstance(vertices, np.ndarray):
            n_src = len(vertices)
        else:
            raise ValueError('Vertices must be a list or numpy array')
        # safeguard the user against doing something silly
        if data is not None and data.shape[0] != n_src:
            raise ValueError('Number of vertices (%i) and stc.shape[0] (%i) '
                             'must match' % (n_src, data.shape[0]))
        self._data = data  # None while the (kernel, sens_data) form is kept
        self.tmin = tmin
        self.tstep = tstep
        self.vertices = vertices
        self.verbose = verbose
        self._kernel = kernel
        self._sens_data = sens_data
        self._kernel_removed = False  # True once the factored form expanded
        self.times = None
        self._update_times()
        self.subject = _check_subject(None, subject, False)
    @property
    def sfreq(self):
        """Sample rate of the data"""
        return 1. / self.tstep
    def _remove_kernel_sens_data_(self):
        """Remove kernel and sensor space data and compute self._data

        After this call the estimate is stored only as the full
        source-space matrix, so in-place arithmetic becomes possible.
        """
        if self._kernel is not None or self._sens_data is not None:
            self._kernel_removed = True
            self._data = np.dot(self._kernel, self._sens_data)
            self._kernel = None
            self._sens_data = None
    def crop(self, tmin=None, tmax=None):
        """Restrict SourceEstimate to a time interval

        Parameters
        ----------
        tmin : float | None
            The first time point in seconds. If None the first present is used.
        tmax : float | None
            The last time point in seconds. If None the last present is used.
        """
        mask = _time_mask(self.times, tmin, tmax, sfreq=self.sfreq)
        self.tmin = self.times[np.where(mask)[0][0]]
        # cropping the (smaller) sensor-space data is cheaper than
        # expanding the kernel first
        if self._kernel is not None and self._sens_data is not None:
            self._sens_data = self._sens_data[:, mask]
        else:
            self._data = self._data[:, mask]
        self._update_times()
        return self  # return self for chaining methods
    @verbose
    def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1,
                 verbose=None):
        """Resample data

        Parameters
        ----------
        sfreq : float
            New sample rate to use.
        npad : int | str
            Amount to pad the start and end of the data.
            Can also be "auto" to use a padding that will result in
            a power-of-two size (can be much faster).
        window : string or tuple
            Window to use in resampling. See scipy.signal.resample.
        n_jobs : int
            Number of jobs to run in parallel.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).
            Defaults to self.verbose.

        Notes
        -----
        For some data, it may be more accurate to use npad=0 to reduce
        artifacts. This is dataset dependent -- check your data!

        Note that the sample rate of the original data is inferred from tstep.
        """
        # resampling in sensor instead of source space gives a somewhat
        # different result, so we don't allow it
        self._remove_kernel_sens_data_()
        o_sfreq = 1.0 / self.tstep
        self._data = resample(self._data, sfreq, o_sfreq, npad, n_jobs=n_jobs)
        # adjust indirectly affected variables
        self.tstep = 1.0 / sfreq
        self._update_times()
    @property
    def data(self):
        """Numpy array of source estimate data"""
        if self._data is None:
            # compute the solution the first time the data is accessed and
            # remove the kernel and sensor data
            self._remove_kernel_sens_data_()
        return self._data
    @property
    def shape(self):
        """Shape of the data"""
        if self._data is not None:
            return self._data.shape
        # shape is known without expanding the (kernel, sens_data) form
        return (self._kernel.shape[0], self._sens_data.shape[1])
    def _update_times(self):
        """Update the times attribute after changing tmin, tmax, or tstep"""
        self.times = self.tmin + (self.tstep * np.arange(self.shape[1]))
    # Arithmetic operators: the binary forms deepcopy and delegate to the
    # in-place forms, which require the expanded data matrix.
    def __add__(self, a):
        stc = copy.deepcopy(self)
        stc += a
        return stc
    def __iadd__(self, a):
        self._remove_kernel_sens_data_()
        if isinstance(a, _BaseSourceEstimate):
            _verify_source_estimate_compat(self, a)
            self._data += a.data
        else:
            self._data += a
        return self
    def mean(self):
        """Make a summary stc with the data averaged over time.

        Returns
        -------
        stc : instance of SourceEstimate
            A new SourceEstimate with a single time point holding the mean;
            the original object is not modified. The single sample is
            stamped at the center of the original time window.
        """
        data = self.data
        tmax = self.tmin + self.tstep * data.shape[1]
        tmin = (self.tmin + tmax) / 2.
        tstep = tmax - self.tmin
        mean_stc = SourceEstimate(self.data.mean(axis=1)[:, np.newaxis],
                                  vertices=self.vertices, tmin=tmin,
                                  tstep=tstep, subject=self.subject)
        return mean_stc
    def __sub__(self, a):
        stc = copy.deepcopy(self)
        stc -= a
        return stc
    def __isub__(self, a):
        self._remove_kernel_sens_data_()
        if isinstance(a, _BaseSourceEstimate):
            _verify_source_estimate_compat(self, a)
            self._data -= a.data
        else:
            self._data -= a
        return self
    def __truediv__(self, a):
        return self.__div__(a)
    def __div__(self, a):
        stc = copy.deepcopy(self)
        stc /= a
        return stc
    def __itruediv__(self, a):
        return self.__idiv__(a)
    def __idiv__(self, a):
        self._remove_kernel_sens_data_()
        if isinstance(a, _BaseSourceEstimate):
            _verify_source_estimate_compat(self, a)
            self._data /= a.data
        else:
            self._data /= a
        return self
    def __mul__(self, a):
        stc = copy.deepcopy(self)
        stc *= a
        return stc
    def __imul__(self, a):
        self._remove_kernel_sens_data_()
        if isinstance(a, _BaseSourceEstimate):
            _verify_source_estimate_compat(self, a)
            self._data *= a.data
        else:
            self._data *= a
        return self
    def __pow__(self, a):
        stc = copy.deepcopy(self)
        stc **= a
        return stc
    def __ipow__(self, a):
        self._remove_kernel_sens_data_()
        self._data **= a
        return self
    def __radd__(self, a):
        return self + a
    def __rsub__(self, a):
        return self - a
    def __rmul__(self, a):
        return self * a
    def __rdiv__(self, a):
        return self / a
    def __neg__(self):
        stc = copy.deepcopy(self)
        stc._remove_kernel_sens_data_()
        stc._data *= -1
        return stc
    def __pos__(self):
        return self
    def sqrt(self):
        """Take the square root

        Returns
        -------
        stc : instance of SourceEstimate
            A copy of the SourceEstimate with sqrt(data).
        """
        return self ** (0.5)
    def copy(self):
        """Return copy of SourceEstimate instance"""
        return copy.deepcopy(self)
    def bin(self, width, tstart=None, tstop=None, func=np.mean):
        """Returns a SourceEstimate object with data summarized over time bins

        Time bins of ``width`` seconds. This method is intended for
        visualization only. No filter is applied to the data before binning,
        making the method inappropriate as a tool for downsampling data.

        Parameters
        ----------
        width : scalar
            Width of the individual bins in seconds.
        tstart : scalar | None
            Time point where the first bin starts. The default is the first
            time point of the stc.
        tstop : scalar | None
            Last possible time point contained in a bin (if the last bin would
            be shorter than width it is dropped). The default is the last time
            point of the stc.
        func : callable
            Function that is applied to summarize the data. Needs to accept a
            numpy.array as first input and an ``axis`` keyword argument.

        Returns
        -------
        stc : instance of SourceEstimate
            The binned SourceEstimate.
        """
        if tstart is None:
            tstart = self.tmin
        if tstop is None:
            tstop = self.times[-1]
        times = np.arange(tstart, tstop + self.tstep, width)
        nv, _ = self.shape
        nt = len(times) - 1
        data = np.empty((nv, nt), dtype=self.data.dtype)
        for i in range(nt):
            # half-open bin [times[i], times[i + 1])
            idx = (self.times >= times[i]) & (self.times < times[i + 1])
            data[:, i] = func(self.data[:, idx], axis=1)
        # each output sample is stamped at the center of its bin
        tmin = times[0] + width / 2.
        stc = _make_stc(data, vertices=self.vertices,
                        tmin=tmin, tstep=width, subject=self.subject)
        return stc
    def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None):
        """Get data after a linear (time) transform has been applied

        The transform is applied to each source time course independently.

        Parameters
        ----------
        func : callable
            The transform to be applied, including parameters (see, e.g.,
            `mne.fixes.partial`). The first parameter of the function is the
            input data. The first return value is the transformed data,
            remaining outputs are ignored. The first dimension of the
            transformed data has to be the same as the first dimension of the
            input data.
        idx : array | None
            Indices of source time courses for which to compute transform.
            If None, all time courses are used.
        tmin_idx : int | None
            Index of first time point to include. If None, the index of the
            first time point is used.
        tmax_idx : int | None
            Index of the first time point not to include. If None, time points
            up to (and including) the last time point are included.

        Returns
        -------
        data_t : ndarray
            The transformed data.

        Notes
        -----
        Applying transforms can be significantly faster if the
        SourceEstimate object was created using "(kernel, sens_data)", for
        the "data" parameter as the transform is applied in sensor space.
        Inverse methods, e.g., "apply_inverse_epochs", or "lcmv_epochs" do
        this automatically (if possible).
        """
        if idx is None:
            # use all time courses by default
            idx = slice(None, None)
        if self._kernel is None and self._sens_data is None:
            if self._kernel_removed:
                warn_('Performance can be improved by not accessing the data '
                      'attribute before calling this method.')
            # transform source space data directly
            data_t = func(self.data[idx, tmin_idx:tmax_idx])
            if isinstance(data_t, tuple):
                # use only first return value
                data_t = data_t[0]
        else:
            # apply transform in sensor space
            sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx])
            if isinstance(sens_data_t, tuple):
                # use only first return value
                sens_data_t = sens_data_t[0]
            # apply inverse
            data_shape = sens_data_t.shape
            if len(data_shape) > 2:
                # flatten the last dimensions
                sens_data_t = sens_data_t.reshape(data_shape[0],
                                                  np.prod(data_shape[1:]))
            data_t = np.dot(self._kernel[idx, :], sens_data_t)
            # restore original shape if necessary
            if len(data_shape) > 2:
                data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])
        return data_t
    def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):
        """Apply linear transform

        The transform is applied to each source time course independently.

        Parameters
        ----------
        func : callable
            The transform to be applied, including parameters (see, e.g.,
            mne.fixes.partial). The first parameter of the function is the
            input data. The first two dimensions of the transformed data
            should be (i) vertices and (ii) time.  Transforms which yield 3D
            output (e.g. time-frequency transforms) are valid, so long as the
            first two dimensions are vertices and time.  In this case, the
            copy parameter (see below) must be True and a list of
            SourceEstimates, rather than a single instance of SourceEstimate,
            will be returned, one for each index of the 3rd dimension of the
            transformed data.  In the case of transforms yielding 2D output
            (e.g. filtering), the user has the option of modifying the input
            inplace (copy = False) or returning a new instance of
            SourceEstimate (copy = True) with the transformed data.
        idx : array | None
            Indices of source time courses for which to compute transform.
            If None, all time courses are used.
        tmin : float | int | None
            First time point to include (ms). If None, self.tmin is used.
        tmax : float | int | None
            Last time point to include (ms). If None, self.tmax is used.
        copy : bool
            If True, return a new instance of SourceEstimate instead of
            modifying the input inplace.

        Returns
        -------
        stcs : instance of SourceEstimate | list
            The transformed stc or, in the case of transforms which yield
            N-dimensional output (where N > 2), a list of stcs. For a list,
            copy must be True.

        Notes
        -----
        Applying transforms can be significantly faster if the
        SourceEstimate object was created using "(kernel, sens_data)", for
        the "data" parameter as the transform is applied in sensor space.
        Inverse methods, e.g., "apply_inverse_epochs", or "lcmv_epochs" do
        this automatically (if possible).
        """
        # min and max data indices to include
        times = 1000. * self.times
        t_idx = np.where(_time_mask(times, tmin, tmax, sfreq=self.sfreq))[0]
        if tmin is None:
            tmin_idx = None
        else:
            tmin_idx = t_idx[0]
        # NOTE: t_idx[-1] is an *inclusive* end index here
        if tmax is None:
            tmax_idx = None
        else:
            tmax_idx = t_idx[-1]
        data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx,
                                     tmax_idx=tmax_idx)
        # account for change in n_vertices
        # NOTE(review): relies on lh_vertno/rh_vertno, which the surface
        # subclass defines -- presumably only valid for surface estimates
        if idx is not None:
            idx_lh = idx[idx < len(self.lh_vertno)]
            idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno)
            verts_lh = self.lh_vertno[idx_lh]
            verts_rh = self.rh_vertno[idx_rh]
        else:
            verts_lh = self.lh_vertno
            verts_rh = self.rh_vertno
        verts = [verts_lh, verts_rh]
        tmin_idx = 0 if tmin_idx is None else tmin_idx
        tmax_idx = -1 if tmax_idx is None else tmax_idx
        tmin = self.times[tmin_idx]
        times = np.arange(self.times[tmin_idx],
                          self.times[tmax_idx] + self.tstep / 2, self.tstep)
        if data_t.ndim > 2:
            # return list of stcs if transformed data has dimensionality > 2
            if copy:
                stcs = [SourceEstimate(data_t[:, :, a], verts, tmin,
                                       self.tstep, self.subject)
                        for a in range(data_t.shape[-1])]
            else:
                raise ValueError('copy must be True if transformed data has '
                                 'more than 2 dimensions')
        else:
            # return new or overwritten stc
            stcs = self if not copy else self.copy()
            stcs._data, stcs.vertices = data_t, verts
            stcs.tmin, stcs.times = tmin, times
        return stcs
def _center_of_mass(vertices, values, hemi, surf, subject, subjects_dir,
                    restrict_vertices):
    """Helper to find the center of mass on a surface

    Computes the value-weighted mean position of ``vertices`` on the given
    FreeSurfer surface and returns the surface vertex closest (in Euclidean
    distance) to that point, optionally restricted to a vertex subset.
    """
    if (values == 0).all() or (values < 0).any():
        raise ValueError('All values must be non-negative and at least one '
                         'must be non-zero, cannot compute COM')
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    # NOTE: the name ``surf`` is reused -- the string argument is replaced
    # by the loaded surface (positions, triangles) tuple
    surf = read_surface(op.join(subjects_dir, subject, 'surf',
                                hemi + '.' + surf))
    # True/False are sentinels distinct from array input, hence the
    # identity (``is``) checks below
    if restrict_vertices is True:
        restrict_vertices = vertices
    elif restrict_vertices is False:
        restrict_vertices = np.arange(surf[0].shape[0])
    elif isinstance(restrict_vertices, SourceSpaces):
        idx = 1 if restrict_vertices.kind == 'surface' and hemi == 'rh' else 0
        restrict_vertices = restrict_vertices[idx]['vertno']
    else:
        restrict_vertices = np.array(restrict_vertices, int)
    pos = surf[0][vertices, :].T
    # weighted average of vertex positions -> center of mass in 3D
    c_o_m = np.sum(pos * values, axis=1) / np.sum(values)
    # pick the (restricted) surface vertex nearest the center of mass
    vertex = np.argmin(np.sqrt(np.mean((surf[0][restrict_vertices, :] -
                                        c_o_m) ** 2, axis=1)))
    vertex = restrict_vertices[vertex]
    return vertex
class SourceEstimate(_BaseSourceEstimate):
"""Container for surface source estimates
Parameters
----------
data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to "numpy.dot(kernel, sens_data)".
vertices : list of two arrays
Vertex numbers corresponding to the data.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of two arrays of shape (n_dipoles,)
The indices of the dipoles in the left and right source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
"""
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None):
if not (isinstance(vertices, list) and len(vertices) == 2):
raise ValueError('Vertices, if a list, must contain two '
'numpy arrays')
_BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject,
verbose=verbose)
@verbose
def save(self, fname, ftype='stc', verbose=None):
"""Save the source estimates to a file
Parameters
----------
fname : string
The stem of the file name. The file names used for surface source
spaces are obtained by adding "-lh.stc" and "-rh.stc" (or "-lh.w"
and "-rh.w") to the stem provided, for the left and the right
hemisphere, respectively.
ftype : string
File format to use. Allowed values are "stc" (default), "w",
and "h5". The "w" format only supports a single time point.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
"""
if ftype not in ('stc', 'w', 'h5'):
raise ValueError('ftype must be "stc", "w", or "h5", not "%s"'
% ftype)
lh_data = self.data[:len(self.lh_vertno)]
rh_data = self.data[-len(self.rh_vertno):]
if ftype == 'stc':
logger.info('Writing STC to disk...')
_write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.lh_vertno, data=lh_data)
_write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.rh_vertno, data=rh_data)
elif ftype == 'w':
if self.shape[1] != 1:
raise ValueError('w files can only contain a single time '
'point')
logger.info('Writing STC to disk (w format)...')
_write_w(fname + '-lh.w', vertices=self.lh_vertno,
data=lh_data[:, 0])
_write_w(fname + '-rh.w', vertices=self.rh_vertno,
data=rh_data[:, 0])
elif ftype == 'h5':
write_hdf5(fname + '-stc.h5',
dict(vertices=self.vertices, data=self.data,
tmin=self.tmin, tstep=self.tstep,
subject=self.subject), title='mnepython')
logger.info('[done]')
def __repr__(self):
if isinstance(self.vertices, list):
nv = sum([len(v) for v in self.vertices])
else:
nv = self.vertices.size
s = "%d vertices" % nv
if self.subject is not None:
s += ", subject : %s" % self.subject
s += ", tmin : %s (ms)" % (1e3 * self.tmin)
s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
s += ", tstep : %s (ms)" % (1e3 * self.tstep)
s += ", data size : %s x %s" % self.shape
return "<SourceEstimate | %s>" % s
    @property
    def lh_data(self):
        """Data rows belonging to the left-hemisphere vertices."""
        return self.data[:len(self.lh_vertno)]
    @property
    def rh_data(self):
        """Data rows belonging to the right-hemisphere vertices."""
        return self.data[len(self.lh_vertno):]
    @property
    def lh_vertno(self):
        """Vertex numbers of the left hemisphere."""
        return self.vertices[0]
    @property
    def rh_vertno(self):
        """Vertex numbers of the right hemisphere."""
        return self.vertices[1]
def _hemilabel_stc(self, label):
if label.hemi == 'lh':
stc_vertices = self.vertices[0]
else:
stc_vertices = self.vertices[1]
# find index of the Label's vertices
idx = np.nonzero(in1d(stc_vertices, label.vertices))[0]
# find output vertices
vertices = stc_vertices[idx]
# find data
if label.hemi == 'rh':
values = self.data[idx + len(self.vertices[0])]
else:
values = self.data[idx]
return vertices, values
    def in_label(self, label):
        """Returns a SourceEstimate object restricted to a label

        SourceEstimate contains the time course of
        activation of all sources inside the label.

        Parameters
        ----------
        label : Label | BiHemiLabel
            The label (as created for example by mne.read_label). If the label
            does not match any sources in the SourceEstimate, a ValueError is
            raised.

        Returns
        -------
        label_stc : instance of SourceEstimate
            A new SourceEstimate covering only the label's vertices.

        Raises
        ------
        RuntimeError
            If the label and stc carry different (non-None) subject names.
        TypeError
            If ``label.hemi`` is not 'lh', 'rh', or 'both'.
        ValueError
            If no stc vertices fall inside the label.
        """
        # make sure label and stc are compatible
        if label.subject is not None and self.subject is not None \
                and label.subject != self.subject:
            raise RuntimeError('label and stc must have same subject names, '
                               'currently "%s" and "%s"' % (label.subject,
                                                            self.subject))
        # the unused hemisphere gets an empty vertex array so the result is
        # still a valid two-hemisphere surface estimate
        if label.hemi == 'both':
            lh_vert, lh_val = self._hemilabel_stc(label.lh)
            rh_vert, rh_val = self._hemilabel_stc(label.rh)
            vertices = [lh_vert, rh_vert]
            values = np.vstack((lh_val, rh_val))
        elif label.hemi == 'lh':
            lh_vert, values = self._hemilabel_stc(label)
            vertices = [lh_vert, np.array([], int)]
        elif label.hemi == 'rh':
            rh_vert, values = self._hemilabel_stc(label)
            vertices = [np.array([], int), rh_vert]
        else:
            raise TypeError("Expected  Label or BiHemiLabel; got %r" % label)
        if sum([len(v) for v in vertices]) == 0:
            raise ValueError('No vertices match the label in the stc file')
        label_stc = SourceEstimate(values, vertices=vertices,
                                   tmin=self.tmin, tstep=self.tstep,
                                   subject=self.subject)
        return label_stc
    def expand(self, vertices):
        """Expand SourceEstimate to include more vertices

        This will add rows to stc.data (zero-filled) and modify stc.vertices
        to include all vertices in stc.vertices and the input vertices.

        Parameters
        ----------
        vertices : list of array
            New vertices to add. Can also contain old values.

        Returns
        -------
        stc : instance of SourceEstimate
            The modified stc (note: method operates inplace).
        """
        if not isinstance(vertices, list):
            raise TypeError('vertices must be a list')
        if not len(self.vertices) == len(vertices):
            raise ValueError('vertices must have the same length as '
                             'stc.vertices')
        # can no longer use kernel and sensor data
        self._remove_kernel_sens_data_()
        inserters = list()
        offsets = [0]
        for vi, (v_old, v_new) in enumerate(zip(self.vertices, vertices)):
            # only genuinely new vertices get zero-filled rows
            v_new = np.setdiff1d(v_new, v_old)
            inds = np.searchsorted(v_old, v_new)
            # newer numpy might overwrite inds after np.insert, copy here
            inserters += [inds.copy()]
            offsets += [len(v_old)]
            self.vertices[vi] = np.insert(v_old, inds, v_new)
        # shift per-hemisphere insert positions by each hemisphere's offset
        # into the stacked data matrix
        inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]
        inds = np.concatenate(inds)
        new_data = np.zeros((len(inds), self._data.shape[1]))
        self._data = np.insert(self._data, inds, new_data, axis=0)
        return self
    @verbose
    def extract_label_time_course(self, labels, src, mode='mean_flip',
                                  allow_empty=False, verbose=None):
        """Extract label time courses for lists of labels

        This function will extract one time course for each label. The way the
        time courses are extracted depends on the mode parameter.

        Valid values for mode are:

            - 'mean': Average within each label.
            - 'mean_flip': Average within each label with sign flip depending
              on source orientation.
            - 'pca_flip': Apply an SVD to the time courses within each label
              and use the scaled and sign-flipped first right-singular vector
              as the label time course. The scaling is performed such that the
              power of the label time course is the same as the average
              per-vertex time course power within the label. The sign of the
              resulting time course is adjusted by multiplying it with
              "sign(dot(u, flip))" where u is the first left-singular vector,
              and flip is a sign-flip vector based on the vertex normals. This
              procedure assures that the phase does not randomly change by 180
              degrees from one stc to the next.
            - 'max': Max value within each label.

        Parameters
        ----------
        labels : Label | list of Label
            The labels for which to extract the time courses.
        src : list
            Source spaces for left and right hemisphere.
        mode : str
            Extraction mode, see explanation above.
        allow_empty : bool
            Instead of emitting an error, return all-zero time course for
            labels that do not have any vertices in the source estimate.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).

        Returns
        -------
        label_tc : array, shape=(len(labels), n_times)
            Extracted time course for each label.

        See Also
        --------
        extract_label_time_course : extract time courses for multiple STCs
        """
        # thin wrapper: delegates to the module-level function with this stc
        label_tc = extract_label_time_course(self, labels, src, mode=mode,
                                             return_generator=False,
                                             allow_empty=allow_empty,
                                             verbose=verbose)
        return label_tc
def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False,
                   subjects_dir=None, surf='sphere'):
    """Compute the center of mass of activity

    This function computes the spatial center of mass on the surface
    as well as the temporal center of mass as in [1]_.

    .. note:: All activity must occur in a single hemisphere, otherwise
              an error is raised. The "mass" of each point in space for
              computing the spatial center of mass is computed by summing
              across time, and vice-versa for each point in time in
              computing the temporal center of mass. This is useful for
              quantifying spatio-temporal cluster locations, especially
              when combined with :func:`mne.source_space.vertex_to_mni`.

    Parameters
    ----------
    subject : string | None
        The subject the stc is defined for.
    hemi : int, or None
        Calculate the center of mass for the left (0) or right (1)
        hemisphere. If None, one of the hemispheres must be all zeroes,
        and the center of mass will be calculated for the other
        hemisphere (useful for getting COM for clusters).
    restrict_vertices : bool | array of int | instance of SourceSpaces
        If True, returned vertex will be one from stc. Otherwise, it could
        be any vertex from surf. If an array of int, the returned vertex
        will come from that array. If instance of SourceSpaces (as of
        0.13), the returned vertex will be from the given source space.
        For most accurate estimates, do not restrict vertices.
    subjects_dir : str, or None
        Path to the SUBJECTS_DIR. If None, the path is obtained by using
        the environment variable SUBJECTS_DIR.
    surf : str
        The surface to use for Euclidean distance center of mass
        finding. The default here is "sphere", which finds the center
        of mass on the spherical surface to help avoid potential issues
        with cortical folding.

    See Also
    --------
    Label.center_of_mass
    vertex_to_mni

    Returns
    -------
    vertex : int
        Vertex of the spatial center of mass for the inferred hemisphere,
        with each vertex weighted by the sum of the stc across time. For a
        boolean stc, then, this would be weighted purely by the duration
        each vertex was active.
    hemi : int
        Hemisphere the vertex was taken from.
    t : float
        Time of the temporal center of mass (weighted by the sum across
        source vertices).

    References
    ----------
    .. [1] Larson and Lee, "The cortical dynamics underlying effective
           switching of auditory spatial attention", NeuroImage 2012.
    """
    if not isinstance(surf, string_types):
        raise TypeError('surf must be a string, got %s' % (type(surf),))
    subject = _check_subject(self.subject, subject)
    # COM is a weighted average; negative weights would be ill-defined
    if np.any(self.data < 0):
        raise ValueError('Cannot compute COM with negative values')
    values = np.sum(self.data, axis=1)  # sum across time
    # index ranges of the lh/rh blocks within the stacked data array
    vert_inds = [np.arange(len(self.vertices[0])),
                 np.arange(len(self.vertices[1])) + len(self.vertices[0])]
    if hemi is None:
        # infer the active hemisphere: exactly one must have nonzero mass
        hemi = np.where(np.array([np.sum(values[vi])
                                  for vi in vert_inds]))[0]
        if not len(hemi) == 1:
            raise ValueError('Could not infer hemisphere')
        hemi = hemi[0]
    if hemi not in [0, 1]:
        raise ValueError('hemi must be 0 or 1')
    vertices = self.vertices[hemi]
    values = values[vert_inds[hemi]]  # left or right
    del vert_inds
    vertex = _center_of_mass(
        vertices, values, hemi=['lh', 'rh'][hemi], surf=surf,
        subject=subject, subjects_dir=subjects_dir,
        restrict_vertices=restrict_vertices)
    # do time center of mass by using the values across space
    masses = np.sum(self.data, axis=0).astype(float)
    t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)
    t = self.tmin + self.tstep * t_ind
    return vertex, hemi, t
def plot(self, subject=None, surface='inflated', hemi='lh',
         colormap='auto', time_label='auto',
         smoothing_steps=10, transparent=None, alpha=1.0,
         time_viewer=False, config_opts=None, subjects_dir=None,
         figure=None, views='lat', colorbar=True, clim='auto',
         cortex="classic", size=800, background="black",
         foreground="white", initial_time=None, time_unit=None):
    """Plot SourceEstimates with PySurfer

    Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
    which will automatically be set by this function. Plotting multiple
    SourceEstimates with different values for subjects_dir will cause
    PySurfer to use the wrong FreeSurfer surfaces when using methods of
    the returned Brain object. It is therefore recommended to set the
    SUBJECTS_DIR environment variable or always use the same value for
    subjects_dir (within the same Python session).

    Parameters
    ----------
    subject : str | None
        FreeSurfer subject name (environment variable SUBJECT). If None,
        ``stc.subject`` is used; if that is also None, the environment
        will be used.
    surface : str
        The type of surface (inflated, white etc.).
    hemi : str, 'lh' | 'rh' | 'split' | 'both'
        The hemisphere to display.
    colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
        Name of colormap to use or a custom look up table. If array, must
        be (n x 3) or (n x 4) with RGB or RGBA values between 0 and 255.
        If 'auto', either 'hot' or 'mne' will be chosen based on whether
        'lims' or 'pos_lims' are specified in `clim`.
    time_label : str | callable | None
        Format of the time label (a format string, a function that maps
        floating point time values to strings, or None for no label).
        The default is ``time=%0.2f ms``.
    smoothing_steps : int
        The amount of smoothing.
    transparent : bool | None
        If True, use a linear transparency between fmin and fmid.
        None will choose automatically based on colormap type.
    alpha : float
        Alpha value to apply globally to the overlay.
    time_viewer : bool
        Display time viewer GUI.
    config_opts : dict
        Deprecated parameter.
    subjects_dir : str
        The path to the FreeSurfer subjects reconstructions
        (environment variable SUBJECTS_DIR).
    figure : instance of mayavi.core.scene.Scene | list | int | None
        If None, a new figure will be created. If multiple views or a
        split view is requested, this must be a list of the appropriate
        length. If int, it identifies the Mayavi figure by its id or
        creates a new figure with the given id.
    views : str | list
        View to use. See surfer.Brain().
    colorbar : bool
        If True, display colorbar on scene.
    clim : str | dict
        Colorbar properties specification. If 'auto', set clim
        automatically based on data percentiles. If dict, should contain
        'kind' ('value' or 'percent') and either 'lims' (non-'mne'
        colormaps) or 'pos_lims' ('mne' colormap), each 3 floats giving
        the left, middle, and right bound; 'pos_lims' values are
        mirrored across zero.
    cortex : str or tuple
        Specifies how binarized curvature values are rendered: a preset
        PySurfer cortex colorscheme ('classic', 'bone', 'low_contrast',
        or 'high_contrast'), a mayavi colormap name, or a tuple
        (colormap, min, max, reverse).
    size : float or pair of floats
        The size of the window, in pixels: one number for a square
        window, or (width, height) for a rectangular one.
    background : matplotlib color
        Color of the background of the display window.
    foreground : matplotlib color
        Color of the foreground of the display window.
    initial_time : float | None
        The time to display on the plot initially. ``None`` to display
        the first time sample (default).
    time_unit : 's' | 'ms'
        Whether time is represented in seconds (expected by PySurfer) or
        milliseconds. The current default is 'ms', but will change to
        's' in MNE 0.14. To avoid a deprecation warning specify
        ``time_unit`` explicitly.

    Returns
    -------
    brain : Brain
        An instance of surfer.viz.Brain from PySurfer.
    """
    # Every option is forwarded verbatim to the top-level plotting
    # routine; collecting them in a dict keeps the call readable.
    plot_kwargs = dict(
        surface=surface, hemi=hemi, colormap=colormap,
        time_label=time_label, smoothing_steps=smoothing_steps,
        transparent=transparent, alpha=alpha, time_viewer=time_viewer,
        config_opts=config_opts, subjects_dir=subjects_dir,
        figure=figure, views=views, colorbar=colorbar, clim=clim,
        cortex=cortex, size=size, background=background,
        foreground=foreground, initial_time=initial_time,
        time_unit=time_unit)
    return plot_source_estimates(self, subject, **plot_kwargs)
@verbose
def to_original_src(self, src_orig, subject_orig=None,
                    subjects_dir=None, verbose=None):
    """Return a SourceEstimate from morphed source to the original subject

    Parameters
    ----------
    src_orig : instance of SourceSpaces
        The original source spaces that were morphed to the current
        subject.
    subject_orig : str | None
        The original subject. For most source spaces this shouldn't need
        to be provided, since it is stored in the source space itself.
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    See Also
    --------
    morph_source_spaces

    Notes
    -----
    .. versionadded:: 0.10.0
    """
    # A subject is required to resolve the morph back to its origin.
    if self.subject is None:
        raise ValueError('stc.subject must be set')
    src_orig = _ensure_src(src_orig, kind='surf')
    subject_orig = _ensure_src_subject(src_orig, subject_orig)
    # Determine the row permutation mapping morphed data back onto the
    # original subject's source space ordering.
    row_order, vertices_orig = _get_morph_src_reordering(
        self.vertices, src_orig, subject_orig, self.subject, subjects_dir)
    return SourceEstimate(self._data[row_order], vertices_orig,
                          self.tmin, self.tstep, subject_orig)
@verbose
def morph(self, subject_to, grade=5, smooth=None, subjects_dir=None,
          buffer_size=64, n_jobs=1, subject_from=None, sparse=False,
          verbose=None):
    """Morph a source estimate from one subject to another

    Parameters
    ----------
    subject_to : string
        Name of the subject on which to morph as named in the SUBJECTS_DIR
    grade : int, list (of two arrays), or None
        Resolution of the icosahedral mesh (typically 5). If None, all
        vertices will be used (potentially filling the surface). If a
        list, values will be morphed to the vertices in grade[0] and
        grade[1]. Specifying the vertices directly (e.g.
        grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
        standard grade 5 source space) can be substantially faster than
        computing vertex locations; for subject_to='fsaverage' and
        grade=5 this set is used automatically since it is a common
        morph. NOTE : If sparse=True, grade has to be set to None.
    smooth : int or None
        Number of iterations for the smoothing of the surface data.
        If None, smooth is automatically defined to fill the surface
        with non-zero values.
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    buffer_size : int
        Morph data in chunks of `buffer_size` time instants.
        Saves memory when morphing long time intervals.
    n_jobs : int
        Number of jobs to run in parallel.
    subject_from : string
        Name of the original subject as named in the SUBJECTS_DIR.
        If None, self.subject will be used.
    sparse : bool
        Morph as a sparse source estimate. If True the only
        parameters used are subject_to and subject_from,
        and grade has to be None.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    stc_to : SourceEstimate
        Source estimate for the destination subject.
    """
    subject_from = _check_subject(self.subject, subject_from)
    if not sparse:
        # Dense morph: surface smoothing followed by resampling to grade.
        return morph_data(subject_from, subject_to, self, grade, smooth,
                          subjects_dir, buffer_size, n_jobs, verbose)
    # Sparse morph: nearest-vertex remapping only; grade is meaningless.
    if grade is not None:
        raise RuntimeError('grade must be set to None if sparse=True.')
    return _morph_sparse(self, subject_from, subject_to, subjects_dir)
def morph_precomputed(self, subject_to, vertices_to, morph_mat,
                      subject_from=None):
    """Morph source estimate between subjects using a precomputed matrix

    Parameters
    ----------
    subject_to : string
        Name of the subject on which to morph as named in the SUBJECTS_DIR.
    vertices_to : list of array of int
        The vertices on the destination subject's brain.
    morph_mat : sparse matrix
        The morphing matrix, usually from compute_morph_matrix.
    subject_from : string | None
        Name of the original subject as named in the SUBJECTS_DIR.
        If None, self.subject will be used.

    Returns
    -------
    stc_to : SourceEstimate
        Source estimate for the destination subject.
    """
    # Fall back to the stc's own subject when the source is unspecified.
    src_subject = _check_subject(self.subject, subject_from)
    return morph_data_precomputed(src_subject, subject_to, self,
                                  vertices_to, morph_mat)
def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs',
             vert_as_index=False, time_as_index=False):
    """Get location and latency of peak amplitude

    Parameters
    ----------
    hemi : {'lh', 'rh', None}
        The hemi to be considered. If None, the entire source space is
        considered.
    tmin : float | None
        The minimum point in time to be considered for peak getting.
    tmax : float | None
        The maximum point in time to be considered for peak getting.
    mode : {'pos', 'neg', 'abs'}
        How to deal with the sign of the data. If 'pos' only positive
        values will be considered. If 'neg' only negative values will
        be considered. If 'abs' absolute values will be considered.
        Defaults to 'abs'.
    vert_as_index : bool
        Whether to return the vertex index instead of its ID.
        Defaults to False.
    time_as_index : bool
        Whether to return the time index instead of the latency.
        Defaults to False.

    Returns
    -------
    pos : int
        The vertex exhibiting the maximum response, either ID or index.
    latency : float | int
        The time point of the maximum response, either latency in seconds
        or index.
    """
    # Select the data and vertex numbers for the requested hemisphere
    # (None means both hemispheres concatenated).
    hemi_data = {'lh': self.lh_data, 'rh': self.rh_data, None: self.data}
    hemi_vertno = {'lh': self.lh_vertno, 'rh': self.rh_vertno,
                   None: np.concatenate(self.vertices)}
    data = hemi_data[hemi]
    vertno = hemi_vertno[hemi]
    vert_idx, time_idx = _get_peak(data, self.times, tmin, tmax, mode)
    pos = vert_idx if vert_as_index else vertno[vert_idx]
    latency = time_idx if time_as_index else self.times[time_idx]
    return pos, latency
class VolSourceEstimate(_BaseSourceEstimate):
    """Container for volume source estimates

    Parameters
    ----------
    data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
        The data in source space. The data can either be a single array or
        a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
        "sens_data" shape (n_sensors, n_times). In this case, the source
        space data corresponds to "numpy.dot(kernel, sens_data)".
    vertices : array
        Vertex numbers corresponding to the data.
    tmin : scalar
        Time point of the first sample in data.
    tstep : scalar
        Time step between successive samples in data.
    subject : str | None
        The subject name. While not necessary, it is safer to set the
        subject parameter to avoid analysis errors.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Attributes
    ----------
    subject : str | None
        The subject name.
    times : array of shape (n_times,)
        The time vector.
    vertices : array of shape (n_dipoles,)
        The indices of the dipoles in the source space.
    data : array of shape (n_dipoles, n_times)
        The data in source space.
    shape : tuple
        The shape of the data. A tuple of int (n_dipoles, n_times).

    Notes
    -----
    .. versionadded:: 0.9.0
    """

    @verbose
    def __init__(self, data, vertices=None, tmin=None, tstep=None,
                 subject=None, verbose=None):
        # Volume STCs have a single source space, so vertices must be one
        # array (or a one-element list), unlike surface STCs (two arrays).
        if not (isinstance(vertices, np.ndarray) or
                isinstance(vertices, list) and len(vertices) == 1):
            raise ValueError('Vertices must be a numpy array or a list with '
                             'one array')
        _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,
                                     tstep=tstep, subject=subject,
                                     verbose=verbose)

    @verbose
    def save(self, fname, ftype='stc', verbose=None):
        """Save the source estimates to a file

        Parameters
        ----------
        fname : string
            The stem of the file name. The stem is extended with "-vl.stc"
            or "-vl.w".
        ftype : string
            File format to use. Allowed values are "stc" (default) and "w".
            The "w" format only supports a single time point.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).
            Defaults to self.verbose.
        """
        if ftype not in ['stc', 'w']:
            raise ValueError('ftype must be "stc" or "w", not "%s"' % ftype)
        if ftype == 'stc':
            logger.info('Writing STC to disk...')
            # accept either historical volume suffix before appending one
            if not (fname.endswith('-vl.stc') or fname.endswith('-vol.stc')):
                fname += '-vl.stc'
            _write_stc(fname, tmin=self.tmin, tstep=self.tstep,
                       vertices=self.vertices, data=self.data)
        elif ftype == 'w':
            logger.info('Writing STC to disk (w format)...')
            if not (fname.endswith('-vl.w') or fname.endswith('-vol.w')):
                fname += '-vl.w'
            _write_w(fname, vertices=self.vertices, data=self.data)
        logger.info('[done]')

    def save_as_volume(self, fname, src, dest='mri', mri_resolution=False):
        """Save a volume source estimate in a nifti file

        Parameters
        ----------
        fname : string
            The name of the generated nifti file.
        src : list
            The list of source spaces (should actually be of length 1)
        dest : 'mri' | 'surf'
            If 'mri' the volume is defined in the coordinate system of
            the original T1 image. If 'surf' the coordinate system
            of the FreeSurfer surface is used (Surface RAS).
        mri_resolution : bool
            If True the image is saved in MRI resolution.
            WARNING: if you have many time points the file produced can be
            huge.

        Returns
        -------
        img : instance Nifti1Image
            The image object.

        Notes
        -----
        .. versionadded:: 0.9.0
        """
        # Return the image as documented; previously the result of
        # save_stc_as_volume was silently discarded.
        return save_stc_as_volume(fname, self, src, dest=dest,
                                  mri_resolution=mri_resolution)

    def as_volume(self, src, dest='mri', mri_resolution=False):
        """Export volume source estimate as a nifti object

        Parameters
        ----------
        src : list
            The list of source spaces (should actually be of length 1)
        dest : 'mri' | 'surf'
            If 'mri' the volume is defined in the coordinate system of
            the original T1 image. If 'surf' the coordinate system
            of the FreeSurfer surface is used (Surface RAS).
        mri_resolution : bool
            If True the image is returned in MRI resolution.
            WARNING: if you have many time points the file produced can be
            huge.

        Returns
        -------
        img : instance Nifti1Image
            The image object.

        Notes
        -----
        .. versionadded:: 0.9.0
        """
        # Passing fname=None makes the helper build and return the image
        # without writing it to disk.
        return save_stc_as_volume(None, self, src, dest=dest,
                                  mri_resolution=mri_resolution)

    def __repr__(self):
        if isinstance(self.vertices, list):
            nv = sum([len(v) for v in self.vertices])
        else:
            nv = self.vertices.size
        s = "%d vertices" % nv
        if self.subject is not None:
            s += ", subject : %s" % self.subject
        s += ", tmin : %s (ms)" % (1e3 * self.tmin)
        s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
        s += ", tstep : %s (ms)" % (1e3 * self.tstep)
        s += ", data size : %s x %s" % self.shape
        return "<VolSourceEstimate | %s>" % s

    def get_peak(self, tmin=None, tmax=None, mode='abs',
                 vert_as_index=False, time_as_index=False):
        """Get location and latency of peak amplitude

        Parameters
        ----------
        tmin : float | None
            The minimum point in time to be considered for peak getting.
        tmax : float | None
            The maximum point in time to be considered for peak getting.
        mode : {'pos', 'neg', 'abs'}
            How to deal with the sign of the data. If 'pos' only positive
            values will be considered. If 'neg' only negative values will
            be considered. If 'abs' absolute values will be considered.
            Defaults to 'abs'.
        vert_as_index : bool
            Whether to return the vertex index instead of its ID.
            Defaults to False.
        time_as_index : bool
            Whether to return the time index instead of the latency.
            Defaults to False.

        Returns
        -------
        pos : int
            The vertex exhibiting the maximum response, either ID or index.
        latency : float
            The latency in seconds.
        """
        vert_idx, time_idx = _get_peak(self.data, self.times, tmin, tmax,
                                       mode)
        return (vert_idx if vert_as_index else self.vertices[vert_idx],
                time_idx if time_as_index else self.times[time_idx])
class MixedSourceEstimate(_BaseSourceEstimate):
    """Container for mixed surface and volume source estimates

    Parameters
    ----------
    data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
        The data in source space. The data can either be a single array or
        a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
        "sens_data" shape (n_sensors, n_times). In this case, the source
        space data corresponds to "numpy.dot(kernel, sens_data)".
    vertices : list of arrays
        Vertex numbers corresponding to the data.
    tmin : scalar
        Time point of the first sample in data.
    tstep : scalar
        Time step between successive samples in data.
    subject : str | None
        The subject name. While not necessary, it is safer to set the
        subject parameter to avoid analysis errors.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Attributes
    ----------
    subject : str | None
        The subject name.
    times : array of shape (n_times,)
        The time vector.
    vertices : list of arrays of shape (n_dipoles,)
        The indices of the dipoles in each source space.
    data : array of shape (n_dipoles, n_times)
        The data in source space.
    shape : tuple
        The shape of the data. A tuple of int (n_dipoles, n_times).

    Notes
    -----
    .. versionadded:: 0.9.0
    """

    @verbose
    def __init__(self, data, vertices=None, tmin=None, tstep=None,
                 subject=None, verbose=None):
        # Mixed estimates span at least two source spaces (e.g. two
        # surfaces plus volumes), hence one vertex array per space.
        if not isinstance(vertices, list) or len(vertices) < 2:
            raise ValueError('Vertices must be a list of numpy arrays with '
                             'one array per source space.')
        _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,
                                     tstep=tstep, subject=subject,
                                     verbose=verbose)

    def plot_surface(self, src, subject=None, surface='inflated', hemi='lh',
                     colormap='auto', time_label='time=%02.f ms',
                     smoothing_steps=10,
                     transparent=None, alpha=1.0, time_viewer=False,
                     config_opts=None, subjects_dir=None, figure=None,
                     views='lat', colorbar=True, clim='auto'):
        """Plot surface source estimates with PySurfer

        Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
        which will automatically be set by this function. Plotting multiple
        SourceEstimates with different values for subjects_dir will cause
        PySurfer to use the wrong FreeSurfer surfaces when using methods of
        the returned Brain object. It is therefore recommended to set the
        SUBJECTS_DIR environment variable or always use the same value for
        subjects_dir (within the same Python session).

        Parameters
        ----------
        src : SourceSpaces
            The source spaces to plot.
        subject : str | None
            The subject name corresponding to FreeSurfer environment
            variable SUBJECT. If None stc.subject will be used. If that
            is None, the environment will be used.
        surface : str
            The type of surface (inflated, white etc.).
        hemi : str, 'lh' | 'rh' | 'split' | 'both'
            The hemisphere to display. Using 'both' or 'split' requires
            PySurfer version 0.4 or above.
        colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
            Name of colormap to use. See `plot_source_estimates`.
        time_label : str
            How to print info about the time instant visualized.
        smoothing_steps : int
            The amount of smoothing.
        transparent : bool | None
            If True, use a linear transparency between fmin and fmid.
            None will choose automatically based on colormap type.
        alpha : float
            Alpha value to apply globally to the overlay.
        time_viewer : bool
            Display time viewer GUI.
        config_opts : dict
            Keyword arguments for Brain initialization.
            See pysurfer.viz.Brain.
        subjects_dir : str
            The path to the FreeSurfer subjects reconstructions.
            It corresponds to FreeSurfer environment variable SUBJECTS_DIR.
        figure : instance of mayavi.core.scene.Scene | None
            If None, the last figure will be cleaned and a new figure will
            be created.
        views : str | list
            View to use. See surfer.Brain().
        colorbar : bool
            If True, display colorbar on scene.
        clim : str | dict
            Colorbar properties specification. See `plot_source_estimates`.

        Returns
        -------
        brain : Brain
            An instance of surfer.viz.Brain from PySurfer.
        """
        # extract surface source spaces
        surf = _ensure_src(src, kind='surf')

        # extract surface source estimate
        # NOTE: this relies on the surface source spaces being stored first
        # in the data array, so the first nuse_lh + nuse_rh rows are the
        # surface vertices.
        data = self.data[:surf[0]['nuse'] + surf[1]['nuse']]
        vertices = [s['vertno'] for s in surf]

        stc = SourceEstimate(data, vertices, self.tmin, self.tstep,
                             self.subject, self.verbose)

        return plot_source_estimates(stc, subject, surface=surface, hemi=hemi,
                                     colormap=colormap, time_label=time_label,
                                     smoothing_steps=smoothing_steps,
                                     transparent=transparent, alpha=alpha,
                                     time_viewer=time_viewer,
                                     config_opts=config_opts,
                                     subjects_dir=subjects_dir, figure=figure,
                                     views=views, colorbar=colorbar, clim=clim)
###############################################################################
# Morphing
@verbose
def _morph_buffer(data, idx_use, e, smooth, n_vertices, nearest, maps,
                  warn=True, verbose=None):
    """Morph data from one subject's source space to another

    Parameters
    ----------
    data : array, or csr sparse matrix
        A n_vertices x n_times (or other dimension) dataset to morph.
    idx_use : array of int
        Vertices from the original subject's data.
    e : sparse matrix
        The mesh edges of the "from" subject.
    smooth : int
        Number of smoothing iterations to perform. A hard limit of 100 is
        also imposed.
    n_vertices : int
        Number of vertices.
    nearest : array of int
        Vertices on the destination surface to use.
    maps : sparse matrix
        Morph map from one subject to the other.
    warn : bool
        If True, warn if not all vertices were used.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    data_morphed : array, or csr sparse matrix
        The morphed data (same type as input).
    """
    n_iter = 99  # max nb of smoothing iterations (minus one)
    if smooth is not None:
        if smooth <= 0:
            raise ValueError('The number of smoothing operations ("smooth") '
                             'has to be at least 1.')
        # loop counter k is compared against this, so pre-decrement
        smooth -= 1
    # make sure we're in CSR format
    e = e.tocsr()
    if sparse.issparse(data):
        use_sparse = True
        if not isinstance(data, sparse.csr_matrix):
            data = data.tocsr()
    else:
        use_sparse = False
    done = False
    # do the smoothing
    for k in range(n_iter + 1):
        # get the row sum
        mult = np.zeros(e.shape[1])
        mult[idx_use] = 1
        idx_use_data = idx_use
        data_sum = e * mult
        # new indices are non-zero sums
        idx_use = np.where(data_sum)[0]
        # typically want to make the next iteration have these indices
        idx_out = idx_use
        # figure out if this is the last iteration
        if smooth is None:
            if k == n_iter or len(idx_use) >= n_vertices:
                # stop when vertices filled
                idx_out = None
                done = True
        elif k == smooth:
            idx_out = None
            done = True
        # do standard smoothing multiplication
        data = _morph_mult(data, e, use_sparse, idx_use_data, idx_out)
        if done is True:
            break
        # do standard normalization
        if use_sparse:
            # divide each stored value by its row's neighbor count
            data.data /= data_sum[idx_use].repeat(np.diff(data.indptr))
        else:
            data /= data_sum[idx_use][:, None]
    # do special normalization for last iteration
    if use_sparse:
        # avoid division by zero for rows never reached by smoothing
        data_sum[data_sum == 0] = 1
        data.data /= data_sum.repeat(np.diff(data.indptr))
    else:
        data[idx_use, :] /= data_sum[idx_use][:, None]
    if len(idx_use) != len(data_sum) and warn:
        warn_('%s/%s vertices not included in smoothing, consider increasing '
              'the number of steps'
              % (len(data_sum) - len(idx_use), len(data_sum)))
    logger.info(' %d smooth iterations done.' % (k + 1))
    # project the smoothed data onto the destination vertices
    data_morphed = maps[nearest, :] * data
    return data_morphed
def _morph_mult(data, e, use_sparse, idx_use_data, idx_use_out=None):
"""Helper for morphing
Equivalent to "data = (e[:, idx_use_data] * data)[idx_use_out]"
but faster.
"""
if len(idx_use_data) < e.shape[1]:
if use_sparse:
data = e[:, idx_use_data] * data
else:
# constructing a new sparse matrix is faster than sub-indexing
# e[:, idx_use_data]!
col, row = np.meshgrid(np.arange(data.shape[1]), idx_use_data)
d_sparse = sparse.csr_matrix((data.ravel(),
(row.ravel(), col.ravel())),
shape=(e.shape[1], data.shape[1]))
data = e * d_sparse
data = np.asarray(data.todense())
else:
data = e * data
# trim data
if idx_use_out is not None:
data = data[idx_use_out]
return data
def _get_subject_sphere_tris(subject, subjects_dir):
    """Load lh/rh registered-sphere triangulations for a subject."""
    tris = []
    for hemi in ('lh', 'rh'):
        fname = op.join(subjects_dir, subject, 'surf', hemi + '.sphere.reg')
        # read_surface returns (rr, tris); only the triangles are needed
        tris.append(read_surface(fname)[1])
    return tris
def _sparse_argmax_nnz_row(csr_mat):
"""Return index of the maximum non-zero index in each row
"""
n_rows = csr_mat.shape[0]
idx = np.empty(n_rows, dtype=np.int)
for k in range(n_rows):
row = csr_mat[k].tocoo()
idx[k] = row.col[np.argmax(row.data)]
return idx
def _morph_sparse(stc, subject_from, subject_to, subjects_dir=None):
    """Morph sparse source estimates to an other subject

    Parameters
    ----------
    stc : SourceEstimate
        The sparse STC.
    subject_from : str
        The subject on which stc is defined.
    subject_to : str
        The target subject.
    subjects_dir : str
        Path to SUBJECTS_DIR if it is not set in the environment.

    Returns
    -------
    stc_morph : SourceEstimate
        The morphed source estimates.
    """
    # NOTE: the map is read in the to->from direction so rows can be
    # indexed by the source subject's vertices below.
    maps = read_morph_map(subject_to, subject_from, subjects_dir)
    stc_morph = stc.copy()
    stc_morph.subject = subject_to
    offset = 0  # row offset of the current hemisphere within _data
    for k in range(2):  # 0 = 'lh', 1 = 'rh'
        verts = stc.vertices[k]
        if verts.size == 0:
            stc_morph.vertices[k] = np.array([], int)
            continue
        # map each active vertex to its best-matching target vertex
        vertno_k = _sparse_argmax_nnz_row(maps[k][verts])
        order = np.argsort(vertno_k)
        n_active_hemi = len(vertno_k)
        # reorder the data rows so vertex numbers stay sorted
        block = stc_morph._data[offset:offset + n_active_hemi]
        stc_morph._data[offset:offset + n_active_hemi] = block[order]
        stc_morph.vertices[k] = vertno_k[order]
        offset += n_active_hemi
    return stc_morph
@verbose
def morph_data(subject_from, subject_to, stc_from, grade=5, smooth=None,
               subjects_dir=None, buffer_size=64, n_jobs=1, warn=True,
               verbose=None):
    """Morph a source estimate from one subject to another

    Parameters
    ----------
    subject_from : string
        Name of the original subject as named in the SUBJECTS_DIR
    subject_to : string
        Name of the subject on which to morph as named in the SUBJECTS_DIR
    stc_from : SourceEstimate
        Source estimates for subject "from" to morph
    grade : int, list (of two arrays), or None
        Resolution of the icosahedral mesh (typically 5). If None, all
        vertices will be used (potentially filling the surface). If a list,
        then values will be morphed to the set of vertices specified in
        in grade[0] and grade[1]. Note that specifying the vertices (e.g.,
        grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
        standard grade 5 source space) can be substantially faster than
        computing vertex locations. Note that if subject='fsaverage'
        and 'grade=5', this set of vertices will automatically be used
        (instead of computed) for speed, since this is a common morph.
    smooth : int or None
        Number of iterations for the smoothing of the surface data.
        If None, smooth is automatically defined to fill the surface
        with non-zero values.
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    buffer_size : int
        Morph data in chunks of `buffer_size` time instants.
        Saves memory when morphing long time intervals.
    n_jobs : int
        Number of jobs to run in parallel
    warn : bool
        If True, warn if not all vertices were used.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    stc_to : SourceEstimate
        Source estimate for the destination subject.
    """
    if not isinstance(stc_from, SourceEstimate):
        raise ValueError('Morphing is only possible with surface source '
                         'estimates')

    logger.info('Morphing data...')
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    nearest = grade_to_vertices(subject_to, grade, subjects_dir, n_jobs)
    tris = _get_subject_sphere_tris(subject_from, subjects_dir)
    maps = read_morph_map(subject_from, subject_to, subjects_dir)

    # morph the data
    data = [stc_from.lh_data, stc_from.rh_data]
    data_morphed = [None, None]

    # split the time axis into chunks to bound memory usage
    n_chunks = ceil(stc_from.data.shape[1] / float(buffer_size))

    parallel, my_morph_buffer, _ = parallel_func(_morph_buffer, n_jobs)

    for hemi in [0, 1]:
        e = mesh_edges(tris[hemi])
        # binarize edge counts (each edge is stored twice in the mesh)
        e.data[e.data == 2] = 1
        n_vertices = e.shape[0]
        # add self-connections so each vertex keeps its own value
        e = e + sparse.eye(n_vertices, n_vertices)
        idx_use = stc_from.vertices[hemi]
        if len(idx_use) == 0:
            continue
        # morph each time chunk (possibly in parallel), then re-join
        data_morphed[hemi] = np.concatenate(
            parallel(my_morph_buffer(data_buffer, idx_use, e, smooth,
                                     n_vertices, nearest[hemi], maps[hemi],
                                     warn=warn)
                     for data_buffer
                     in np.array_split(data[hemi], n_chunks, axis=1)), axis=1)

    vertices = [nearest[0], nearest[1]]
    # handle hemispheres that had no active vertices
    if data_morphed[0] is None:
        if data_morphed[1] is None:
            data = np.r_[[], []]
            vertices = [np.array([], int), np.array([], int)]
        else:
            data = data_morphed[1]
            vertices = [np.array([], int), vertices[1]]
    elif data_morphed[1] is None:
        data = data_morphed[0]
        vertices = [vertices[0], np.array([], int)]
    else:
        data = np.r_[data_morphed[0], data_morphed[1]]

    stc_to = SourceEstimate(data, vertices, stc_from.tmin, stc_from.tstep,
                            subject=subject_to, verbose=stc_from.verbose)
    logger.info('[done]')

    return stc_to
@verbose
def compute_morph_matrix(subject_from, subject_to, vertices_from, vertices_to,
                         smooth=None, subjects_dir=None, warn=True,
                         verbose=None):
    """Get a matrix that morphs data from one subject to another

    Parameters
    ----------
    subject_from : string
        Name of the original subject as named in the SUBJECTS_DIR
    subject_to : string
        Name of the subject on which to morph as named in the SUBJECTS_DIR
    vertices_from : list of arrays of int
        Vertices for each hemisphere (LH, RH) for subject_from
    vertices_to : list of arrays of int
        Vertices for each hemisphere (LH, RH) for subject_to
    smooth : int or None
        Number of iterations for the smoothing of the surface data.
        If None, smooth is automatically defined to fill the surface
        with non-zero values.
    subjects_dir : string
        Path to SUBJECTS_DIR if it is not set in the environment
    warn : bool
        If True, warn if not all vertices were used.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    morph_matrix : sparse matrix
        matrix that morphs data from subject_from to subject_to
    """
    logger.info('Computing morph matrix...')
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    tris = _get_subject_sphere_tris(subject_from, subjects_dir)
    maps = read_morph_map(subject_from, subject_to, subjects_dir)

    morpher = [None] * 2
    for hemi in [0, 1]:
        e = mesh_edges(tris[hemi])
        # binarize edge counts (each edge is stored twice in the mesh)
        e.data[e.data == 2] = 1
        n_vertices = e.shape[0]
        # add self-connections so each vertex keeps its own value
        e = e + sparse.eye(n_vertices, n_vertices)
        idx_use = vertices_from[hemi]
        if len(idx_use) == 0:
            # marker for "no vertices in this hemisphere" (see below)
            morpher[hemi] = []
            continue
        # morphing an identity matrix yields the full morph operator
        m = sparse.eye(len(idx_use), len(idx_use), format='csr')
        morpher[hemi] = _morph_buffer(m, idx_use, e, smooth, n_vertices,
                                      vertices_to[hemi], maps[hemi], warn=warn)
    # be careful about zero-length arrays
    if isinstance(morpher[0], list):
        morpher = morpher[1]
    elif isinstance(morpher[1], list):
        morpher = morpher[0]
    else:
        morpher = sparse_block_diag(morpher, format='csr')
    logger.info('[done]')
    return morpher
@verbose
def grade_to_vertices(subject, grade, subjects_dir=None, n_jobs=1,
                      verbose=None):
    """Convert a grade to source space vertices for a given subject.

    Parameters
    ----------
    subject : str
        Name of the subject
    grade : int | list
        Resolution of the icosahedral mesh (typically 5). If None, all
        vertices will be used (potentially filling the surface). If a list,
        then values will be morphed to the set of vertices specified in
        in grade[0] and grade[1]. Note that specifying the vertices (e.g.,
        grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
        standard grade 5 source space) can be substantially faster than
        computing vertex locations. Note that if subject='fsaverage'
        and 'grade=5', this set of vertices will automatically be used
        (instead of computed) for speed, since this is a common morph.
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment
    n_jobs : int
        Number of jobs to run in parallel
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    vertices : list of arrays of int
        Vertex numbers for LH and RH
    """
    # add special case for fsaverage for speed
    if subject == 'fsaverage' and grade == 5:
        return [np.arange(10242), np.arange(10242)]
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    spheres_to = [op.join(subjects_dir, subject, 'surf',
                          xh + '.sphere.reg') for xh in ['lh', 'rh']]
    lhs, rhs = [read_surface(s)[0] for s in spheres_to]

    if grade is not None:  # fill a subset of vertices
        if isinstance(grade, list):
            if not len(grade) == 2:
                raise ValueError('grade as a list must have two elements '
                                 '(arrays of output vertices)')
            vertices = grade
        else:
            # find which vertices to use in "to mesh"
            ico = _get_ico_tris(grade, return_surf=True)
            # Normalize both hemisphere registration spheres onto the unit
            # sphere so nearest-neighbor matching against the ico sphere
            # is well defined.
            lhs /= np.sqrt(np.sum(lhs ** 2, axis=1))[:, None]
            rhs /= np.sqrt(np.sum(rhs ** 2, axis=1))[:, None]

            # Compute nearest vertices in high dim mesh
            parallel, my_compute_nearest, _ = \
                parallel_func(_compute_nearest, n_jobs)
            lhs, rhs, rr = [a.astype(np.float32)
                            for a in [lhs, rhs, ico['rr']]]
            vertices = parallel(my_compute_nearest(xhs, rr)
                                for xhs in [lhs, rhs])
            # Make sure the vertices are ordered
            vertices = [np.sort(verts) for verts in vertices]
            # Repeated nearest neighbors mean the subject surface is too
            # coarse for this grade.
            for verts in vertices:
                if (np.diff(verts) == 0).any():
                    raise ValueError(
                        'Cannot use icosahedral grade %s with subject %s, '
                        'mapping %s vertices onto the high-resolution mesh '
                        'yields repeated vertices, use a lower grade or a '
                        'list of vertices from an existing source space'
                        % (grade, subject, len(verts)))
    else:  # potentially fill the surface
        vertices = [np.arange(lhs.shape[0]), np.arange(rhs.shape[0])]

    return vertices
def morph_data_precomputed(subject_from, subject_to, stc_from, vertices_to,
                           morph_mat):
    """Morph source estimate between subjects using a precomputed matrix.

    Parameters
    ----------
    subject_from : string
        Name of the original subject as named in the SUBJECTS_DIR.
    subject_to : string
        Name of the subject on which to morph as named in the SUBJECTS_DIR.
    stc_from : SourceEstimate
        Source estimates for subject "from" to morph.
    vertices_to : list of array of int
        The vertices on the destination subject's brain.
    morph_mat : sparse matrix
        The morphing matrix, typically from compute_morph_matrix.

    Returns
    -------
    stc_to : SourceEstimate
        Source estimate for the destination subject.
    """
    # Validate the inputs before doing any work; the matrix must map
    # n_vertices_from (columns) to n_vertices_to (rows).
    if not sparse.issparse(morph_mat):
        raise ValueError('morph_mat must be a sparse matrix')

    if not isinstance(vertices_to, list) or not len(vertices_to) == 2:
        raise ValueError('vertices_to must be a list of length 2')

    if not sum(len(v) for v in vertices_to) == morph_mat.shape[0]:
        raise ValueError('number of vertices in vertices_to must match '
                         'morph_mat.shape[0]')
    if not stc_from.data.shape[0] == morph_mat.shape[1]:
        # Bug fix: the message previously said morph_mat.shape[0] although
        # the comparison is (correctly) against morph_mat.shape[1].
        raise ValueError('stc_from.data.shape[0] must be the same as '
                         'morph_mat.shape[1]')

    if stc_from.subject is not None and stc_from.subject != subject_from:
        raise ValueError('stc_from.subject and subject_from must match')

    # Sparse matrix-vector product does the morphing for all time points.
    data = morph_mat * stc_from.data
    stc_to = SourceEstimate(data, vertices_to, stc_from.tmin, stc_from.tstep,
                            verbose=stc_from.verbose, subject=subject_to)
    return stc_to
@verbose
def spatio_temporal_src_connectivity(src, n_times, dist=None, verbose=None):
    """Compute connectivity for a source space activation over time.

    Parameters
    ----------
    src : instance of SourceSpaces
        The source space.
    n_times : int
        Number of time instants.
    dist : float, or None
        Maximal geodesic distance (in m) between vertices in the
        source space to consider neighbors. If None, immediate neighbors
        are extracted from an ico surface.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    connectivity : sparse COO matrix
        The connectivity matrix describing the spatio-temporal
        graph structure. If N is the number of vertices in the
        source space, the N first nodes in the graph are the
        vertices at time 1, the nodes from 2 to 2N are the vertices
        during time 2, etc.
    """
    if dist is None:
        # triangulation-based connectivity requires an ico/oct source space
        if src[0]['use_tris'] is None:
            raise RuntimeError("The source space does not appear to be an ico "
                               "surface. Connectivity cannot be extracted from"
                               " non-ico source spaces.")
        used_verts = [np.unique(s['use_tris']) for s in src]
        # Renumber each hemisphere's triangles onto a compact 0..N-1 range,
        # offsetting the right hemisphere past the left one.
        lh_tris = np.searchsorted(used_verts[0], src[0]['use_tris'])
        rh_tris = np.searchsorted(used_verts[1], src[1]['use_tris'])
        tris = np.concatenate((lh_tris, rh_tris + np.max(lh_tris) + 1))
        connectivity = spatio_temporal_tris_connectivity(tris, n_times)

        # deal with source space only using a subset of vertices
        masks = [in1d(u, s['vertno']) for s, u in zip(src, used_verts)]
        if sum(u.size for u in used_verts) != connectivity.shape[0] / n_times:
            raise ValueError('Used vertices do not match connectivity shape')
        if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]:
            raise ValueError('Vertex mask does not match number of vertices')
        masks = np.concatenate(masks)
        missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)
        if missing:
            # Restrict the graph to the vertices actually present in the
            # source space; omitted vertices leave holes in the graph.
            warn_('%0.1f%% of original source space vertices have been'
                  ' omitted, tri-based connectivity will have holes.\n'
                  'Consider using distance-based connectivity or '
                  'morphing data to all source space vertices.' % missing)
            masks = np.tile(masks, n_times)
            masks = np.where(masks)[0]
            connectivity = connectivity.tocsr()
            connectivity = connectivity[masks]
            connectivity = connectivity[:, masks]
            # return to original format
            connectivity = connectivity.tocoo()

        return connectivity
    else:  # use distances computed and saved in the source space file
        return spatio_temporal_dist_connectivity(src, n_times, dist)
@verbose
def grade_to_tris(grade, verbose=None):
    """Get tris defined for a certain grade.

    Parameters
    ----------
    grade : int
        Grade of an icosahedral mesh.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    tris : list
        2-element list containing Nx3 arrays of tris, suitable for use in
        spatio_temporal_tris_connectivity.
    """
    ico_tris = _get_ico_tris(grade, None, False)
    # Shift the second hemisphere's triangle indices past the first
    # hemisphere's vertex range before stacking the two.
    offset = np.max(ico_tris) + 1
    return np.concatenate((ico_tris, ico_tris + offset))
@verbose
def spatio_temporal_tris_connectivity(tris, n_times, remap_vertices=False,
                                      verbose=None):
    """Compute connectivity from triangles and time instants.

    Parameters
    ----------
    tris : array
        N x 3 array defining triangles.
    n_times : int
        Number of time points
    remap_vertices : bool
        Reassign vertex indices based on unique values. Useful
        to process a subset of triangles. Defaults to False.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    connectivity : sparse COO matrix
        The connectivity matrix describing the spatio-temporal
        graph structure. If N is the number of vertices in the
        source space, the N first nodes in the graph are the
        vertices at time 1, the nodes from 2 to 2N are the vertices
        during time 2, etc.
    """
    if remap_vertices:
        # Compress sparse vertex ids to a contiguous 0..N-1 range.
        logger.info('Reassigning vertex indices.')
        unique_verts = np.unique(tris)
        tris = np.searchsorted(unique_verts, tris)
    spatial_edges = mesh_edges(tris).tocoo()
    return _get_connectivity_from_edges(spatial_edges, n_times)
@verbose
def spatio_temporal_dist_connectivity(src, n_times, dist, verbose=None):
    """Compute connectivity from distances in a source space and time instants.

    Parameters
    ----------
    src : instance of SourceSpaces
        The source space must have distances between vertices computed, such
        that src['dist'] exists and is useful. This can be obtained using MNE
        with a call to mne_add_patch_info with the --dist option.
    n_times : int
        Number of time points
    dist : float
        Maximal geodesic distance (in m) between vertices in the
        source space to consider neighbors.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    connectivity : sparse COO matrix
        The connectivity matrix describing the spatio-temporal
        graph structure. If N is the number of vertices in the
        source space, the N first nodes in the graph are the
        vertices at time 1, the nodes from 2 to 2N are the vertices
        during time 2, etc.
    """
    if src[0]['dist'] is None:
        raise RuntimeError('src must have distances included, consider using\n'
                           'mne_add_patch_info with --dist argument')
    # Block-diagonal assembly: one geodesic-distance block per hemisphere,
    # restricted to the vertices actually used by the source space.
    edges = sparse_block_diag([s['dist'][s['vertno'], :][:, s['vertno']]
                               for s in src])
    # Binarize: an edge exists iff the geodesic distance is within `dist`.
    edges.data[:] = np.less_equal(edges.data, dist)
    # clean it up and put it in coo format
    edges = edges.tocsr()
    edges.eliminate_zeros()
    edges = edges.tocoo()
    return _get_connectivity_from_edges(edges, n_times)
@verbose
def spatial_src_connectivity(src, dist=None, verbose=None):
    """Compute connectivity for a source space activation.

    Parameters
    ----------
    src : instance of SourceSpaces
        The source space.
    dist : float, or None
        Maximal geodesic distance (in m) between vertices in the
        source space to consider neighbors. If None, immediate neighbors
        are extracted from an ico surface.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    connectivity : sparse COO matrix
        The connectivity matrix describing the spatial graph structure.
    """
    # Spatial-only connectivity is the spatio-temporal one with a single
    # time instant.
    return spatio_temporal_src_connectivity(src, n_times=1, dist=dist)
@verbose
def spatial_tris_connectivity(tris, remap_vertices=False, verbose=None):
    """Compute connectivity from triangles.

    Parameters
    ----------
    tris : array
        N x 3 array defining triangles.
    remap_vertices : bool
        Reassign vertex indices based on unique values. Useful
        to process a subset of triangles. Defaults to False.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    connectivity : sparse COO matrix
        The connectivity matrix describing the spatial graph structure.
    """
    # One time instant collapses the spatio-temporal graph to a spatial one.
    return spatio_temporal_tris_connectivity(tris, 1,
                                             remap_vertices=remap_vertices)
def spatial_dist_connectivity(src, dist, verbose=None):
    """Compute connectivity from distances in a source space.

    Parameters
    ----------
    src : instance of SourceSpaces
        The source space must have distances between vertices computed, such
        that src['dist'] exists and is useful. This can be obtained using MNE
        with a call to mne_add_patch_info with the --dist option.
    dist : float
        Maximal geodesic distance (in m) between vertices in the
        source space to consider neighbors.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    connectivity : sparse COO matrix
        The connectivity matrix describing the spatial graph structure.
    """
    # Delegate to the spatio-temporal version with a single time instant.
    return spatio_temporal_dist_connectivity(src, n_times=1, dist=dist)
def spatial_inter_hemi_connectivity(src, dist, verbose=None):
    """Get vertices on each hemisphere that are close to the other hemisphere.

    Parameters
    ----------
    src : instance of SourceSpaces
        The source space. Must be surface type.
    dist : float
        Maximal Euclidean distance (in m) between vertices in one hemisphere
        compared to the other to consider neighbors.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    connectivity : sparse COO matrix
        The connectivity matrix describing the spatial graph structure.
        Typically this should be combined (additively) with another
        existing intra-hemispheric connectivity matrix, e.g. computed
        using geodesic distances.
    """
    from scipy.spatial.distance import cdist
    src = _ensure_src(src, kind='surf')
    # Dense LH x RH Euclidean distance matrix between used vertices,
    # binarized at the threshold `dist`.
    conn = cdist(src[0]['rr'][src[0]['vertno']],
                 src[1]['rr'][src[1]['vertno']])
    conn = sparse.csr_matrix(conn <= dist, dtype=int)
    # Embed the LH<->RH links into a symmetric (LH+RH) x (LH+RH) matrix
    # with zero blocks on the intra-hemisphere diagonal.
    empties = [sparse.csr_matrix((nv, nv), dtype=int) for nv in conn.shape]
    conn = sparse.vstack([sparse.hstack([empties[0], conn]),
                          sparse.hstack([conn.T, empties[1]])])
    return conn
@verbose
def _get_connectivity_from_edges(edges, n_times, verbose=None):
    """Given an edges sparse matrix, create the connectivity matrix.

    Parameters
    ----------
    edges : sparse COO matrix
        Spatial adjacency between the n_vertices vertices.
    n_times : int
        Number of time instants. The spatial graph is replicated once per
        instant and consecutive replicas are linked vertex-to-vertex.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    connectivity : sparse COO matrix, shape (n_times * n_vertices,) * 2
        The spatio-temporal graph.
    """
    n_vertices = edges.shape[0]
    logger.info("-- number of connected vertices : %d" % n_vertices)
    nnz = edges.col.size
    # Replicate the spatial edges for each time instant, offsetting the
    # vertex indices by n_vertices per successive instant.
    # NOTE: use the builtin int here; np.int was only a deprecated alias
    # for it and has been removed in NumPy >= 1.24.
    aux = n_vertices * np.arange(n_times)[:, None] * np.ones((1, nnz), int)
    col = (edges.col[None, :] + aux).ravel()
    row = (edges.row[None, :] + aux).ravel()
    if n_times > 1:  # add temporal edges
        # Link every vertex at time t to the same vertex at time t+1
        # (both directions, to keep the graph symmetric).
        o = (n_vertices * np.arange(n_times - 1)[:, None] +
             np.arange(n_vertices)[None, :]).ravel()
        d = (n_vertices * np.arange(1, n_times)[:, None] +
             np.arange(n_vertices)[None, :]).ravel()
        row = np.concatenate((row, o, d))
        col = np.concatenate((col, d, o))
    data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),
                   dtype=int)
    connectivity = coo_matrix((data, (row, col)),
                              shape=(n_times * n_vertices, ) * 2)
    return connectivity
@verbose
def _get_ico_tris(grade, verbose=None, return_surf=False):
    """Get triangles (or the full surface dict) for an ico surface."""
    surf = _get_ico_surface(grade)
    if return_surf:
        return surf
    return surf['tris']
def save_stc_as_volume(fname, stc, src, dest='mri', mri_resolution=False):
    """Save a volume source estimate in a nifti file.

    Parameters
    ----------
    fname : string | None
        The name of the generated nifti file. If None, the image is only
        returned and not saved.
    stc : instance of VolSourceEstimate
        The source estimate
    src : list
        The list of source spaces (should actually be of length 1)
    dest : 'mri' | 'surf'
        If 'mri' the volume is defined in the coordinate system of
        the original T1 image. If 'surf' the coordinate system
        of the FreeSurfer surface is used (Surface RAS).
    mri_resolution : bool
        If True the image is saved in MRI resolution.
        WARNING: if you have many time points the file produced can be
        huge.

    Returns
    -------
    img : instance of Nifti1Image
        The image object.
    """
    if not isinstance(stc, VolSourceEstimate):
        raise Exception('Only volume source estimates can be saved as '
                        'volumes')

    n_times = stc.data.shape[1]
    shape = src[0]['shape']
    shape3d = (shape[2], shape[1], shape[0])
    shape = (n_times, shape[2], shape[1], shape[0])
    vol = np.zeros(shape)
    # Bug fix: use the builtin bool; np.bool was a deprecated alias and
    # has been removed in NumPy >= 1.24.
    mask3d = src[0]['inuse'].reshape(shape3d).astype(bool)

    if mri_resolution:
        mri_shape3d = (src[0]['mri_height'], src[0]['mri_depth'],
                       src[0]['mri_width'])
        mri_shape = (n_times, src[0]['mri_height'], src[0]['mri_depth'],
                     src[0]['mri_width'])
        mri_vol = np.zeros(mri_shape)
        interpolator = src[0]['interpolator']

    # Scatter each time point's data into the in-use voxels of the grid,
    # optionally upsampling to MRI resolution via the sparse interpolator.
    for k, v in enumerate(vol):
        v[mask3d] = stc.data[:, k]
        if mri_resolution:
            mri_vol[k] = (interpolator * v.ravel()).reshape(mri_shape3d)

    if mri_resolution:
        vol = mri_vol
    vol = vol.T  # nifti expects (x, y, z, t) axis order

    if mri_resolution:
        affine = src[0]['vox_mri_t']['trans'].copy()
    else:
        affine = src[0]['src_mri_t']['trans'].copy()
    if dest == 'mri':
        affine = np.dot(src[0]['mri_ras_t']['trans'], affine)
    affine[:3] *= 1e3  # translations from m to mm

    try:
        import nibabel as nib  # lazy import to avoid dependency
    except ImportError:
        raise ImportError("nibabel is required to save volume images.")

    header = nib.nifti1.Nifti1Header()
    header.set_xyzt_units('mm', 'msec')
    header['pixdim'][4] = 1e3 * stc.tstep
    with warnings.catch_warnings(record=True):  # nibabel<->numpy warning
        img = nib.Nifti1Image(vol, affine, header=header)
    if fname is not None:
        nib.save(img, fname)
    return img
def _get_label_flip(labels, label_vertidx, src):
    """Compute the sign-flip vector (or None) for each label."""
    # do the import here to avoid circular dependency
    from .label import label_sign_flip

    flips = []
    for label, vertidx in zip(labels, label_vertidx):
        if label.hemi == 'both':
            raise ValueError('BiHemiLabel not supported when using sign-flip')
        # Empty labels (vertidx None) carry no flip vector.
        flip = None if vertidx is None else label_sign_flip(label, src)[:, None]
        flips.append(flip)
    return flips
@verbose
def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
                                   allow_empty=False, verbose=None):
    """Generator for extract_label_time_course."""
    n_labels = len(labels)

    # get vertices from source space, they have to be the same as in the stcs
    vertno = [s['vertno'] for s in src]
    nvert = [len(vn) for vn in vertno]

    # do the initialization: map every label onto the row indices of the
    # stc data array that belong to it
    label_vertidx = list()
    for label in labels:
        if label.hemi == 'both':
            # handle BiHemiLabel
            sub_labels = [label.lh, label.rh]
        else:
            sub_labels = [label]
        this_vertidx = list()
        for slabel in sub_labels:
            if slabel.hemi == 'lh':
                this_vertno = np.intersect1d(vertno[0], slabel.vertices)
                vertidx = np.searchsorted(vertno[0], this_vertno)
            elif slabel.hemi == 'rh':
                this_vertno = np.intersect1d(vertno[1], slabel.vertices)
                # rh rows come after all lh rows in the stc data array
                vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertno)
            else:
                raise ValueError('label %s has invalid hemi' % label.name)
            this_vertidx.append(vertidx)

        # convert it to an array
        this_vertidx = np.concatenate(this_vertidx)
        if len(this_vertidx) == 0:
            msg = ('source space does not contain any vertices for label %s'
                   % label.name)
            if not allow_empty:
                raise ValueError(msg)
            else:
                warn_(msg + '. Assigning all-zero time series to label.')
            this_vertidx = None  # to later check if label is empty
        label_vertidx.append(this_vertidx)

    # mode-dependent initialization
    if mode == 'mean':
        pass  # we have this here to catch invalid values for mode
    elif mode == 'mean_flip':
        # get the sign-flip vector for every label
        label_flip = _get_label_flip(labels, label_vertidx, src)
    elif mode == 'pca_flip':
        # get the sign-flip vector for every label
        label_flip = _get_label_flip(labels, label_vertidx, src)
    elif mode == 'max':
        pass  # we calculate the maximum value later
    else:
        raise ValueError('%s is an invalid mode' % mode)

    # loop through source estimates and extract time series
    for stc in stcs:
        # make sure the stc is compatible with the source space
        if len(stc.vertices[0]) != nvert[0] or \
                len(stc.vertices[1]) != nvert[1]:
            raise ValueError('stc not compatible with source space')
        if any(np.any(svn != vn) for svn, vn in zip(stc.vertices, vertno)):
            raise ValueError('stc not compatible with source space')

        logger.info('Extracting time courses for %d labels (mode: %s)'
                    % (n_labels, mode))

        # do the extraction; empty labels (vertidx None) stay all-zero
        label_tc = np.zeros((n_labels, stc.data.shape[1]),
                            dtype=stc.data.dtype)
        if mode == 'mean':
            for i, vertidx in enumerate(label_vertidx):
                if vertidx is not None:
                    label_tc[i] = np.mean(stc.data[vertidx, :], axis=0)
        elif mode == 'mean_flip':
            for i, (vertidx, flip) in enumerate(zip(label_vertidx,
                                                    label_flip)):
                if vertidx is not None:
                    label_tc[i] = np.mean(flip * stc.data[vertidx, :], axis=0)
        elif mode == 'pca_flip':
            for i, (vertidx, flip) in enumerate(zip(label_vertidx,
                                                    label_flip)):
                if vertidx is not None:
                    U, s, V = linalg.svd(stc.data[vertidx, :],
                                         full_matrices=False)
                    # determine sign-flip
                    sign = np.sign(np.dot(U[:, 0], flip))
                    # use average power in label for scaling
                    scale = linalg.norm(s) / np.sqrt(len(vertidx))
                    label_tc[i] = sign * scale * V[0]
        elif mode == 'max':
            for i, vertidx in enumerate(label_vertidx):
                if vertidx is not None:
                    label_tc[i] = np.max(np.abs(stc.data[vertidx, :]), axis=0)
        else:
            raise ValueError('%s is an invalid mode' % mode)

        # this is a generator!
        yield label_tc
@verbose
def extract_label_time_course(stcs, labels, src, mode='mean_flip',
                              allow_empty=False, return_generator=False,
                              verbose=None):
    """Extract label time course for lists of labels and source estimates.

    This function will extract one time course for each label and source
    estimate. The way the time courses are extracted depends on the mode
    parameter.

    Valid values for mode are:

        - 'mean': Average within each label.
        - 'mean_flip': Average within each label with sign flip depending
          on source orientation.
        - 'pca_flip': Apply an SVD to the time courses within each label
          and use the scaled and sign-flipped first right-singular vector
          as the label time course. The scaling is performed such that the
          power of the label time course is the same as the average
          per-vertex time course power within the label. The sign of the
          resulting time course is adjusted by multiplying it with
          "sign(dot(u, flip))" where u is the first left-singular vector,
          and flip is a sign-flip vector based on the vertex normals. This
          procedure assures that the phase does not randomly change by 180
          degrees from one stc to the next.
        - 'max': Max value within each label.

    Parameters
    ----------
    stcs : SourceEstimate | list (or generator) of SourceEstimate
        The source estimates from which to extract the time course.
    labels : Label | list of Label
        The labels for which to extract the time course.
    src : list
        Source spaces for left and right hemisphere.
    mode : str
        Extraction mode, see explanation above.
    allow_empty : bool
        Instead of emitting an error, return all-zero time courses for labels
        that do not have any vertices in the source estimate.
    return_generator : bool
        If True, a generator instead of a list is returned.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    label_tc : array | list (or generator) of array, shape=(len(labels), n_times)
        Extracted time course for each label and source estimate.
    """  # noqa
    # convert inputs to lists
    if isinstance(stcs, SourceEstimate):
        stcs = [stcs]
        return_several = False
        # a single stc must always be materialized so we can index it below
        return_generator = False
    else:
        return_several = True

    if not isinstance(labels, list):
        labels = [labels]

    label_tc = _gen_extract_label_time_course(stcs, labels, src, mode=mode,
                                              allow_empty=allow_empty)

    if not return_generator:
        # do the extraction and return a list
        label_tc = list(label_tc)

    if not return_several:
        # input was a single SourceEstimate, return single array
        label_tc = label_tc[0]

    return label_tc
| alexandrebarachant/mne-python | mne/source_estimate.py | Python | bsd-3-clause | 114,461 | [
"Mayavi"
] | 0fe0a612ee2bd88bea880caecd107f5377e4a645956ce008bf44312a75821c05 |
from .layer import Layer
from ..activation import Activation
from ..potential import Potential
import numpy as np
class Particle3(object):
    """Layer with 2D monopole inputs and dipole outputs.

    Each input unit carries a 2D position; each output unit carries a
    positive pole, a negative pole, and a charge. The effective weight
    between input i and output j is ``q_j * (phi(r_ij+) - phi(r_ij-))``
    where ``phi`` is the configured radial potential.
    """

    def __init__(self, input_size=0, output_size=0, activation="sigmoid",
                 potential="gaussian", s=1.0, q=None, b=None, qoff=0.0):
        self.input_size = input_size
        self.output_size = output_size
        self.activation_name = activation.lower()
        self.activation = Activation.get(activation)
        self.d_activation = Activation.get_d(activation)
        self.potential = Potential.get(potential)
        self.d_potential = Potential.get_d(potential)
        self.w = None

        # Glorot-style scale used for biases and charges when not given.
        g = np.sqrt(2.0 / (input_size + output_size))
        if b is None:
            b = g
        self.b = np.random.uniform(-b, b, (1, output_size))

        # Charges
        if q is None:
            q = g
        self.q = np.random.uniform(-q, q, output_size) + qoff

        # Particle coordinates: input positions, then the +/- output poles.
        self.rx_inp = np.random.uniform(-s, s, input_size)
        self.ry_inp = np.random.uniform(-s, s, input_size)
        self.rx_pos_out = np.random.uniform(-s, s, output_size)
        self.ry_pos_out = np.random.uniform(-s, s, output_size)
        self.rx_neg_out = np.random.uniform(-s, s, output_size)
        self.ry_neg_out = np.random.uniform(-s, s, output_size)

    def _dipole_potential(self, j):
        """Potential-difference field of output unit j over all inputs."""
        dx = self.rx_inp - self.rx_pos_out[j]
        dy = self.ry_inp - self.ry_pos_out[j]
        phi = self.potential(np.sqrt(dx ** 2 + dy ** 2))
        dx = self.rx_inp - self.rx_neg_out[j]
        dy = self.ry_inp - self.ry_neg_out[j]
        phi -= self.potential(np.sqrt(dx ** 2 + dy ** 2))
        return phi

    def feed_forward(self, a_in):
        """Activations of this layer for the given input activations."""
        return self.compute_a(self.compute_z(a_in))

    def compute_z(self, a_in):
        """Pre-activation input for every output unit and sample."""
        a_t = a_in.transpose()
        z = np.zeros((self.output_size, len(a_in)))
        for j in range(self.output_size):
            z[j] = self.b[0][j] + self.q[j] * self._dipole_potential(j).dot(a_t)
        return z.transpose()

    def compute_a(self, z):
        return self.activation(z)

    def compute_da(self, z):
        return self.d_activation(z)

    def compute_w(self):
        """Materialize the equivalent (input_size x output_size) weight matrix."""
        w = np.zeros((self.input_size, self.output_size))
        for j in range(self.output_size):
            w[:, j] = self.q[j] * self._dipole_potential(j)
        self.w = w
        return w

    def compute_w_sum_square(self):
        """Sum of squared effective weights.

        NOTE(review): this hard-codes a Gaussian kernel exp(-r**2) instead
        of using self.potential, matching the original implementation —
        presumably it assumes the default 'gaussian' potential; confirm.
        """
        total = 0.0
        for j in range(self.output_size):
            dx = self.rx_inp - self.rx_pos_out[j]
            dy = self.ry_inp - self.ry_pos_out[j]
            phi = np.exp(-(dx ** 2 + dy ** 2))
            dx = self.rx_inp - self.rx_neg_out[j]
            dy = self.ry_inp - self.ry_neg_out[j]
            phi -= np.exp(-(dx ** 2 + dy ** 2))
            wj = self.q[j] * phi
            total += np.sum(wj * wj)
        return total
| awlange/brainsparks | src/calrissian/layers/particle3.py | Python | mit | 3,421 | [
"Gaussian"
] | f80fdfa998fba9ae7ec5b360efc4aabd7d735a1840cee6ea7919f3e598d86cb8 |
from octopus.server.DBInterface import DBInterface
import os
from octopus.mlutils.pythonEmbedder.PythonEmbedder import Embedder
class APIEmbedder(object):
    """Extract per-function API symbols from the graph DB and embed them.

    Writes one data file per function (one API symbol per line) plus a TOC
    file mapping record order to function ids, then runs the embedder over
    the output directory.
    """

    def __init__(self):
        self._initializeDBConnection()

    def _initializeDBConnection(self):
        self.dbInterface = DBInterface()

    def setOutputDirectory(self, directory):
        self.outputDirectory = directory

    def run(self):
        try:
            # Raises OSError if the output directory already exists, in
            # which case we assume a previous run produced the embedding.
            self._initializeOutputDirectory()
        except OSError:
            return
        self._connectToDatabase()
        functions = self._getAPISymbolsFromDatabase()
        self._writeDataPoints(functions)
        self._finalizeOutputDirectory()
        self._embed()

    def _embed(self):
        self.embedder = Embedder()
        self.embedder.embed(self.outputDirectory)

    def _connectToDatabase(self):
        self.dbInterface.connectToDatabase()

    def _writeDataPoints(self, functions):
        # TOC line k corresponds to data file named k.
        for (funcId, symbols) in functions:
            self.toc.write("%d\n" % (funcId))
            self._addDataPoint(symbols)

    def _addDataPoint(self, symbols):
        datapointFilename = os.path.join(self.dataDir, str(self.curDatapoint))
        # Bug fix: open() instead of the removed builtin file(); `with`
        # guarantees the handle is closed even on write errors.
        with open(datapointFilename, 'w') as f:
            f.writelines([x + "\n" for x in symbols])
        self.curDatapoint += 1

    def _initializeOutputDirectory(self):
        directory = self.outputDirectory
        if os.path.exists(directory):
            # Bug fix: a bare `raise` outside an except block is invalid
            # (RuntimeError: no active exception). Raise a real exception
            # that run() catches to mean "already done".
            raise OSError('output directory %s already exists' % directory)
        self.dataDir = os.path.join(directory, 'data')
        self.tocFilename = os.path.join(directory, 'TOC')
        os.makedirs(self.dataDir)
        # kept open across _writeDataPoints calls; closed in
        # _finalizeOutputDirectory
        self.toc = open(self.tocFilename, 'w')
        self.curDatapoint = 0

    def _finalizeOutputDirectory(self):
        self.toc.close()

    def _getAPISymbolsFromDatabase(self):
        """Return (function id, API symbol list) pairs, fetched in chunks."""
        CHUNK_SIZE = 1024
        query = """queryNodeIndex('type:Function').id"""
        functionIds = self._runGremlinQuery(query)

        result = []
        for chunk in self.chunks(functionIds, CHUNK_SIZE):
            query = """
_().transform{ %s }.scatter().transform{g.v(it)}
.sideEffect{funcId = it.id}
.transform{ [funcId, it.functionToAPISymbolNodes().code.toList()] }
""" % (str(chunk))
            result.extend(self._runGremlinQuery(query))
        return result

    def chunks(self, l, n):
        """Yield successive n-sized slices of list l."""
        # Bug fix: range instead of xrange for Python 3 compatibility.
        for i in range(0, len(l), n):
            yield l[i:i + n]

    def _runGremlinQuery(self, query):
        return self.dbInterface.runGremlinQuery(query)
| octopus-platform/joern | python/joern-tools/joern/APIEmbedder.py | Python | lgpl-3.0 | 2,776 | [
"Octopus"
] | 0c597fde7e038cb89ddeea52b263fc4cb748a69055964f0513fabf2545701adc |
# !usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-06-12 18:41:25
# @Last modified by: andrews
# @Last modified time: 2018-03-02 17:03:81
from __future__ import print_function, division, absolute_import
import pytest
@pytest.fixture()
def nsa_target(maindb, galaxy):
    """Yield (NSA row, plateifu) for the session galaxy.

    Looks up the NSA catalog entry of the current test galaxy through the
    MangaTargetToNSA association table, keyed on mangaid; .one() asserts
    exactly one match exists.
    """
    nsa = maindb.session.query(maindb.sampledb.NSA).join(
        maindb.sampledb.MangaTargetToNSA,
        maindb.sampledb.MangaTarget).\
        filter(maindb.sampledb.MangaTarget.mangaid == galaxy.mangaid).one()
    yield (nsa, galaxy.plateifu)
    # teardown: drop the reference so the session can release the row
    nsa = None
class TestSampleDB(object):
    """Regression tests for derived NSA photometry properties.

    Each test is parametrized with hard-coded reference values for two
    known galaxies and skips when the fixture galaxy does not match the
    parametrized plateifu.
    """

    @pytest.mark.parametrize('plateifu, expected',
                             [('8485-1901', {'u': 18.69765903,
                                             'g': 17.45450578,
                                             'r': 16.80842176,
                                             'i': 16.43652498,
                                             'z': 16.20534984}),
                              ('7443-12701', {'u': 17.07501837,
                                              'g': 15.57770095,
                                              'r': 14.95969099,
                                              'i': 14.63861064,
                                              'z': 14.44369601})])
    def test_elpetro_mag(self, nsa_target, plateifu, expected):
        """Elliptical Petrosian magnitudes per band match reference values."""
        nsa_target, galaxy_plateifu = nsa_target
        if galaxy_plateifu != plateifu:
            pytest.skip('Skip non-matching plateifus.')
        for band, value in expected.items():
            assert getattr(nsa_target, 'elpetro_mag_{0}'.format(band)) == pytest.approx(value)

    @pytest.mark.parametrize('plateifu, expected',
                             [('8485-1901', {('u', 'g'): 1.24315324,
                                             ('g', 'r'): 0.64608403,
                                             ('r', 'i'): 0.37189678,
                                             ('i', 'z'): 0.23117514}),
                              ('7443-12701', {('u', 'g'): 1.49731742,
                                              ('g', 'r'): 0.61800996,
                                              ('r', 'i'): 0.32108036,
                                              ('i', 'z'): 0.19491463})])
    def test_elpetro_colour(self, nsa_target, plateifu, expected):
        """Colours agree between the method and the generated attribute."""
        nsa_target, galaxy_plateifu = nsa_target
        if galaxy_plateifu != plateifu:
            pytest.skip('Skip non-matching plateifus.')
        for bands, value in expected.items():
            bandA, bandB = bands
            assert nsa_target.elpetro_colour(bandA, bandB) == pytest.approx(value)
            elpetro_mag_colour = getattr(nsa_target, 'elpetro_mag_{0}_{1}'.format(bandA, bandB))
            assert elpetro_mag_colour == pytest.approx(value)

    @pytest.mark.parametrize('plateifu, expected',
                             [('8485-1901', {('u', 'g'): 1.1655902862549006,
                                             ('g', 'r'): 0.5961246490479013,
                                             ('r', 'i'): 0.3375816345214986,
                                             ('i', 'z'): 0.20068740844720168}),
                              ('7443-12701', {('u', 'g'): 1.3728961944580007,
                                              ('g', 'r'): 0.5836753845213991,
                                              ('r', 'i'): 0.27035522460939987,
                                              ('i', 'z'): 0.1656112670899006})])
    def test_elpetro_absmag_colour(self, nsa_target, plateifu, expected):
        """Absolute-magnitude colours agree between method and attribute."""
        nsa_target, galaxy_plateifu = nsa_target
        if galaxy_plateifu != plateifu:
            pytest.skip('Skip non-matching plateifus.')
        for bands, value in expected.items():
            bandA, bandB = bands
            assert nsa_target.elpetro_absmag_colour(bandA, bandB) == pytest.approx(value)
            colour = 'elpetro_absmag_{0}_{1}'.format(bandA, bandB)
            elpetro_absmag_colour = getattr(nsa_target, colour)
            assert elpetro_absmag_colour == pytest.approx(value)
"Brian",
"Galaxy"
] | 0370adf98b1d3c96f6c2093bd57630b3f4c49da305d272207f61567c596e76ac |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2016-2022 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Utilities to compute mean and quantile curves
"""
import numpy
import pandas
from scipy.stats import norm
from scipy.special import ndtr
from openquake.baselib.general import AccumDict, agg_probs
def _truncnorm_sf(truncation_level, values):
"""
Survival function for truncated normal distribution.
Assumes zero mean, standard deviation equal to one and symmetric
truncation.
:param truncation_level:
Positive float number representing the truncation on both sides
around the mean, in units of sigma, or None, for non-truncation
:param values:
Numpy array of values as input to a survival function for the given
distribution.
:returns:
Numpy array of survival function results in a range between 0 and 1.
>>> from scipy.stats import truncnorm
>>> truncnorm(-3, 3).sf(0.12345) == _truncnorm_sf(3, 0.12345)
True
>>> from scipy.stats import norm
>>> norm.sf(0.12345) == _truncnorm_sf(None, 0.12345)
True
"""
if truncation_level == 0:
return values
if truncation_level is None:
return ndtr(- values)
# notation from http://en.wikipedia.org/wiki/Truncated_normal_distribution.
# given that mu = 0 and sigma = 1, we have alpha = a and beta = b.
# "CDF" in comments refers to cumulative distribution function
# of non-truncated distribution with that mu and sigma values.
# assume symmetric truncation, that is ``a = - truncation_level``
# and ``b = + truncation_level``.
# calculate CDF of b
phi_b = ndtr(truncation_level)
# calculate Z as ``Z = CDF(b) - CDF(a)``, here we assume that
# ``CDF(a) == CDF(- truncation_level) == 1 - CDF(b)``
z = phi_b * 2 - 1
# calculate the result of survival function of ``values``,
# and restrict it to the interval where probability is defined --
# 0..1. here we use some transformations of the original formula
# that is ``SF(x) = 1 - (CDF(x) - CDF(a)) / Z`` in order to minimize
# number of arithmetic operations and function calls:
# ``SF(x) = (Z - CDF(x) + CDF(a)) / Z``,
# ``SF(x) = (CDF(b) - CDF(a) - CDF(x) + CDF(a)) / Z``,
# ``SF(x) = (CDF(b) - CDF(x)) / Z``.
return ((phi_b - ndtr(values)) / z).clip(0.0, 1.0)
def norm_cdf(x, a, s):
    """
    Cumulative distribution function of a Gaussian with mean ``a`` and
    standard deviation ``s``; a zero ``s`` degenerates into a Heaviside
    step centered on ``a``. NB: for x=a, 0.5 is returned for all s.

    >>> norm_cdf(1.2, 1, .1)
    0.9772498680518208
    >>> norm_cdf(1.2, 1, 0)
    1.0
    >>> norm_cdf(.8, 1, .1)
    0.022750131948179216
    >>> norm_cdf(.8, 1, 0)
    0.0
    >>> norm_cdf(1, 1, .1)
    0.5
    >>> norm_cdf(1, 1, 0)
    0.5
    """
    if s:
        return norm.cdf(x, loc=a, scale=s)
    # degenerate case: step function, exactly 0.5 at the mean
    return numpy.heaviside(x - a, .5)
def calc_momenta(array, weights):
    """
    :param array: an array of shape (E, ...)
    :param weights: an array of length E
    :returns: an array of shape (2, ...) with the first two statistical
        moments, i.e. the weighted sums of the values and of their squares
    """
    # accumulate into a float64 buffer regardless of the input dtype
    moms = numpy.zeros((2,) + array.shape[1:])
    for power in (1, 2):
        moms[power - 1] = numpy.einsum('i,i...', weights, array ** power)
    return moms
def calc_avg_std(momenta, totweight):
    """
    :param momenta: an array of shape (2, ...) obtained via calc_momenta
    :param totweight: total weight to divide for
    :returns: an array of shape (2, ...) with average and standard deviation

    >>> arr = numpy.array([[2, 4, 6], [3, 5, 7]])
    >>> weights = numpy.ones(2)
    >>> calc_avg_std(calc_momenta(arr, weights), weights.sum())
    array([[2.5, 4.5, 6.5],
           [0.5, 0.5, 0.5]])
    """
    out = numpy.zeros_like(momenta)
    mean = momenta[0] / totweight
    variance = momenta[1] / totweight - mean ** 2
    out[0] = mean
    # clip tiny negative variances coming from floating point noise
    out[1] = numpy.sqrt(numpy.maximum(variance, 0))
    return out
def avg_std(array, weights=None):
    """
    :param array: an array of shape (E, ...)
    :param weights: an array of length E (or None for equal weights)
    :returns: an array of shape (2, ...) with average and standard deviation

    >>> avg_std(numpy.array([[2, 4, 6], [3, 5, 7]]))
    array([[2.5, 4.5, 6.5],
           [0.5, 0.5, 0.5]])
    """
    ws = numpy.ones(len(array)) if weights is None else weights
    return calc_avg_std(calc_momenta(array, ws), ws.sum())
def geom_avg_std(array, weights=None):
    """
    :returns: geometric mean and geometric stddev (see
        https://en.wikipedia.org/wiki/Log-normal_distribution)
    """
    # statistics in log space, mapped back through the exponential
    log_stats = avg_std(numpy.log(array), weights)
    return numpy.exp(log_stats)
def mean_curve(values, weights=None):
    """
    Compute the mean by using numpy.average on the first axis.

    :param values: a sequence (or array) of R curves
    :param weights: R weights, defaulting to equal weights
    """
    if weights is None:
        weights = [1. / len(values)] * len(values)
    arr = values if isinstance(values, numpy.ndarray) else numpy.array(values)
    return numpy.average(arr, axis=0, weights=weights)
def std_curve(values, weights=None):
    """
    Weighted standard deviation of R curves with respect to their
    weighted mean, computed along the first axis.
    """
    if weights is None:
        weights = [1. / len(values)] * len(values)
    avg = mean_curve(values, weights)
    return numpy.sqrt(numpy.einsum('i,i...', weights, (avg - values) ** 2))
# NB: for equal weights and sorted values the quantile is computed as
# numpy.interp(q, [1/N, 2/N, ..., N/N], values)
def quantile_curve(quantile, curves, weights=None):
    """
    Compute the weighted quantile aggregate of an array or list of arrays

    :param quantile:
        Quantile value to calculate. Should be in the range [0.0, 1.0].
    :param curves:
        R arrays
    :param weights:
        R weights with sum 1, or None
    :returns:
        A numpy array representing the quantile of the underlying arrays

    >>> arr = numpy.array([.15, .25, .3, .4, .5, .6, .75, .8, .9])
    >>> quantile_curve(.8, arr)
    array(0.76)
    """
    curves = numpy.asarray(curves)
    num_rlzs = len(curves)
    if weights is None:
        weights = numpy.full(num_rlzs, 1. / num_rlzs)
    else:
        weights = numpy.array(weights)
        assert len(weights) == num_rlzs, (len(weights), num_rlzs)
    result = numpy.zeros(curves.shape[1:])
    for idx, _ in numpy.ndenumerate(result):
        # the R values of this point, one per realization
        data = numpy.array([curve[idx] for curve in curves])
        order = numpy.argsort(data)
        cdf = numpy.cumsum(weights[order])
        # read the quantile off the interpolated weighted CDF
        result[idx] = numpy.interp(quantile, cdf, data[order])
    return result
def max_curve(values, weights=None):
    """
    Compute the maximum curve by taking the upper limits of the values;
    the weights are ignored and present only for API compatibility.

    The values can be arrays and then the maximum is taken pointwise:

    >>> max_curve([numpy.array([.3, .2]), numpy.array([.1, .4])])
    array([0.3, 0.4])
    """
    # pointwise maximum across the first axis; weights deliberately unused
    return numpy.asarray(values).max(axis=0)
def compute_pmap_stats(pmaps, stats, weights, imtls):
    """
    :param pmaps:
        a list of R probability maps
    :param stats:
        a sequence of S statistic functions
    :param weights:
        a list of ImtWeights
    :param imtls:
        a DictArray of intensity measure types
    :returns:
        a probability map with S internal values
    """
    # collect the union of the site IDs and check the maps are consistent
    sids = set()
    p0 = next(iter(pmaps))
    L = p0.shape_y  # number of levels, must be shared by all maps
    for pmap in pmaps:
        sids.update(pmap)
        assert pmap.shape_y == L, (pmap.shape_y, L)
    if len(sids) == 0:
        raise ValueError('All empty probability maps!')
    sids = numpy.array(sorted(sids), numpy.uint32)
    nstats = len(stats)
    # extract the curves as an (R, N, L) float array; sites missing
    # from a given map keep the zero curves initialized here
    curves = numpy.zeros((len(pmaps), len(sids), L), numpy.float64)
    for i, pmap in enumerate(pmaps):
        for j, sid in enumerate(sids):
            if sid in pmap:
                curves[i, j] = pmap[sid].array[:, 0]
    out = p0.__class__.build(L, nstats, sids)
    for imt in imtls:
        slc = imtls(imt)  # slice of levels belonging to this IMT
        # ImtWeights (objects with a .dic) carry a per-IMT value;
        # plain numeric weights are used as they are
        w = [weight[imt] if hasattr(weight, 'dic') else weight
             for weight in weights]
        if sum(w) == 0:  # expect no data for this IMT
            continue
        # store one column per statistic, IMT slice by IMT slice
        for i, array in enumerate(compute_stats(curves[:, :, slc], stats, w)):
            for j, sid in numpy.ndenumerate(sids):
                out[sid].array[slc, i] = array[j]
    return out
def calc_stats(df, kfields, stats, weights):
    """
    :param df: a pandas DataFrame with a column rlz_id
    :param kfields: fields used in the group by
    :param stats: a dictionary stat_name->stat_func
    :param weights: an array of weights for each realization
    :returns: a DataFrame with the statistics
    """
    acc = AccumDict(accum=[])
    # value fields: every column which is neither a key nor the rlz_id
    vfields = [f for f in df.columns if f not in kfields and f != 'rlz_id']
    # in aggrisk kfields=['agg_id', 'loss_type']
    # in aggcurves kfields=['agg_id', 'return_period', 'loss_type']
    for key, group in df.groupby(kfields):
        # one output row per (key, statistic) pair
        for name, func in stats.items():
            for k, kf in zip(key, kfields):
                acc[kf].append(k)
            for vf in vfields:
                # realizations missing from this group contribute zeros
                values = numpy.zeros_like(weights)  # shape R
                values[group.rlz_id] = getattr(group, vf).to_numpy()
                v = apply_stat(func, values, weights)
                acc[vf].append(v)
            acc['stat'].append(name)
    return pandas.DataFrame(acc)
# NB: this is a function linear in the array argument
def compute_stats(array, stats, weights):
    """
    :param array:
        an array of R elements (which can be arrays)
    :param stats:
        a sequence of S statistic functions
    :param weights:
        a list of R weights
    :returns:
        an array of S elements (which can be arrays)
    """
    # NB: this is a function linear in the array argument
    out = numpy.zeros((len(stats),) + array.shape[1:], array.dtype)
    for idx, statfunc in enumerate(stats):
        out[idx] = apply_stat(statfunc, array, weights)
    return out
# like compute_stats, but on a matrix of shape (N, R)
def compute_stats2(arrayNR, stats, weights):
    """
    :param arrayNR:
        an array of (N, R) elements
    :param stats:
        a sequence of S statistic functions
    :param weights:
        a list of R weights
    :returns:
        an array of (N, S) elements
    """
    n_rlzs = arrayNR.shape[1]
    if n_rlzs != len(weights):
        raise ValueError('Got %d weights but %d values!' %
                         (len(weights), n_rlzs))
    shape = list(arrayNR.shape)
    shape[1] = len(stats)  # one column per statistical output
    out = numpy.zeros(shape, arrayNR.dtype)
    # view the input as a list of R columns, one per realization
    columns = [arrayNR[:, r] for r in range(n_rlzs)]
    for s, statfunc in enumerate(stats):
        out[:, s] = apply_stat(statfunc, columns, weights)
    return out
def apply_stat(f, arraylist, *extra, **kw):
    """
    :param f: a callable arraylist -> array (of the same shape and dtype)
    :param arraylist: a list of arrays of the same shape and dtype
    :param extra: additional positional arguments
    :param kw: keyword arguments
    :returns: an array of the same shape and dtype

    Broadcast statistical functions to composite arrays. Here is an example:

    >>> dt = numpy.dtype([('a', (float, 2)), ('b', float)])
    >>> a1 = numpy.array([([1, 2], 3)], dt)
    >>> a2 = numpy.array([([4, 5], 6)], dt)
    >>> apply_stat(mean_curve, [a1, a2])
    array([([2.5, 3.5], 4.5)], dtype=[('a', '<f8', (2,)), ('b', '<f8')])
    """
    first = arraylist[0]
    names = first.dtype.names
    if names is None:
        # plain array: apply the function directly
        return f(arraylist, *extra, **kw)
    # composite (record) array: apply the function field by field
    result = numpy.zeros(first.shape, first.dtype)
    for field in names:
        result[field] = f([arr[field] for arr in arraylist], *extra, **kw)
    return result
def set_rlzs_stats(dstore, prefix, **attrs):
    """
    :param dstore: a DataStore object
    :param prefix: dataset prefix, assume <prefix>-rlzs is already stored
    """
    arrayNR = dstore[prefix + '-rlzs'][()]
    R = arrayNR.shape[1]  # number of realizations
    pairs = list(attrs.items())
    # the 'rlz' axis description goes right after the first attribute
    pairs.insert(1, ('rlz', numpy.arange(R)))
    dstore.set_shape_descr(prefix + '-rlzs', **dict(pairs))
    if R > 1:
        # with multiple realizations also store <prefix>-stats, one
        # column per statistic configured in the job parameters
        stats = dstore['oqparam'].hazard_stats()
        if not stats:
            return
        statnames, statfuncs = zip(*stats.items())
        weights = dstore['weights'][()]
        name = prefix + '-stats'
        dstore[name] = compute_stats2(arrayNR, statfuncs, weights)
        pairs = list(attrs.items())
        pairs.insert(1, ('stat', statnames))
        dstore.set_shape_descr(name, **dict(pairs))
def combine_probs(values_by_grp, cmakers, rlz):
    """
    :param values_by_grp: C arrays of shape (D1, D2..., G)
    :param cmakers: C ContextMakers with G gsims each
    :param rlz: a realization index
    :returns: array of shape (D1, D2, ...)
    """
    selected = []
    for grp_values, cmaker in zip(values_by_grp, cmakers):
        assert grp_values.shape[-1] == len(cmaker.gsims)
        # pick the slices of the gsims belonging to the given realization
        for gsim_idx, rlz_ids in enumerate(cmaker.gsims.values()):
            if rlz in rlz_ids:
                selected.append(grp_values[..., gsim_idx])
    return agg_probs(*selected)
def average_df(dframes, weights=None):
    """
    Compute weighted average of DataFrames with the same index and columns.

    >>> df1 = pandas.DataFrame(dict(value=[1, 1, 1]), [1, 2, 3])
    >>> df2 = pandas.DataFrame(dict(value=[2, 2, 2]), [1, 2, 3])
    >>> average_df([df1, df2], [.4, .6])
       value
    1    1.6
    2    1.6
    3    1.6
    """
    first = dframes[0]
    num = len(dframes)
    if num == 1:  # nothing to average
        return first
    if weights is None:
        weights = numpy.ones(num)
    elif len(weights) != num:
        raise ValueError('There are %d weights for %d dataframes!' %
                         (len(weights), num))
    # weighted average of the raw values, shape (E, C)
    avg = numpy.average(
        [df.to_numpy() for df in dframes], weights=weights, axis=0)
    columns = {name: avg[:, c] for c, name in enumerate(first.columns)}
    return pandas.DataFrame(columns, first.index)
| gem/oq-engine | openquake/hazardlib/stats.py | Python | agpl-3.0 | 14,555 | [
"Gaussian"
] | f523483e6069ba2956471c06947f24f279e9e0c76e4814ff9f61a4851bdb2c25 |
#!/usr/bin/env python
__author__ = "waroquiers"
import os
import shutil
import unittest
from monty.tempfile import ScratchDir
from pymatgen.core import SETTINGS
from pymatgen.analysis.chemenv.utils.chemenv_config import ChemEnvConfig
from pymatgen.util.testing import PymatgenTest
# directory holding the chemenv configuration files used by these tests
config_file_dir = os.path.join(
    PymatgenTest.TEST_FILES_DIR,
    "chemenv",
    "config",
)
class ChemenvConfigTest(unittest.TestCase):
    """Tests for ChemEnvConfig: Materials Project access detection,
    package-options description and save/auto_load round-tripping."""
    def test_chemenv_config(self):
        with ScratchDir("."):
            config = ChemEnvConfig()
            # MP access should be reported iff an API key is configured
            if SETTINGS.get("PMG_MAPI_KEY", "") != "":
                self.assertTrue(config.has_materials_project_access)
            else:
                self.assertFalse(config.has_materials_project_access)
            package_options = ChemEnvConfig.DEFAULT_PACKAGE_OPTIONS
            package_options["default_max_distance_factor"] = 1.8
            config = ChemEnvConfig(package_options=package_options)
            # the description must reflect the overridden distance factor
            self.assertEqual(
                config.package_options_description(),
                "Package options :\n"
                " - Maximum distance factor : 1.8000\n"
                ' - Default strategy is "SimplestChemenvStrategy" :\n'
                "    Simplest ChemenvStrategy using fixed angle and distance parameters \n"
                "    for the definition of neighbors in the Voronoi approach. \n"
                "    The coordination environment is then given as the one with the \n"
                "    lowest continuous symmetry measure.\n"
                "   with options :\n"
                "     - distance_cutoff : 1.4\n"
                "     - angle_cutoff : 0.3\n"
                "     - additional_condition : 1\n"
                "     - continuous_symmetry_measure_cutoff : 10.0\n",
            )
            # save then reload: the options must round-trip unchanged
            config.save(root_dir="tmp_dir")
            config = config.auto_load(root_dir="tmp_dir")
            self.assertEqual(config.package_options, package_options)
if __name__ == "__main__":
    # run the tests when this module is executed directly
    unittest.main()
| vorwerkc/pymatgen | pymatgen/analysis/chemenv/utils/tests/test_chemenv_config.py | Python | mit | 2,012 | [
"pymatgen"
] | 2ef648fd2ed20a978aa737a0e4acb0b993029df85646b43e84ba3a4b9f7ef1be |
import requests
import os
from bs4 import BeautifulSoup
from beatmap import *
class OsuWebConnection:
    """Authenticated HTTP session against the osu! website, able to
    download beatmapset archives (.osz files).

    Credentials are read interactively from stdin on construction; a CSRF
    token scraped from the home page is sent along with the login POST.
    """
    # new site
    home_url = "https://osu.ppy.sh/home"
    login_url = "https://osu.ppy.sh/session"
    def __init__(self):
        self.session = requests.Session()
        print("Login:")
        self.login = input()
        print("Password:")
        self.password = input()
        self.token = ""  # CSRF token, filled by initial_connection()
        self.initial_connection()
        self.do_login()
    def initial_connection(self):
        """Set browser-like headers and scrape the CSRF token from /home."""
        self.session.headers.update({'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0'})
        self.session.headers.update({'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'})
        self.session.headers.update({'Accept-Language': 'en-US,en;q=0.5'})
        self.session.headers.update({'Accept-Encoding': 'gzip, deflate'})
        self.session.headers.update({'Connection': 'keep-alive'})
        r = self.session.get(self.home_url)
        soup = BeautifulSoup(r.text, 'html.parser')
        # NOTE(review): assumes the first <input> on the page is the hidden
        # _token field -- confirm this still holds for the current site
        self.token = soup.find('input').attrs['value']
        print("Session token: " + self.token)
    def do_login(self):
        """POST the credentials plus CSRF token to the session endpoint."""
        print("Logging in osu! site with user " + self.login + "....")
        r = self.session.post(OsuWebConnection.login_url,
                              data={'_token': self.token,
                                    'username': self.login,
                                    'password': self.password})
        print(r.text)
    """def is_logged(self):
        r = self.session.get(OsuWebConnection.login_url)
        text = r.text
        if "Username:" in text and "Password:" in text and \
           "Log me on automatically each visit" in text and \
           "Hide my online status this session" in text:
            return False
        elif "Announcements (click for more)" in text:
            return True
        return False
    """
    def convert_to_valid_filename(self, filename):
        """Replace every character outside a whitelist with an underscore."""
        import string
        valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
        return ''.join(c if c in valid_chars else '_' for c in filename)
    def download_beatmap(self, beatmap, base_path):
        """Download a beatmapset into ``base_path`` as an .osz archive.

        Sets ``beatmap.download_status`` to "NOT AVAILABLE" when the
        download page is missing, or to "DOWNLOADED" on success.
        """
        #if not self.is_logged():
        #    self.do_login()
        beatmap_url = "https://osu.ppy.sh/beatmapsets/" + beatmap.beatmapset_id + '/download'
        r = self.session.get(beatmap_url, allow_redirects=False)
        if 'Page Missing' in r.text:
            beatmap.download_status = "NOT AVAILABLE"
            return
        filename_base = self.convert_to_valid_filename(beatmap.beatmapset_id + " " + beatmap.artist + " - " + beatmap.title)
        filename_temp = filename_base + ".temp"
        filename_final = filename_base + ".osz"
        # beatmap available, download it
        #print(r.text)
        # the download endpoint answers with a redirect to the actual file
        url = r.headers['location']
        print("True Download URL: " + url)
        r = self.session.get(url, stream=True)
        filesize = int(r.headers['Content-Length']) / 1024.0 / 1024.0
        print("Downloading '" + filename_final + "' (%.2f MB)..." % filesize)
        # stream into a .temp file first, renamed only once fully written
        with open(base_path + "/" + filename_temp, 'wb') as f:
            counter = -1
            for chunk in r.iter_content(chunk_size=1023):
                if chunk:
                    f.write(chunk)
                    counter += 1023
                    #print(str(int(counter / 1024)) + " bytes |", end='')
        os.rename(base_path + "/" + filename_temp, base_path + "/" + filename_final)
        print("Finished download of '" + filename_final + "'")
        beatmap.download_status = "DOWNLOADED"
    def close(self):
        """Close the underlying requests session."""
        self.session.close()
| altur13/osu-Downloader | osu_web_connection.py | Python | gpl-3.0 | 3,627 | [
"VisIt"
] | 8c81e5d7d059a8217249445b15fdd2527ac7d57313b446b3eddac7358a3b82ed |
""" The CountryMapping module performs the necessary CS gymnastics to resolve country codes """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC import gConfig, S_OK, S_ERROR
def getCountryMapping(country):
    """Determines the associated country from the country code"""
    # follow the AssignedTo chain until it reaches a fixed point,
    # guarding against cycles in the configuration
    seen = [country]
    while True:
        target = gConfig.getValue("/Resources/Countries/%s/AssignedTo" % country, country)
        if target == country:
            return S_OK(target)
        if target in seen:
            return S_ERROR("Circular mapping detected for %s" % country)
        country = target
        seen.append(target)
def getCountryMappingTier1(country):
    """Returns the Tier1 site mapped to a country code"""
    res = getCountryMapping(country)
    if not res["OK"]:
        return res
    mapped = res["Value"]
    tier1 = gConfig.getValue("/Resources/Countries/%s/Tier1" % mapped, "")
    if tier1:
        return S_OK(tier1)
    return S_ERROR("No Tier1 assigned to %s" % mapped)
| ic-hep/DIRAC | src/DIRAC/Core/Utilities/CountryMapping.py | Python | gpl-3.0 | 1,218 | [
"DIRAC"
] | 5bce85871d925225eb160c70405a3e946a4b3aa6f49ea5c6e690a13a7aec3bfe |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.