text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# vim: set expandtab shiftwidth=2 softtabstop=2:
# nmi
# "score" - compute the nmi score using tempy
#
from TEMPy.MapParser import MapParser
from TEMPy.ScoringFunctions import ScoringFunctions
from TEMPy.StructureParser import PDBParser
from TEMPy.StructureBlurrer import StructureBlurrer
from TEMPy.class_arg import TempyParser
from traceback import print_exc
import os,sys
from chimerax.core.map.volume import Volume
from chimerax.core.atomic.structure import AtomicStructure
from .util import chimera_to_tempy_model, chimera_to_tempy_map
#calculate map contour
def map_contour(m, t=-1.):
  """Estimate a density contour level for map *m*.

  The sentinel t == -1.0 means "no threshold requested" and yields 0.0.
  Otherwise the level is placed *t* standard deviations above the map's
  zero peak; returns None when the peak density cannot be determined.
  """
  if t == -1.0:
    return 0.0
  zero_peak, _average, sigma = m._peak_density()
  if zero_peak is None:
    return None
  return zero_peak + t * sigma
#calculate model contour
def model_contour(p, res=4.0, emmap=False, t=-1.):
  """Blur model *p* into a synthetic density map and pick a contour level.

  :param p: TEMPy structure object to blur.
  :param res: target resolution for the Gaussian blur.
  :param emmap: optional template map passed to gaussian_blur_real_space
                (False means "build a fresh grid").
  :param t: contour multiplier; the sentinel -1.0 leaves the contour as None,
            otherwise the level is t standard deviations of the blurred map.
  :returns: (modelmap, contour) tuple.
  """
  blurrer = StructureBlurrer()
  modelmap = blurrer.gaussian_blur_real_space(p, res, densMap=emmap, normalise=True)
  c1 = None
  if t != -1.0:
    # BUG FIX: this previously read emmap.std(), but emmap defaults to False
    # (and callers pass emmap=False), which raised AttributeError.  The
    # contour must come from the freshly blurred model map itself.
    c1 = t * modelmap.std()
  return modelmap, c1
def lpfilter(emmap, r):
  """Low-pass filter *emmap* at resolution *r* using a tanh falloff.

  The cutoff handed to TEMPy is the voxel spacing divided by the target
  resolution (i.e. a fraction of the sampling frequency).
  """
  return emmap._tanh_lowpass(emmap.apix / float(r))
def match_grid(emmap1, emmap2, c1, c2):
  """Resample both maps onto a common alignment grid so they can be compared.

  The common voxel spacing is the coarser of the two maps'.  Each map is
  interpolated onto the shared grid (newer TEMPy exposes
  _interpolate_to_grid1; older releases only _interpolate_to_grid).

  :param emmap1: first TEMPy map (consumed; its fullMap is released).
  :param emmap2: second TEMPy map (consumed likewise).
  :param c1, c2: contour levels of the input maps.
  :returns: (emmap_1, emmap_2) resampled maps.

  NOTE(review): the volume-conserving contour recomputation below rebinds
  only the local c1/c2, which are never returned -- callers keep using the
  original contours.  Preserved as-is.
  """
  import numpy as np  # BUG FIX: np was referenced below but never imported

  # DETERMINE A COMMON ALIGNMENT BOX : fill minvalue for extra voxel pads
  spacing = emmap2.apix
  if emmap2.apix < emmap1.apix:
    spacing = emmap1.apix
  grid_shape, new_ori = emmap1._alignment_box(emmap2, spacing)
  # INTERPOLATE TO NEW GRID
  try:
    emmap_1 = emmap1._interpolate_to_grid1(grid_shape, spacing, new_ori)
  except Exception:
    emmap_1 = emmap1._interpolate_to_grid(grid_shape, spacing, new_ori)
  # Recompute the contour on the new grid by conserving the enclosed volume.
  try:
    c1 = emmap_1._find_level(np.sum(emmap1.fullMap > c1) * (emmap1.apix ** 3))
  except Exception:
    pass
  # Release the source map's voxel data early to bound peak memory.
  del emmap1.fullMap
  del emmap1
  try:
    emmap_2 = emmap2._interpolate_to_grid1(grid_shape, spacing, new_ori)
  except Exception:
    emmap_2 = emmap2._interpolate_to_grid(grid_shape, spacing, new_ori)
  try:
    c2 = emmap_2._find_level(np.sum(emmap2.fullMap > c2) * (emmap2.apix ** 3))
  except Exception:
    pass
  del emmap2.fullMap
  del emmap2
  return emmap_1, emmap_2
def score_cmd(session, comparators, compared, rez_comparators, rez_compared, contours_comparators, contour_compared):
  """Compute an NMI score of *compared* against each entry of *comparators*.

  Each element may be an AtomicStructure or a density map; models are
  blurred into synthetic maps first.  rez_* supply resolutions (required
  whenever a model must be blurred); contours_* supply optional precomputed
  contour levels (estimated when None).  Returns a list of NMI scores, one
  per comparator (0.0 on failure), or None on invalid input.
  """
  sc = ScoringFunctions()
  blurrer = StructureBlurrer()
  # Loop through these to be compared
  idx = 0
  scores = []
  for comparator in comparators:
    emmap1 = None
    emmap2 = None
    if type(comparator) is AtomicStructure:
      if type(compared) is AtomicStructure:
        # Both models
        if None in ([rez_compared] + rez_comparators):
          print("Please provide the resolution for all models")
          return
        bms1 = chimera_to_tempy_model(compared)
        bms2 = chimera_to_tempy_model(comparator)
        # NOTE(review): model_contour returns a (map, contour) tuple, yet the
        # whole tuple is assigned to emmap1/emmap2 -- the scoring calls below
        # would then receive a tuple, not a map.  Verify against callers.
        emmap1 = model_contour( bms1, rez_compared, emmap=False,t=0.5)
        if contours_comparators[idx] is None:
          emmap2 = model_contour(bms2, rez_comparators[idx],emmap=False,t=0.5)
        else:
          # NOTE(review): blur_model is not defined or imported in this
          # module; taking this branch raises NameError.
          emmap2 = blur_model(bms2, rez_comparators[idx], emmap=False)
      else:
        # 0 - map, 1 - model
        if rez_comparators[idx] == None:
          print("Please provide the resolution for the model.")
          return
        emmap1 = chimera_to_tempy_map(compared)
        bms = chimera_to_tempy_model(comparator)
        # NOTE(review): blurs with rez_compared although the guard above
        # validated rez_comparators[idx] -- possibly the wrong resolution.
        emmap2 = blurrer.gaussian_blur(bms, rez_compared, densMap=emmap1)
    else:
      if type(compared) is AtomicStructure:
        # 0 - model, 1 - map
        if rez_compared == None:
          print("Please provide the resolution for the model.")
          return
        emmap2 = chimera_to_tempy_map(comparator)
        bms = chimera_to_tempy_model(compared)
        emmap1 = blurrer.gaussian_blur(bms, rez_compared, densMap=emmap2)
      else:
        # 0 - map, 1 - map
        emmap1 = chimera_to_tempy_map(compared)
        emmap2 = chimera_to_tempy_map(comparator)
    c1 = contour_compared
    # Contouring
    if c1 == None:
      c1 = map_contour(emmap1,t=1.5)
    c2 = contours_comparators[idx]
    # This kinda makes no sense and could be tricky
    if c2 == None:
      c2 = map_contour(emmap2,t=1.5)
    # Some kind of fix if the maps don't match?
    # Resize, resample or blur of somekind
    if not sc.mapComparison(emmap1,emmap2):
      emmap1._crop_box(c1,0.5)
      emmap2._crop_box(c2,0.5)
      # NOTE(review): in the map-vs-map path the resolutions may be None,
      # which makes these comparisons raise TypeError on Python 3.
      if rez_compared > 1.25*rez_comparators[idx]:
        emmap_2 = lpfilter(emmap2,rez_compared)
        emmap1, emmap2 = match_grid(emmap1,emmap_2,c1,c2)
      elif rez_comparators[idx] > 1.25*rez_compared:
        emmap_1 = lpfilter(emmap1,rez_comparators[idx])
        emmap1, emmap2 = match_grid(emmap_1,emmap2,c1,c2)
      else:
        emmap1, emmap2 = match_grid(emmap1,emmap2,c1,c2)
    nmi = 0.0
    try:
      # Last flag presumably selects normalised MI -- TODO confirm against
      # TEMPy's ScoringFunctions.MI signature.
      nmi = sc.MI(emmap1,emmap2,c1,c2,1,None,None,True)
      if nmi < 0.0: nmi = 0.0
    except:
      print('Exception for NMI score')
      print_exc()
      nmi = 0.0
    scores.append(nmi)
    idx+=1
  return scores
# TODO - simplify this function down a bit
def score(session, atomic_model1 = None, map_model1 = None, atomic_model2 = None, map_model2 = None, rez1 = None, rez2 = None, c1 = None, c2 = None):
  """ Generate the NMI score for 2 maps or 1 map and 1 model.

  Accepts exactly one of three input combinations: (atomic_model1,
  map_model1), (map_model1, map_model2) or (atomic_model1, atomic_model2).
  rez1/rez2 are required whenever the corresponding model must be blurred
  into a synthetic map; c1/c2 are optional contour levels (estimated with
  map_contour when None).  Returns the NMI score as a float (0.0 on
  scoring error) or None on invalid input.
  """
  sc = ScoringFunctions()
  # We have choices - 1 map and one model, 2 maps or 2 models
  emmap1 = None
  emmap2 = None
  blurrer = StructureBlurrer()
  if atomic_model1 != None and map_model1 != None:
    # 1 map 1 model
    if rez1 == None:
      print("Please provide the resolution for the model.")
      return
    emmap1 = chimera_to_tempy_map(map_model1)
    bms = chimera_to_tempy_model(atomic_model1)
    emmap2 = blurrer.gaussian_blur(bms, rez1, densMap=emmap1)
  elif map_model1 != None and map_model2 != None:
    # 2 maps
    emmap1 = chimera_to_tempy_map(map_model1)
    emmap2 = chimera_to_tempy_map(map_model2)
  elif atomic_model1 != None and atomic_model2 != None:
    # 2 models
    if None in [rez1,rez2]:
      print("Please provide the resolution for both model")
      return
    bms1 = chimera_to_tempy_model(atomic_model1)
    bms2 = chimera_to_tempy_model(atomic_model2)
    # NOTE(review): model_contour returns a (map, contour) tuple; assigning
    # it straight to emmap1/emmap2 hands a tuple to the scoring code below.
    emmap1 = model_contour( bms1, rez1, emmap=False,t=0.5)
    if c2 is None:
      emmap2 = model_contour(bms2, rez2,emmap=False,t=0.5)
    else:
      # NOTE(review): blur_model is not defined or imported in this module;
      # taking this branch raises NameError.
      emmap2 = blur_model( bms2, rez2, emmap=False)
  else:
    print("Error. Must have 1 model and 1 map, 2 maps or 2 models")
    return
  # Contouring
  if c1 == None:
    c1 = map_contour(emmap1,t=1.5)
  if c2 == None:
    c2 = map_contour(emmap2,t=1.5)
  # Some kind of fix if the maps don't match?
  # Resize, resample or blur of somekind
  if not sc.mapComparison(emmap1,emmap2):
    emmap1._crop_box(c1,0.5)
    emmap2._crop_box(c2,0.5)
    # NOTE(review): rez1/rez2 may be None in the two-map path; these
    # comparisons would raise TypeError on Python 3.
    if rez1 > 1.25*rez2:
      emmap_2 = lpfilter(emmap2,rez1)
      emmap1, emmap2 = match_grid(emmap1,emmap_2,c1,c2)
    elif rez2 > 1.25*rez1:
      emmap_1 = lpfilter(emmap1,rez2)
      emmap1, emmap2 = match_grid(emmap_1,emmap2,c1,c2)
    else:
      emmap1, emmap2 = match_grid(emmap1,emmap2,c1,c2)
  nmi = 0.0
  try:
    # Last flag presumably selects normalised MI -- TODO confirm against
    # TEMPy's ScoringFunctions.MI signature.
    nmi = sc.MI(emmap1,emmap2,c1,c2,1,None,None,True)
    if nmi < 0.0: nmi = 0.0
  except:
    print('Exception for NMI score')
    print_exc()
    nmi = 0.0
  return nmi
|
OniDaito/ChimeraXTempy
|
src/nmi.py
|
Python
|
mit
| 7,191
|
[
"ChimeraX"
] |
4aa695fa26ac8dbbae3e0a4ed74982493bd2159c729b59f6919a2d3fba601a0e
|
"""OVITO example: build the geometric surface of a particle structure with
ConstructSurfaceModifier, report surface/volume attributes, and export the
triangle mesh to a VTK file."""
from ovito.io import import_file
from ovito.modifiers import ConstructSurfaceModifier

# Load a particle structure and construct its geometric surface:
node = import_file("simulation.dump")
mod = ConstructSurfaceModifier(radius = 2.9)  # probe-sphere radius
node.modifiers.append(mod)
node.compute()

# Query computed surface properties:
print("Surface area: %f" % node.output.attributes['ConstructSurfaceMesh.surface_area'])
print("Solid volume: %f" % node.output.attributes['ConstructSurfaceMesh.solid_volume'])
# Fraction of the simulation cell occupied by the solid region.
fraction = node.output.attributes['ConstructSurfaceMesh.solid_volume'] / node.output.cell.volume
print("Solid volume fraction: %f" % fraction)

# Export the surface triangle mesh to a VTK file.
mesh = node.output.surface
mesh.export_vtk('surface.vtk', node.output.cell)
|
srinath-chakravarthy/ovito
|
doc/python/example_snippets/construct_surface_modifier.py
|
Python
|
gpl-3.0
| 761
|
[
"OVITO",
"VTK"
] |
eae798285655b90db22e2fabe6153b6bddc418c5cfc1e394f63ffa6bcd64bd58
|
../../../../../../../share/pyshared/orca/scripts/toolkits/WebKitGtk/structural_navigation.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/toolkits/WebKitGtk/structural_navigation.py
|
Python
|
gpl-3.0
| 92
|
[
"ORCA"
] |
d4665a72da678bbbeecfe872e0b8d2ff2a693bfe8b3e6e5cacd7d5e18bddb76b
|
"""Generate the quadrature nodes and weights in Clenshaw-Curtis quadrature."""
try:
from functools import lru_cache
except ImportError: # pragma: no coverage
from functools32 import lru_cache
import numpy
import chaospy
from .hypercube import hypercube_quadrature
def clenshaw_curtis(order, domain=(0., 1.), growth=False, segments=1):
    """
    Generate the quadrature nodes and weights in Clenshaw-Curtis quadrature.

    Clenshaw-Curtis is a good all-around rule on finite intervals: close to
    Gaussian quadrature in accuracy, while its abscissas and weights are
    cheap to compute.  With an appropriate growth rule it is fully nested,
    so schemes that combine several quadrature orders can reuse abscissas
    across levels and reduce the number of evaluations.

    Args:
        order (int, numpy.ndarray):
            Quadrature order.
        domain (:class:`chaospy.Distribution`, numpy.ndarray):
            Either distribution or bounding of interval to integrate over.
        growth (bool):
            If True sets the growth rule for the quadrature rule to only
            include orders that enhances nested samples.
        segments (int):
            Split intervals into steps subintervals and create a patched
            quadrature based on the segmented quadrature. Can not be lower
            than `order`. If 0 is provided, default to square root of
            `order`. Nested samples only appear when the number of segments
            are fixed.

    Returns:
        abscissas (numpy.ndarray):
            The quadrature points for where to evaluate the model function
            with ``abscissas.shape == (len(dist), steps)`` where ``steps``
            is the number of samples.
        weights (numpy.ndarray):
            The quadrature weights with ``weights.shape == (steps,)``.

    Notes:
        Implemented as proposed by Waldvogel :cite:`waldvogel_fast_2006`.

    Example:
        >>> abscissas, weights = chaospy.quadrature.clenshaw_curtis(4, (0, 1))
        >>> abscissas.round(4)
        array([[0.    , 0.1464, 0.5   , 0.8536, 1.    ]])
        >>> weights.round(4)
        array([0.0333, 0.2667, 0.4   , 0.2667, 0.0333])

    See also:
        :func:`chaospy.quadrature.gaussian`
        :func:`chaospy.quadrature.fejer_1`
        :func:`chaospy.quadrature.fejer_2`
    """
    order = numpy.asarray(order)
    # Exponential growth rule: each positive order k becomes 2**k (zero
    # stays zero), which keeps successive levels nested.
    grown = numpy.where(order > 0, 2**order, 0)
    order = numpy.where(growth, grown, order)
    return hypercube_quadrature(
        quad_func=clenshaw_curtis_simple,
        order=order,
        domain=domain,
        segments=segments,
    )
@lru_cache(None)
def clenshaw_curtis_simple(order):
    """
    Backend for Clenshaw-Curtis quadrature.

    Computes nodes and weights on the unit interval via Waldvogel's
    FFT-based scheme.  Use :func:`chaospy.quadrature.clenshaw_curtis`
    instead.
    """
    order = int(order)

    # Hard-coded lowest orders: midpoint rule and interval end points.
    if order == 0:
        return numpy.array([0.5]), numpy.array([1.0])
    if order == 1:
        return numpy.array([0.0, 1.0]), numpy.array([0.5, 0.5])

    # Chebyshev-extrema abscissas mapped from [-1, 1] onto [0, 1].
    angles = (order - numpy.arange(order + 1)) * numpy.pi / order
    nodes = 0.5 + 0.5 * numpy.cos(angles)

    # Waldvogel's correction vector built from the odd indices.
    odd = numpy.arange(1, order, 2)
    n_odd = len(odd)
    n_rest = order - n_odd
    corr = numpy.hstack(
        [2.0 / (odd * (odd - 2)), [1.0 / odd[-1]], numpy.zeros(n_rest)])
    corr = -corr[:-1] - corr[:0:-1]

    base = -numpy.ones(order)
    base[n_odd] += order
    base[n_rest] += order
    base /= order**2 - 1 + (order % 2)

    # The inverse Hermitian FFT yields half the (real) weight vector;
    # mirror it to recover the full symmetric set.
    half = numpy.fft.ihfft(corr + base)
    assert max(half.imag) < 1e-15
    half = half.real
    weights = numpy.hstack(
        [half, half[len(half) - 2 + (order % 2)::-1]]) / 2
    return nodes, weights
|
jonathf/chaospy
|
chaospy/quadrature/clenshaw_curtis.py
|
Python
|
mit
| 3,825
|
[
"Gaussian"
] |
aa378554025dabd2740221c5adf634ef4c71ae66a09f8e23e98f4a8fbbed487f
|
# $Id$
#
# Copyright (c) 2010, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Greg Landrum October 2006
#
from rdkit import RDConfig
import unittest,cPickle,os
from rdkit import Chem
from rdkit.Chem import FunctionalGroups
import os.path
class TestCase(unittest.TestCase):
  """Tests for FunctionalGroups.BuildFuncGroupHierarchy and the
  functional-group fingerprints built from it.  (Python 2 source:
  print statements and failUnless are used throughout.)"""

  def test1Basics(self):
    """Hierarchy construction, fingerprinting, caching and force-rebuild."""
    txt="""
AcidChloride\tC(=O)Cl\tAcid Chloride
AcidChloride.Benzoyl\tC(=O)(Cl)c1ccccc1\tBenzoyl
Amine\tN\tAmine
Amine.Primary\t[N;H2]\tPrimary
Amine.Primary.Aromatic\t[N;H2][a]\tPrimary Aromatic
Amine.Aromatic\tN[a]\tAromatic
"""
    hierarchy = FunctionalGroups.BuildFuncGroupHierarchy(data=txt)
    self.failUnless(hierarchy)
    # Two roots (AcidChloride: 2 nodes; Amine: 4 nodes incl. descendants).
    self.failUnless(len(hierarchy)==2)
    self.failUnless(len(hierarchy[0])==2)
    self.failUnless(len(hierarchy[1])==4)
    self.failUnless(hierarchy[0].name=='Acid Chloride')
    self.failUnless(hierarchy[0].children[0].name=='Benzoyl')
    self.failUnless(hierarchy[0].label=='AcidChloride')
    self.failUnless(hierarchy[0].rxnSmarts=='')
    m = Chem.MolFromSmiles('ClC(=O)CCCNc1ccccc1')
    fp = FunctionalGroups.CreateMolFingerprint(m,hierarchy)
    self.failUnless(fp==[1,0,1,0,0,1])
    m = Chem.MolFromSmiles('OC(=O)CCC')
    fp = FunctionalGroups.CreateMolFingerprint(m,hierarchy)
    self.failUnless(fp==[0,0,0,0,0,0])
    # make sure we get the same hierarchy on the second call:
    hierarchy = FunctionalGroups.BuildFuncGroupHierarchy(data=txt)
    self.failUnless(hierarchy)
    self.failUnless(len(hierarchy)==2)
    self.failUnless(len(hierarchy[0])==2)
    self.failUnless(len(hierarchy[1])==4)
    self.failUnless(hierarchy[0].name=='Acid Chloride')
    self.failUnless(hierarchy[0].children[0].name=='Benzoyl')
    self.failUnless(hierarchy[0].label=='AcidChloride')
    self.failUnless(hierarchy[0].rxnSmarts=='')
    # if we edit this hierarchy it doesn't affect the global one:
    hierarchy.pop(0)
    self.failUnless(len(hierarchy[0])==4)
    hierarchy = FunctionalGroups.BuildFuncGroupHierarchy(data=txt)
    self.failUnless(hierarchy)
    self.failUnless(len(hierarchy)==2)
    self.failUnless(len(hierarchy[0])==2)
    self.failUnless(len(hierarchy[1])==4)
    self.failUnless(hierarchy[0].name=='Acid Chloride')
    self.failUnless(hierarchy[0].children[0].name=='Benzoyl')
    self.failUnless(hierarchy[0].label=='AcidChloride')
    self.failUnless(hierarchy[0].rxnSmarts=='')
    # and if we edit the global one and don't force, we get the edited one:
    FunctionalGroups.hierarchy.pop(0)
    self.failUnless(len(FunctionalGroups.hierarchy[0])==4)
    hierarchy = FunctionalGroups.BuildFuncGroupHierarchy(data=txt)
    self.failUnless(hierarchy)
    self.failUnless(len(hierarchy)==1)
    self.failUnless(len(hierarchy[0])==4)
    # but a force gets us back:
    hierarchy = FunctionalGroups.BuildFuncGroupHierarchy(data=txt,force=True)
    self.failUnless(len(hierarchy)==2)
    self.failUnless(len(hierarchy[0])==2)
    self.failUnless(len(hierarchy[1])==4)

  def test2Comments(self):
    """Definition lines starting with // are skipped by the parser."""
    txt="""
AcidChloride\tC(=O)Cl\tAcid Chloride
AcidChloride.Benzoyl\tC(=O)(Cl)c1ccccc1\tBenzoyl
Amine\tN\tAmine
Amine.Primary\t[N;H2]\tPrimary
//Amine.Primary.Aromatic\t[N;H2][a]\tPrimary Aromatic
Amine.Aromatic\tN[a]\tAromatic
"""
    hierarchy = FunctionalGroups.BuildFuncGroupHierarchy(data=txt)
    self.failUnless(hierarchy)
    self.failUnless(len(hierarchy)==2)
    self.failUnless(len(hierarchy[0])==2)
    # the commented-out Primary.Aromatic entry drops the Amine tree to 3.
    self.failUnless(len(hierarchy[1])==3)

  def test3Reactions(self):
    """An optional fourth tab-separated column carries reaction SMARTS."""
    txt="""BoronicAcid\t[$(B-!@[#6])](O)(O)\tBoronic Acid\t[#6:1][B:2]([O:3])[O:4]>>[#6:1].[B:2]([O:3])[O:4]
BoronicAcid.Aromatic\t[$(B-!@c)](O)(O)\tAromatic\t[c:1][B:2]([O:3])[O:4]>>[c:1].[B:2]([O:3])[O:4]
BoronicAcid.Aliphatic\t[$(B-!@C)](O)(O)\tAliphatic\t[C:1][B:2]([O:3])[O:4]>>[C:1].[B:2]([O:3])[O:4]
"""
    hierarchy = FunctionalGroups.BuildFuncGroupHierarchy(data=txt)
    self.failUnless(hierarchy)
    self.failUnless(len(hierarchy)==1)
    self.failUnless(len(hierarchy[0].children)==2)
    self.failUnless(hierarchy[0].rxnSmarts!='')
    self.failUnless(hierarchy[0].children[0].rxnSmarts!='')

  def test4Hs(self):
    """Adding explicit Hs must not change a molecule's fingerprint."""
    hierarchy = FunctionalGroups.BuildFuncGroupHierarchy()
    inName = os.path.join(RDConfig.RDCodeDir,'Chem','test_data','NCI_5K_TPSA.csv')
    inF = open(inName,'r')
    lines = inF.readlines()
    # first comma-separated field is the SMILES; '#' lines are comments.
    ms = [Chem.MolFromSmiles(x.split(',')[0]) for x in lines if x[0]!='#']
    for m in ms:
      mh =Chem.AddHs(m)
      fp = FunctionalGroups.CreateMolFingerprint(m,hierarchy)
      fph = FunctionalGroups.CreateMolFingerprint(mh,hierarchy)
      if fp!=fph:
        print fp.ToBitString()
        print fph.ToBitString()
      self.failUnlessEqual(fp,fph)
if __name__ == '__main__':
  import sys,getopt,re
  doLong=0
  # The -l flag enables the "long" tests: methods named _testXxx are
  # renamed to testXxx so unittest discovers them.
  if len(sys.argv) >1:
    args,extras=getopt.getopt(sys.argv[1:],'l')
    for arg,val in args:
      if arg=='-l':
        doLong=1
        # strip the flag so unittest.main() doesn't choke on it
        sys.argv.remove('-l')
  if doLong:
    for methName in dir(TestCase):
      if re.match('_test',methName):
        newName = re.sub('_test','test',methName)
        exec('TestCase.%s = TestCase.%s'%(newName,methName))
  unittest.main()
|
rdkit/rdkit-orig
|
rdkit/Chem/UnitTestFunctionalGroups.py
|
Python
|
bsd-3-clause
| 6,699
|
[
"RDKit"
] |
10ca4b17838dd3d55cceff2e831d8671d80a7e76b667e50a9a03775f47058ae5
|
#!/usr/bin/env python
# encoding: utf-8
# Stdlib:
from time import time
# Internal:
from gi.repository import Moose
# External:
from gi.repository import GLib
class Heartbeat:
    '''Count the elapsed song time without querying MPD.

    It simply looks for signals and calculates the current song position on
    accessing ``elapsed``.

    .. note::

        For pedantics: Heartbeat assumes it was instanced near to a status
        update. If the last update was long ago it will return slightly wrong
        results till next update. You will likely never notice in reality.

    :client: A Client object.
    '''
    def __init__(self, client, use_listened_counter=True):
        self._client = client
        # -1 means "no song seen yet", so the first client event always
        # registers as a song change.
        self._last_song_queue_pos = -1
        self._last_update_tmstp = self._current_time_ms()
        self._interval = 200  # poll period in milliseconds
        self._curr_listened = self._last_listened = self._last_duration = 0.0
        self._client.connect(
            'client-event',
            self._on_client_event,
        )
        # The listened counter needs a periodic tick; skip the GLib timeout
        # entirely when the caller does not want it.
        if use_listened_counter:
            self._last_tick = self._current_time_ms()
            GLib.timeout_add(self._interval, self._on_poll_elapsed)

    @staticmethod
    def format_minutes(seconds):
        'Format a seconds count as a zero-padded ``MM:SS`` string.'
        minutes = int(seconds / 60)
        seconds = int(seconds % 60)
        return '{:02d}:{:02d}'.format(minutes, seconds)

    def _on_poll_elapsed(self):
        'Update every timeslice the player is running a counter'
        if not self._client.is_connected:
            # NOTE(review): returning a falsy value stops the GLib timeout
            # permanently -- confirm that is intended on disconnect.
            return 0.0
        now = self._current_time_ms()
        with self._client.reffed_status() as status:
            # Only count time while the player is actually playing.
            if status and status.get_state() is Moose.State.PLAYING:
                self._curr_listened += now - self._last_tick
                with self._client.reffed_current_song() as song:
                    if song is not None:
                        self._last_duration = song.get_duration()
        self._last_tick = now
        return True  # keep the GLib timeout alive

    @property
    def currently_listened_percent(self):
        '''The percent of the song that was actually listened to.

        This will is resistant against seeking in the song, therefore
        more than 100 percent is possible (imagine listening the whole song
        and skipping back to the beginning).
        '''
        # The divisor evaluates to 1000 (ms -> s conversion written in terms
        # of the tick interval).
        secs = self._curr_listened / (1000 / self._interval * self._interval)
        duration = self._last_duration
        if duration != 0.0:
            return secs / duration
        return 0

    @property
    def last_listened_percent(self):
        '''Same as ``currently_listened_percent``, but for the last listened song.

        This is provided for your convinience
        '''
        return self._last_listened

    @property
    def elapsed(self):
        '''Returns the approx. number of elapsed seconds of the currently playing song

        :returns: The number of seconds as a float, with milliseconds fraction
        '''
        if not self._client.is_connected:
            return 0.0
        elapsed = 0
        with self._client.reffed_status() as status:
            if status is not None:
                # While playing, extrapolate from the last status update;
                # when paused/stopped the reported position is exact.
                if status.get_state() is Moose.State.PLAYING:
                    offset = self._current_time_ms() - self._last_update_tmstp
                else:
                    offset = 0
                elapsed = (status.get_elapsed_ms() + offset) / 1000.0
        return elapsed

    @property
    def duration(self):
        '''Returns the approx. duration of the currently playing song

        :returns: The number of seconds as a float, with milliseconds fraction
        '''
        if not self._client.is_connected:
            return 0.0
        duration = 0
        with self._client.reffed_current_song() as song:
            if song is not None:
                duration = song.get_duration()
        return duration

    @property
    def percent(self):
        '''Convinience property.

        Returns self.elapsed / self.duration
        '''
        duration = self.duration
        if duration != 0.0:
            return self.elapsed / duration
        return 0

    def _on_client_event(self, client, event):
        'client-event callback - updates the update timestamp'
        # Only player state changes and seeks move the song position.
        if not event & (Moose.Idle.PLAYER | Moose.Idle.SEEK):
            return
        song_queue_pos = -1
        with client.reffed_current_song() as song:
            if song is not None:
                song_queue_pos = song.get_pos()
        # A changed queue position means a new song: archive the listened
        # percentage and restart the counter from the current position.
        if self._last_song_queue_pos != song_queue_pos:
            self._last_listened = self.currently_listened_percent
            self._curr_listened = self.elapsed * 1000.0
        self._last_song_queue_pos = song_queue_pos
        self._last_update_tmstp = self._current_time_ms()

    def _current_time_ms(self):
        # Wall-clock time in milliseconds.
        return time() * 1000
if __name__ == '__main__':
    # Manual smoke test: connect to a local MPD and print the heartbeat's
    # view of playback twice a second.
    client = Moose.CmdClient()
    client.connect_to("localhost", 6600, 200)
    hb = Heartbeat(client)

    def timeout_callback():
        print(
            'elapsed={:3.3f} percent={:3.3f} curr={:3.3f} last={:3.3f}'.format(
                hb.elapsed,
                hb.percent,
                hb.currently_listened_percent,
                hb.last_listened_percent
            )
        )
        return True  # keep the GLib timeout alive

    GLib.timeout_add(500, timeout_callback)
    try:
        GLib.MainLoop().run()
    except KeyboardInterrupt:
        pass
|
studentkittens/snobaer
|
snobaer/heartbeat.py
|
Python
|
lgpl-3.0
| 5,354
|
[
"MOOSE"
] |
bfe71c1a26eb1e45e0cce9bb110e9de905b5fd4116da014b7c6cbb1b8c199bdb
|
""" GraphData encapsulates input data for the DIRAC Graphs plots
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
import time
import datetime
import numpy
from matplotlib.dates import date2num
from DIRAC.Core.Utilities.Graphs.GraphUtilities import to_timestamp, pretty_float
DEBUG = 0
def get_key_type( keys ):
  """ A utility function to guess the type of the plot keys

      Every key is tried as a timestamp, a float and a string; an
      interpretation survives only if it fits all keys.  The most
      restrictive surviving one wins (string < numeric < time).
      Returns 'string', 'numeric', 'time' or 'unknown'.
  """
  # Epoch-second window outside which values are not considered timestamps.
  min_time_stamp = 1000000000
  max_time_stamp = 1900000000
  time_type = True
  num_type = True
  string_type = True
  key_type = 'unknown'
  for key in keys:
    if time_type:
      try:
        time_data = to_timestamp( key )
        if time_data < min_time_stamp or time_data > max_time_stamp:
          time_type = False
      except ValueError:
        time_type = False
    if num_type:
      try:
        num_data = float( key )
      except:
        num_type = False
    # NOTE(review): basestring makes this module Python 2 only.
    if not isinstance(key, basestring):
      string_type = False
  # Take the most restrictive type
  if string_type:
    key_type = "string"
  if num_type :
    key_type = "numeric"
  if time_type:
    key_type = "time"
  return key_type
class GraphData:
  """ Container for graph input data.

      Wraps either a single PlotData (one curve) or a dictionary of
      labelled PlotData subplots, and maintains derived bookkeeping:
      the union of all keys, their guessed type, sorted labels and the
      numeric key representation used for plotting.  (Python 2 source:
      print statements, list-returning dict.keys() and has_key are used.)
  """

  def __init__( self, data = {} ):
    # NOTE(review): mutable default argument; harmless here only because
    # the dict is copied immediately below.
    self.truncated = 0
    self.all_keys = []
    self.labels = []
    self.label_values = []
    self.subplots = {}
    self.plotdata = None
    self.data = dict( data )
    self.key_type = 'string'
    self.initialize()

  def isEmpty( self ):
    """ Check if there is no data inserted
    """
    return not self.plotdata and not self.subplots

  def setData( self, data ):
    """ Add data to the GraphData object
    """
    self.data = dict( data )
    self.initialize()

  def initialize( self, key_type = None ):
    """ Parse self.data into PlotData object(s), collect all keys and
        classify their type (or use the explicit key_type given).
    """
    keys = self.data.keys()
    if not keys:
      print "GraphData Error: empty data"
    start = time.time()
    # A dict of dicts means several labelled subplots, otherwise one plot.
    if isinstance( self.data[keys[0]], dict ):
      for key in self.data:
        self.subplots[key] = PlotData( self.data[key], key_type = key_type )
    else:
      self.plotdata = PlotData( self.data, key_type = key_type )
    if DEBUG:
      print "Time: plot data", time.time() - start, len( self.subplots )
    # Union of keys over all subplots (or the single plot's keys).
    if self.plotdata:
      self.all_keys = self.plotdata.getKeys()
    else:
      tmpset = set()
      for sub in self.subplots.values():
        for key in sub.getKeys():
          tmpset.add( key )
      self.all_keys = list( tmpset )
    if key_type:
      self.key_type = key_type
    else:
      self.key_type = get_key_type( self.all_keys )
    self.sortKeys()
    self.makeNumKeys()
    self.sortLabels()

  def expandKeys( self ):
    """ Make every subplot carry the full key set (missing keys zero-filled) """
    if not self.plotdata:
      for sub in self.subplots:
        self.subplots[sub].expandKeys( self.all_keys )

  def isSimplePlot( self ):
    """ True when this object wraps a single plot rather than subplots """
    return not self.plotdata is None

  def sortLabels( self, sort_type = 'max_value', reverse_order=False ):
    """ Sort labels with a specified method:
          alpha - alphabetic order
          max_value - by max value of the subplot
          sum - by the sum of values of the subplot
          last_value - by the last value in the subplot
          avg_nozeros - by an average that excludes all zero values
    """
    if self.plotdata:
      # Single plot: its keys double as the labels.
      if self.key_type == "string":
        if sort_type in ['max_value', 'sum']:
          self.labels = self.plotdata.sortKeys( 'weight' )
        else:
          self.labels = self.plotdata.sortKeys()
        if reverse_order:
          self.labels.reverse()
        self.label_values = [ self.plotdata.parsed_data[l] for l in self.labels]
    else:
      if sort_type == 'max_value':
        pairs = zip( self.subplots.keys(), self.subplots.values() )
        reverse = not reverse_order
        pairs.sort( key = lambda x: x[1].max_value, reverse = reverse )
        self.labels = [ x[0] for x in pairs ]
        self.label_values = [ x[1].max_value for x in pairs ]
      elif sort_type == 'last_value':
        pairs = zip( self.subplots.keys(), self.subplots.values() )
        reverse = not reverse_order
        pairs.sort( key = lambda x: x[1].last_value, reverse = reverse )
        self.labels = [ x[0] for x in pairs ]
        self.label_values = [ x[1].last_value for x in pairs ]
      elif sort_type == 'sum':
        pairs = []
        for key in self.subplots:
          pairs.append( ( key, self.subplots[key].sum_value ) )
        reverse = not reverse_order
        pairs.sort( key = lambda x: x[1], reverse = reverse )
        self.labels = [ x[0] for x in pairs ]
        self.label_values = [ x[1] for x in pairs ]
      elif sort_type == 'alpha':
        self.labels = self.subplots.keys()
        self.labels.sort()
        if reverse_order:
          self.labels.reverse()
        self.label_values = [ self.subplots[x].sum_value for x in self.labels ]
      elif sort_type == 'avg_nozeros':
        pairs = zip( self.subplots.keys(), self.subplots.values() )
        reverse = not reverse_order
        pairs.sort( key = lambda x: x[1].avg_nozeros, reverse = reverse )
        self.labels = [ x[0] for x in pairs ]
        self.label_values = [ x[1].avg_nozeros for x in pairs ]
      else:
        # Unknown sort type: keep insertion order of the subplot dict.
        self.labels = self.subplots.keys()
        if reverse_order:
          self.labels.reverse()

  def sortKeys( self ):
    """ Sort the graph keys in a natural order
    """
    if self.plotdata:
      self.plotdata.sortKeys()
      self.all_keys = self.plotdata.getKeys()
    else:
      self.all_keys.sort()
    self.min_key = min( self.all_keys )
    self.max_key = max( self.all_keys )

  def makeNumKeys( self ):
    """ Make numerical representation of the graph keys suitable for plotting
    """
    self.all_num_keys = []
    if self.key_type == "string":
      # Map each string key to a consecutive integer position.
      self.all_string_map = {}
      next = 0  # NOTE(review): shadows the builtin 'next'
      for key in self.all_keys:
        self.all_string_map[key] = next
        self.all_num_keys.append( next )
        next += 1
    elif self.key_type == "time":
      self.all_num_keys = [ date2num( datetime.datetime.fromtimestamp( to_timestamp( key ) ) ) for key in self.all_keys ]
    elif self.key_type == "numeric":
      self.all_num_keys = [ float( key ) for key in self.all_keys ]
    self.min_num_key = min( self.all_num_keys )
    self.max_num_key = max( self.all_num_keys )

  def makeCumulativeGraph( self ):
    """ Prepare data for the cumulative graph
    """
    self.expandKeys()
    if self.plotdata:
      self.plotdata.makeCumulativePlot()
      if self.truncated:
        self.otherPlot.makeCumulativePlot()
    if self.subplots:
      for label in self.subplots:
        self.subplots[label].makeCumulativePlot()
    # Cumulative plots read best ordered by their final value.
    self.sortLabels( sort_type = 'last_value' )

  def getLabels( self ):
    """ Get the graph labels together with the numeric values used for the label
        sorting
    """
    labels = []
    if self.plotdata:
      if self.key_type != 'string':
        labels = [( 'NoLabels', 0. )]
      else:
        labels = zip( self.labels, self.label_values )
    elif self.truncated:
      # Keep the first 'truncated' labels, fold the rest into 'Others'.
      tlabels = self.labels[:self.truncated]
      tvalues = self.label_values[:self.truncated]
      labels = zip( tlabels, tvalues )
      labels.append( ( 'Others', sum( self.label_values[self.truncated:] ) ) )
    else:
      labels = zip( self.labels, self.label_values )
    return labels

  def getStringMap( self ):
    """ Get string to number mapping for numeric type keys
    """
    return self.all_string_map

  def getNumberOfKeys( self ):
    # Size of the union of keys over all (sub)plots.
    return len( self.all_keys )

  def getNumberOfLabels( self ):
    # After truncation the synthetic 'Others' label counts as one more.
    if self.truncated:
      return self.truncated + 1
    else:
      return len( self.labels )

  def getPlotNumData( self, label = None, zipFlag = True ):
    """ Get the plot data in a numeric form
    """
    if self.plotdata:
      if zipFlag:
        return zip( self.plotdata.getNumKeys(), self.plotdata.getValues(), self.plotdata.getPlotErrors() )
      else:
        return self.plotdata.getValues()
    elif label is not None:
      # 'Others' is the synthetic subplot built by truncateLabels().
      if label == "Others":
        return self.otherPlot.getPlotDataForNumKeys( self.all_num_keys )
      else:
        return self.subplots[label].getPlotDataForNumKeys( self.all_num_keys )
    else:
      # Get the sum of all the subplots
      self.expandKeys()
      arrays = []
      for label in self.subplots:
        arrays.append( numpy.array( [ x[1] for x in self.subplots[label].getPlotDataForNumKeys( self.all_num_keys, True )] ) )
      sum_array = sum( arrays )
      if zipFlag:
        return zip( self.all_num_keys, list( sum_array ) )
      else:
        return sum_array

  def truncateLabels( self, limit = 10 ):
    """ Truncate the number of labels to the limit, leave the most important
        ones, accumulate the rest in the 'Others' label
    """
    if self.plotdata:
      return
    nLabels = len( self.labels )
    if nLabels <= limit:
      return
    self.truncated = limit
    new_labels = self.labels[:limit]
    new_labels.append( 'Others' )
    # Sum every dropped subplot key-wise into a synthetic 'Others' plot.
    other_data = {}
    for key in self.all_keys:
      other_data[key] = 0.
    for label in self.labels:
      if label not in new_labels:
        for key in self.all_keys:
          if self.subplots[label].parsed_data.has_key( key ):
            other_data[key] += self.subplots[label].parsed_data[key]
    self.otherPlot = PlotData( other_data )

  def getStats( self ):
    """ Get statistics of the graph data

        Returns (min, max, average, current) over the summed numeric data;
        all zeros when there is no data.
    """
    numData = self.getPlotNumData( zipFlag = False )
    if not len( numData ):
      return 0, 0, 0, 0
    numData = numpy.array( numData )
    min_value = numData.min()
    max_value = numData.max()
    average = float( numData.sum() ) / len( numData )
    current = numData[-1]
    return min_value, max_value, average, current

  def getStatString( self, unit = None ):
    """ Get a string summarizing the graph data statistics
    """
    min_value, max_value, average, current = self.getStats()
    tmpList = []
    unitString = ''
    if unit:
      unitString = str( unit )
    # Each zero-valued statistic is skipped; formatting errors are ignored.
    # NOTE(review): the bound exception 'e' is unused in all four handlers.
    if max_value:
      try:
        s = "Max: " + pretty_float( max_value ) + " " + unitString
        tmpList.append( s.strip() )
      except Exception as e:
        pass
    if min_value:
      try:
        s = "Min: " + pretty_float( min_value ) + " " + unitString
        tmpList.append( s.strip() )
      except Exception as e:
        pass
    if average:
      try:
        s = "Average: " + pretty_float( average ) + " " + unitString
        tmpList.append( s.strip() )
      except Exception as e:
        pass
    if current:
      try:
        s = "Current: " + pretty_float( current ) + " " + unitString
        tmpList.append( s.strip() )
      except Exception as e:
        pass
    resultString = ', '.join( tmpList )
    return resultString
class PlotData:
  """ PlotData is a container for one dimensional plot data.

      The raw ``data`` dict maps keys (strings, timestamps or numbers) to
      values, where each value may be a plain number, a "value::error"
      string or a ( value, error ) tuple.  After parsing, values and errors
      are available both as dicts and as key-synchronized lists.
  """
  def __init__( self, data, single = True, key_type = None ):
    # key_type is one of "string", "time", "numeric"; guessed via
    # get_key_type() when not given explicitly
    self.key_type = "unknown"
    if not data:
      print( "PlotData Error: empty data" )
      return
    # Original data
    self.data = dict( data )
    # Working copy of the parsed data
    self.parsed_data = {}
    self.parsed_errors = {}
    # Keys and values as synchronized lists
    self.keys = []
    self.num_keys = []
    self.values = []
    self.errors = []
    self.sorted_keys = []
    # Do initial data parsing
    self.parseData( key_type )
    if single:
      self.initialize()

  def initialize( self ):
    """ Build the synchronized key/value/error lists and the summary
        statistics ( min / max / sum / last ) from the parsed data.
    """
    if self.key_type == "string":
      self.keys = self.sortKeys( 'weight' )
    else:
      self.keys = self.sortKeys()
    self.values = [ self.parsed_data.get( k, 0.0 ) for k in self.keys ]
    self.errors = [ self.parsed_errors.get( k, 0.0 ) for k in self.keys ]
    # Values that could actually be parsed (None placeholders dropped)
    self.real_values = [ self.parsed_data[k] for k in self.keys
                         if self.parsed_data[k] is not None ]
    self.values_sum = float( sum( self.real_values ) )
    # Prepare numerical representation of keys for plotting
    self.num_keys = []
    if self.key_type == "string":
      # Map each (weight-sorted) string key to its ordinal position
      self.string_map = {}
      for index, key in enumerate( self.keys ):
        self.string_map[key] = index
        self.num_keys.append( index )
    elif self.key_type == "time":
      self.num_keys = [ date2num( datetime.datetime.fromtimestamp( to_timestamp( key ) ) )
                        for key in self.keys ]
    elif self.key_type == "numeric":
      self.num_keys = [ float( key ) for key in self.keys ]
    # NOTE: like the historical implementation this raises when every
    # value failed to parse (real_values empty)
    self.min_value = float( min( self.real_values ) )
    self.max_value = float( max( self.real_values ) )
    self.min_key = self.keys[0]
    self.max_key = self.keys[-1]
    self.sum_value = float( sum( self.real_values ) )
    self.last_value = float( self.real_values[-1] )
    nonzero_count = len( [ v for v in self.real_values if v != 0 ] )
    if nonzero_count != 0:
      self.avg_nozeros = self.sum_value / float( nonzero_count )
    else:
      self.avg_nozeros = 0

  def expandKeys( self, all_keys ):
    """ Fill zero values into the missing keys
    """
    for k in all_keys:
      if k not in self.parsed_data:
        self.parsed_data[k] = 0.
    self.sorted_keys = []
    self.keys = list( self.parsed_data.keys() )
    self.initialize()

  def sortKeys( self, sort_type = 'alpha' ):
    """ Sort keys according to the specified method :
        alpha - sort in alphabetic order
        weight - sort in the order of values
    """
    if self.sorted_keys:
      return self.sorted_keys
    if sort_type == 'weight':
      # Heaviest values first
      pairs = sorted( self.parsed_data.items(), key = lambda kv: kv[1], reverse = True )
      self.sorted_keys = [ kv[0] for kv in pairs ]
    elif sort_type == 'alpha':
      self.sorted_keys = sorted( self.keys )
    else:
      print( "Unknown sorting type: %s" % sort_type )
    return self.sorted_keys

  def __data_size( self, item ):
    """ Determine a numerical size for the data; this is used to
        sort the keys of the graph.

        If the item is a tuple, take the absolute value of the first entry.
        Otherwise, attempt to take the absolute value of that item. If that
        fails, just return -1.
    """
    if isinstance( item, tuple ):
      return abs( item[0] )
    try:
      return abs( item )
    except TypeError:
      return -1

  def parseKey( self, key ):
    """ Parse the name of the pivot; time keys are converted to
        timestamps, anything else is returned unchanged.
    """
    if self.key_type == "time":
      return to_timestamp( key )
    else:
      return key

  def parseDatum( self, data ):
    """ Parse one data value into a ( value, error ) pair of floats;
        an element is None when it cannot be converted.
    """
    if isinstance( data, tuple ):
      datum, error = data
    elif isinstance( data, str ) and "::" in data:
      # "value::error" encoded string (str replaces the Python 2-only
      # basestring of the original implementation)
      datum, error = data.split( "::" )
    else:
      error = 0.
      datum = data
    try:
      resultD = float( datum )
    except ( TypeError, ValueError ):
      resultD = None
    try:
      resultE = float( error )
    except ( TypeError, ValueError ):
      resultE = None
    return ( resultD, resultE )

  def parseData( self, key_type = None ):
    """ Parse all the data values passed to the graph.  Subclasses should
        override parseDatum / parseKey rather than this driver method.
    """
    if key_type:
      self.key_type = key_type
    else:
      self.key_type = get_key_type( self.data.keys() )
    new_parsed_data = {}
    new_parsed_errors = {}
    for key, data in self.data.items():
      new_key = self.parseKey( key )
      datum, error = self.parseDatum( data )
      new_parsed_data[new_key] = datum
      new_parsed_errors[new_key] = error
    self.parsed_data = new_parsed_data
    self.parsed_errors = new_parsed_errors
    self.keys = list( self.parsed_data.keys() )

  def makeCumulativePlot( self ):
    """ Replace values by their running (cumulative) sum; a None value
        repeats the previous total.
    """
    if not self.sorted_keys:
      self.sortKeys()
    cum_values = []
    if self.values[0] is None:
      cum_values.append( 0. )
    else:
      cum_values.append( self.values[0] )
    for i in range( 1, len( self.values ) ):
      if self.values[i] is None:
        cum_values.append( cum_values[i - 1] )
      else:
        cum_values.append( cum_values[i - 1] + self.values[i] )
    self.values = cum_values
    self.last_value = float( self.values[-1] )

  def getPlotData( self ):
    """ Return the parsed { key : value } dict """
    return self.parsed_data

  def getPlotErrors( self ):
    """ Return the parsed { key : error } dict """
    return self.parsed_errors

  def getPlotNumData( self ):
    """ Return a list of ( numeric key, value, error ) triplets """
    # materialized as a list so callers can take len() on Python 3 as well
    return list( zip( self.num_keys, self.values, self.errors ) )

  def getPlotDataForKeys( self, keys ):
    """ Return ( key, value, error ) triplets for the requested keys;
        unknown keys yield ( key, None, 0. ).
    """
    result_pairs = []
    for key in keys:
      if key in self.parsed_data:
        # BUGFIX: append a single tuple; the original passed three
        # positional arguments to list.append, which raises TypeError
        result_pairs.append( ( key, self.parsed_data[key], self.parsed_errors[key] ) )
      else:
        result_pairs.append( ( key, None, 0. ) )
    return result_pairs

  def getPlotDataForNumKeys( self, num_keys, zeroes = False ):
    """ Return ( numeric key, value, error ) triplets for the requested
        numeric keys; unknown keys yield None values ( 0. when zeroes is set ).
    """
    result_pairs = []
    for num_key in num_keys:
      try:
        ind = self.num_keys.index( num_key )
        if self.values[ind] is None and zeroes:
          result_pairs.append( ( self.num_keys[ind], 0., 0. ) )
        else:
          result_pairs.append( ( self.num_keys[ind], self.values[ind], self.errors[ind] ) )
      except ValueError:
        # num_key is not present in self.num_keys
        if zeroes:
          result_pairs.append( ( num_key, 0., 0. ) )
        else:
          result_pairs.append( ( num_key, None, 0. ) )
    return result_pairs

  def getKeys( self ):
    """ Return the (sorted) keys """
    return self.keys

  def getNumKeys( self ):
    """ Return the numeric representation of the keys """
    return self.num_keys

  def getValues( self ):
    """ Return the values, synchronized with getKeys() """
    return self.values

  def getMaxValue( self ):
    return max( self.values )

  def getMinValue( self ):
    return min( self.values )
|
hgiemza/DIRAC
|
Core/Utilities/Graphs/GraphData.py
|
Python
|
gpl-3.0
| 17,723
|
[
"DIRAC"
] |
9d98c0859bdc7851b3cf2f1907a887c3bf771a8629cb83802f926015360bbb5e
|
"""
netParams.py
High-level specifications for M1 network model using NetPyNE
Contributors: salvadordura@gmail.com
"""
from netpyne import specs
import pickle, json
netParams = specs.NetParams() # object of class NetParams to store the network parameters
netParams.version = 49
# cfg (SimConfig) supplies the tunable parameters referenced throughout this
# file; when run from a parent script it lives in __main__, otherwise fall
# back to importing it from the local cfg module
try:
    from __main__ import cfg # import SimConfig object with params from parent module
except:
    from cfg import cfg
#------------------------------------------------------------------------------
#
# NETWORK PARAMETERS
#
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# General connectivity parameters
#------------------------------------------------------------------------------
netParams.defaultThreshold = 0.0 # spike threshold, 10 mV is NetCon default, lower it for all cells
netParams.defaultDelay = 2.0 # default conn delay (ms)
netParams.propVelocity = 500.0 # propagation velocity (um/ms)
netParams.probLambda = 100.0  # length constant (lambda) for connection probability decay (um)
netParams.sizeX = 100 # x-dimension of the network volume
netParams.sizeY = 1350 # cortical depth (will be converted to negative values)
netParams.sizeZ = 100 # z-dimension of the network volume
#------------------------------------------------------------------------------
# Cell parameters
#------------------------------------------------------------------------------
cellModels = ['HH_simple', 'HH_reduced', 'HH_full'] # available cell model labels
# Normalized cortical depth (ynorm) boundaries per layer; 'long' is a virtual
# range used for long-range input populations
layer = {'2': [0.12,0.31], '4': [0.31,0.42], '5A': [0.42,0.52], '45A':[0.31,0.52], '5B': [0.52,0.77], '6': [0.77,1.0], 'long': [2.0,3.0]} # normalized layer boundaries
#------------------------------------------------------------------------------
## Load cell rules previously saved using netpyne format
# Cell rules to load from previously saved netpyne files; when empty the
# rules are imported from their source templates further below instead
cellParamLabels = [] # ['PT5B_full'] # # list of cell rules to load from file
loadCellParams = cellParamLabels
saveCellParams = False #True  # set True to re-save imported rules to disk
for ruleLabel in loadCellParams:
    netParams.loadCellParamsRule(label=ruleLabel, fileName='cells/'+ruleLabel+'_cellParams.pkl')
#------------------------------------------------------------------------------
# Specification of cell rules not previously loaded
# Includes importing from hoc template or python class, and setting additional params
#------------------------------------------------------------------------------
## PT5B full cell model params (700+ comps)
if 'PT5B_full' not in loadCellParams:
    ihMod2str = {'harnett': 1, 'kole': 2, 'migliore': 3}
    # Import the full-morphology PT cell from its hoc template; the ih model
    # variant and slope are taken from the sim config
    cellRule = netParams.importCellParams(label='PT5B_full', conds={'cellType': 'PT', 'cellModel': 'HH_full'},
      fileName='cells/PTcell.hoc', cellName='PTcell', cellArgs=[ihMod2str[cfg.ihModel], cfg.ihSlope], somaAtOrigin=True)
    nonSpiny = ['apic_0', 'apic_1']
    netParams.addCellParamsSecList(label='PT5B_full', secListName='perisom', somaDist=[0, 50]) # sections within 50 um of soma
    netParams.addCellParamsSecList(label='PT5B_full', secListName='below_soma', somaDistY=[-600, 0]) # sections 0-600 um below soma (somaDistY=[-600, 0]; the original comment said 0-300 um — verify intended range)
    for sec in nonSpiny: cellRule['secLists']['perisom'].remove(sec)
    cellRule['secLists']['alldend'] = [sec for sec in cellRule.secs if ('dend' in sec or 'apic' in sec)] # basal+apical
    cellRule['secLists']['apicdend'] = [sec for sec in cellRule.secs if ('apic' in sec)] # apical
    cellRule['secLists']['spiny'] = [sec for sec in cellRule['secLists']['alldend'] if sec not in nonSpiny]
    # Adapt ih params based on cfg param
    for secName in cellRule['secs']:
        for mechName,mech in cellRule['secs'][secName]['mechs'].items():
            if mechName in ['ih','h','h15', 'hd']:
                mech['gbar'] = [g*cfg.ihGbar for g in mech['gbar']] if isinstance(mech['gbar'],list) else mech['gbar']*cfg.ihGbar
                if cfg.ihModel == 'migliore':
                    mech['clk'] = cfg.ihlkc # migliore's shunt current factor
                    mech['elk'] = cfg.ihlke # migliore's shunt current reversal potential
                if secName.startswith('dend'):
                    mech['gbar'] *= cfg.ihGbarBasal # modify ih conductance in soma+basal dendrites
                    mech['clk'] *= cfg.ihlkcBasal # scale shunt current factor in basal dendrites
                if secName in cellRule['secLists']['below_soma']: #secName.startswith('dend'):
                    mech['clk'] *= cfg.ihlkcBelowSoma # scale shunt current factor in sections below the soma
    # Reduce dend Na to avoid dend spikes (compensate properties by modifying axon params)
    for secName in cellRule['secLists']['alldend']:
        cellRule['secs'][secName]['mechs']['nax']['gbar'] = 0.0153130368342 * cfg.dendNa # 0.25
    cellRule['secs']['soma']['mechs']['nax']['gbar'] = 0.0153130368342 * cfg.somaNa
    cellRule['secs']['axon']['mechs']['nax']['gbar'] = 0.0153130368342 * cfg.axonNa # 11
    cellRule['secs']['axon']['geom']['Ra'] = 137.494564931 * cfg.axonRa # 0.005
    # Remove Na (TTX)
    if cfg.removeNa:
        for secName in cellRule['secs']: cellRule['secs'][secName]['mechs']['nax']['gbar'] = 0.0
    netParams.addCellParamsWeightNorm('PT5B_full', 'conn/PT5B_full_weightNorm.pkl', threshold=cfg.weightNormThreshold) # load weight norm
    if saveCellParams:
        netParams.saveCellParamsRule(label='PT5B_full', fileName='cells/PT5B_full_cellParams.pkl')
#------------------------------------------------------------------------------
# Population parameters
#------------------------------------------------------------------------------
# Single PT5B cell (full model) placed at layer 5B normalized depth
netParams.popParams['PT5B'] = {'cellModel': 'HH_full', 'cellType': 'PT', 'ynormRange': layer['5B'], 'numCells':1}
#------------------------------------------------------------------------------
# Synaptic mechanism parameters
#------------------------------------------------------------------------------
# tau1/tau2 are rise/decay time constants and 'e' the reversal potential
# (NEURON convention assumed: ms and mV — confirm against the mod files)
netParams.synMechParams['NMDA'] = {'mod': 'MyExp2SynNMDABB', 'tau1NMDA': 15, 'tau2NMDA': 150, 'e': 0}
netParams.synMechParams['AMPA'] = {'mod':'MyExp2SynBB', 'tau1': 0.05, 'tau2': 5.3*cfg.AMPATau2Factor, 'e': 0}
netParams.synMechParams['GABAB'] = {'mod':'MyExp2SynBB', 'tau1': 3.5, 'tau2': 260.9, 'e': -93}
netParams.synMechParams['GABAA'] = {'mod':'MyExp2SynBB', 'tau1': 0.07, 'tau2': 18.2, 'e': -80}
netParams.synMechParams['GABAASlow'] = {'mod': 'MyExp2SynBB','tau1': 2, 'tau2': 100, 'e': -80}
# Synaptic mechanism groups used when specifying connections
ESynMech = ['AMPA', 'NMDA']
SOMESynMech = ['GABAASlow','GABAB']
PVSynMech = ['GABAA']
#------------------------------------------------------------------------------
# Current inputs (IClamp)
#------------------------------------------------------------------------------
if cfg.addIClamp:
    # One current clamp per cfg attribute named IClamp*; each attribute is
    # expected to be a dict with pop/sec/loc/start/dur/amp entries
    for key in [k for k in dir(cfg) if k.startswith('IClamp')]:
        params = getattr(cfg, key, None)
        [pop,sec,loc,start,dur,amp] = [params[s] for s in ['pop','sec','loc','start','dur','amp']]
        #cfg.analysis['plotTraces']['include'].append((pop,0)) # record that pop
        # add stim source
        netParams.stimSourceParams[key] = {'type': 'IClamp', 'delay': start, 'dur': dur, 'amp': amp}
        # connect stim source to target
        netParams.stimTargetParams[key+'_'+pop] = {
            'source': key,
            'conds': {'pop': pop},
            'sec': sec,
            'loc': loc}
#------------------------------------------------------------------------------
# NetStim inputs
#------------------------------------------------------------------------------
if cfg.addNetStim:
    # One NetStim source per cfg attribute named NetStim*; each attribute is
    # a dict with the twelve entries unpacked below
    for key in [k for k in dir(cfg) if k.startswith('NetStim')]:
        params = getattr(cfg, key, None)
        [pop, ynorm, sec, loc, synMech, synMechWeightFactor, start, interval, noise, number, weight, delay] = \
        [params[s] for s in ['pop', 'ynorm', 'sec', 'loc', 'synMech', 'synMechWeightFactor', 'start', 'interval', 'noise', 'number', 'weight', 'delay']]
        if synMech == ESynMech:
            wfrac = cfg.synWeightFractionEE
        elif synMech == SOMESynMech:
            wfrac = cfg.synWeightFractionSOME
        else:
            wfrac = [1.0]
        # NOTE(review): wfrac is computed above but never used below — the
        # synMechWeightFactor from cfg is passed through instead; confirm intent
        # add stim source
        netParams.stimSourceParams[key] = {'type': 'NetStim', 'start': start, 'interval': interval, 'noise': noise, 'number': number}
        # connect stim source to target
        # for i, syn in enumerate(synMech):
        netParams.stimTargetParams[key+'_'+pop] = {
            'source': key,
            'conds': {'pop': pop, 'ynorm': ynorm},
            'sec': sec,
            'loc': loc,
            'synMech': synMech,
            'weight': weight,
            'synMechWeightFactor': synMechWeightFactor,
            'delay': delay}
#------------------------------------------------------------------------------
# Subcellular connectivity (synaptic distributions)
#------------------------------------------------------------------------------
if cfg.addSubConn:
    with open('conn/conn_dend_PT.json', 'rb') as fileObj: connDendPTData = json.load(fileObj)
    #------------------------------------------------------------------------------
    # Use subcellular distribution from L2/3 -> PT (Suter, 2015)
    # Build a local 1D depth grid (30 points, 50 um spacing, going downward);
    # note the gridY stored in the data file is discarded ('_') in favour of it
    lenY = 30
    spacing = 50
    gridY = list(range(0, -spacing*lenY, -spacing))
    synDens, _, fixedSomaY = connDendPTData['synDens'], connDendPTData['gridY'], connDendPTData['fixedSomaY']
    netParams.subConnParams['L2->PT'] = {
        'preConds': {'cellType': 'NetStim'}, # all presyn inputs
        'postConds': {'cellType': 'PT5B'},
        'sec': 'spiny',
        'groupSynMechs': ESynMech,
        'density': {'type': '1Dmap', 'gridX': None, 'gridY': gridY, 'gridValues': synDens['L2_PT'], 'fixedSomaY': fixedSomaY}}
|
thekerrlab/netpyne
|
examples/PTcell/netParams.py
|
Python
|
mit
| 9,565
|
[
"Elk"
] |
59555a2e1e6418dcb8e83ca944ecf7eef194fa589bdd92e8ec710007a6097a6e
|
import re
import os
import json
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest, HtmlResponse, TextResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
from decimal import Decimal
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class SwisscomSpider(BaseSpider):
    """Spider for a Swisscom device page: fetches the subscription (Abo)
    configuration fragment and yields one Product per subscription option."""
    name = 'swisscom.ch'
    allowed_domains = ['swisscom.ch', 'www.swisscom.ch']
    start_urls = (u'http://www.swisscom.ch/en/residential/mobile/devices/samsung-galaxy-siii-16gb-pebble-blue.html', )

    def parse(self, response):
        """Extract the product id from the device page and request the
        subscription configuration JSON for it."""
        if not isinstance(response, HtmlResponse):
            return
        hxs = HtmlXPathSelector(response)
        product_name = hxs.select(u'//div[@class="description"]/h2/text()').extract()[0]
        product_id = hxs.select(u'//form[@class="scs-form" and @action="/en/residential/konfiguration.html"]/input[@name="productId"]/@value').extract()[0]
        formdata = {'AboDuration': '24',
                    'HasOptions': 'False',
                    'HeaderProductId': product_id,
                    'PremiumDiscount': '0',
                    'PreselectedAbo': 'False',
                    'PreselectedCase': 'False',
                    'ProductId': product_id,
                    'ShopItem.Id': product_id,
                    'SubscriptionCase': 'New'}
        meta = {'product_url': response.request.url, 'product_name': product_name}
        yield FormRequest(u'http://www.swisscom.ch/PortalShop/en/Configuration/DrawAbo',
                          formdata=formdata, meta=meta, callback=self.parse_product)

    def parse_product(self, response):
        """Parse the returned JSON (an HTML fragment under 'Html') and yield
        a Product per subscription option input element."""
        if not isinstance(response, TextResponse):
            return
        fragment = HtmlXPathSelector(text=json.loads(response.body)['Html'])
        for option in fragment.select(u'//input'):
            abo_name = option.select(u'./@data-display-text')[0].extract().strip()
            option_id = option.select(u'./@id').extract()[0]
            raw_price = fragment.select(u'//label[@for="%s"]/span[@class="chf-price"]/text()' % option_id)[0].extract()
            loader = ProductLoader(item=Product(), selector=fragment)
            loader.add_value('url', response.meta['product_url'])
            loader.add_value('name', u'%s %s' % (response.meta['product_name'], abo_name))
            loader.add_value('price', raw_price.replace(u'\u2014', u''))
            yield loader.load_item()
|
0--key/lib
|
portfolio/Python/scrapy/orange/swisscom.py
|
Python
|
apache-2.0
| 2,683
|
[
"Galaxy"
] |
f7c3378668534a85d8226d189e60fd96e332690d8f3bc33d9cad371839ece80e
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# From https://gist.github.com/lepture/2011858
#
# Copyright (c) 2012, lepture.com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
# Base URL of GitHub's hosted emoji images (one "<name>.png" per name below)
emoji_url = 'http://a248.e.akamai.net/assets.github.com/images/icons/emoji/'
# Names recognized between colons, e.g. ":smile:"; any ":name:" token whose
# name is not in this list is left untouched by emoji()
emoji_list = [
    "-1", "0", "1", "109", "2", "3", "4", "5", "6", "7", "8", "8ball", "9",
    "a", "ab", "airplane", "alien", "ambulance", "angel", "anger", "angry",
    "apple", "aquarius", "aries", "arrow_backward", "arrow_down",
    "arrow_forward", "arrow_left", "arrow_lower_left", "arrow_lower_right",
    "arrow_right", "arrow_up", "arrow_upper_left", "arrow_upper_right",
    "art", "astonished", "atm", "b", "baby", "baby_chick", "baby_symbol",
    "balloon", "bamboo", "bank", "barber", "baseball", "basketball", "bath",
    "bear", "beer", "beers", "beginner", "bell", "bento", "bike", "bikini",
    "bird", "birthday", "black_square", "blue_car", "blue_heart", "blush",
    "boar", "boat", "bomb", "book", "boot", "bouquet", "bow", "bowtie",
    "boy", "bread", "briefcase", "broken_heart", "bug", "bulb",
    "bullettrain_front", "bullettrain_side", "bus", "busstop", "cactus",
    "cake", "calling", "camel", "camera", "cancer", "capricorn", "car",
    "cat", "cd", "chart", "checkered_flag", "cherry_blossom", "chicken",
    "christmas_tree", "church", "cinema", "city_sunrise", "city_sunset",
    "clap", "clapper", "clock1", "clock10", "clock11", "clock12", "clock2",
    "clock3", "clock4", "clock5", "clock6", "clock7", "clock8", "clock9",
    "closed_umbrella", "cloud", "clubs", "cn", "cocktail", "coffee",
    "cold_sweat", "computer", "confounded", "congratulations",
    "construction", "construction_worker", "convenience_store", "cool",
    "cop", "copyright", "couple", "couple_with_heart", "couplekiss", "cow",
    "crossed_flags", "crown", "cry", "cupid", "currency_exchange", "curry",
    "cyclone", "dancer", "dancers", "dango", "dart", "dash", "de",
    "department_store", "diamonds", "disappointed", "dog", "dolls",
    "dolphin", "dress", "dvd", "ear", "ear_of_rice", "egg", "eggplant",
    "egplant", "eight_pointed_black_star", "eight_spoked_asterisk",
    "elephant", "email", "es", "european_castle", "exclamation", "eyes",
    "factory", "fallen_leaf", "fast_forward", "fax", "fearful", "feelsgood",
    "feet", "ferris_wheel", "finnadie", "fire", "fire_engine", "fireworks",
    "fish", "fist", "flags", "flushed", "football", "fork_and_knife",
    "fountain", "four_leaf_clover", "fr", "fries", "frog", "fuelpump", "gb",
    "gem", "gemini", "ghost", "gift", "gift_heart", "girl", "goberserk",
    "godmode", "golf", "green_heart", "grey_exclamation", "grey_question",
    "grin", "guardsman", "guitar", "gun", "haircut", "hamburger", "hammer",
    "hamster", "hand", "handbag", "hankey", "hash", "headphones", "heart",
    "heart_decoration", "heart_eyes", "heartbeat", "heartpulse", "hearts",
    "hibiscus", "high_heel", "horse", "hospital", "hotel", "hotsprings",
    "house", "hurtrealbad", "icecream", "id", "ideograph_advantage", "imp",
    "information_desk_person", "iphone", "it", "jack_o_lantern",
    "japanese_castle", "joy", "jp", "key", "kimono", "kiss", "kissing_face",
    "kissing_heart", "koala", "koko", "kr", "leaves", "leo", "libra", "lips",
    "lipstick", "lock", "loop", "loudspeaker", "love_hotel", "mag",
    "mahjong", "mailbox", "man", "man_with_gua_pi_mao", "man_with_turban",
    "maple_leaf", "mask", "massage", "mega", "memo", "mens", "metal",
    "metro", "microphone", "minidisc", "mobile_phone_off", "moneybag",
    "monkey", "monkey_face", "moon", "mortar_board", "mount_fuji", "mouse",
    "movie_camera", "muscle", "musical_note", "nail_care", "necktie", "new",
    "no_good", "no_smoking", "nose", "notes", "o", "o2", "ocean", "octocat",
    "octopus", "oden", "office", "ok", "ok_hand", "ok_woman", "older_man",
    "older_woman", "open_hands", "ophiuchus", "palm_tree", "parking",
    "part_alternation_mark", "pencil", "penguin", "pensive", "persevere",
    "person_with_blond_hair", "phone", "pig", "pill", "pisces", "plus1",
    "point_down", "point_left", "point_right", "point_up", "point_up_2",
    "police_car", "poop", "post_office", "postbox", "pray", "princess",
    "punch", "purple_heart", "question", "rabbit", "racehorse", "radio",
    "rage", "rage1", "rage2", "rage3", "rage4", "rainbow", "raised_hands",
    "ramen", "red_car", "red_circle", "registered", "relaxed", "relieved",
    "restroom", "rewind", "ribbon", "rice", "rice_ball", "rice_cracker",
    "rice_scene", "ring", "rocket", "roller_coaster", "rose", "ru", "runner",
    "sa", "sagittarius", "sailboat", "sake", "sandal", "santa", "satellite",
    "satisfied", "saxophone", "school", "school_satchel", "scissors",
    "scorpius", "scream", "seat", "secret", "shaved_ice", "sheep", "shell",
    "ship", "shipit", "shirt", "shit", "shoe", "signal_strength",
    "six_pointed_star", "ski", "skull", "sleepy", "slot_machine", "smile",
    "smiley", "smirk", "smoking", "snake", "snowman", "sob", "soccer",
    "space_invader", "spades", "spaghetti", "sparkler", "sparkles",
    "speaker", "speedboat", "squirrel", "star", "star2", "stars", "station",
    "statue_of_liberty", "stew", "strawberry", "sunflower", "sunny",
    "sunrise", "sunrise_over_mountains", "surfer", "sushi", "suspect",
    "sweat", "sweat_drops", "swimmer", "syringe", "tada", "tangerine",
    "taurus", "taxi", "tea", "telephone", "tennis", "tent", "thumbsdown",
    "thumbsup", "ticket", "tiger", "tm", "toilet", "tokyo_tower", "tomato",
    "tongue", "top", "tophat", "traffic_light", "train", "trident",
    "trollface", "trophy", "tropical_fish", "truck", "trumpet", "tshirt",
    "tulip", "tv", "u5272", "u55b6", "u6307", "u6708", "u6709", "u6e80",
    "u7121", "u7533", "u7a7a", "umbrella", "unamused", "underage", "unlock",
    "up", "us", "v", "vhs", "vibration_mode", "virgo", "vs", "walking",
    "warning", "watermelon", "wave", "wc", "wedding", "whale", "wheelchair",
    "white_square", "wind_chime", "wink", "wink2", "wolf", "woman",
    "womans_hat", "womens", "x", "yellow_heart", "zap", "zzz", "+1"
]
def emoji(text):
    """Replace every ``:name:`` token in *text* whose name appears in
    ``emoji_list`` with an ``<img>`` tag pointing at the matching PNG under
    ``emoji_url``.

    Unknown names are preserved verbatim as ``:name:``.
    """
    # Raw string literal: '+' needs no escaping inside a character class and
    # the original non-raw '\+' / '\-' escapes trigger invalid-escape
    # warnings on modern Python
    pattern = re.compile(r':([a-z0-9+\-_]+):')

    def make_emoji(match):
        name = match.group(1)
        if name not in emoji_list:
            return ':%s:' % name
        tpl = ('<img class="emoji" title="%(name)s" alt="%(name)s" height="20"'
               ' width="20" src="%(url)s%(name)s.png" align="top">')
        return tpl % {'name': name, 'url': emoji_url}

    return pattern.sub(make_emoji, text)
|
michaeljoseph/pymoji
|
pymoji/emoji.py
|
Python
|
apache-2.0
| 8,018
|
[
"Bowtie",
"Octopus"
] |
64a7120120c3c06acf9e577589cf7ffb8f4c55bd9d36fea6a3c8d71cd875d956
|
# Copyright 2014, Brian Coca <bcoca@ansible.com>
# Copyright 2017, Ken Celenza <ken@networktocode.com>
# Copyright 2017, Jason Edelman <jason@networktocode.com>
# Copyright 2017, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import itertools
import math
from jinja2.filters import environmentfilter
from ansible.errors import AnsibleFilterError
from ansible.module_utils import basic
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.six.moves import zip, zip_longest
from ansible.module_utils.common._collections_compat import Hashable, Mapping, Iterable
from ansible.module_utils._text import to_native, to_text
from ansible.utils.display import Display
# Probe for Jinja2's own unique filter; unique() below prefers it and only
# falls back to the local implementation when it is missing or fails
try:
    from jinja2.filters import do_unique
    HAS_UNIQUE = True
except ImportError:
    HAS_UNIQUE = False

display = Display()
@environmentfilter
def unique(environment, a, case_sensitive=False, attribute=None):
    """Return the unique elements of *a*.

    Prefers Jinja2's unique filter (which supports ``case_sensitive`` and
    ``attribute``) and falls back to Ansible's own implementation when
    Jinja2's is unavailable or fails on inputs it cannot handle.
    """
    def _do_fail(e):
        # Cannot fall back when Jinja2-only options were requested
        if case_sensitive or attribute:
            raise AnsibleFilterError("Jinja2's unique filter failed and we cannot fall back to Ansible's version "
                                     "as it does not support the parameters supplied", orig_exc=e)

    error = None
    try:
        if HAS_UNIQUE:
            c = do_unique(environment, a, case_sensitive=case_sensitive, attribute=attribute)
            if isinstance(a, Hashable):
                c = set(c)
            else:
                c = list(c)
    except TypeError as e:
        # BUGFIX: record the exception inside the except clause.  Python 3
        # unbinds the 'as e' name when the clause exits, so the original
        # 'finally: error = e' raised NameError whenever an exception had
        # actually been handled.
        error = e
        _do_fail(e)
    except Exception as e:
        error = e
        _do_fail(e)
        display.warning('Falling back to Ansible unique filter as Jinja2 one failed: %s' % to_text(e))

    if not HAS_UNIQUE or error:
        # handle Jinja2 specific attributes when using Ansible's version
        if case_sensitive or attribute:
            raise AnsibleFilterError("Ansible's unique filter does not support case_sensitive nor attribute parameters, "
                                     "you need a newer version of Jinja2 that provides their version of the filter.")
        if isinstance(a, Hashable):
            c = set(a)
        else:
            c = []
            for x in a:
                if x not in c:
                    c.append(x)
    return c
@environmentfilter
def intersect(environment, a, b):
    """Return the elements common to *a* and *b* (set intersection)."""
    if isinstance(a, Hashable) and isinstance(b, Hashable):
        return set(a) & set(b)
    # Non-hashable inputs: keep a's elements that also occur in b
    return unique(environment, [x for x in a if x in b])
@environmentfilter
def difference(environment, a, b):
    """Return the elements of *a* that do not occur in *b* (set difference)."""
    if isinstance(a, Hashable) and isinstance(b, Hashable):
        return set(a) - set(b)
    # Non-hashable inputs: keep a's elements that are absent from b
    return unique(environment, [x for x in a if x not in b])
@environmentfilter
def symmetric_difference(environment, a, b):
    """Return the elements occurring in exactly one of *a* and *b*."""
    if isinstance(a, Hashable) and isinstance(b, Hashable):
        return set(a) ^ set(b)
    # Non-hashable inputs: union minus intersection
    isect = intersect(environment, a, b)
    return [x for x in union(environment, a, b) if x not in isect]
@environmentfilter
def union(environment, a, b):
    """Return the combined unique elements of *a* and *b* (set union)."""
    if isinstance(a, Hashable) and isinstance(b, Hashable):
        return set(a) | set(b)
    # Non-hashable inputs: concatenate, then deduplicate
    return unique(environment, a + b)
def min(a):
    """Return the smallest element of *a*, delegating to the Python builtin
    that this filter definition shadows.
    """
    # BUGFIX: __builtins__ is the builtins *module* in __main__ but a *dict*
    # in imported modules; the original __builtins__.get('min') raised
    # AttributeError in the module case.  Handle both forms.
    if isinstance(__builtins__, dict):
        _min = __builtins__['min']
    else:
        _min = __builtins__.min
    return _min(a)
def max(a):
    """Return the largest element of *a*, delegating to the Python builtin
    that this filter definition shadows.
    """
    # BUGFIX: __builtins__ is the builtins *module* in __main__ but a *dict*
    # in imported modules; the original __builtins__.get('max') raised
    # AttributeError in the module case.  Handle both forms.
    if isinstance(__builtins__, dict):
        _max = __builtins__['max']
    else:
        _max = __builtins__.max
    return _max(a)
def logarithm(x, base=math.e):
    """Return the logarithm of *x* in *base* (natural log by default)."""
    try:
        # math.log10 is more precise than math.log(x, 10)
        return math.log10(x) if base == 10 else math.log(x, base)
    except TypeError as e:
        raise AnsibleFilterError('log() can only be used on numbers: %s' % to_native(e))
def power(x, y):
    """Return ``x ** y`` as a float (delegates to math.pow)."""
    try:
        result = math.pow(x, y)
    except TypeError as e:
        raise AnsibleFilterError('pow() can only be used on numbers: %s' % to_native(e))
    return result
def inversepower(x, base=2):
    """Return the *base*-th root of *x* (square root by default)."""
    try:
        # dedicated sqrt for the common case, general x**(1/base) otherwise
        result = math.sqrt(x) if base == 2 else math.pow(x, 1.0 / float(base))
    except (ValueError, TypeError) as e:
        raise AnsibleFilterError('root() can only be used on numbers: %s' % to_native(e))
    return result
def human_readable(size, isbits=False, unit=None):
    """Return *size* formatted as a human readable string (delegates to
    module_utils.basic.bytes_to_human)."""
    try:
        formatted = basic.bytes_to_human(size, isbits, unit)
    except Exception:
        raise AnsibleFilterError("human_readable() can't interpret following string: %s" % size)
    return formatted
def human_to_bytes(size, default_unit=None, isbits=False):
    """Return the byte count parsed from a human readable string (delegates
    to module_utils.basic.human_to_bytes)."""
    try:
        count = basic.human_to_bytes(size, default_unit, isbits)
    except Exception:
        raise AnsibleFilterError("human_to_bytes() can't interpret following string: %s" % size)
    return count
def rekey_on_member(data, key, duplicates='error'):
    """
    Rekey a dict of dicts on another member

    May also create a dict from a list of dicts.

    duplicates can be one of ``error`` or ``overwrite`` to specify whether to error out if the key
    value would be duplicated or to overwrite previous entries if that's the case.
    """
    if duplicates not in ('error', 'overwrite'):
        raise AnsibleFilterError("duplicates parameter to rekey_on_member has unknown value: {0}".format(duplicates))

    # Accept a mapping (rekey its values) or any non-string iterable of dicts
    if isinstance(data, Mapping):
        entries = data.values()
    elif isinstance(data, Iterable) and not isinstance(data, (text_type, binary_type)):
        entries = data
    else:
        raise AnsibleFilterError("Type is not a valid list, set, or dict")

    new_obj = {}
    for entry in entries:
        if not isinstance(entry, Mapping):
            raise AnsibleFilterError("List item is not a valid dict")
        try:
            key_elem = entry[key]
        except KeyError:
            raise AnsibleFilterError("Key {0} was not found".format(key))
        except Exception as e:
            raise AnsibleFilterError(to_native(e))
        # An existing entry is always a non-empty dict (it contains at least
        # {key: key_elem}), so plain truthiness detects duplicates
        if new_obj.get(key_elem, None):
            if duplicates == 'error':
                raise AnsibleFilterError("Key {0} is not unique, cannot correctly turn into dict".format(key_elem))
            elif duplicates == 'overwrite':
                new_obj[key_elem] = entry
        else:
            new_obj[key_elem] = entry
    return new_obj
class FilterModule(object):
    ''' Ansible math jinja2 filters '''

    def filters(self):
        """Map the exported filter names to their implementations."""
        return {
            # general math
            'min': min,
            'max': max,

            # exponents and logarithms
            'log': logarithm,
            'pow': power,
            'root': inversepower,

            # set theory
            'unique': unique,
            'intersect': intersect,
            'difference': difference,
            'symmetric_difference': symmetric_difference,
            'union': union,

            # combinatorial
            'product': itertools.product,
            'permutations': itertools.permutations,
            'combinations': itertools.combinations,

            # computer theory
            'human_readable': human_readable,
            'human_to_bytes': human_to_bytes,
            'rekey_on_member': rekey_on_member,

            # zip
            'zip': zip,
            'zip_longest': zip_longest,
        }
|
romain-dartigues/ansible
|
lib/ansible/plugins/filter/mathstuff.py
|
Python
|
gpl-3.0
| 8,065
|
[
"Brian"
] |
c03e0f6018117496afb15950dd7bfb6d72d4fbe981fb36c4dc577c0283efd08f
|
'''
Created on Aug 5, 2014
@author: gearsad
'''
import vtk
from InteractorSuperclass import InteractorSuperclass
class Interactor1stPersonUser(InteractorSuperclass):
    '''
    Inherit the VTK class vtkInteractorStyleUser and extend it to be a 1st-person camera for the observer.

    Mouse movement yaws/pitches the camera; numpad keys 8/5/6/4 move it
    forward/backward/right/left along the current view direction.

    Ref: http://www.vtk.org/doc/nightly/html/classvtkInteractorStyleUser.html#details
    Important details about implementation: http://vtk.1045678.n5.nabble.com/vtkInteractorStyleUser-td2839763.html
    Interactors: http://www.atamai.com/cgi-bin/viewvc.cgi/atamai/classes/PaneFrame.py?diff_format=u&pathrev=OCCIviewer-1-0-99&logsort=cvs&sortby=rev&view=diff&r1=1.25&r2=1.26
    '''

    def __init__(self, renderer, iren):
        '''
        renderer -- the vtkRenderer whose active camera is driven
        iren     -- the vtkRenderWindowInteractor supplying events
        '''
        # Call the parent constructor
        InteractorSuperclass.__init__(self, renderer, iren)
        # Finally call the event handler to do a first-call update in case the model doesn't move
        self.MouseMoveCallback(None, None)

    def SetCameraPosition(self, posVec3):
        '''
        Teleport the camera to posVec3, shifting the focal point by the
        same offset so the viewing direction is preserved.
        '''
        camera = self.GetCurrentRenderer().GetActiveCamera()
        # Calculate the difference between the focal point and the camera
        focal = camera.GetFocalPoint()
        camPos = camera.GetPosition()
        focal = (focal[0] + posVec3[0] - camPos[0], focal[1] + posVec3[1] - camPos[1], focal[2] + posVec3[2] - camPos[2])
        camera.SetPosition(posVec3)
        camera.SetFocalPoint(focal)

    def MouseMoveCallback(self, obj, event):
        '''
        Turn mouse movement deltas into 1st-person camera yaw/pitch.
        obj and event are the usual VTK observer arguments (unused).
        '''
        # Get the interactor
        iren = self.GetInteractor()
        if iren is None: return
        # Ref: http://portal.nersc.gov/svn/visit/trunk/vendor_branches/vtk/src/Rendering/vtkInteractorStyleTrackballCamera.cxx
        dx = iren.GetEventPosition()[0] - iren.GetLastEventPosition()[0];
        dy = iren.GetEventPosition()[1] - iren.GetLastEventPosition()[1];
        # Rotate the focal point around (yaw = x) and (pitch =y) by a factor of the mouse differentials dx and dy
        camera = self.GetCurrentRenderer().GetActiveCamera()
        # Keep the horizon level: cancel any accumulated roll before rotating.
        camera.SetRoll(0)
        screenSize = iren.GetRenderWindow().GetSize()
        # Yaw changes in a negative direction but the pitch is correct
        camera.Yaw(-float(dx) / float(screenSize[0]) * 360 * 2.0);
        camera.Pitch(float(dy) / float(screenSize[1]) * 180 / 2.0);
        # Update the clipping range of the camera
        self.GetCurrentRenderer().ResetCameraClippingRange()
        # Move it to the center of the screen again so we stay away from the bounds.
        #iren.GetRenderWindow().SetCursorPosition(512, 384)

    def KeydownCallback(self, obj, event):
        '''
        Responding to keyboard events for now, want something more interactive later.
        Numpad mapping: 8 = forward, 5 = backward, 6 = right, 4 = left.
        Ref: http://portal.nersc.gov/svn/visit/tags/2.6.0/vendor_branches/vtk/src/Examples/GUI/Python/CustomInteraction.py
        '''
        # Get the interactor
        iren = self.GetInteractor()
        if iren is None: return
        key = iren.GetKeyCode()
        if key == "8": # Move forward
            self.__MoveForward()
        if key == "5": # Move backward
            self.__MoveBackward()
        if key == "6": # Move right
            self.__MoveRight()
        if key == "4":
            self.__MoveLeft()

    def KeyupCallback(self, obj, event):
        # No key-release behavior needed; defined to satisfy the interactor interface.
        return

    def __MoveForward(self):
        '''Step the camera one unit along the view direction (camera -> focal point).'''
        camera = self.GetCurrentRenderer().GetActiveCamera()
        cam = camera.GetPosition()
        focal = camera.GetFocalPoint()
        vec = [0, 0, 0]
        newCam = [0, 0, 0]
        newFocal = [0, 0, 0]
        # Unit view-direction vector; translate camera and focal point together.
        vtk.vtkMath.Subtract(focal, cam, vec)
        vtk.vtkMath.Normalize(vec)
        vtk.vtkMath.Add(cam, vec, newCam)
        vtk.vtkMath.Add(focal, vec, newFocal)
        camera.SetPosition(newCam)
        camera.SetFocalPoint(newFocal)
        # Update the clipping range of the camera
        self.GetCurrentRenderer().ResetCameraClippingRange()

    def __MoveBackward(self):
        '''Step the camera one unit opposite to the view direction.'''
        camera = self.GetCurrentRenderer().GetActiveCamera()
        cam = camera.GetPosition()
        focal = camera.GetFocalPoint()
        vec = [0, 0, 0]
        newCam = [0, 0, 0]
        newFocal = [0, 0, 0]
        vtk.vtkMath.Subtract(focal, cam, vec)
        vtk.vtkMath.Normalize(vec)
        # Same direction vector as forward, but subtracted to move backwards.
        vtk.vtkMath.Subtract(cam, vec, newCam)
        vtk.vtkMath.Subtract(focal, vec, newFocal)
        camera.SetPosition(newCam)
        camera.SetFocalPoint(newFocal)
        # Update the clipping range of the camera
        self.GetCurrentRenderer().ResetCameraClippingRange()

    def __MoveRight(self):
        '''Strafe one unit to the right of the (horizontal) view direction.'''
        camera = self.GetCurrentRenderer().GetActiveCamera()
        cam = camera.GetPosition()
        focal = camera.GetFocalPoint()
        up = [0, 1, 0] #We don't want roll
        vec = [0, 0, 0]
        newCam = [0, 0, 0]
        newFocal = [0, 0, 0]
        vtk.vtkMath.Subtract(focal, cam, vec)
        vec[1] = 0 #We don't want roll
        vtk.vtkMath.Normalize(vec)
        # Cross of the flattened view direction with world-up gives the strafe axis.
        # NOTE: vtkMath.Cross writes its result into vec in place (aliased output).
        vtk.vtkMath.Cross(vec, up, vec)
        vtk.vtkMath.Add(cam, vec, newCam)
        vtk.vtkMath.Add(focal, vec, newFocal)
        camera.SetPosition(newCam)
        camera.SetFocalPoint(newFocal)
        # Update the clipping range of the camera
        self.GetCurrentRenderer().ResetCameraClippingRange()

    def __MoveLeft(self):
        '''Strafe one unit to the left of the (horizontal) view direction.'''
        camera = self.GetCurrentRenderer().GetActiveCamera()
        cam = camera.GetPosition()
        focal = camera.GetFocalPoint()
        up = [0, 1, 0] #We don't want roll
        vec = [0, 0, 0]
        newCam = [0, 0, 0]
        newFocal = [0, 0, 0]
        vtk.vtkMath.Subtract(focal, cam, vec)
        vec[1] = 0 #We don't want roll
        vtk.vtkMath.Normalize(vec)
        # Same strafe axis as __MoveRight; subtracting moves left instead.
        vtk.vtkMath.Cross(vec, up, vec)
        vtk.vtkMath.Subtract(cam, vec, newCam)
        vtk.vtkMath.Subtract(focal, vec, newFocal)
        camera.SetPosition(newCam)
        camera.SetFocalPoint(newFocal)
        # Update the clipping range of the camera
        self.GetCurrentRenderer().ResetCameraClippingRange()
|
GearsAD/semisorted_arnerve
|
sandbox/bot_vis_platform_oculus/scene/Interactor1stPersonUser.py
|
Python
|
mit
| 6,049
|
[
"VTK",
"VisIt"
] |
bd3effbce769291628f2a8d70d61b66b889ca6ba9583fafaf6ea4a7b57ffd43c
|
#!/usr/bin/env python2
"""
EMIRGE: Expectation-Maximization Iterative Reconstruction of Genes from the Environment
Copyright (C) 2010-2016 Christopher S. Miller (christopher.s.miller@ucdenver.edu)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
https://github.com/csmiller/EMIRGE
for help, type:
python emirge_amplicon.py --help
"""
USAGE = \
"""usage: %prog DIR <required_parameters> [options]
This version of EMIRGE (%prog) attempts to reconstruct rRNA SSU genes
from Illumina amplicon data. It can handle up to a few million rRNA
reads at a time.
DIR is the working directory to process data in.
Use --help to see a list of required and optional arguments
Additional information:
https://groups.google.com/group/emirge-users
https://github.com/csmiller/EMIRGE/wiki
If you use EMIRGE in your work, please cite these manuscripts, as appropriate.
Miller CS, Baker BJ, Thomas BC, Singer SW, Banfield JF (2011)
EMIRGE: reconstruction of full-length ribosomal genes from microbial community short read sequencing data.
Genome biology 12: R44. doi:10.1186/gb-2011-12-5-r44.
Miller CS, Handley KM, Wrighton KC, Frischkorn KR, Thomas BC, Banfield JF (2013)
Short-Read Assembly of Full-Length 16S Amplicons Reveals Bacterial Diversity in Subsurface Sediments.
PloS one 8: e56018. doi:10.1371/journal.pone.0056018.
"""
import sys
import os
import re
import csv
from optparse import OptionParser, OptionGroup, SUPPRESS_HELP
import pysam
import numpy
from scipy import sparse
from subprocess import Popen, PIPE, check_call, CalledProcessError, check_output
from time import ctime, time
from datetime import timedelta
import gzip
import cPickle
import _emirge_amplicon as _emirge
# from ctrie import Trie
# from pykseq import Kseq
BOWTIE_l = 20
BOWTIE_e = 300
BOWTIE_ASCII_OFFSET = 33 # currently, bowtie writes quals with an ascii offset of 33
class Record:
    """
    Stripped-down FASTA record class with the same members as the
    Biopython FASTA Record class.

    title    -- header text with the leading '>' removed
    sequence -- residues as a plain string
    """
    # Number of residues to put on each line when generating FASTA format.
    _colwidth = 60

    def __init__(self, title="", sequence=""):
        """Create a new Record, optionally pre-populated."""
        self.title = title
        self.sequence = sequence

    def __str__(self):
        """Render the record in FASTA format, wrapping the sequence."""
        wrap_pattern = ".{1,%s}" % self._colwidth
        wrapped_body = "\n".join(re.findall(wrap_pattern, self.sequence))
        return ">%s\n%s\n" % (self.title, wrapped_body)
def FastIterator(filehandle, dummyParser = None, record = None):
    """
    Generator returning FASTA records one at a time.  Maybe 160% faster on test case.

    MAY RAISE MemoryError ON LARGE FASTA FILES (the whole file is read at once).

    IN: file object
        dummyParser is a placeholder for RecordParser from Biopython.  Not used.
        record: a record object to reuse for yielding.  Otherwise create an
                empty one with standard init.

    NOTE: the SAME record object is yielded every time, with its fields
    overwritten in place -- copy the fields out if you need to keep them.
    """
    if record is None:
        record = Record()
    # filehandle.read()[1:] drops the leading '>'; records are then
    # separated by "\n>".
    for recordstring in re.split('\n>', filehandle.read()[1:]):
        record.title, record.sequence = recordstring.split('\n', 1)
        # Tolerate DOS/Windows "\r\n" endlines (the original noted it broke
        # on "\r"): strip any carriage returns from both fields.
        record.title = record.title.rstrip('\r')
        record.sequence = record.sequence.replace('\r', '').replace('\n', '').replace(' ', '')
        yield record
class EM(object):
    """
    driver class for EM algorithm
    """
    # Emit progress and timing messages to stderr when True.
    _VERBOSE = True
    # Nucleotide <-> column-index mappings for the per-base probability arrays.
    base2i = {"A":0,"T":1,"C":2,"G":3}
    i2base = dict([(v,k) for k,v in base2i.iteritems()])
    # asciibase2i = {65:0,84:1,67:2,71:3}
    # Strips an optional "N|x|" cluster-mark prefix from sequence names.
    clustermark_pat = re.compile(r'(\d+\|.?\|)?(.*)') # cludgey code associated with this should go into a method: get_seq_i()
    # Default per-base error probability used before real data is available.
    DEFAULT_ERROR = 0.05
def __init__(self, reads1_filepath, reads2_filepath,
             insert_mean,
             insert_sd,
             n_cpus = 1,
             cwd = os.getcwd(), max_read_length = 76,
             iterdir_prefix = "iter.", cluster_thresh = 0.97,
             mapping_nice = None,
             reads_ascii_offset = 64,
             expected_coverage_thresh = 10,
             rewrite_reads = True):
    """
    Set up all bookkeeping for the EM run and load the reads into memory.

    reads1_filepath / reads2_filepath -- fastq input; reads2_filepath is
        None for unpaired data.
    insert_mean / insert_sd -- insert size distribution for paired data.
    n_cpus is how many processors to use for multithreaded steps (currently only the bowtie mapping)
    mapping_nice is nice value to add to mapping program
    reads_ascii_offset -- quality-score ASCII offset of the input reads.
    expected_coverage_thresh -- minimum expected coverage a minor allele
        variant must reach before it is split out as a new sequence.
    rewrite_reads -- if False (hidden option in main), assume read headers
        are already integer indices and only count the reads.
    """
    self.reads1_filepath = reads1_filepath
    self.reads2_filepath = reads2_filepath
    self.insert_mean = insert_mean
    self.insert_sd = insert_sd
    self.n_cpus = n_cpus
    self.mapping_nice = mapping_nice
    self.reads_ascii_offset = reads_ascii_offset
    self.iteration_i = None # keeps track of which iteration we are on.
    self.cwd = cwd
    self.max_read_length = max_read_length
    self.iterdir_prefix = iterdir_prefix
    self.cluster_thresh = cluster_thresh # if two sequences evolve to be >= cluster_thresh identical (via usearch), then merge them. [0, 1.0]
    assert self.cluster_thresh >= 0 and self.cluster_thresh <= 1.0
    self.expected_coverage_thresh = expected_coverage_thresh
    # Single numpy array. Has the shape: (numsequences x numreads) [numreads can be numpairs]
    self.likelihoods = None # = Pr(R_i|S_i), the likelihood of generating read R_i given sequence S_i
    # list of numpy arrays. list index is iteration number. Each numpy array has the shape: (numsequences,)
    self.priors = [] # = Pr(S_i), the prior probability that sequence S generated any read
    # list of numpy arrays. list index is iteration number. Each numpy array has the shape: (numsequences x numreads)
    self.posteriors = [] # = Pr(S_i|R_i), the posterior probability that sequence S_i generated read R_i
    # dict's and list keeping id mappings between sequence names and internal indices (seq_i)
    # index is stable between iterations. If sequence_i2sequence value is None, means this sequence was abandoned in a previous round
    self.sequence_name2sequence_i = {}
    self.sequence_i2sequence_name = [] # list index is iteration number.
    self.split_seq_first_appeared = {} # seq_i --> iteration first seen. Useful for keeping track of when a sequence first appeared, and not allowing merging of recently split out sequences
    # similar to above except for reads -- depreciated
    # self.read_name2read_i = {} # Trie()
    # self.read_i2read_name = numpy.array([], dtype=numpy.uint) -- DEPRECIATED
    self.n_reads = 0 # in fastq input (number of reads **or number of read pairs**)
    self.n_reads_mapped = 0
    self.n_sequences = 0
    # other constants, potentially changeable, tunable later, or could incorporate into probabilistic model.
    self.min_depth = 5.0 # minimum depth to keep sequence around for next round
    self.min_prior = None # minimum prior probability for a sequence to keep it around for next round (alternative to depth, which is
                          # a little weird when you allow mappings to more than one place. NOT YET IMPLEMENTED
    self.base_coverages = [] # list of numpy arrays -- per base coverage values.
    self.min_length_coverage_def = 1 # EXPERIMENTAL: Minimum coverage in order to be counted in min_length_coverage
    self.min_length_coverage = None # EXPERIMENTAL. Fraction of length that has to be covered by >= min_length_cov_depth
    self.snp_minor_prob_thresh = 0.10 # if prob(N) for minor allele base N is >= this threshold, call site a minor allele
    self.snp_percentage_thresh = 0.10 # if >= this percentage of bases are minor alleles (according to self.snp_minor_prob_thresh),
                                      # then split this sequence into two sequences.
    # rewrite reads for index mapping, set self.n_reads
    self.temporary_files = [] # to remove at end of run
    if rewrite_reads:
        self.rewrite_reads() # also sets self.n_reads
    else: # hidden option in main to avoid rewriting reads from big files more than necessary
        # if already has correct integer read neames, then simply count reads in file
        if self._VERBOSE:
            sys.stderr.write("Counting reads in input files at %s...\n"%(ctime()))
            start_time = time()
        # count lines with wc -l and divide by 4 implicitly via awk-less pipeline:
        # here we only need the raw line count; the header renumbering was done previously.
        cmd = "cat %s | wc -l"%(self.reads1_filepath)
        if self.reads1_filepath.endswith('.gz'):
            cmd = "z" + cmd
        p = Popen(cmd, shell=True, stdout=PIPE)
        stdoutdata, stderrdata = p.communicate()
        self.n_reads = int(stdoutdata.strip())
        if self._VERBOSE:
            sys.stderr.write("DONE Counting reads in input files at %s [%s]...\n"%(ctime(), timedelta(seconds = time()-start_time)))
    if self._VERBOSE:
        sys.stderr.write("Number of reads (or read pairs) in input file(s): %d\n"%(self.n_reads))
    self.reads_seen = numpy.zeros(self.n_reads, dtype=numpy.uint8) # bool matrix of reads seen mapped at any iteration
    # For self.reads / self.quals:
    #   where 1st dimension is read index (from rewritten file headers)
    #   and second dimension is read number (0 or 1 ==> read /1 or read /2)
    #   3rd dimension for reads and quals is max_readlen
    self.reads = numpy.empty((self.n_reads, 2, self.max_read_length), dtype=numpy.uint8)
    self.quals = numpy.empty_like(self.reads)
    self.readlengths = numpy.empty((self.n_reads, 2), dtype = numpy.uint16)
    # read through reads file again, fill these.
    if self._VERBOSE:
        sys.stderr.write("Preallocating reads and quals in memory at %s...\n"%(ctime()))
        start_time = time()
    _emirge.populate_reads_arrays(self)
    if self._VERBOSE:
        sys.stderr.write("DONE Preallocating reads and quals in memory at %s [%s]...\n"%(ctime(), timedelta(seconds = time()-start_time)))
    return
def rewrite_reads(self):
    """
    rewrite reads files with indices as only info in header.

    Though this requires an inefficient rewrite of the fastq file,
    it means that reading of bam files do not require a costly separate
    id lookup step on the read name.

    also: set self.reads_n; registers the rewritten files in
    self.temporary_files for cleanup at end of run.
    initialize self.reads_seen  # bool matrix of reads seen mapped at any iteration
    """
    if self._VERBOSE:
        sys.stderr.write("Rewriting reads with indices in headers at %s...\n"%(ctime()))
        start_time = time()
    tmp_n_reads_file_path = os.path.join(self.cwd, "emirge_tmp_n_reads.txt")
    for i in (1, 2):
        reads_filepath = getattr(self, "reads%s_filepath"%i)
        if reads_filepath is None: # if not paired end, then self.reads2_filepath should be None
            continue
        new_reads_filepath = os.path.join(self.cwd, "emirge_tmp_reads_%s.fastq"%i)
        self.temporary_files.append(new_reads_filepath)
        setattr(self, "reads%s_filepath"%i, new_reads_filepath)
        # first try awk, which is fast:
        # the awk program replaces every 4th line (the fastq header) with a
        # running integer id and dumps the final count to tmp_n_reads_file_path.
        try:
            cmd = """cat %s | awk 'BEGIN {i=0} {if ((NR-1)%%4==0) {print "@"i; i++} else print $0} END {print i > "%s"} ' > %s"""%(reads_filepath, tmp_n_reads_file_path, new_reads_filepath)
            if reads_filepath.endswith('.gz'):
                cmd = 'z' + cmd
            check_call(cmd, shell=True, stdout = sys.stdout, stderr = sys.stderr)
            self.n_reads = int(file(tmp_n_reads_file_path).readline().strip())
            os.remove(tmp_n_reads_file_path)
            continue # awk code worked
        except CalledProcessError:
            if self._VERBOSE:
                sys.stderr.write("\tawk rewrite of reads failed! Is awk installed?\n")
            raise
        # sys.stderr.write("\tawk rewrite failed, falling back to pykseq...\n")
        # COMMENTED OUT FOR THE TIME BEING.  REASONABLE TO EXPECT AWK
        # if code reaches here, means awk failed, so use pykseq instead (about 2X slower)
        # outf = file(new_reads_filepath, 'w')
        # outf_write = outf.write
        # ks = Kseq(reads_filepath)
        # i = 0
        # while 1:
        #     t = ks.read_sequence_and_quals()
        #     if t is None:
        #         break
        #     else:
        #         outf_write("@%s\n%s\n+\n%s\n" % (i, t[1], t[2]))
        #         i += 1
        # outf.close()
        # del ks
        # self.n_reads = i
    if self._VERBOSE:
        sys.stderr.write("DONE Rewriting reads with indexes in headers at %s [%s]...\n"%(ctime(), timedelta(seconds = time()-start_time)))
    return
def read_bam(self, bam_filename, reference_fasta_filename):
    """
    reads a bam file and (re)builds the per-iteration data structures.

    updates:
        self.sequence_i2sequence_name   # list of names indexed by seq_i
        self.sequence_name2sequence_i   # dict: name -> seq_i
        self.probN                      # reset to one entry per sequence
    doesn't do anything with these anymore, they should be populated and
    stable with _emirge.populate_reads_arrays:
        self.reads
        self.quals
        self.readlengths
    creates a new EMPTY entry for (appends to list, removes t-2):
        self.priors
        self.posteriors
    creates new each iteration (overwrites):
        self.likelihoods
        self.unmapped_bases
        self.coverage
        self.bamfile_data

    This MUST maintain seq_i to name and read_i to name mappings between
    iterations, so that a single name always maintains the same index from
    one iteration to the next.  One result of this requirement is that the
    various matrices can always get larger in a later t, but never smaller
    (as reads or seqs are added).
    """
    if self._VERBOSE:
        sys.stderr.write("Reading bam file %s at %s...\n"%(bam_filename, ctime()))
        start_time = time()
    initial_iteration = self.iteration_i < 0 # this is initial iteration
    self.current_bam_filename = bam_filename
    self.current_reference_fasta_filename = reference_fasta_filename
    self.fastafile = pysam.Fastafile(self.current_reference_fasta_filename)
    # set here (by the Cython helper):
    #   self.sequence_name2sequence_i
    #   self.sequence_i2sequence_name
    #   self.bamfile_data  -- numpy array with (seq_i, read_i, pair_i, rlen, pos, is_reverse)
    _emirge.process_bamfile(self, BOWTIE_ASCII_OFFSET)
    self.n_sequences = len(self.sequence_name2sequence_i)
    t_check = time()
    self.priors.append(numpy.zeros(self.n_sequences, dtype = numpy.float))
    self.likelihoods = sparse.coo_matrix((self.n_sequences, self.n_reads), dtype = numpy.float) # init all to zero.
    self.posteriors.append(sparse.lil_matrix((self.n_sequences+1, self.n_reads+1), dtype=numpy.float))
    self.probN = [None for x in range(self.n_sequences)] # TODO: is this necessary any more? or is bookkeeping with probN good enough now.
    self.unmapped_bases = [None for x in self.probN]
    self.mean_read_length = numpy.mean(self.readlengths)
    # reset probN for valid sequences (from current_reference_fasta_filename).
    # is this still necessary? Or do I keep probN bookkeeping in order already?
    t_check = time()
    _emirge.reset_probN(self) # also updates coverage values and culls via fraction of length covered
    # print >> sys.stderr, "DEBUG: reset_probN loop time: %s"%(timedelta(seconds = time()-t_check))
    # only keep the last two iterations (t and t-1) of priors/posteriors
    for d in [self.priors, self.posteriors]:
        if len(d) > 2:
            trash = d.pop(0) # no longer care about t-2
            del trash
    if self._VERBOSE:
        sys.stderr.write("DONE Reading bam file %s at %s [%s]...\n"%(bam_filename, ctime(), timedelta(seconds = time()-start_time)))
    return
def initialize_EM(self, bam_filename, reference_fasta_filename, randomize_priors = False):
    """
    Set up EM with two things so that first iteration can proceed:
       - Initial guesses of Pr(S) are made purely based on read counts, where each read is only allowed to
         map only once to a single best reference  (**if more than one alignment reported per read, raise exception!**).
       - Initial guess of Pr(N=n) (necessary for likelihood in Pr(S|R) is also calculated simply, with the assumption
         of 1 read (the best again) mapped to exactly 1 sequence.  Thus Pr(N=n) only takes the base call errors
         into account.  This is actually not done here, but rather the first time self.calc_probN is called.

       - bamfile for iteration 0 is assumed to have just one ("best") mapping per read.
       - there is no t-1 for t = 0, hence the need to set up Pr(S)

    if randomize_priors == True, then after calculating priors,
    shuffle them randomly.  This is useful for debugging
    purposes, to test effect of initialization, robustness of
    results, and how often the algorithm gets stuck in local
    maxima.
    """
    if self._VERBOSE:
        sys.stderr.write("Beginning initialization at %s...\n"%(ctime()))
    self.iteration_i = -1
    self.read_bam(bam_filename, reference_fasta_filename)
    # initialize priors.  Here just adding a count for each read mapped to each reference sequence
    # since bowtie run with --best and reporting just 1 alignment at random, there is some stochasticity here.
    for (seq_i, read_i, pair_i, rlen, pos, is_reverse) in self.bamfile_data:
        # if self.probN[seq_i] is not None:
        self.priors[-1][seq_i] += 1
    # this shouldn't be necessary with way I do initial mapping right now (all seq_i in priors should be nonzero initially)
    nonzero_indices = numpy.nonzero(self.priors[-1]) # only divide cells with at least one count.  Set all others to Pr(S) = 0
    self.priors[-1] = self.priors[-1][nonzero_indices] / self.priors[-1][nonzero_indices].sum() # turn these into probabilities
    if randomize_priors:
        numpy.random.shuffle(self.priors[-1])
    self.priors.append(self.priors[-1].copy()) # push this back to t-1 (index == -2)
    # write priors as special case:
    self.print_priors(os.path.join(self.cwd, "priors.initialized.txt"))
    if self._VERBOSE:
        sys.stderr.write("DONE with initialization at %s...\n"%(ctime()))
    return
def do_iteration(self, bam_filename, reference_fasta_filename):
    """
    Run one full EM iteration over the given mapping.

    This starts with the M-step, so it requires that Pr(S) and Pr(N=n) from
    the previous round are set:
      - Pr(S) is used from the previous round's E-step.
      - Pr(N=n) partially depends on the previous round's M-step.
    Once the M-step is done, the E-step calculates Pr(S) based upon the
    just-calculated M-step.  Finally a new consensus fasta is written
    (culling/splitting sequences), results are persisted, and a fresh
    mapping is launched for the next iteration.
    """
    self.iteration_i += 1
    if self._VERBOSE:
        sys.stderr.write("Starting iteration %d at %s...\n"%(self.iteration_i, ctime()))
        start_time = time()
    self.iterdir = os.path.join(self.cwd, "%s%02d"%(self.iterdir_prefix, self.iteration_i))
    check_call("mkdir -p %s"%(self.iterdir), shell=True)
    self.read_bam(bam_filename, reference_fasta_filename) # initializes all data structures.
    # m-step
    self.calc_likelihoods()
    self.calc_posteriors()
    # now e-step
    self.calc_priors()
    # now write a new fasta file.  Cull sequences below self.min_depth
    consensus_filename = os.path.join(self.iterdir, "iter.%02d.cons.fasta"%(self.iteration_i))
    self.write_consensus(consensus_filename)    # culls and splits
    self.cluster_sequences(consensus_filename)  # merges sequences that have evolved to be the same (USEARCH)
    # leave a few things around for later.  Note that print_priors also leaves sequence_name2sequence_i mapping, basically.
    if self._VERBOSE:
        sys.stderr.write("Writing priors and probN to disk for iteration %d at %s...\n"%(self.iteration_i, ctime()))
    self.print_priors()
    # python gzip.GzipFile is slow.  Use system call to gzip instead
    pickled_filename = os.path.join(self.iterdir, 'probN.pkl')
    cPickle.dump(self.probN, file(pickled_filename, 'w'), cPickle.HIGHEST_PROTOCOL)
    check_call("gzip -f %s"%(pickled_filename), shell=True, stdout = sys.stdout, stderr = sys.stderr)
    if self._VERBOSE:
        sys.stderr.write("DONE Writing priors and probN to disk for iteration %d at %s...\n"%(self.iteration_i, ctime()))
    # delete bamfile from previous round (keep -- and convert to
    # compressed bam -- initial iteration mapping in the
    # background)
    if self.iteration_i == 0 and self.current_bam_filename.endswith(".u.bam"): # initial iteration
        # BUGFIX: str.rstrip(".u.bam") strips any trailing characters from
        # the *set* {., u, b, a, m}, not the literal suffix, and could eat
        # legitimate trailing filename characters.  Slice the suffix off.
        renamed = self.current_bam_filename[:-len(".u.bam")] + ".bam"
        self.initial_compress_process = Popen("samtools view -h -b %s > %s"%(self.current_bam_filename, renamed), shell=True, stderr = sys.stderr) # child process runs in background
        self.initial_bam_filename_to_remove = self.current_bam_filename
    if self.iteration_i >= 1:
        os.remove(self.current_bam_filename)
    # check up on initial mapping compression background process once per iteration here.
    # (getattr guard: the attribute only exists once the compress process has been launched)
    if getattr(self, "initial_compress_process", None) is not None:
        poll = self.initial_compress_process.poll()
        if poll == 0: # completed successfully
            os.remove(self.initial_bam_filename_to_remove)
            self.initial_compress_process = None # don't bother in future
        elif poll is None: # still running
            if self.iteration_i == self.max_iterations - 1: # shouldn't happen... but to be correct
                sys.stderr.write("Waiting for initial bamfile to compress before finishing...")
                self.initial_compress_process.wait()
                sys.stderr.write(" DONE\n")
            else:
                pass
        else: # poll() returned a nonzero exit status.
            # BUGFIX: the original format string had three placeholders but
            # only two arguments (in the wrong order), raising TypeError here.
            sys.stderr.write("WARNING: Failed to compress initial mapping bamfile %s.\nWARNING: Failure with exit code: %s.\nWARNING: File remains uncompressed: %s\n"%(self.initial_bam_filename_to_remove, poll, self.initial_bam_filename_to_remove))
            self.initial_compress_process = None # don't bother in future
    # now do a new mapping run for next iteration
    self.do_mapping(consensus_filename, nice = self.mapping_nice)
    if self._VERBOSE:
        sys.stderr.write("Finished iteration %d at %s...\n"%(self.iteration_i, ctime()))
        sys.stderr.write("Total time for iteration %d: %s\n"%(self.iteration_i, timedelta(seconds = time()-start_time)))
    return
def print_priors(self, ofname = None):
    """
    Leave a file in the working directory with the nonzero priors
    printed out, one line per sequence: index, name, prior.
    Defaults to priors.iter.NN.txt inside the current iteration directory.
    """
    if ofname is None:
        out_path = os.path.join(self.iterdir, "priors.iter.%02d.txt"%(self.iteration_i))
    else:
        out_path = ofname
    out = file(out_path, 'w')
    # numpy array of names for (possibly) faster slicing than the plain list
    name_array = numpy.array(self.sequence_i2sequence_name)
    for seq_i, prior in enumerate(self.priors[-1]):
        out.write("%d\t%s\t%.10f\n"%(seq_i, name_array[seq_i], prior))
    out.close()
def calc_priors(self):
    """
    E-step: recompute the priors [ Pr(S) ] from the current posteriors
    Pr(S|R) produced by this iteration's M step.
    """
    # Column sums over the posteriors are coming up, so convert the
    # matrix to CSC sparse format for efficient summing.
    posteriors_csc = self.posteriors[-1].tocsc()
    self.posteriors[-1] = posteriors_csc
    per_sequence_mass = numpy.asarray(posteriors_csc.sum(axis = 1)).flatten()
    self.priors[-1] = per_sequence_mass / posteriors_csc.sum()
    return
def write_consensus(self, outputfilename):
    """
    writes a consensus, taking the most probable base at each position, according to current
    values in Pr(N=n) (self.probN)

    only write sequences with coverage above self.min_depth (culling)
    split sequences with many minor alleles:
        self.snp_minor_prob_thresh # if prob(N) for minor allele base N is >= this threshold, call site a minor allele
        self.snp_percentage_thresh # if >= this percentage of bases are minor alleles (according to self.snp_minor_prob_thresh),
                                   # then split this sequence into two sequences.

    Splitting also creates new entries in priors/posteriors/probN and the
    name<->index mappings for each newly minted minor sequence.
    """
    if self._VERBOSE:
        sys.stderr.write("Writing consensus for iteration %d at %s...\n"%(self.iteration_i, ctime()))
        sys.stderr.write("\tsnp_minor_prob_thresh = %.3f\n"%(self.snp_minor_prob_thresh))
        sys.stderr.write("\tsnp_percentage_thresh = %.3f\n"%(self.snp_percentage_thresh))
        t0 = time()
    splitcount = 0
    cullcount = 0
    of = file(outputfilename, 'w')
    times_split = []       # DEBUG
    times_posteriors = []  # DEBUG
    seqs_to_process = len(self.probN) # DEBUG
    i2base = self.i2base
    rows_to_add = [] # these are for updating posteriors at end with new minor strains
    cols_to_add = []
    data_to_add = []
    probNtoadd = []  # for newly split out sequences
    self.posteriors[-1] = self.posteriors[-1].tolil() # just to make sure this is in row-access-friendly format
    loop_t0 = time()
    for seq_i in range(len(self.probN)):
        seq_i_t0 = time()
        if self.probN[seq_i] is None: # means this sequence is no longer present in this iteration or was culled in reset_probN
            continue
        # FOLLOWING CULLING RULES REMOVED in favor of length-coverage culling in reset_probN()
        # check if coverage passes self.min_depth, if not don't write it (culling happens here)
        # if self.min_depth is not None and self.coverage[seq_i] < self.min_depth: #  and self.iteration_i > 5:
        #     # could adjust priors and posteriors here, but because
        #     # prior will already be low (b/c of low coverage) and
        #     # because next round will have 0 mappings (no sequence
        #     # in reference file to map to), this seems
        #     # unneccesary.
        #     # probNarray = None  # NOT PASSED BY REF, assignment is only local?
        #     self.probN[seq_i] = None
        #     cullcount += 1
        #     continue # continue == don't write it to consensus.
        # else passes culling thresholds
        title = self.sequence_i2sequence_name[seq_i]
        # most probable base at each position (argsort last column = argmax)
        consensus = numpy.array([i2base.get(x, "N") for x in numpy.argsort(self.probN[seq_i])[:,-1]])
        # check for minor allele consensus, SPLIT sequence into two candidate sequences if passes thresholds.
        # sites where >= 2 bases exceed the minor-allele probability threshold:
        minor_indices = numpy.argwhere((self.probN[seq_i] >= self.snp_minor_prob_thresh).sum(axis=1) >= 2)[:,0]
        if minor_indices.shape[0] > 0:
            # mean probability of the second-most-probable base over minor sites
            minor_fraction_avg = numpy.mean(self.probN[seq_i][(minor_indices, numpy.argsort(self.probN[seq_i][minor_indices])[:, -2])])
        else:
            minor_fraction_avg = 0.0
        # NEW rule: only split sequence if *expected* coverage
        # of newly split minor sequence (assuming uniform read
        # coverage over reconstructed sequence) is > some
        # threshold.  Here, expected coverage is calculated
        # based on:
        # Prior(seq_i) * number of MAPPED reads * avg read length * 2 seq per pair
        expected_coverage_minor = ( self.priors[-1][seq_i] * minor_fraction_avg * self.n_reads_mapped * self.mean_read_length ) / self.probN[seq_i].shape[0]
        expected_coverage_major = ( self.priors[-1][seq_i] * (1-minor_fraction_avg) * self.n_reads_mapped * self.mean_read_length ) / self.probN[seq_i].shape[0]
        if self.reads2_filepath is not None: # multipy by 2 because n_reads_mapped is actually number of mapped pairs
            expected_coverage_minor = expected_coverage_minor * 2.0
            expected_coverage_major = expected_coverage_major * 2.0
        if minor_indices.shape[0] / float(self.probN[seq_i].shape[0]) >= self.snp_percentage_thresh and \
           expected_coverage_minor >= self.expected_coverage_thresh:
            # We split!
            splitcount += 1
            if self._VERBOSE:
                t0_split = time()
            major_fraction_avg = 1.-minor_fraction_avg # if there's >=3 alleles, major allele keeps prob of other minors)
            minor_bases = numpy.array([i2base.get(x, "N") for x in numpy.argsort(self.probN[seq_i][minor_indices])[:,-2]]) # -2 gets second most probably base
            minor_consensus = consensus.copy()               # get a copy of the consensus
            minor_consensus[minor_indices] = minor_bases     # replace the bases that pass minor threshold
            # now deal with naming.
            title_root = re.search(r'(.+)(_m(\d+))$', title)
            if title_root is None: # no _m00 on this name
                title_root = title[:]
            else:
                title_root = title_root.groups()[0]
            # now check for any known name with same root and a _m on it.
            previous_m_max = max([0] + [int(x) for x in re.findall(r'%s_m(\d+)'%re.escape(title_root), " ".join(self.sequence_i2sequence_name))])
            m_title = "%s_m%02d"%(title_root, previous_m_max+1)
            # also split out Priors and Posteriors (which will be used in next round), split with average ratio of major to minor alleles.
            # updating priors first:
            old_prior = self.priors[-1][seq_i]
            self.priors[-1][seq_i] = old_prior * major_fraction_avg
            seq_i_minor = self.n_sequences # grow the sequence index space by one
            self.n_sequences += 1
            self.sequence_i2sequence_name.append(m_title)
            assert len(self.sequence_i2sequence_name) == self.n_sequences
            assert len(self.sequence_i2sequence_name) == seq_i_minor + 1
            self.sequence_name2sequence_i[m_title] = seq_i_minor
            self.split_seq_first_appeared[seq_i] = self.iteration_i
            # how I adjust probN here for newly split seq doesn't really matter,
            # as it is re-calculated next iter.
            # this only matters for probN.pkl.gz file left behind for this iteration.
            # for now just set prob(major base) = 0 and redistribute prob to other bases for minor,
            # and set prob(minor base) = 0 and redistribute prob to other bases for major
            # MINOR
            major_base_i = numpy.argsort(self.probN[seq_i][minor_indices])[:, -1]
            newprobNarray = self.probN[seq_i].copy()
            newprobNarray[(minor_indices, major_base_i)] = 0
            newprobNarray = newprobNarray / numpy.sum(newprobNarray, axis=1).reshape(newprobNarray.shape[0], 1)
            probNtoadd.append(newprobNarray)
            self.base_coverages.append(numpy.zeros_like(self.base_coverages[seq_i]))
            # MAJOR
            minor_base_i = numpy.argsort(self.probN[seq_i][minor_indices])[:, -2]
            self.probN[seq_i][(minor_indices, minor_base_i)] = 0
            self.probN[seq_i] = self.probN[seq_i] / numpy.sum(self.probN[seq_i], axis=1).reshape(self.probN[seq_i].shape[0], 1)
            # grow the priors array by one slot for the new minor sequence
            new_priors = numpy.zeros(seq_i_minor+1, dtype=self.priors[-1].dtype)
            new_priors[:-1] = self.priors[-1].copy()
            new_priors[seq_i_minor] = old_prior * minor_fraction_avg
            trash = self.priors.pop()
            del trash
            self.priors.append(new_priors)
            # keep track of all new minor data to add and add it
            # once at end for ALL split sequences with one coo
            # matrix construction, instead of each iteration.
            t_posterior = time()
            # new_read_probs, new_rows, new_cols = adjust_posteriors_for_split(AAAA, BBBB, CCCC) # TODO: could move to Cython
            # updating posteriors. for each seq-read pair with prob > 0, split prob out to major and minor seq.
            new_cols = self.posteriors[-1].rows[seq_i] # col in coo format
            new_read_probs = [x * minor_fraction_avg for x in self.posteriors[-1].data[seq_i]] # data in coo format
            new_rows = [seq_i_minor for x in new_cols] # row in coo format
            # add new read probs to cache of new read probs to add at end of loop
            rows_to_add.extend(new_rows)
            cols_to_add.extend(new_cols)
            data_to_add.extend(new_read_probs)
            # adjust old read probs to reflect major strain
            self.posteriors[-1].data[seq_i] = [x * major_fraction_avg for x in self.posteriors[-1].data[seq_i]]
            times_posteriors.append(time() - t_posterior)
            # adjust self.unmapped_bases (used in clustering).  For now give same pattern as parent
            self.unmapped_bases.append(self.unmapped_bases[seq_i].copy())
            # write out minor strain consensus
            of.write(">%s\n"%(m_title))
            of.write("%s\n"%("".join(minor_consensus)))
            if self._VERBOSE:
                sys.stderr.write("splitting sequence %d (%s) to %d (%s)...\n"%(seq_i, title,
                                                                               seq_i_minor, m_title))
            times_split.append(time()-seq_i_t0)
        # now write major strain consensus, regardless of whether there was a minor strain consensus
        of.write(">%s\n"%(title))
        of.write("%s\n"%("".join(consensus)))
    # END LOOP
    loop_t_total = time() - loop_t0
    # update posteriors matrix with newly added minor sequences new_posteriors via coo, then convert to csr.
    new_posteriors = self.posteriors[-1].tocoo() # first make a copy in coo format
    # then create new coo matrix with new shape, appending new row, col, data to old row, col, data
    new_posteriors = sparse.coo_matrix((numpy.concatenate((new_posteriors.data, data_to_add)),
                                        (numpy.concatenate((new_posteriors.row, rows_to_add)),
                                         numpy.concatenate((new_posteriors.col, cols_to_add)))),
                                       shape=(self.n_sequences, self.posteriors[-1].shape[1]),
                                       dtype=new_posteriors.dtype).tocsr()
    # finally, exchange in this new matrix
    trash = self.posteriors.pop()
    del trash
    self.posteriors.append(new_posteriors)
    # update probN array:
    self.probN.extend(probNtoadd)
    if self._VERBOSE:
        total_time = time()-t0
        sys.stderr.write("\tSplit out %d new minor strain sequences.\n"%(splitcount))
        if splitcount > 0:
            sys.stderr.write("\tAverage time for split sequence: [%.6f seconds]\n"%numpy.mean(times_split))
            sys.stderr.write("\tAverage time for posterior update: [%.6f seconds]\n"%numpy.mean(times_posteriors))
            sys.stderr.write("\tAverage time for non-split sequences: [%.6f seconds]\n"%((loop_t_total - sum(times_split)) / (seqs_to_process - len(times_split))))
        # sys.stderr.write("\tCulled %d sequences\n"%(cullcount))
        sys.stderr.write("DONE Writing consensus for iteration %d at %s [%s]...\n"%(self.iteration_i, ctime(), timedelta(seconds = total_time)))
    return
def write_consensus_with_mask(self, reference_fastafilename, output_fastafilename, mask):
    """
    Write a consensus sequence to output_fastafilename for each
    sequence in probN, masking bases with no read support:
        mask == "hard" --> masked bases written as N
        mask == "soft" --> masked bases replaced by the (lowercase)
                           original base from reference_fastafilename
    Soft masking is useful prior to usearch clustering, so unmapped
    positions are not counted in identity calculations.

    reference_fastafilename -- fasta file supplying original bases for
                               soft masking (read via pysam.Fastafile)
    output_fastafilename    -- path of fasta file to (over)write
    mask                    -- "hard" or "soft"; anything else raises
                               ValueError

    Returns the number of sequences written.
    """
    n_seqs = 0
    i2base_get = self.i2base.get  # local binding for speed in inner loop
    # open() instead of py2-only file() builtin
    of = open(output_fastafilename, 'w')
    reference_fastafile = pysam.Fastafile(reference_fastafilename)
    for seq_i in range(len(self.probN)):
        if self.probN[seq_i] is None:
            continue
        title = self.sequence_i2sequence_name[seq_i]
        # most probable base at each position; unknown indices map to "N"
        consensus = numpy.array([i2base_get(x, "N") for x in numpy.argsort(self.probN[seq_i])[:,-1]])
        orig_bases = numpy.array(reference_fastafile.fetch(title).lower(), dtype='c')
        # now replace consensus bases with no read support with N.
        # NOTE(review): positions are detected via consensus == "N" rather
        # than self.unmapped_bases (see commented line) -- presumably
        # equivalent here, but the "hard" branch is then a no-op; confirm.
        # unmapped_indices = numpy.where(self.unmapped_bases[seq_i] == 1)
        unmapped_indices = numpy.where(consensus == "N")
        if mask == "hard":
            consensus[unmapped_indices] = 'N'
        elif mask == "soft":
            for unmapped_i in unmapped_indices[0]:
                consensus[unmapped_i] = orig_bases[unmapped_i] # return to original base if unmapped / ambiguous
            # consensus[unmapped_indices] = [letter.lower() for letter in consensus[unmapped_indices]]
        else:
            # fixed typo ("valud") and unbalanced brace in message;
            # py2-only raise statement form replaced by call form
            raise ValueError("Invalid value for mask: %s (choose one of {soft, hard})" % mask)
        of.write(">%s\n"%(title))
        of.write("%s\n"%("".join(consensus)))
        n_seqs += 1
    of.close()
    return n_seqs
def cluster_sequences(self, fastafilename):
    """
    Dispatch to the clustering backend (currently USEARCH-based).

    This indirection exists so that faster or alternative clustering
    methods can be swapped in later.  Whatever implementation is called
    must also adjust Pr(S) [prior] and Pr(S_t-1) [posteriors] as needed
    after merging.
    """
    merge_result = self.cluster_sequences_usearch(fastafilename)
    return merge_result
def cluster_sequences_usearch(self, fastafilename):
    """
    Merge reference sequences using Edgar's USEARCH.

    Two sequences are merged if the *NON-GAPPED*, mapped positions have
    % identity >= self.cluster_thresh over the length of the shorter
    sequence.  Also adjusts Pr(S) [prior] and Pr(S_t-1) [posteriors]
    as needed after merging, and rewrites fastafilename in place with
    only the surviving sequences.

    "Search and clustering orders of magnitude faster than BLAST"
    Robert C. Edgar, Bioinformatics 2010
    """
    if self._VERBOSE:
        sys.stderr.write("Clustering sequences for iteration %d at %s...\n"%(self.iteration_i, ctime()))
        sys.stderr.write("\tcluster threshold = %.3f\n"%(self.cluster_thresh))
    start_time = time()
    tocleanup = []                  # list of temporary files to remove after done
    # get posteriors ready for slicing (just prior to this call, is csr matrix?):
    self.posteriors[-1] = self.posteriors[-1].tolil()
    # NOTE that this fasta file now contains N's where there are
    # no mapped bases, so that usearch with iddef 0 will not count
    # positions aligned to these bases in the identity calculation
    tmp_fastafilename = fastafilename + ".tmp.fasta"
    num_seqs = self.write_consensus_with_mask(fastafilename, tmp_fastafilename, mask="soft")
    tocleanup.append(tmp_fastafilename)
    tmp_fastafile = pysam.Fastafile(tmp_fastafilename)
    tocleanup.append("%s.fai"%(tmp_fastafilename))    # pysam creates an index file
    # do global alignments with USEARCH/UCLUST.
    # I don't use --cluster because it doesn't report alignments
    # usearch is fast but will sometimes miss things -- I've tried to tune params as best as I can.
    # and I use different parameters depending on how many input sequences there are
    # Also, I use a lower %ID thresh than specified for joining because I really calculate %ID over *mapped* sequence positions.
    sens_string = "--maxaccepts 8 --maxrejects 256"
    uclust_id = 0.80
    algorithm="-usearch_global"
    # uclust_id = self.cluster_thresh - 0.05
    # if em.iteration_i > 10:
    # num_seqs = len([x for x in self.probN if x is not None])
    assert num_seqs == len([x for x in self.probN if x is not None])
    # fewer sequences --> can afford more sensitive (slower) search settings
    if num_seqs < 1000:
        sens_string = "--maxaccepts 16 --maxrejects 256"
    if num_seqs < 500:
        sens_string = "--maxaccepts 32 --maxrejects 256"
    if num_seqs < 150:
        algorithm="-search_global"
        sens_string = "--maxaccepts 0 --maxrejects 0"  # slower, but more sensitive.
    # if really few seqs, then no use not doing smith-waterman or needleman wunsch alignments
    if num_seqs < 50:
        algorithm="-search_global"
        sens_string = "-fulldp"
    # there is a bug in usearch threads that I can't figure out (fails with many threads). Currently limiting to max 6 threads
    usearch_threads = min(6, self.n_cpus)
    cmd = "usearch %s %s --db %s --id %.3f -quicksort -query_cov 0.5 -target_cov 0.5 -strand plus --userout %s.us.txt --userfields query+target+id+caln+qlo+qhi+tlo+thi -threads %d %s"%\
          (algorithm,
           tmp_fastafilename, tmp_fastafilename,
           uclust_id,
           tmp_fastafilename,
           usearch_threads,
           sens_string)
    if self._VERBOSE:
        sys.stderr.write("usearch command was:\n%s\n"%(cmd))
    check_call(cmd, shell=True, stdout = sys.stdout, stderr = sys.stderr)
    # read clustering file to adjust Priors and Posteriors, summing merged reference sequences
    tocleanup.append("%s.us.txt"%tmp_fastafilename)
    nummerged = 0
    alnstring_pat = re.compile(r'(\d*)([MDI])')  # parses usearch compressed alignment (caln) strings
    already_removed = set()  # seq_ids already merged away this pass
    # this is a bit slow and almost certainly could be sped up with algorithmic improvements.
    times = []  # DEBUG
    for row in csv.reader(file("%s.us.txt"%tmp_fastafilename), delimiter='\t'):
        # each row an alignment in userout file
        t0 = time()
        # member == query
        member_name = row[0]
        seed_name = row[1]
        if member_name == seed_name:
            continue # usearch allows self-hits, which we don't care about
        member_seq_id = self.sequence_name2sequence_i.get(member_name)
        seed_seq_id = self.sequence_name2sequence_i.get(seed_name)
        if member_seq_id in already_removed or seed_seq_id in already_removed:
            continue
        # decide if these pass the cluster_thresh *over non-gapped, mapped columns*
        member_fasta_seq = tmp_fastafile.fetch(member_name)
        seed_fasta_seq = tmp_fastafile.fetch(seed_name)
        member_unmapped = self.unmapped_bases[member_seq_id]  # unmapped positions (default prob)
        seed_unmapped = self.unmapped_bases[seed_seq_id]
        # userfields layout: query+target+id+caln+qlo+qhi+tlo+thi
        #                    0     1      2  3    4   5   6   7
        member_start = int(row[4]) - 1  # printed as 1-based by usearch now
        seed_start = int(row[6]) - 1
        t0 = time()
        # print >> sys.stderr, "DEBUG", alnstring_pat.findall(row[3])
        # Cython helper counts aligned columns and matches over mapped,
        # non-gapped positions only
        aln_columns, matches = _emirge.count_cigar_aln(tmp_fastafile.fetch(seed_name),
                                                       tmp_fastafile.fetch(member_name),
                                                       self.unmapped_bases[seed_seq_id],
                                                       self.unmapped_bases[member_seq_id],
                                                       seed_start,
                                                       member_start,
                                                       alnstring_pat.findall(row[3]))
        ## print >> sys.stderr, "DEBUG: %.6e seconds"%(time()-t0)# timedelta(seconds = time()-t0)
        # if alignment is less than 500 bases, or identity over those bases is not above thresh, then continue
        seed_n_mapped_bases = self.unmapped_bases[seed_seq_id].shape[0] - self.unmapped_bases[seed_seq_id].sum()
        member_n_mapped_bases = self.unmapped_bases[member_seq_id].shape[0] - self.unmapped_bases[member_seq_id].sum()
        if (aln_columns < 500) \
           or ((float(matches) / aln_columns) < self.cluster_thresh):
            # or (float(aln_columns) / min(seed_n_mapped_bases, member_n_mapped_bases) < 0.9)
            continue
        # how many iters does a newly split out seq have to be around
        # before it's allowed to merge again. -1 to turn this off.
        minimum_residence_time = -1
        member_first_appeared = self.split_seq_first_appeared.get(member_seq_id)
        if member_first_appeared is not None and self.iteration_i - member_first_appeared <= minimum_residence_time:
            continue
        seed_first_appeared = self.split_seq_first_appeared.get(seed_seq_id)
        if seed_first_appeared is not None and self.iteration_i - seed_first_appeared <= minimum_residence_time:
            continue
        if self._VERBOSE and num_seqs < 50:
            print >> sys.stderr, "\t\t%s|%s vs %s|%s %.3f over %s aligned columns (usearch %%ID: %s)"%(member_seq_id, member_name, seed_seq_id, seed_name, float(matches) / aln_columns, aln_columns, row[2])
        # if above thresh, then first decide which sequence to keep, (one with higher prior probability).
        percent_id = (float(matches) / aln_columns) * 100.
        t0 = time()
        if self.priors[-1][seed_seq_id] > self.priors[-1][member_seq_id]:
            keep_seq_id = seed_seq_id
            remove_seq_id = member_seq_id
            keep_name = seed_name
            remove_name = member_name
        else:
            keep_seq_id = member_seq_id
            remove_seq_id = seed_seq_id
            keep_name = member_name
            remove_name = seed_name
        # merge priors (add remove_seq_id probs to keep_seq_id probs).
        self.priors[-1][keep_seq_id] += self.priors[-1][remove_seq_id]
        self.priors[-1][remove_seq_id] = 0.0
        # now merge posteriors (all removed probs from remove_seq_id go to keep_seq_id).
        # self.posteriors[-1] at this point is lil_matrix
        # some manipulations of underlying sparse matrix data structures for efficiency here.
        # 1st, do addition in csr format (fast), convert to lil format, and store result in temporary array.
        new_row = (self.posteriors[-1].getrow(keep_seq_id).tocsr() + self.posteriors[-1].getrow(remove_seq_id).tocsr()).tolil()
        # then change linked lists directly in the posteriors data structure -- this is very fast
        self.posteriors[-1].data[keep_seq_id] = new_row.data[0]
        self.posteriors[-1].rows[keep_seq_id] = new_row.rows[0]
        # these two lines remove the row from the linked list (or rather, make them empty rows), essentially setting all elements to 0
        self.posteriors[-1].rows[remove_seq_id] = []
        self.posteriors[-1].data[remove_seq_id] = []
        # set self.probN[removed] to be None -- note that this doesn't really matter, except for
        # writing out probN.pkl.gz every iteration, as probN is recalculated from bam file
        # with each iteration
        self.probN[remove_seq_id] = None
        already_removed.add(remove_seq_id)
        nummerged += 1
        if self._VERBOSE:
            times.append(time()-t0)
            sys.stderr.write("\t...merging %d|%s into %d|%s (%.2f%% ID over %d columns) in %.3f seconds\n"%\
                             (remove_seq_id, remove_name,
                              keep_seq_id, keep_name,
                              percent_id, aln_columns,
                              times[-1]))
    # if len(times) and self._VERBOSE:  # DEBUG
    #     sys.stderr.write("merges: %d\n"%(len(times)))
    #     sys.stderr.write("total time for all merges: %.3f seconds\n"%(numpy.sum(times)))
    #     sys.stderr.write("average time per merge: %.3f seconds\n"%(numpy.mean(times)))
    #     sys.stderr.write("min time per merge: %.3f seconds\n"%(numpy.min(times)))
    #     sys.stderr.write("max time per merge: %.3f seconds\n"%(numpy.max(times)))
    # write new fasta file with only new sequences
    if self._VERBOSE:
        sys.stderr.write("Writing new fasta file for iteration %d at %s...\n"%(self.iteration_i, ctime()))
    tmp_fastafile.close()
    tocleanup.append("%s.fai"%(fastafilename))  # this file will change! So must remove index file. pysam should check timestamps of these!
    recordstrings=""
    num_seqs = 0
    # read through file again, overwriting orig file if we keep the seq
    for record in FastIterator(file(fastafilename)):
        seqname = record.title.split()[0]
        seq_id = self.sequence_name2sequence_i.get(seqname)
        if seq_id not in already_removed:
            recordstrings += str(record)  # could do a better job here of actually "merging" a new consensus, rather than just keeping one or the other.
            num_seqs += 1
    outfile = file(fastafilename, 'w')
    outfile.write(recordstrings)
    outfile.close()
    # clean up. quite important, actually, to remove old fai index files.
    for fn in tocleanup:
        os.remove(fn)
    if self._VERBOSE:
        sys.stderr.write("\tremoved %d sequences after merging\n"%(nummerged))
        sys.stderr.write("\tsequences remaining for iteration %02d: %d\n"%(self.iteration_i, num_seqs))
        sys.stderr.write("DONE Clustering sequences for iteration %d at %s [%s]...\n"%(self.iteration_i, ctime(), timedelta(seconds = time()-start_time)))
    return
def do_mapping(self, full_fasta_path, nice = None):
    """
    Map reads against the fasta file at full_fasta_path with the
    external mapping program (currently bowtie), producing the bam
    file for the next iteration.

    The delegate (do_mapping_bowtie) is responsible for setting
    self.n_alignments and self.current_bam_filename.

    nice -- optional niceness level for the mapper process
    """
    start_time = time()
    if self._VERBOSE:
        sys.stderr.write("Starting read mapping for iteration %d at %s...\n"%(self.iteration_i, ctime()))
    self.do_mapping_bowtie(full_fasta_path, nice = nice)
    if self._VERBOSE:
        sys.stderr.write("DONE with read mapping for iteration %d at %s [%s]...\n"%(self.iteration_i, ctime(), timedelta(seconds = time()-start_time)))
    return
def do_mapping_bowtie(self, full_fasta_path, nice = None):
    """
    Run bowtie to produce the bam file for the next iteration.

    full_fasta_path -- fasta file of current reference sequences to map against
    nice            -- optional niceness level for the bowtie process

    Sets self.n_alignments (parsed from bowtie's stderr) and
    self.current_bam_filename.  Also removes the bowtie index files
    and gzips the bowtie log when done.
    """
    bowtie_index = os.path.join(self.iterdir, "bowtie.index.iter.%02d"%(self.iteration_i))
    bowtie_logfile = os.path.join(self.iterdir, "bowtie.iter.%02d.log"%(self.iteration_i))
    # 1. build index
    cmd = "bowtie-build -o 3 %s %s > %s"%(full_fasta_path , bowtie_index, bowtie_logfile) # -o 3 for speed? magnitude of speedup untested!
    # note: just send stdout to log file, as stderr captured in emirge stderr
    if self._VERBOSE:
        sys.stderr.write("\tbowtie-build command:\n")
        sys.stderr.write("\t%s\n"%cmd)
    check_call(cmd, shell=True, stdout = sys.stdout, stderr = sys.stderr)
    sys.stdout.flush()
    sys.stderr.flush()
    # 2. run bowtie
    nicestring = ""
    if nice is not None:
        nicestring = "nice -n %d"%(nice)
    # reads may arrive gzipped; choose the right decompressor for the pipe
    if self.reads1_filepath.endswith(".gz"):
        cat_cmd = "gzip -dc "
    else:
        cat_cmd = "cat "
    # these are used for single reads too.
    shared_bowtie_params = "--phred%d-quals -t -p %s -n 3 -l %s -e %s --best --strata --all --sam --chunkmbs 128"%(self.reads_ascii_offset, self.n_cpus, BOWTIE_l, BOWTIE_e)
    # insert-size window: mean +/- 3 SD, but never smaller than a read
    minins = max((self.insert_mean - 3*self.insert_sd), self.max_read_length)
    maxins = self.insert_mean + 3*self.insert_sd
    output_prefix = os.path.join(self.iterdir, "bowtie.iter.%02d"%(self.iteration_i))
    output_filename = "%s.PE.u.bam"%output_prefix
    samtools_cmd = "samtools view -S -h -u -b -F 0x0004 -" # -F instead of piping to awk? | awk '{if ($3!="*") print }'
    if self.reads2_filepath is not None:
        # paired-end: decompress reads1 into bowtie's stdin, reads2 from file
        bowtie_command = """%s %s | %s bowtie %s --minins %d --maxins %d %s -1 - -2 %s | %s > %s"""%(\
            cat_cmd,
            self.reads1_filepath,
            nicestring,
            shared_bowtie_params,
            minins, maxins,
            bowtie_index,
            self.reads2_filepath,
            samtools_cmd,
            output_filename)
    else: # single reads
        bowtie_command = """%s %s | %s bowtie %s %s - | %s > %s"""%(\
            cat_cmd,
            self.reads1_filepath,
            nicestring,
            shared_bowtie_params,
            bowtie_index,
            samtools_cmd,
            output_filename)
    if self._VERBOSE:
        sys.stderr.write("\tbowtie command:\n")
        sys.stderr.write("\t%s\n"%bowtie_command)
    # capture bowtie's stderr so the alignment count can be parsed from it
    p = Popen(bowtie_command, shell=True, stdout = sys.stdout, stderr = PIPE, close_fds=True)
    p.wait()
    stderr_string = p.stderr.read()
    self.n_alignments = self.get_n_alignments_from_bowtie(stderr_string)
    # re-print this to stdout, since we stole it from bowtie
    sys.stdout.write(stderr_string)
    sys.stdout.flush()
    # and now put in separate bowtie logfile
    of = open(bowtie_logfile, 'w')
    of.write("\nBOWTIE STDERR:\n")
    of.write(stderr_string)
    of.write("\n")
    of.close()
    if self._VERBOSE:
        sys.stderr.write("\tFinished Bowtie for iteration %02d at %s:\n"%(self.iteration_i, ctime()))
    # 3. clean up
    # check_call("samtools index %s.sort.PE.bam"%(output_prefix), shell=True, stdout = sys.stdout, stderr = sys.stderr)
    if os.path.exists(bowtie_logfile):
        check_call("gzip -f %s"%(bowtie_logfile), shell=True)
    assert self.iterdir != '/'    # guard against catastrophic deletes below
    for filename in os.listdir(self.iterdir):
        assert(len(os.path.basename(bowtie_index)) >= 20) # weak check that I'm not doing anything dumb.
        if os.path.basename(bowtie_index) in filename:
            os.remove(os.path.join(self.iterdir, filename))
    self.current_bam_filename = output_filename # do this last.
    return
def get_n_alignments_from_bowtie(self, stderr_string):
    """
    Parse bowtie's stderr output and return (as an int) the number of
    alignment lines expected in the resulting samfile.

    Paired-end runs report each alignment once but emit two samfile
    lines per alignment, so the reported count is doubled in that case.

    Raises IndexError (after writing the offending bowtie output to
    stderr) if the expected "Reported N ... alignments" line is absent.

    ##### sample stderr for paired-end: #####
    Time loading reference: 00:00:00
    Time loading forward index: 00:00:00
    Time loading mirror index: 00:00:00
    Seeded quality full-index search: 00:06:50
    # reads processed: 897895
    # reads with at least one reported alignment: 720465 (80.24%)
    # reads that failed to align: 177430 (19.76%)
    Reported 19244466 paired-end alignments to 1 output stream(s)
    """
    # findall hoisted out of the try so the except handler can never see
    # an unbound r; only the r[0] indexing below can raise IndexError.
    r = re.findall(r'Reported ([0-9]+) (paired-end )?alignments', stderr_string)
    try:
        if r[0][1] != '': # "paired-end" string matched -- two lines in samfile per paired-end aln
            return int(r[0][0])*2
        else: # single-end -- one line in samfile per alignment
            return int(r[0][0])
    except IndexError:
        # sys.stderr.write for consistency with the rest of this module
        # (replaces py2-only print-chevron statements)
        sys.stderr.write("OOPS, we didn't get number of reads from bowtie:\n")
        sys.stderr.write(stderr_string)
        sys.stderr.write("\n%s\n" % (r,))
        raise
def calc_likelihoods(self):
    """
    Compute self.likelihoods (a seq_n x read_n matrix) for this round.

    First refreshes self.probN from the mapped reads and the previous
    round's posteriors, then delegates the numerical heavy lifting to
    the Cython extension.
    """
    start_time = time()
    if self._VERBOSE:
        sys.stderr.write("Calculating likelihood %s for iteration %d at %s...\n"%(self.likelihoods.shape, self.iteration_i, ctime()))
    # Pr(N=n) must be current before likelihoods can be computed;
    # calc_probN handles the initial iteration specially on its own.
    self.calc_probN()
    _emirge._calc_likelihood(self)
    if self._VERBOSE:
        sys.stderr.write("DONE Calculating likelihood for iteration %d at %s [%s]...\n"%(self.iteration_i, ctime(), timedelta(seconds = time()-start_time)))
    return
def calc_probN(self):
    """
    Compute Pr(N=n) for every position of every sequence.

    Weighting: a read or sequence seen in the previous round (t-1) is
    weighted by that round's posterior Pr(S|R); anything new this round
    -- which on the initial iteration is everything -- falls back to
    the unbiased prior Pr(S) instead.
    """
    start_time = time()
    if self._VERBOSE:
        sys.stderr.write("\tCalculating Pr(N=n) for iteration %d at %s...\n"%(self.iteration_i, ctime()))
    # the Cython loop below is ~95% of this method's runtime on test data
    _emirge._calc_probN(self)
    if self._VERBOSE:
        sys.stderr.write("\tDONE calculating Pr(N=n) for iteration %d at %s [%s]...\n"%(self.iteration_i, ctime(), timedelta(seconds = time()-start_time)))
    return
def calc_posteriors(self):
    """Compute posteriors Pr(S|R) for this round via the Cython extension."""
    t_start = time()
    if self._VERBOSE:
        sys.stderr.write("Calculating posteriors for iteration %d at %s...\n"%(self.iteration_i, ctime()))
    _emirge._calc_posteriors(self)
    if self._VERBOSE:
        sys.stderr.write("DONE Calculating posteriors for iteration %d at %s [%.3f seconds]...\n"%(self.iteration_i, ctime(), time() - t_start))
    return
def iterations_done(self):
    """
    Report whether iteration has converged, i.e. whether the current
    reference sequences are identical to the previous round's.

    Currently a stub that always returns False, so iteration continues
    until the maximum iteration count is reached.
    """
    return False
def do_iterations(em, max_iter, save_every):
    """
    Run EM iterations until max_iter is reached, then clean up
    temporary files and convert the last mapping to a compressed bam.

    em         -- an EM object (may in theory be a restored saved state)
    save_every -- currently unused; periodic state saving is disabled
                  (see commented block below)

    TODO: this should be moved into the EM object.
    """
    bamfile_template = "bowtie.iter.%02d.PE.u.bam"
    os.chdir(em.cwd)
    em.max_iterations = max_iter
    if em.iteration_i < 0:  # first run
        em.do_iteration(em.current_bam_filename, em.current_reference_fasta_filename)
    while em.iteration_i < max_iter:
        subdir = os.path.join(em.cwd, "iter.%02d"%(em.iteration_i))
        em.do_iteration(os.path.join(subdir, bamfile_template%(em.iteration_i)),
                        os.path.join(subdir, "iter.%02d.cons.fasta"%(em.iteration_i)))
    # currently broken. Not sure anyone was using this anyway
    # if em.iteration_i > 0 and (em.iteration_i % save_every == 0):
    #     filename = em.save_state()
    #     os.system("bzip2 -f %s &"%(filename))
    # clean up any global temporary files, i.e. rewritten reads files
    for filename in em.temporary_files:
        os.remove(filename)
    # compress last mapping (which we keep around)
    if os.path.exists(em.current_bam_filename) and em.current_bam_filename.endswith(".u.bam"):
        sys.stderr.write("Converting last mapping file (%s) to compressed bam at %s...\n"%(os.path.basename(em.current_bam_filename), ctime()))
        # BUGFIX: this used rstrip(".u.sam"), which strips trailing
        # *characters* from the set {., u, s, a, m} (and didn't even match
        # the ".u.bam" suffix tested above), mangling the filename to e.g.
        # "….PE.u.b.bam".  Slice the known suffix off instead; the
        # endswith() guard above makes this safe.
        new_fn = em.current_bam_filename[:-len(".u.bam")] + ".bam"
        p = Popen("samtools view -h -b %s > %s"%(em.current_bam_filename, new_fn), shell=True, stderr = sys.stderr)
        returncode = p.wait()
        if returncode == 0:
            sys.stderr.write("DONE Converting last mapping file (%s) to compressed bam at %s.\n"%(os.path.basename(em.current_bam_filename), ctime()))
            os.remove(em.current_bam_filename)
            em.current_bam_filename = new_fn
        else:
            sys.stderr.write("ERROR: Could not convert last mapping file (%s) to compressed bam at %s.\n"%(os.path.basename(em.current_bam_filename), ctime()))
    return
def do_initial_mapping(em, working_dir, options):
    """
    Perform the initial 1-reference-per-read bowtie mapping that seeds
    the algorithm.

    em          -- the EM object (em.n_alignments is set here)
    working_dir -- directory under which initial_mapping/ is created
    options     -- an OptionParser options object

    Returns the path to the bam file produced by this initial mapping.

    TODO: move this to an EM method. A bit clumsy right now.
    """
    initial_mapping_dir = os.path.join(working_dir, "initial_mapping")
    if not os.path.exists(initial_mapping_dir):
        os.mkdir(initial_mapping_dir)
    # insert-size window: mean +/- 3 SD, but never smaller than a read
    minins = max((options.insert_mean - 3*options.insert_stddev), options.max_read_length)
    maxins = options.insert_mean + 3*options.insert_stddev
    bampath_prefix = os.path.join(initial_mapping_dir, "initial_bowtie_mapping.PE")
    nicestring = ""
    if options.nice_mapping is not None:
        nicestring = "nice -n %d"%(options.nice_mapping)  # TODO: fix this so it isn't such a hack and will work in non-bash shells. Need to rewrite all subprocess code, really (shell=False)
    reads_ascii_offset = {False: 64, True: 33}[options.phred33]
    # option_strings is filled *positionally* to match the %-format
    # placeholders in the command templates below -- order matters.
    if options.fastq_reads_1.endswith(".gz"):
        option_strings = ["gzip -dc "]
    else:
        option_strings = ["cat "]
    # shared regardless of whether paired mapping or not
    option_strings.extend([options.fastq_reads_1, nicestring, reads_ascii_offset, options.processors, BOWTIE_l, BOWTIE_e])
    samtools_cmd = "samtools view -S -h -u -b -F 0x0004 -" # -F instead of piping to awk? | awk '{if ($3!="*") print }'
    # PAIRED END MAPPING
    if options.fastq_reads_2 is not None:
        option_strings.extend([minins, maxins, options.bowtie_db, options.fastq_reads_2, samtools_cmd, bampath_prefix])
        cmd = """%s %s | %s bowtie --phred%d-quals -t -p %s -n 3 -l %s -e %s --best --sam --chunkmbs 128 --minins %s --maxins %s %s -1 - -2 %s | %s > %s.u.bam """%tuple(option_strings)
    # SINGLE END MAPPING
    else:
        option_strings.extend([options.bowtie_db, samtools_cmd, bampath_prefix])
        cmd = """%s %s | %s bowtie --phred%d-quals -t -p %s -n 3 -l %s -e %s --best --sam --chunkmbs 128 %s - | %s > %s.u.bam """%tuple(option_strings)
    sys.stderr.write("Performing initial mapping with command:\n%s\n"%cmd)
    # capture bowtie's stderr so the alignment count can be parsed from it
    p = Popen(cmd, shell=True, stdout = sys.stdout, stderr = PIPE, close_fds=True)
    p.wait()
    stderr_string = p.stderr.read()
    em.n_alignments = em.get_n_alignments_from_bowtie(stderr_string)
    # re-print this to stdout, since we stole it.
    sys.stdout.write(stderr_string)
    sys.stdout.flush()
    return bampath_prefix+".u.bam"
def resume(working_dir, options):
    """
    Resume EMIRGE from a previously saved run.

    working_dir -- the emirge working directory
    options     -- an OptionParser options object

    NOTE: deliberately disabled -- always raises NotImplementedError.
    The unreachable code below is kept as a sketch of the intended
    resume logic.
    """
    raise NotImplementedError("This option is currently broken, and will be fixed in a later version.")
    em = EM("", "", 0, 0)  # reads1_filepath, reads2_filepath, insert_mean, insert_sd
    data_path = os.path.join(working_dir, "iter.%02d"%(options.resume_from), 'em.%02i.data.pkl.bz2'%(options.resume_from))
    sys.stdout.write("Loading saved state from %s...\n"%data_path)
    em.load_state(data_path)
    sys.stdout.write("Done.\n")
    # a moved/copied working dir invalidates the saved cwd, so reset it:
    em.cwd = working_dir
    # secretly (not advertised) options allowed to change in a resume
    if options.fastq_reads_1 is not None:
        em.reads1_filepath = os.path.abspath(options.fastq_reads_1)
    if options.fastq_reads_2 is not None:
        em.reads2_filepath = os.path.abspath(options.fastq_reads_2)
    # now process any *relevant* options:
    # this is broken right now because it just reverts all to defaults.
    # if options.processors is not None:
    #     em.n_cpus = options.processors
    # if options.snp_fraction_thresh is not None:
    #     em.snp_percentage_thresh = options.snp_fraction_thresh
    # if options.variant_fraction_thresh is not None:
    #     em.snp_minor_prob_thresh = options.variant_fraction_thresh
    # if options.join_threshold is not None:
    #     em.cluster_thresh = options.join_threshold
    # if options.min_depth is not None:
    #     em.min_depth = options.min_depth
    # if options.nice_mapping is not None:
    #     em.mapping_nice = options.nice_mapping
    do_iterations(em, max_iter = options.iterations, save_every = options.save_every)
    return
def dependency_check():
    """
    Check presence and version of external programs used by emirge.

    Currently only checks usearch (>= 6.0.203), as its command-line
    parameters and behavior are finicky and seem to change from version
    to version.

    Exits the process with a nonzero status if usearch is missing or
    too old.
    """
    # minimum known-working usearch version
    working_maj = 6
    working_minor = 0
    working_minor_minor = 203
    match = re.search(r'usearch([^ ])* v([0-9]*)\.([0-9]*)\.([0-9]*)', Popen("usearch --version", shell=True, stdout=PIPE).stdout.read())
    if match is None:
        sys.stderr.write("FATAL: usearch not found in path!\n")
        exit(1)  # BUGFIX: was exit(0), which signals success to the calling shell
    binary_name, usearch_major, usearch_minor, usearch_minor_minor = match.groups()
    usearch_major = int(usearch_major)
    usearch_minor = int(usearch_minor)
    usearch_minor_minor = int(usearch_minor_minor)
    # lexicographic comparison of (major, minor, minor_minor)
    if usearch_major < working_maj or \
       (usearch_major == working_maj and (usearch_minor < working_minor or \
                                          (usearch_minor == working_minor and usearch_minor_minor < working_minor_minor))):
        sys.stderr.write("FATAL: usearch version found was %s.%s.%s.\nemirge works with version >= %s.%s.%s\nusearch has different command line arguments and minor bugs in previous versions that can cause problems.\n"%(usearch_major, usearch_minor, usearch_minor_minor, working_maj, working_minor, working_minor_minor))
        exit(1)  # BUGFIX: was exit(0)
    return
def main(argv = sys.argv[1:]):
    """
    command line interface to emirge

    Parses and sanity-checks options, prints citation/run info, builds
    the EM object, performs (or loads) the initial read mapping, and
    runs the EM iterations.
    """
    # refuse to run at all if usearch is missing or too old
    dependency_check()

    parser = OptionParser(USAGE)

    # REQUIRED
    group_reqd = OptionGroup(parser, "Required flags",
                             "These flags are all required to run EMIRGE, and may be supplied in any order.")
    group_reqd.add_option("-1", dest="fastq_reads_1", metavar="reads_1.fastq[.gz]",
                      type="string",
                      help="path to fastq file with \\1 (forward) reads from paired-end sequencing run, or all reads from single-end sequencing run. File may optionally be gzipped. EMIRGE expects ASCII-offset of 64 for quality scores (but see --phred33). (Note that running EMIRGE with single-end reads is largely untested. Please let me know how it works for you.)")
    group_reqd.add_option("-f", "--fasta_db",
                      type="string",
                      help="path to fasta file of candidate SSU sequences")
    group_reqd.add_option("-b", "--bowtie_db",
                      type="string",
                      help="precomputed bowtie index of candidate SSU sequences (path to appropriate prefix; see --fasta_db)")
    group_reqd.add_option("-l", "--max_read_length",
                      type="int", default=0,
                      help="""length of longest read in input data.""")
    parser.add_option_group(group_reqd)

    # REQUIRED for paired end
    group_reqd_PE = OptionGroup(parser, "Required flags for paired-end reads",
                             "These flags are required to run EMIRGE when you have paired-end reads (the standard way of running EMIRGE), and may be supplied in any order.")
    group_reqd_PE.add_option("-2", dest="fastq_reads_2", metavar="reads_2.fastq",
                      type="string",
                      help="path to fastq file with \\2 (reverse) reads from paired-end run. File must be unzipped for mapper. EMIRGE expects ASCII-offset of 64 for quality scores (but see --phred33).")
    group_reqd_PE.add_option("-i", "--insert_mean",
                      type="int", default=0,
                      help="insert size distribution mean.")
    group_reqd_PE.add_option("-s", "--insert_stddev",
                      type="int", default=0,
                      help="insert size distribution standard deviation.")
    parser.add_option_group(group_reqd_PE)

    # OPTIONAL
    group_opt = OptionGroup(parser, "Optional parameters",
                             "Defaults should normally be fine for these options in order to run EMIRGE")
    group_opt.add_option("-n", "--iterations",
                      type="int", default=40,
                      help="""Number of iterations to perform. It may be necessary to use more iterations for more complex samples (default=%default)""")
    group_opt.add_option("-a", "--processors",
                      type="int", default=1,
                      help="""Number of processors to use in the mapping steps. You probably want to raise this if you have the processors. (default: %default)""")
    group_opt.add_option("-m", "--mapping",
                      type="string",
                      help="path to precomputed initial mapping (bam file). If not provided, an initial mapping will be run for you.")
    group_opt.add_option("-p", "--snp_fraction_thresh",
                      type="float", default="0.04",
                      help="If fraction of variants in a candidate sequence exceeds this threhold, then split the candidate into two sequences for next iteration. See also --variant_fraction_thresh. (default: %default)")
    group_opt.add_option("-v", "--variant_fraction_thresh",
                      type="float", default="0.1",
                      help="minimum probability of second most probable base at a site required in order to call site a variant. See also --snp_fraction_thresh. (default: %default)")
    group_opt.add_option("-j", "--join_threshold",
                      type="float", default="0.97",
                      help="If two candidate sequences share >= this fractional identity over their bases with mapped reads, then merge the two sequences into one for the next iteration. (default: %default; valid range: [0.95, 1.0] ) ")
    # DEPRECIATED
    # group_opt.add_option("-c", "--min_depth",
    #                   type="float",
    #                   default = 3,
    #                   help = "minimum average read depth below which a candidate sequence is discarded for next iteration(default: %default)")
    group_opt.add_option("-c", "--min_length_coverage",
                      type="float",
                      default = 0.3,
                      help = "minimum fraction of the length of a candidate reference sequence that must be covered by mapped reads. If not met, a candidate sequence is discarded for the next iteration. (default: %default; valid range: (0.0, 1.0])")
    group_opt.add_option("--nice_mapping",
                      type="int",
                      help="""If set, during mapping phase, the mapper will be "niced" by the Linux kernel with this value (default: no nice)""")
    # group_opt.add_option("-e", "--save_every",
    #                   type="int", default=10,
    #                   help="""every SAVE_EVERY iterations, save the programs state. This allows you to run further iterations later starting from these save points. The program will always save its state after the final iteration. (default=%default)""")
    group_opt.add_option("--phred33",
                      action="store_true", default=False,
                      help="Illumina quality values in fastq files are the (fastq standard) ascii offset of Phred+33. This is the new default for Illumina pipeline >= 1.8. DEFAULT is still to assume that quality scores are Phred+64")

    # --- HIDDEN --- for debugging or special use case
    # this option randomizes the priors calculated for algorithm
    # initialization. Useful for testing how init affects final
    # sequences and how likely we are to get stuck in a local maxima.
    group_opt.add_option("--randomize_init_priors",
                      action="store_true", default=False,
                      help=SUPPRESS_HELP)
    # if this flag is set, then it is assumed that N reads in input
    # files are labeled with integer names from 0 to N-1, and the read
    # files will not be rewritten as a first step by emirge
    group_opt.add_option("--no_rewrite_reads",
                      action="store_true", default=False,
                      help=SUPPRESS_HELP)
    # --- END HIDDEN ---

    parser.add_option_group(group_opt)

    # # RESUME
    group_resume = OptionGroup(parser, "Resuming iterations",
                             "These options allow you to resume from a previously interrupted run. EMIRGE will look for the last good iteration and begin with the candidate SSU sequences and priors (current abundance estimates) from that iteration. Currently, there is only one option associate with resuming iterations: --resume. The following options cannot be changed from the inital command, and if supplied with --resume, are ignored: -1, -2, --fasta_db, --bowtie_db, --mapping")
    group_resume.add_option("-r", "--resume", action="store_true",
                            help="Resume iterations with the priors and current SSU sequences from the last succesful iteration.")
    # parser.add_option_group(group_resume)

    # ACTUALLY PARSE ARGS
    (options, args) = parser.parse_args(argv)

    # minimal sanity checking of input
    if len(args) !=1:
        parser.error("DIR is required, and all options except DIR should have a flag associated with them (options without flags: %s)"%args)
    if options.join_threshold < 0.95 or options.join_threshold > 1:
        parser.error("join_threshold must be between [0.95, 1.0]. You supplied %.3f. (see --help)"%options.join_threshold)
    if options.min_length_coverage is not None:
        if options.min_length_coverage <= 0 or options.min_length_coverage >= 1:
            parser.error("--min_length_coverage (-c) must be between (0.0, 1.0). You supplied %.3f. (see --help)"%options.min_length_coverage)

    for filename_option_string in ["fastq_reads_1", "fastq_reads_2", "fasta_db"]:
        filename_option = getattr(options, filename_option_string)
        if filename_option is not None:
            if not os.path.exists(filename_option):
                parser.error("file not found for --%s: %s"%(filename_option_string, filename_option))

    working_dir = os.path.abspath(args[0])

    sys.stdout.write("""If you use EMIRGE in your work, please cite these manuscripts, as appropriate.
Miller CS, Baker BJ, Thomas BC, Singer SW, Banfield JF (2011)
EMIRGE: reconstruction of full-length ribosomal genes from microbial community short read sequencing data.
Genome biology 12: R44. doi:10.1186/gb-2011-12-5-r44.
Miller CS, Handley KM, Wrighton KC, Frischkorn KR, Thomas BC, Banfield JF (2013)
Short-Read Assembly of Full-Length 16S Amplicons Reveals Bacterial Diversity in Subsurface Sediments.
PloS one 8: e56018. doi:10.1371/journal.pone.0056018.\n\n""")

    sys.stdout.write("imported _emirge C functions from: %s\n"%(_emirge.__file__))
    sys.stdout.write("Command:\n")
    sys.stdout.write(' '.join([__file__]+argv))
    sys.stdout.write('\n\n')
    total_start_time = time()
    sys.stdout.write("EMIRGE started at %s\n"%(ctime()))
    sys.stdout.flush()

    # some more sanity checking of options/args
    # RESUME case
    if options.resume:
        if not os.path.exists(working_dir):
            parser.error("You specified --resume, but %s does not exist"%working_dir)
        # find last good directory
        pat = re.compile(r'iter.([0-9]{2,})$')
        current_i = -1
        # basically, this code just finds the second to last directory available and calls that the last successfully completed directory. If the sam file is not zipped, because the failure happened during zipping, it zips it up.
        for lsname in sorted(os.listdir(working_dir)):
            # BUGFIX: original called nonexistent os.isdir on the bare entry
            # name; os.listdir returns names relative to working_dir, so join
            # before testing with os.path.isdir.
            if os.path.isdir(os.path.join(working_dir, lsname)):
                try:
                    this_i = int(pat.search(lsname).groups()[0])
                except AttributeError: # no match
                    continue
                if this_i > current_i:
                    # BUGFIX: original wrote "iter.%02"%this_i+1 -- an
                    # incomplete format spec, with +1 applied outside the
                    # %-formatting. Format the *next* iteration number.
                    if not os.path.exists(os.path.join(working_dir, "iter.%02d" % (this_i + 1))):
                        continue
                    else:
                        pass # MARK -- need to finish

    # NORMAL case
    else:
        # below here, means that we are handling the NEW case (as opposed to resume)
        required = ["fastq_reads_1", "fasta_db", "bowtie_db", "max_read_length"]
        if options.fastq_reads_2 is not None:
            if options.fastq_reads_2.endswith('.gz'):
                parser.error("Read 2 file cannot be gzipped (see --help)")
            required.extend([ "insert_mean", "insert_stddev"])

        for o in required:
            if getattr(options, o) is None or getattr(options, o) == 0:
                if o == 'bowtie_db':
                    if options.fasta_db:
                        parser.error("Bowtie index is missing (--bowtie_db). You need to build it before running EMIRGE\nTry:\n\nbowtie-build %s bowtie_prefix" % options.fasta_db)
                    else:
                        parser.error("Bowtie index is missing (--bowtie_db). You need to build it before running EMIRGE\nTry:\n\nbowtie-build candidate_db.fasta bowtie_prefix")
                elif o == 'fasta_db':
                    parser.error("Fasta file for candidate database is missing. Specify --fasta_db. (try --help for more information)")
                else:
                    parser.error("--%s is required, but is not specified (try --help)"%(o))

        if not os.path.exists(working_dir):
            os.mkdir(working_dir)
        else:
            if len(os.listdir(working_dir)) > 1: # allow 1 file in case log file is redirected here.
                print >> sys.stderr, os.listdir(working_dir)
                parser.error("Directory not empty: %s\nIt is recommended you run emirge in a new directory each run; delete this directory or specifiy a new one."%working_dir)

    # clean up options to be absolute paths
    for o in ["fastq_reads_1", "fastq_reads_2", "fasta_db", "bowtie_db", "mapping"]:
        current_o_value = getattr(options, o)
        if current_o_value is not None:
            setattr(options, o, os.path.abspath(current_o_value))

    # finally, CREATE EM OBJECT
    em = EM(reads1_filepath = options.fastq_reads_1,
            reads2_filepath = options.fastq_reads_2,
            insert_mean = options.insert_mean,
            insert_sd = options.insert_stddev,
            max_read_length = options.max_read_length,
            cluster_thresh = options.join_threshold,
            n_cpus = options.processors,
            cwd = working_dir,
            reads_ascii_offset = {False: 64, True: 33}[options.phred33],
            rewrite_reads = not options.no_rewrite_reads)

    options.fastq_reads_1 = em.reads1_filepath # change these if necessary for do_initial_mapping.
    options.fastq_reads_2 = em.reads2_filepath

    # DO INITIAL MAPPING if not provided with --mapping
    if options.mapping is None:
        options.mapping = do_initial_mapping(em, working_dir, options)
    else:
        # otherwise, count number of alignments in bamfile
        em.n_alignments = int(check_output(["samtools", "view", "-c", "-F",
                                            "0x100", options.mapping],
                                           close_fds=True))

    # if >= this percentage of bases are minor alleles, split candidate sequence
    em.snp_percentage_thresh = options.snp_fraction_thresh
    # if prob(N) for minor allele base N is >= this threshold, call site a minor allele
    em.snp_minor_prob_thresh = options.variant_fraction_thresh
    if options.min_length_coverage is not None:
        em.min_length_coverage = options.min_length_coverage
    # em.min_depth = options.min_depth # DEPRECIATED
    if options.nice_mapping is not None:
        em.mapping_nice = options.nice_mapping

    if options.randomize_init_priors:
        print >> sys.stderr, "*"*60
        print >> sys.stderr, "DEBUG: initialized priors will be randomized for testing purposes"
    em.initialize_EM(options.mapping, options.fasta_db, randomize_priors = options.randomize_init_priors)

    # BEGIN ITERATIONS
    do_iterations(em, max_iter = options.iterations, save_every = None)

    sys.stdout.write("EMIRGE finished at %s. Total time: %s\n"%(ctime(), timedelta(seconds = time()-total_start_time)))

    return
# Script entry point: run the EMIRGE command line interface.
if __name__ == '__main__':
    main()
def f(bamfile):
    """Return a boolean array marking which references in *bamfile* have at
    least one aligned read (indexed by reference tid).

    Debugging helper; also prints the elapsed wall-clock time.
    """
    t = time()
    # BUGFIX: numpy.bool was a deprecated alias for the builtin bool and was
    # removed in NumPy 1.24; use the builtin directly.
    present = numpy.zeros(bamfile.nreferences, dtype=bool)
    for alignedread in bamfile:
        present[alignedread.tid] = 1
    # print(...) with a single argument is valid in both Python 2 and 3.
    print(timedelta(seconds = time()-t))
    return present
|
epruesse/EMIRGE
|
emirge_amplicon.py
|
Python
|
gpl-3.0
| 82,875
|
[
"BLAST",
"Biopython",
"Bowtie",
"pysam"
] |
8a2fb6666455f422bc59063b72c1748d095f242ae6b5cd2b1d087b81dfb6225a
|
"""The cl.oquence kernel programming language."""
import ast as _ast # http://docs.python.org/library/ast.html
import cypy
import cypy.astx as astx
import cypy.cg as cg
version = cypy.Version("cl.oquence", (("Major", 1), ("Minor", 0)), "alpha")
"""The current :class:`version <cypy.Version>` of cl.oquence (1.0 alpha)."""
def fn(decl):
    """Create a :class:`generic cl.oquence function <GenericFn>` from a
    Python function declaration.

    Normally used as a decorator::

        @clq.fn
        def sum(a, b, dest):
            gid = get_global_id(0)
            dest[gid] = a[gid] + b[gid]

    which, as with any decorator, is sugar for ``sum = clq.fn(sum)`` on
    a plain ``def``.

    Two alternate constructors are also attached to this function:

    * ``clq.fn.from_source(src)`` -- build from a string containing the
      function's source code.  Use this on the ``python``/``ipython``
      command line, where interactively defined functions do not retain
      their source (resolved in ipython 0.11; see
      http://github.com/ipython/ipython/issues/issue/120).

    * ``clq.fn.from_ast(tree)`` -- build from a pre-parsed abstract
      syntax tree.  See the :mod:`ast` module in the Python standard
      library and the convenience helpers in :mod:`cypy.astx`.
    """
    # Normalize whatever we were handed (function object, source, AST)
    # down to the single FunctionDef node it contains.
    tree = astx.extract_the(astx.infer_ast(decl), _ast.FunctionDef)
    return GenericFn(tree)
def from_source(src):
    """Create a :class:`GenericFn` from a string of source code."""
    tree = astx.extract_the(astx.infer_ast(src), _ast.FunctionDef)
    return GenericFn(tree)
fn.from_source = from_source
def from_ast(ast):
    """Create a :class:`GenericFn` directly from a Python abstract syntax
    tree (the root must be/contain a FunctionDef)."""
    return GenericFn(ast)
# Expose as an alternate constructor on the fn decorator.
fn.from_ast = from_ast
class GenericFn(object):
    """A generic cl.oquence function.

    It is generic in the sense that its arguments have not yet been assigned
    concrete types.

    Generic functions are immutable and intern.
    """
    def __init__(self, ast):
        # Assignment goes through the set-once property below, which
        # validates the node and mirrors its name/docstring onto self.
        self.original_ast = ast

    ###########################################################################
    # Abstract Syntax Tree
    ###########################################################################
    @cypy.setonce(property)
    def original_ast(self):
        """The original, unannotated Python abstract syntax tree for this
        generic function."""
        return self._ast

    @original_ast.setter
    def original_ast(self, value):
        # Only whole function definitions are accepted.
        if not isinstance(value, _ast.FunctionDef):
            raise Error(
                "Root node of ast must be a FunctionDef, but got a %s." %
                value.__class__.__name__)
        self._ast = value
        # Mirror the wrapped function's metadata so this object looks like
        # the original function to introspection tools.
        self.__name__ = value.name
        self.__doc__ = _ast.get_docstring(value, clean=False)

    @cypy.lazy(property)
    def annotated_ast(self):
        """An annotated copy of the abstract syntax tree for this GenericFn.

        See :class:`internals.GenericFnVisitor`.
        """
        # Lazy: the visitor runs only on first access, then caches.
        visitor = self._visitor = internals.GenericFnVisitor()
        return visitor.visit(self.original_ast)

    @cypy.lazy(property)
    def arg_names(self):
        """A tuple of strings containing the names of the arguments."""
        return astx.FunctionDef_co_varnames(self.original_ast)

    @cypy.lazy(property)
    def local_variables(self):
        """A tuple of strings containing the names of the local variables."""
        return self.annotated_ast.local_variables

    @cypy.lazy(property)
    def all_variables(self):
        """A tuple of strings containing the names of all variables (both
        arguments and local variables)."""
        return self.annotated_ast.all_variables

    @cypy.lazy(property)
    def name(self):
        """The function's name."""
        return self.annotated_ast.name

    def compile(self, target, *arg_types):
        """Creates a :class:`concrete function <ConcreteFn>` with the provided
        argument types."""
        return ConcreteFn(self, arg_types, target)

    @cypy.lazy(property)
    def cl_type(self):
        # The singleton type inhabited by this generic function; self.Type
        # is assigned later in this module (GenericFn.Type = GenericFnType).
        return self.Type(self)
# Interning: constructing with equal arguments yields the same instance.
cypy.intern(GenericFn)
class ConcreteFn(object):
    """A concrete function is made from a generic function by binding the
    arguments to concrete types.

    Concrete functions are immutable and intern.
    """
    # TODO: review this
    def __init__(self, generic_fn, arg_types, backend):
        self.generic_fn = generic_fn
        self.arg_types = arg_types
        self.backend = backend
        # Maps each argument name to its bound concrete type.
        self.arg_map = cypy.frozendict(zip(generic_fn.arg_names, arg_types))

    @cypy.setonce(property)
    def generic_fn(self):
        """The generic function that this concrete function is derived from."""
        return self._generic_fn

    @generic_fn.setter
    def generic_fn(self, val):
        self._generic_fn = val

    @cypy.setonce(property)
    def arg_types(self):
        """A sequence of types for each of the arguments to this function."""
        return self._arg_types

    @arg_types.setter
    def arg_types(self, val):
        self._arg_types = val

    @cypy.setonce(property)
    def backend(self):
        """The backend language."""
        return self._backend

    @backend.setter
    def backend(self, val):
        self._backend = val

    @cypy.lazy(property)
    def typed_ast(self):
        """The typed abstract syntax tree for this function."""
        # Lazy: type resolution / code generation runs once on first access.
        backend = self.backend
        visitor = self._visitor = internals.ConcreteFnVisitor(self, backend)
        return visitor.visit(self._generic_fn.annotated_ast)

    @cypy.lazy(property)
    def program_items(self):
        """A list of all program items needed by this concrete function."""
        return tuple(self.typed_ast.context.program_items)

    @cypy.lazy(property)
    def program_item(self):
        """The program item corresponding to this function."""
        return self.typed_ast.context.program_item

    @cypy.lazy(property)
    def return_type(self):
        """The return type of this function."""
        return self.typed_ast.context.return_type

    @cypy.lazy(property)
    def name(self):
        """The fully-mangled name of this function."""
        return self.program_item.name

    @cypy.lazy(property)
    def cl_type(self):
        # The singleton type inhabited by this concrete function; self.Type
        # is assigned later in this module (ConcreteFn.Type = ConcreteFnType).
        return self.Type(self)
# Interning: constructing with equal arguments yields the same instance.
cypy.intern(ConcreteFn)
class Type(object):
    """Base class for cl.oquence types.

    Every ``resolve_*`` / ``generate_*`` / ``validate_*`` hook below raises
    by default; subclasses opt in to an operation by overriding the
    corresponding method.  ``resolve_*`` hooks run during type resolution,
    ``generate_*`` hooks during code generation, and ``validate_*`` hooks
    check that a statement form is legal for the type.
    """
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return "<clq.Type <%s>>" % self.name

    def __repr__(self):
        return str(self)

    def observe(self, context, node):
        """Called when this type has been assigned to an expression, given by
        ``node``."""
        pass

    # -- expressions -------------------------------------------------------
    def resolve_Attribute(self, context, node):
        raise TypeResolutionError(
            "Type '%s' does not support attribute access." %
            self.name, node.value)

    def generate_Attribute(self, context, node):
        raise CodeGenerationError(
            "Type '%s' does not support attribute access." %
            self.name, node.value)

    def resolve_Subscript(self, context, node):
        raise TypeResolutionError(
            "Type '%s' does not support subscript access." %
            self.name, node.value)

    def generate_Subscript(self, context, node):
        raise CodeGenerationError(
            "Type '%s' does not support subscript access." %
            self.name, node.value)

    def resolve_UnaryOp(self, context, node):
        raise TypeResolutionError(
            "Type '%s' does not support any unary operations." %
            self.name, node.operand)

    def generate_UnaryOp(self, context, node):
        raise CodeGenerationError(
            "Type '%s' does not support any unary operations." %
            self.name, node.operand)

    def resolve_BinOp(self, context, node):
        raise TypeResolutionError(
            "Type '%s' does not support any binary operations." %
            self.name, node.left)

    def generate_BinOp(self, context, node):
        raise CodeGenerationError(
            "Type '%s' does not support any binary operations." %
            self.name, node.left)

    def resolve_Compare(self, context, node):
        raise TypeResolutionError(
            "Type '%s' does not support comparisons." %
            self.name, node.left)

    def generate_Compare(self, context, node):
        raise CodeGenerationError(
            "Type '%s' does not support comparisons." %
            self.name, node.left)

    def resolve_BoolOp(self, context, node):
        raise TypeResolutionError(
            "Type '%s' does not support any boolean operations." %
            self.name, node.values[0])

    def generate_BoolOp(self, context, node):
        raise CodeGenerationError(
            "Type '%s' does not support any boolean operations." %
            self.name, node.values[0])

    def resolve_Call(self, context, node):
        raise TypeResolutionError(
            "Type '%s' does not support the call operation." %
            self.name, node.func)

    def generate_Call(self, context, node):
        raise CodeGenerationError(
            "Type '%s' does not support the call operation." %
            self.name, node.func)

    # -- statements --------------------------------------------------------
    def validate_Return(self, context, node):
        raise TypeResolutionError(
            "Type '%s' does not support the 'return' statement." %
            self.name, node.value)

    def generate_Return(self, context, node):
        # BUGFIX: ast.Return nodes have a .value field, not .func; the
        # original passed node.func, which raised AttributeError instead of
        # the intended CodeGenerationError.
        raise CodeGenerationError(
            "Type '%s' does not support the 'return' statement." %
            self.name, node.value)

    def resolve_MultipleAssignment(self, context, prev, new, node):
        # Default rule: re-assignment is only legal if the new expression
        # resolves to exactly this type.
        new_type = new.resolve(context)
        if self == new_type:
            return new_type
        else:
            raise TypeResolutionError(
                "Multiple assignment with incompatible types: %s, %s." %
                (self.name, new_type.name), node)

    def validate_Assign(self, context, node):
        raise TypeResolutionError(
            "Type '%s' does not support assignment to an identifier." %
            self.name, node.target)

    def generate_Assign(self, context, node):
        # Plain identifier assignment is delegated to the backend.
        context.backend.generate_Assign(context, node)

    def validate_AssignAttribute(self, context, node):
        raise TypeResolutionError(
            "Type '%s' does not support assignment to an attribute." %
            self.name, node.targets[0].value)

    def generate_AssignAttribute(self, context, node):
        raise CodeGenerationError(
            "Type '%s' does not support assignment to an attribute." %
            self.name, node.targets[0].value)

    def validate_AssignSubscript(self, context, node):
        raise TypeResolutionError(
            "Type '%s' does not support assignment to a subscript." %
            self.name, node.targets[0].value)

    def generate_AssignSubscript(self, context, node):
        raise CodeGenerationError(
            "Type '%s' does not support assignment to a subscript." %
            self.name, node.targets[0].value)

    def validate_AugAssign(self, context, node):
        raise TypeResolutionError(
            "Type '%s' does not support augmented assignment to an identifier." %
            self.name, node.target)

    def generate_AugAssign(self, context, node):
        raise CodeGenerationError(
            "Type '%s' does not support augmented assignment to an identifier." %
            self.name, node.target)

    def validate_AugAssignAttribute(self, context, node):
        raise TypeResolutionError(
            "Type '%s' does not support augmented assignment to an attribute." %
            self.name, node.target.value)

    def generate_AugAssignAttribute(self, context, node):
        raise CodeGenerationError(
            "Type '%s' does not support augmented assignment to an attribute." %
            self.name, node.target.value)

    def validate_AugAssignSubscript(self, context, node):
        raise TypeResolutionError(
            "Type '%s' does not support augmented assignment to a subscript." %
            self.name, node.target.value)

    def generate_AugAssignSubscript(self, context, node):
        raise CodeGenerationError(
            "Type '%s' does not support augmented assignment to a subscript." %
            self.name, node.target.value)
class VirtualType(Type):
    """Designates a type that does not have a concrete representation (e.g.
    singleton function types).

    Arguments whose type is virtual are dropped from generated call
    expressions (see ``_generic_generate_Call``)."""
def _generic_generate_Call(context, node):
    """Generate the code for a call expression, omitting arguments whose
    type is virtual (they have no concrete representation)."""
    arg_types = tuple(arg.unresolved_type.resolve(context)
                      for arg in node.args)
    # Visit (and keep) only the concretely-typed arguments.
    concrete_args = tuple(context.visit(arg)
                          for arg, arg_type in zip(node.args, arg_types)
                          if not isinstance(arg_type, VirtualType))
    fn_expr = context.visit(node.func)
    call_code = (fn_expr.code, "(",
                 cypy.join((arg.code for arg in concrete_args), ", "),
                 ")")
    return astx.copy_node(node,
                          args=concrete_args,
                          func=fn_expr,
                          code=call_code)
class GenericFnType(VirtualType):
    """Each generic function uniquely inhabits a GenericFnType."""
    def __init__(self, generic_fn):
        VirtualType.__init__(self, generic_fn.name)
        self.generic_fn = generic_fn

    def resolve_Call(self, context, node):
        # Calling a generic function implicitly compiles it for the argument
        # types at the call site; the call expression's type is the compiled
        # function's return type.
        arg_types = tuple(arg.unresolved_type.resolve(context)
                          for arg in node.args)
        concrete_fn = self.generic_fn.compile(context.backend, *arg_types)
        return concrete_fn.return_type

    def generate_Call(self, context, node):
        r = _generic_generate_Call(context, node)
        arg_types = tuple(arg.unresolved_type.resolve(context)
                          for arg in node.args)
        # compile() interns, so this reuses the instance from resolve_Call.
        concrete_fn = self.generic_fn.compile(context.backend, *arg_types)
        # The callee's program items must precede the caller's in the
        # generated program.
        cypy.extend_front(context.program_items, concrete_fn.program_items)
        return r
cypy.intern(GenericFnType)
# Used by GenericFn.cl_type to construct its singleton type.
GenericFn.Type = GenericFnType
class ConcreteFnType(VirtualType):
    """Each concrete function uniquely inhabits a ConcreteFnType."""
    def __init__(self, concrete_fn):
        VirtualType.__init__(self, concrete_fn.name)
        self.concrete_fn = concrete_fn

    def resolve_Call(self, context, node):
        # Unlike generic functions, a concrete function only accepts calls
        # whose argument types match its bound types exactly.
        arg_types = tuple(arg.unresolved_type.resolve(context)
                          for arg in node.args)
        concrete_fn = self.concrete_fn
        fn_arg_types = concrete_fn.arg_types
        if arg_types != fn_arg_types:
            raise TypeResolutionError(
                "Argument types are not compatible. Got %s, expected %s." %
                (str(arg_types), str(fn_arg_types)), node)
        return concrete_fn.return_type

    def generate_Call(self, context, node):
        r = _generic_generate_Call(context, node)
        # The callee's program items must precede the caller's in the
        # generated program.
        cypy.extend_front(context.program_items, self.concrete_fn.program_items)
        return r
cypy.intern(ConcreteFnType)
# Used by ConcreteFn.cl_type to construct its singleton type.
ConcreteFn.Type = ConcreteFnType
class Backend(object):
    """Abstract base class for a backend language specification.

    The resolve_*/generate_* hooks below raise by default; concrete
    backends override the literals and statement forms they support.
    """
    def __init__(self, name):
        self.name = name
        # Program items shared across all functions compiled for this
        # backend (order-preserving set).
        self.program_items = cypy.SetList([])

    def init_context(self, context):
        """Initializes a :class:`context <Context>`."""
        pass

    def generate_program_item(self, context):
        """Called to generate a :class:`program item <ProgramItem>` for a
        completed concrete function described by the provided context.

        The return value is automatically added assigned to the
        context.program_item attribute and added to context.program_items.
        """
        raise Error("Backend must provide a method to generate program items.")

    def add_program_items(self, items):
        """Called to add the :class:`program items <ProgramItem>` generated
        by compiling a concrete function to the global list of program items."""
        self.program_items.extend(items)

    def void_type(self, context, node):
        raise TypeResolutionError(
            "Backend does not specify a void type.", node)

    # -- literals ----------------------------------------------------------
    def resolve_Num(self, context, node):
        raise TypeResolutionError(
            "Backend cannot handle raw numeric literals.", node)

    def generate_Num(self, context, node):
        raise CodeGenerationError(
            "Backend cannot handle raw numeric literals.", node)

    def resolve_Str(self, context, node):
        raise TypeResolutionError(
            "Backend cannot handle raw string literals.", node)

    def generate_Str(self, context, node):
        raise CodeGenerationError(
            "Backend cannot handle raw string literals.", node)

    # -- statements --------------------------------------------------------
    def generate_For(self, context, node):
        raise CodeGenerationError(
            "Backend does not support 'for' loops.", node)

    def generate_While(self, context, node):
        raise CodeGenerationError(
            "Backend does not support 'while' loops.", node)

    def generate_If(self, context, node):
        raise CodeGenerationError(
            "Backend does not support 'if' statements.", node)

    def generate_IfExp(self, context, node):
        raise CodeGenerationError(
            "Backend does not support 'if' expressions.", node)

    def generate_Expr(self, context, node):
        raise CodeGenerationError(
            "Backend does not support standalone expressions.", node)

    def generate_Pass(self, context, node):
        raise CodeGenerationError(
            "Backend does not support the 'pass' statement.", node)

    def generate_Break(self, context, node):
        raise CodeGenerationError(
            "Backend does not support the 'break' statement.", node)

    def generate_Continue(self, context, node):
        raise CodeGenerationError(
            "Backend does not support the 'continue' statement.", node)

    def generate_Exec(self, context, node):
        raise CodeGenerationError(
            "Backend does not support the 'exec' statement.", node)

    def generate_op(self, context, node):
        raise CodeGenerationError(
            "Backend does not support operators.", node)
class Context(object):
    """Contains contextual information that is used during type resolution
    and code generation.

    User-defined types may read and write to the context, although care should
    be taken to ensure that naming conflicts do not arise.
    """
    def __init__(self, visitor, concrete_fn, backend):
        self.visitor = visitor
        self.concrete_fn = concrete_fn
        self.backend = backend
        # Convenience alias for the function being compiled.
        self.generic_fn = concrete_fn.generic_fn
        # NOTE(review): body/stmts are populated elsewhere (presumably by
        # the visitor during code generation) -- not written in this class.
        self.body = [ ]
        self.stmts = [ ]
        # Program items required by the function being compiled.
        self.program_items = cypy.SetList()

        # used to provide base case for resolving multiple assignments
        self._resolving_name = None
        self._multiple_assignment_prev = { }

        # Give the backend a chance to install its own context state.
        backend.init_context(self)

    def visit(self, node):
        # Delegate to the visitor driving this resolution/generation pass.
        return self.visitor.visit(node)

    def observe(self, clq_type, node):
        # Notify the type that it was assigned to ``node``; return the type
        # so calls can be chained inline.
        clq_type.observe(self, node)
        return clq_type

    # Code-generation indentation helpers re-exported from cypy.cg.
    tab = staticmethod(cg.CG.tab)
    untab = staticmethod(cg.CG.untab)
class ProgramItem(object):
    """A named, top-level chunk of source code in the generated program."""

    # Class-level defaults so the attributes exist even on bare instances.
    name = None   # the name of the item, if it has a name, or None
    code = None   # the source code associated with this item

    def __init__(self, name, code):
        self.code = code
        self.name = name
class Error(Exception):
    """Base class for errors in cl.oquence.

    Catch this to handle any cl.oquence-specific failure."""
class InvalidOperationError(Error):
    """Raised if an invalid operation is observed in a generic function."""
    def __init__(self, message, node):
        # Chain to the base Exception so str(exc) and exc.args carry the
        # message instead of being empty.
        Error.__init__(self, message)
        self.message = message
        # The AST node at which the invalid operation was observed.
        self.node = node
class TypeResolutionError(Error):
    """Raised to indicate an error during type resolution."""
    def __init__(self, message, node):
        # Chain to the base Exception so exc.args carries the message.
        Error.__init__(self, message)
        self.message = message
        # The AST node at which type resolution failed.
        self.node = node

    def __str__(self):
        return "TypeResolutionError(%s)" % self.message
class CodeGenerationError(Error):
    """Raised to indicate an error during code generation."""
    def __init__(self, message, node):
        # Chain to the base Exception so exc.args carries the message.
        Error.__init__(self, message)
        self.message = message
        # The AST node at which code generation failed.
        self.node = node

    def __str__(self):
        # Match TypeResolutionError's format for consistent diagnostics.
        return "CodeGenerationError(%s)" % self.message
# placed at the end because the internals use the definitions above
import internals
|
cyrus-/ace
|
clq/__init__.py
|
Python
|
lgpl-3.0
| 21,497
|
[
"VisIt"
] |
11f1cf5b296cd8e2c729a02f9baed0f82a4e8bf2141aed5295f2e7dc90e5e9db
|
import os, sys, traceback
import xml.etree.ElementTree as xml
##
# Stores error information needed for printing diff messages
class XMLError(object):
    """Holds the information needed to report one XML diff failure."""
    ##
    # Constructor.
    # @param err A one line error summary
    # @param msg A detailed (multi-line) description of the error
    def __init__(self, err, msg):
        self.message = msg
        self.error = err
##
# A class for finding difference between XML documents
class XMLDiffer(object):
##
# Constructor.
# @param file1 The master (gold) file to check against
# @param file2 The file to compare to the master file
#
# Optional Arguments:
# abs_zero: Any value less than this is assumed zero (default: 1e-11)
# rel_tol: Relative tolerance to check numeric values against (default: 5.5e-6)
# max_values: The maximum number of values to test
    def __init__(self, file1, file2, **kwargs):
        # Store the file names (index 0 = master/gold, index 1 = test file)
        self._file = [file1, file2]

        # Extract the optional arguments
        # abs_zero: values smaller than this are treated as zero
        self._abs_zero = float(kwargs.pop('abs_zero', 1e-11))
        # rel_tol: relative tolerance for numeric comparisons
        self._rtol = float(kwargs.pop('rel_tol', 5.5e-6))
        # attributes to skip entirely during comparison
        self._ignored_attributes = kwargs.pop('ignored_attributes', [])

        # Storage for XMLError objects; must exist before _extractXML runs,
        # since parse failures are reported through _addError
        self._errors = []

        # Extract the XML tree from the files (None on failure)
        self._root1 = self._extractXML(file1)
        self._root2 = self._extractXML(file2)

        # Perform the comparison immediately; query results via fail()/message()
        self._compare()
##
# Check the comparison status (public)
# Returns True if the comparison fails
def fail(self):
return len(self._errors) > 0
##
# Print the error message(s) (public)
# @return The output as a single string
    def message(self, **kwargs):
        # Header: the files compared and the tolerances in effect
        output = []
        output.append('Running XMLDiffer.py')
        output.append( ' File 1: ' + self._file[0])
        output.append( ' File 2: ' + self._file[1])
        output.append( ' rel_tol: ' + str(self._rtol))
        output.append( ' abs_zero: ' + str(self._abs_zero))
        output.append( ' No. of errors: ' + str(len(self._errors)))

        # Errors: one numbered section per recorded XMLError
        cnt = 0
        for e in self._errors:
            cnt += 1
            output.append('\n')
            output.append('ERROR ' + str(cnt) + ':')
            output.append(' ' + e.error)
            for m in e.message:
                output.append(' ' + m)

        # Print the output only when called with output=True
        if kwargs.pop('output', False):
            print '\n'.join(output)

        # Return the text, as a single string
        return '\n'.join(output)
##
# Add an XMLError object to the storage vector (private)
# @param err A string containing the error message or an XMLError object
# @param msg A detailed message for the error (ignored if XMLError is passed to err)
def _addError(self, err, msg=[]):
# Add object directly
if isinstance(err, XMLError):
self._errors.append(err)
# Create and add the object
else:
obj = XMLError(err, msg)
self._errors.append(obj)
##
# Reads the XML file (private)
# @param filename The name of the XML file to read
# @return An xml.etree.ElementTree root object
def _extractXML(self, filename):
# Check for file existence
if not os.path.isfile(filename):
self._addError('Could not open ' + filename + ', the file does not exist.')
return None
# Get the root object of the XML tree
try:
root = xml.parse(filename).getroot()
# Catch parser errors
except xml.ParseError:
err = 'An XML parser error occurred attempting to read XML tree from ' + filename + '.'
msg = traceback.format_exc().splitlines()
self._addError(err, msg)
root = None
# Return the object
return root
##
# Perform the block by block comparison (private)
def _compare(self):
# Define local variables
root = [self._root1, self._root2]
name = ['file 1', 'file 2']
# Do nothing if the objects do not exist
if root[0] == None or root[1] == None:
return
# Loop through each tree object in the master file
for elem0 in root[0].getiterator():
# Initialize the result and error storage
results = []
errors = []
# Loop through all blocks in the second file with the current tag
for elem1 in root[1].getiterator(elem0.tag):
# Perform the comparison
r, e = self._compareBlock(elem0, elem1)
# Append the test results
results.append(r)
errors.append(e)
# If all results are False, there was no match
if not any(results):
# Filter out errors (elem.text failure)
errors = filter(None, errors)
# If no errors exist there was no block or block with identical attributes located
if len(errors) == 0:
msg = self._getAttrib(elem0)
if len(msg) == 0:
err = 'Unable to locate an XML Block with the tag "' + elem0.tag + '" in file 2.'
self._addError(err)
else:
err = 'Unable to locate an XML Block with the tag "' + elem0.tag + '" and the following attributes in file 2.'
self._addError(err, msg)
# Had a text error within similar blocks
else:
for e in errors:
self._addError(e)
##
# Compares XML blocks (private)
# This function first compares the XML block attributes, if those match
# then the XML text is also compared.
# @param elem0 The master XML element object
# @param elem1 The XML element object to compare the master against
# @return A pair containing the test result (True or False) and an error indicator,
# this 'indicator' is None if the result of the match is True or if the
# attributes fail to match. When the text fails to match then it contains
# the XMLError object.
def _compareBlock(self, elem0, elem1):
# Perform attribute comparison in both directions: ensure that
# every attribute in the gold file is in the output file, and
# vice-versa.
test_attrib = self._compareAttributes(elem0, elem1) and self._compareAttributes(elem1, elem0)
# If the attributes match, compare the text and return those results
if test_attrib:
test_text, err = self._compareText(elem0, elem1)
return test_text, err
# Otherwise the attributes do match
else:
return False, None
##
# Perform attribute comparison (private)
# @param elem0 The master XML element object
# @param elem1 The XML element object to compare the master against
def _compareAttributes(self, elem0, elem1):
# Initialize the output (assume match)
result = True
# Loop through each attribute of the master object
for key0, value0 in elem0.attrib.iteritems():
# If this key is one of the attributes we're ignoring, then ignore it!
if key0 in self._ignored_attributes:
continue
# Attribute is missing from the slave object, match fails
if not elem1.attrib.has_key(key0):
return False
# If the slave object has the same attribute, perform a comparison
elif elem1.attrib.has_key(key0):
value1 = elem1.attrib[key0]
# Attempt to perform a numeric comparison
try:
tvalue, rel_diff = self._isClose(value0, value1)
if not tvalue:
return False
except:
if value0 != value1:
return False
# Return the results
return result
## Perform comparison of text for two XML blocks (private)
# @param elem0 The master XML element object
# @param elem1 The XML element object to compare the master against
# @return A pair of items, either True, None or False, XMLError
def _compareText(self, elem0, elem1):
# Initialize the output
result = True
err = None
# Return if no text exists
if elem0.text == None and elem1.text == None:
return (result, err)
elif elem0.text == None or elem1.text == None:
return (False, err)
# Convert the text to a list of strings
text0 = elem0.text.replace('\n', '').strip().split(' ')
text1 = elem1.text.replace('\n', '').strip().split(' ')
text0 = filter(None, text0)
text1 = filter(None, text1)
# Check that the lengths are the same
if len(text0) != len(text1):
result = False
err = 'An XML block with the tag "' + elem0.tag + '" and the following attributes exists in both files, but the blocks have a different number of values.'
msg = self._getAttrib(elem0)
msg.append('No. items file 1: ' + '%d' % len(text0))
msg.append('No. items file 2: ' + '%d' % len(text1))
err = XMLError(err, msg)
return (False, err)
for i in xrange(len(text0)):
value, rel_diff = self._isClose(text0[i], text1[i])
if not value:
err = 'An XML block with the tag "' + elem0.tag + '" and the following attributes has differing values on file 2.'
msg = self._getAttrib(elem0)
msg.append('Index ' + str(i) + ' : ' + text0[i] + ' ~ ' + text1[i] + ', rel diff: ' + '%e' % rel_diff)
err = XMLError(err, msg)
return (False, err)
return result, err
##
# Perform relative tolerance check between two numbers (private)
# @param value0 A string or list of strings containing the first number
# @param value1 A string or list of strings containing the second number
def _isClose(self, value0, value1):
# Return values
result = True
rel_diff = 0
# Convert the strings to floats
value0 = float(value0)
value1 = float(value1)
# Apply the absolute zeros
if abs(value0) < self._abs_zero:
value0 = 0
if abs(value1) < self._abs_zero:
value1 = 0
# Check for zero
if value0 == 0 and value1 == 0:
result = True
# Check the relative error
else:
rel_diff = abs( ( value0 - value1 ) / max( abs(value0), abs(value1) ) )
if rel_diff > self._rtol:
result = False
# Return the comparison
return result, rel_diff
##
# Get the attributes (dict) as a string (private)
# @return Attribute message string
def _getAttrib(self, elem):
msg = []
for k, v in elem.attrib.iteritems():
msg.append(' ' + k + ' = ' + v)
return msg
if __name__ == '__main__':
# You can run XMLDiffer.py as a stand-alone by putting two XML files
# in the variable names file1 and file2 below, and then running:
#
# python $MOOSE_DIR/python/TestHarness/XMLDiffer.py
file1 = os.path.join(os.getenv('MOOSE_DIR'), 'test', 'tests', 'outputs', 'vtk', 'vtk_diff_serial_mesh_parallel_out_005.pvtu')
file2 = os.path.join(os.getenv('MOOSE_DIR'), 'test', 'tests', 'outputs', 'vtk', 'gold', 'vtk_diff_serial_mesh_parallel_out_005.pvtu')
d = XMLDiffer(file1, file2, ignored_attributes=['header_type'])
if not d.fail():
print 'Files are the same\n'
else:
print d.message()
|
stimpsonsg/moose
|
python/TestHarness/XMLDiffer.py
|
Python
|
lgpl-2.1
| 11,919
|
[
"VTK"
] |
d7c205dc185ef60d4bd18aa6b9a7c18a98a111dcbe8f143b4ab5a26559aa5870
|
# -*- coding: utf-8 -*-
#
# Copyright 2008 - 2019 Brian R. D'Urso
#
# This file is part of Python Instrument Control System, also known as Pythics.
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
#
# load libraries
#
import getopt
import inspect
import logging
import os, os.path
import multiprocessing
import pickle
import sys, traceback
from pythics.settings import _TRY_PYSIDE
try:
if not _TRY_PYSIDE:
raise ImportError()
import PySide2.QtCore as _QtCore
import PySide2.QtGui as _QtGui
import PySide2.QtWidgets as _QtWidgets
import PySide2.QtPrintSupport as _QtPrintSupport
QtCore = _QtCore
QtGui = _QtGui
QtWidgets = _QtWidgets
QtPrintSupport = _QtPrintSupport
Signal = QtCore.Signal
Slot = QtCore.Slot
Property = QtCore.Property
USES_PYSIDE = True
except ImportError:
import PyQt5.QtCore as _QtCore
import PyQt5.QtGui as _QtGui
import PyQt5.QtWidgets as _QtWidgets
import PyQt5.QtPrintSupport as _QtPrintSupport
QtCore = _QtCore
QtGui = _QtGui
QtWidgets = _QtWidgets
QtPrintSupport = _QtPrintSupport
Signal = QtCore.pyqtSignal
Slot = QtCore.pyqtSlot
Property = QtCore.pyqtProperty
USES_PYSIDE = False
import pythics.html
import pythics.libcontrol
import pythics.parent
#
# Application top level window
# one for the whole application
# parent of all TabFrame instances
#
class MainWindow(QtWidgets.QMainWindow):
    """Application top-level window.

    One instance exists for the whole application; it is the parent of all
    tab (TabHtmlWindow) instances and owns the menu bar, status bar, tab
    frame, clipboard handle, and printer.
    """

    def __init__(self, parent_process, app, parent=None, compact=False):
        super(MainWindow, self).__init__(parent)
        # pythics data
        self.parent_process = parent_process
        self.app = app
        self.compact = compact
        # in compact mode the tabs cannot be moved or closed by the user
        self.fixed_tabs = compact
        self.workspace = ''
        self.shutdown_on_exit = False
        # setup window basics
        #self.resize(900, 560)
        # match raspberry pi touchscreen size
        self.resize(800, 480)
        self.setWindowTitle('Pythics')
        self.clipboard = QtWidgets.QApplication.clipboard()
        # set the corner icon
        icon = QtGui.QIcon(os.path.join(sys.path[0], 'pythics_icon.ico'))
        self.setWindowIcon(icon)
        # add the menu bar
        self.new_menu_bar()
        # fill in the main window
        self.new_tab_frame()
        # add the status bar
        self.new_status_bar()
        # for printing later
        self.printer = QtPrintSupport.QPrinter()

    def _confirm(self, text, title='Confirm'):
        # Modal yes/no confirmation dialog; returns True on Yes.
        reply = QtWidgets.QMessageBox.question(self, title, text,
            QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
        return reply == QtWidgets.QMessageBox.Yes

    def confirm_exit(self):
        """Ask the user to confirm exiting the application."""
        return self._confirm('Are you sure you want to exit?')

    def confirm_close(self):
        """Ask the user to confirm closing the current app."""
        return self._confirm('Are you sure you want to close the app?')

    def closeEvent(self, event):
        # called when the close button on the window is pushed
        if self.confirm_exit():
            self.shutdown()
            event.accept()
        else:
            event.ignore()

    def new_status_bar(self):
        """Create the status bar (skipped in compact mode)."""
        if not self.compact:
            self.status_text = QtWidgets.QLabel('')
            self.statusBar().addWidget(self.status_text, 1)

    def set_status_text(self, value):
        """Set the status bar text (no-op in compact mode)."""
        if not self.compact:
            self.status_text.setText(value)

    def new_tab_frame(self):
        """Create the central tab widget that hosts the app tabs."""
        self.tab_frame = QtWidgets.QTabWidget()
        self.tab_frame.setDocumentMode(True)
        self.tab_frame.setTabsClosable(not self.fixed_tabs)
        self.tab_frame.setMovable(not self.fixed_tabs)
        self.tab_frame.currentChanged.connect(self.redraw)
        self.tab_frame.tabCloseRequested.connect(self.close_tab)
        self.setCentralWidget(self.tab_frame)

    def redraw(self, index):
        """Redraw the tab at 'index' and update the window title."""
        widget = self.tab_frame.widget(index)
        if widget is None:
            # no tab at this index (e.g. the last tab was just closed)
            title = 'Pythics'
        else:
            title = widget.title
            widget.redraw()
        self.setWindowTitle(title)

    def get_active_tab(self):
        """Return the currently selected tab widget."""
        return self.tab_frame.currentWidget()

    def close_tab(self, i):
        """Close tab 'i' after user confirmation."""
        if self.confirm_close():
            self.tab_frame.widget(i).close()
            self.tab_frame.removeTab(i)
            if self.tab_frame.count() == 0:
                self.disable_menu_items()

    def get_open_filename(self, name_filter='*.*', directory='', title='Select a file to open'):
        """Show an open-file dialog; raises IOError if nothing is selected."""
        filename = QtWidgets.QFileDialog.getOpenFileName(self, title, directory, name_filter)[0]
        if filename == '':
            raise IOError('No file selected.')
        return filename

    def get_save_filename(self, name_filter='*.*', directory='', title='Select a filename for saving'):
        """Show a save-file dialog; raises IOError if nothing is selected."""
        filename = QtWidgets.QFileDialog.getSaveFileName(self, title, directory, name_filter)[0]
        if filename == '':
            raise IOError('No filename selected.')
        return filename

    def add_menu(self, name):
        """Add a top-level menu; it becomes the target for add_menu_item()."""
        self.last_menu = self.menuBar().addMenu(name)
        return self.last_menu

    def add_menu_item(self, item_string, item_function, shortcut=0, tip=''):
        """Add an action to the most recently created menu."""
        action = self.last_menu.addAction(item_string, item_function, shortcut)
        action.setStatusTip(tip)
        return action

    def add_menu_seperator(self):
        # (name kept as-is, misspelling included, for interface stability)
        self.last_menu.addSeparator()

    def new_menu_bar(self):
        """Build the full menu bar (File, Edit, Parameters, Help)."""
        # File menu
        self.file_menu = self.add_menu('&File')
        self.add_menu_item('&Open...', self.menu_open, 'Ctrl+O',
            'Open an app file.')
        self.add_menu_item('&Close', self.menu_close, 'Ctrl+W',
            'Close the current app.')
        self.add_menu_item('Close All', self.menu_close_all, 0,
            'Close all open files.')
        self.add_menu_item('&Reload', self.menu_reload, 'Ctrl+R',
            'Reload the app.')
        self.add_menu_seperator()
        self.add_menu_item('Open Workspace...', self.menu_open_workspace, 0,
            'Open a group of files (a workspace).')
        self.add_menu_item('Save Workspace', self.menu_save_workspace, 0,
            'Save open workspace.')
        self.add_menu_item('Save Workspace As...', self.menu_save_workspace_as,
            0, 'Save open files as a workspace.')
        self.add_menu_seperator()
        self.add_menu_item('Page Set&up...', self.menu_page_setup, 0,
            'Page setup for printing.')
        self.add_menu_item('Print Pre&view', self.menu_print_preview, 0,
            'Preview pages to be printed.')
        self.add_menu_item('&Print...', self.menu_print, 0,
            'Print the current html.')
        self.add_menu_seperator()
        self.add_menu_item('E&xit', self.menu_quit, 0, 'Quit Pythics')
        # Edit menu
        self.edit_menu = self.add_menu('&Edit')
        self.add_menu_item('Cu&t', self.menu_cut, 'Ctrl+X',
            'Cut text to clipboard.')
        self.add_menu_item('&Copy', self.menu_copy, 'Ctrl+C',
            'Copy text to clipboard.')
        self.add_menu_item('&Paste', self.menu_paste, 'Ctrl+V',
            'Paste text from clipboard.')
        self.add_menu_item('Delete', self.menu_delete, 0,
            'Delete selected text.')
        # Parameters menu
        self.param_menu = self.add_menu('&Parameters')
        self.add_menu_item('Load Defaults', self.menu_load_parameters_defaults,
            0, 'Load default parameters.')
        self.add_menu_item('Load...', self.menu_load_parameters, 0,
            'Load parameter file')
        self.add_menu_seperator()
        self.add_menu_item('Save As Defaults',
            self.menu_save_parameters_as_defaults,
            0, 'Save parameters to default location.')
        self.add_menu_item('Save As...', self.menu_save_parameters_as, 0,
            'Save parameter file.')
        # Help menu
        if not self.fixed_tabs:
            self.help_menu = self.add_menu('&Help')
            self.add_menu_item('About Pythics...', self.menu_about,
                0, '')
            self.add_menu_item('Open Help', self.menu_help,
                0, '')
        self.disable_menu_items()

    def disable_menu_items(self):
        """Disable menu items that require an open tab.

        Menu actions are addressed by index into the menus built in
        new_menu_bar(); keep the two methods in sync if items change.
        """
        if self.fixed_tabs:
            self.file_menu.actions()[0].setEnabled(False)
            self.file_menu.actions()[5].setEnabled(False)
        # disable menu items that require an open tab
        self.file_menu.actions()[1].setEnabled(False)
        self.file_menu.actions()[2].setEnabled(False)
        self.file_menu.actions()[3].setEnabled(False)
        self.file_menu.actions()[6].setEnabled(False)
        self.file_menu.actions()[7].setEnabled(False)
        self.file_menu.actions()[10].setEnabled(False)
        self.file_menu.actions()[11].setEnabled(False)
        self.param_menu.actions()[0].setEnabled(False)
        self.param_menu.actions()[1].setEnabled(False)
        self.param_menu.actions()[3].setEnabled(False)
        self.param_menu.actions()[4].setEnabled(False)

    def enable_menu_items(self):
        """Enable menu items that require an open tab."""
        if self.fixed_tabs:
            self.file_menu.actions()[3].setEnabled(True)
            self.file_menu.actions()[10].setEnabled(True)
            self.file_menu.actions()[11].setEnabled(True)
            self.param_menu.actions()[0].setEnabled(True)
            self.param_menu.actions()[1].setEnabled(True)
            self.param_menu.actions()[3].setEnabled(True)
            self.param_menu.actions()[4].setEnabled(True)
        else:
            self.file_menu.actions()[1].setEnabled(True)
            self.file_menu.actions()[2].setEnabled(True)
            self.file_menu.actions()[3].setEnabled(True)
            self.file_menu.actions()[6].setEnabled(True)
            self.file_menu.actions()[7].setEnabled(True)
            self.file_menu.actions()[10].setEnabled(True)
            self.file_menu.actions()[11].setEnabled(True)
            self.param_menu.actions()[0].setEnabled(True)
            self.param_menu.actions()[1].setEnabled(True)
            self.param_menu.actions()[3].setEnabled(True)
            self.param_menu.actions()[4].setEnabled(True)

    def menu_open(self):
        """File > Open: choose and open an app file."""
        try:
            filename = self.get_open_filename('xml (*.htm *.html *.xml)')
        except IOError:
            pass
        else:
            self.open_html_file(filename)

    def menu_close(self):
        """File > Close: close the current tab after confirmation."""
        if self.confirm_close():
            self.get_active_tab().close()
            self.tab_frame.removeTab(self.tab_frame.currentIndex())
            if self.tab_frame.count() == 0:
                self.disable_menu_items()

    def menu_close_all(self):
        """File > Close All: close every tab after confirmation."""
        if self._confirm('Are you sure you want to close all tabs?'):
            while self.tab_frame.count() > 0:
                self.get_active_tab().close()
                self.tab_frame.removeTab(self.tab_frame.currentIndex())
            if self.tab_frame.count() == 0:
                self.disable_menu_items()

    def menu_quit(self):
        """File > Exit: shut down and quit, optionally powering off."""
        if self.confirm_exit():
            self.shutdown()
            self.app.quit()
            if self.shutdown_on_exit:
                # *nix only: power off the machine (used on dedicated
                # instrument-control hosts such as a Raspberry Pi)
                os.system("shutdown -h now")

    def menu_reload(self):
        """File > Reload: reload the current app after confirmation."""
        if self._confirm('Are you sure you want to reload the app?'):
            tab_window = self.get_active_tab()
            title = tab_window.reload_file()
            index = self.tab_frame.currentIndex()
            self.tab_frame.setTabText(index, title)

    def menu_open_workspace(self):
        """File > Open Workspace: open a saved group of app files."""
        try:
            filename = self.get_open_filename('pickle file (*.pkl *.txt)')
        except IOError:
            pass
        else:
            self.open_workspace(filename)
            self.workspace = filename
            self.enable_menu_items()

    def menu_save_workspace(self):
        """File > Save Workspace: save to the current workspace file,
        prompting for a filename if none is set yet."""
        if self.workspace == '':
            try:
                filename = self.get_save_filename('*.pkl')
            except IOError:
                pass
            else:
                self.save_workspace(filename)
                self.workspace = filename
        else:
            self.save_workspace(filename=self.workspace)

    def menu_save_workspace_as(self):
        """File > Save Workspace As: save open files under a new name."""
        try:
            filename = self.get_save_filename('*.pkl')
        except IOError:
            pass
        else:
            self.save_workspace(filename)
            self.workspace = filename

    def menu_page_setup(self):
        """File > Page Setup: configure the printer page."""
        dialog = QtPrintSupport.QPageSetupDialog(self.printer)
        dialog.exec_()

    def menu_print_preview(self):
        """File > Print Preview: preview the pages to be printed."""
        dialog = QtPrintSupport.QPrintPreviewDialog(self.printer)
        dialog.paintRequested.connect(self.print_current_tab)
        dialog.exec_()

    def menu_print(self):
        """File > Print: print the current tab's contents."""
        dialog = QtPrintSupport.QPrintDialog(self.printer)
        dialog.setWindowTitle('Print Document')
        if dialog.exec_() == QtWidgets.QDialog.Accepted:
            self.set_status_text('Printing...')
            self.print_current_tab(self.printer)
            self.set_status_text('')

    def print_current_tab(self, printer):
        """Render the active tab to the given printer.

        Renders indirectly through a QPicture rather than painting straight
        to the printer: direct printing produced fuzzy output for many
        controls, while the QPicture pass gives sharper results.
        """
        scroll_area = self.get_active_tab()
        # overall scale: set to fill width of page
        page_width = printer.pageRect().width()
        hsb = scroll_area.horizontalScrollBar()
        frame_width = hsb.maximum() + hsb.pageStep()
        scale = float(page_width)/float(frame_width)
        x_offset = 0
        sb = scroll_area.verticalScrollBar()
        # shift up by the current scroll position so the visible region
        # lands at the top of the page
        y_offset = - scale*sb.sliderPosition()
        # first draw to the QPicture
        picture = QtGui.QPicture()
        picture_painter = QtGui.QPainter(picture)
        picture_painter.translate(x_offset, y_offset)
        picture_painter.scale(scale, scale)
        scroll_area.frame.render(picture_painter, QtCore.QPoint(0, 0))
        picture_painter.end()
        # then draw the QPicture to the printer
        painter = QtGui.QPainter(printer)
        painter.drawPicture(QtCore.QPoint(0, 0), picture)
        painter.end()

    def menu_cut(self):
        """Edit > Cut: forward to the focused widget, if it supports cut."""
        w = self.app.focusWidget()
        try:
            w.cut()
        except AttributeError:
            # focused widget has no cut(); best-effort, deliberately ignored
            pass

    def menu_copy(self):
        """Edit > Copy: forward to the focused widget, if it supports copy."""
        w = self.app.focusWidget()
        try:
            w.copy()
        except AttributeError:
            pass

    def menu_paste(self):
        """Edit > Paste: forward to the focused widget, if it supports paste."""
        w = self.app.focusWidget()
        try:
            w.paste()
        except AttributeError:
            pass

    def menu_delete(self):
        """Edit > Delete: remove selected text without changing clipboard.

        Implemented as a cut followed by restoring the previous clipboard
        contents, since Qt widgets have no generic delete().
        """
        w = self.app.focusWidget()
        t = self.clipboard.text()
        try:
            w.cut()
        except AttributeError:
            pass
        self.clipboard.setText(t)

    def menu_load_parameters_defaults(self):
        """Parameters > Load Defaults: reload default parameters."""
        tab_window = self.get_active_tab()
        if self._confirm('Are you sure you want to replace current parameters?',
                         title='Confirm Load Parameters'):
            tab_window.load_parameters(default=True)

    def menu_load_parameters(self):
        """Parameters > Load: load parameters from a chosen file."""
        self.get_active_tab().load_parameters()

    def menu_save_parameters_as_defaults(self):
        """Parameters > Save As Defaults: save to the default location."""
        self.get_active_tab().save_parameters(default=True)

    def menu_save_parameters_as(self):
        """Parameters > Save As: save parameters to a chosen file."""
        self.get_active_tab().save_parameters()

    def shutdown(self):
        """Stop all action threads and subprocesses before exit."""
        self.set_status_text('Waiting for threads and subprocesses to die...')
        self.parent_process.stop()

    def open_html_file(self, filename):
        """Open an app (html/xml) file in a new tab."""
        self.tab_frame.setUpdatesEnabled(False)
        new_tab_window = TabHtmlWindow(self, self.parent_process)
        # set current working directory
        directory = os.path.dirname(filename)
        if directory != '':
            os.chdir(directory)
        title = new_tab_window.open_file(filename)
        index = self.tab_frame.addTab(new_tab_window, title)
        self.tab_frame.setCurrentIndex(index)
        self.tab_frame.setUpdatesEnabled(True)
        self.enable_menu_items()

    def open_workspace(self, filename):
        """Open every app file listed in a pickled workspace file."""
        # pickle requires a binary-mode file object
        with open(filename, 'rb') as file:
            file_list = pickle.load(file)
        for f in file_list:
            # set current working directory
            os.chdir(os.path.dirname(f))
            # open the file
            self.open_html_file(f)
        self.enable_menu_items()

    def save_workspace(self, filename):
        """Save the list of open app files as a pickled workspace file."""
        tf = self.tab_frame
        open_files = []
        initial_index = tf.currentIndex()
        n_pages = tf.count()
        self.tab_frame.setUpdatesEnabled(False)
        for i in range(n_pages):
            tf.setCurrentIndex(i)
            open_files.append(tf.currentWidget().html_file)
        # pickle requires a binary-mode file object, even for protocol 0
        with open(filename, 'wb') as file:
            pickle.dump(open_files, file, 0)
        tf.setCurrentIndex(initial_index)
        self.tab_frame.setUpdatesEnabled(True)

    def menu_about(self):
        """Help > About: show the about dialog."""
        QtWidgets.QMessageBox.about(self, 'About Pythics',
            """Python Instrument Control System, also known as Pythics
version 1.0.0

Copyright 2008 - 2019 Brian R. D'Urso

Pythics is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.

Pythics is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public
License along with Pythics. If not, see
<http://www.gnu.org/licenses/>.""")

    def menu_help(self):
        """Help > Open Help: open the bundled help file in a tab."""
        # build the path to the help file
        directory = os.path.dirname(inspect.getfile(pythics))
        filename = os.path.join(directory, 'help', 'help.xml')
        # open it
        self.open_html_file(filename)
        self.enable_menu_items()
#
# TabHtmlWindow - one for each primary html file
#
class TabHtmlWindow(pythics.html.HtmlWindow):
    """One tab per primary html file.

    Wraps an HtmlWindow, owns the child process that runs the app's code,
    and handles parameter load/save for the tab.
    """

    def __init__(self, parent, parent_process):
        self.main_window = parent
        self.parent_process = parent_process
        self.title = None
        super(TabHtmlWindow, self).__init__(parent, 'pythics.controls',
                                            multiprocessing.get_logger())
        # force widgets to redraw when the scrollbars are released
        # this is needed for animated matplotlib widgets
        self.verticalScrollBar().sliderReleased.connect(self.redraw)
        self.horizontalScrollBar().sliderReleased.connect(self.redraw)

    def redraw(self):
        """Ask the child process to redraw this tab's controls."""
        if not self.error:
            self.child_process.redraw()

    def close(self):
        """Stop the child process associated with this tab."""
        if not self.error:
            try:
                self.parent_process.stop_child_process(self.child_process)
            except Exception:
                self.logger.exception('Error while closing process.')

    def set_title(self, title):
        """Set the tab title."""
        self.title = title

    def open_file(self, filename):
        """Open an html/xml app file and start its child process.

        @return The tab title (the file's base name on first open).
        """
        self.error = False
        # record path info before the try block so the error handler
        # below always has a valid file name to report
        self.html_file = filename
        self.html_path, file_name_only = os.path.split(filename)
        self.default_parameter_filename = 'defaults.txt'
        try:
            self.main_window.set_status_text('Loading file %s.' % filename)
            anonymous_controls, controls = pythics.html.HtmlWindow.open_file(self, filename)
            self.child_process = self.parent_process.new_child_process(self.html_path, file_name_only, anonymous_controls, controls)
            self.child_process.start()
        except Exception:
            message = 'Error while opening xml file %s\n' % file_name_only + traceback.format_exc(0)
            QtWidgets.QMessageBox.critical(self, 'Error', message, QtWidgets.QMessageBox.Ok)
            self.logger.exception('Error while opening xml file.')
            self.error = True
        self.main_window.set_status_text('')
        if self.title is None:
            self.title = file_name_only
        self.set_title(self.title)
        return self.title

    def reload_file(self):
        """Close, reset, and re-open this tab's app file."""
        self.close()
        self.reset()
        # set current working directory
        os.chdir(os.path.dirname(self.html_file))
        return self.open_file(self.html_file)

    # parameter save and recall functions for internal use
    def load_parameters(self, filename='', default=False):
        """Load control parameters from a file.

        With default=True the per-app default parameter file is used;
        otherwise the user is prompted unless 'filename' is given.
        """
        if not self.error:
            try:
                if default:
                    # NOTE(review): this tests self.default_parameter_filename
                    # but joins self.child_process.default_parameter_filename;
                    # confirm which attribute is intended here.
                    if not os.path.isabs(self.default_parameter_filename):
                        filename = os.path.join(self.html_path,
                            self.child_process.default_parameter_filename)
                else:
                    if filename == '':
                        filename = self.main_window.get_open_filename('data (*.*)')
                    elif not os.path.isabs(filename):
                        filename = os.path.join(self.html_path, filename)
                if filename != '':
                    try:
                        self.child_process.load_parameters(filename)
                    except IOError as error:
                        (errno, strerror) = error.args
                        self.logger.error('Error (%s) opening parameter file: %s.' % (errno, strerror))
            except Exception:
                self.logger.exception('Error while loading parameters.')

    def save_parameters(self, filename='', default=False):
        """Save control parameters to a file.

        With default=True the per-app default parameter file is used;
        otherwise the user is prompted unless 'filename' is given.
        """
        if not self.error:
            try:
                if default:
                    # NOTE(review): same attribute mismatch as in
                    # load_parameters above -- confirm intent.
                    if not os.path.isabs(self.default_parameter_filename):
                        filename = os.path.join(self.html_path,
                            self.child_process.default_parameter_filename)
                else:
                    if filename == '':
                        filename = self.main_window.get_save_filename('data (*.*)')
                    elif not os.path.isabs(filename):
                        filename = os.path.join(self.html_path, filename)
                if filename != '':
                    self.child_process.save_parameters(filename)
            except Exception:
                # fixed: previously logged 'loading' on a save failure
                self.logger.exception('Error while saving parameters.')
class OptionsProcessor(object):
    """Parses the pythics-run.py command line into simple attributes."""

    def __init__(self):
        # configure the logger; warnings only unless -v/-d raise verbosity
        self.logger = multiprocessing.log_to_stderr()
        #self.logger.setLevel(logging.DEBUG)
        #self.logger.setLevel(logging.INFO)
        self.logger.setLevel(logging.WARNING)
        self.first_app = ""
        self.first_workspace = ""
        self.compact = False
        self.shutdown_on_exit = False

    def usage(self):
        """Print the command-line help text."""
        print("""\
Usage: pythics-run.py [options]
Options:
-h | --help show help text then exit
-a | --app selects startup app
-w | --workspace selects startup workspace
-c | --compact run in compact mode with simplified controls for small screens
-s | --shutdown shutdown computer on exit (*nix only)
-v | --verbose selects verbose mode
-d | --debug selects debug mode""")

    def options(self):
        """Parse sys.argv and store the results on this object."""
        try:
            opts, args = getopt.getopt(
                sys.argv[1:], 'ha:w:csvd',
                ['help', 'app=', 'workspace=', 'compact', 'shutdown', 'verbose', 'debug'])
        except getopt.GetoptError as err:
            # e.g. "option -x not recognized": report, show usage, bail out
            print(err)
            self.usage()
            sys.exit(2)
        for flag, value in opts:
            if flag in ('-h', '--help'):
                self.usage()
                sys.exit(0)
            elif flag in ('-v', '--verbose'):
                self.logger.setLevel(logging.INFO)
            elif flag in ('-d', '--debug'):
                self.logger.setLevel(logging.DEBUG)
            elif flag in ('-a', '--app'):
                self.logger.info('opening app ' + value)
                self.first_app = value
            elif flag in ('-w', '--workspace'):
                self.logger.info('opening workspace ' + value)
                self.first_workspace = value
            elif flag in ('-s', '--shutdown'):
                self.logger.info('shutdown on exit')
                self.shutdown_on_exit = True
            elif flag in ('-c', '--compact'):
                self.logger.info('compact mode')
                self.compact = True
            else:
                assert False, 'unhandled option'
#
# create and start the application
#
if __name__ == '__main__':
    # Shared-state manager used to communicate with child processes.
    mp_manager = multiprocessing.Manager()
    qt_app = QtWidgets.QApplication(sys.argv)
    parent = pythics.parent.Parent(mp_manager)
    # Parse the command line before building the window so the compact
    # flag can influence the UI layout.
    opts = OptionsProcessor()
    opts.options()
    main_window = MainWindow(parent, qt_app, compact=opts.compact)
    main_window.show()
    parent.start()
    # Open a startup workspace or app, if one was given and exists.
    if os.path.isfile(opts.first_workspace):
        main_window.open_workspace(opts.first_workspace)
    elif os.path.isfile(opts.first_app):
        main_window.open_html_file(opts.first_app)
    main_window.shutdown_on_exit = opts.shutdown_on_exit
    qt_app.exec_()
|
dursobr/Pythics
|
pythics/start.py
|
Python
|
gpl-3.0
| 27,005
|
[
"Brian"
] |
c20f023f62bcec64cd451b62f37ef6b4f5c472fd7a9341066cd823f7a07d6946
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import sys
import hashlib
import warnings
import itertools
import collections
import qcelemental as qcel
from .exceptions import *
from .psiutil import search_file
from .molecule import Molecule
from .libmintsgshell import ShellInfo
from .libmintsbasissetparser import Gaussian94BasisSetParser
from .basislist import corresponding_basis, corresponding_zeta
# Module-level registry of basis sets keyed by name; empty here —
# presumably populated elsewhere in the package. TODO confirm value type.
basishorde = {}
class BasisSet(object):
"""Basis set container class
Reads the basis set from a checkpoint file object. Also reads the molecule
from the checkpoint file storing the information in an internal Molecule class
which can be accessed using molecule().
"""
# <<< Globals >>>
# Has static information been initialized?
initialized_shared = False
# Global arrays of x, y, z exponents (Need libmint for max ang mom)
LIBINT_MAX_AM = 6 # TODO
exp_ao = [[] for l in range(LIBINT_MAX_AM)]
def __init__(self, *args):
# <<< Basic BasisSet Information >>>
# The name of this basis set (e.g. "BASIS", "RI BASIS")
self.name = None
# Array of gaussian shells
self.shells = None
# Molecule object.
self.molecule = None
# Shell information
self.atom_basis_shell = None
# <<< Scalars >>>
# Number of atomic orbitals (Cartesian)
self.PYnao = None
# Number of basis functions (either cartesian or spherical)
self.PYnbf = None
# The number of unique primitives
self.n_uprimitive = None
# The number of shells
self.n_shells = None
# The number of primitives
self.PYnprimitive = None
# The maximum angular momentum
self.PYmax_am = None
# The maximum number of primitives in a shell
self.PYmax_nprimitive = None
# Whether the basis set is uses spherical basis functions or not
self.puream = None
# <<< Arrays >>>
# The number of primitives (and exponents) in each shell
self.n_prim_per_shell = None
# The first (Cartesian) atomic orbital in each shell
self.shell_first_ao = None
# The first (Cartesian / spherical) basis function in each shell
self.shell_first_basis_function = None
# Shell number to atomic center.
self.shell_center = None
# Which shell does a given (Cartesian / spherical) function belong to?
self.function_to_shell = None
# Which shell does a given Cartesian function belong to?
self.ao_to_shell = None
# Which center is a given function on?
self.function_center = None
# How many shells are there on each center?
self.center_to_nshell = None
# What's the first shell on each center?
self.center_to_shell = None
# The flattened lists of unique exponents
self.uexponents = None
# The flattened lists of unique contraction coefficients (normalized)
self.ucoefficients = None
# The flattened lists of unique contraction coefficients (as provided by the user)
self.uoriginal_coefficients = None
# The flattened lists of ERD normalized contraction coefficients
self.uerd_coefficients = None
# The flattened list of Cartesian coordinates for each atom
self.xyz = None
# label/basis to number of core electrons mapping for ECPs
self.ecp_coreinfo = None
# Divert to constructor functions
if len(args) == 0:
self.constructor_zero_ao_basis()
elif len(args) == 2 and \
isinstance(args[0], BasisSet) and \
isinstance(args[1], int):
self.constructor_basisset_center(*args)
elif len(args) == 3 and \
isinstance(args[0], str) and \
isinstance(args[1], Molecule) and \
isinstance(args[2], collections.OrderedDict):
self.constructor_role_mol_shellmap(*args)
elif len(args) == 4 and \
isinstance(args[0], str) and \
isinstance(args[1], Molecule) and \
isinstance(args[2], collections.OrderedDict) and \
isinstance(args[3], bool):
self.constructor_role_mol_shellmap(*args)
else:
raise ValidationError('BasisSet::constructor: Inappropriate configuration of constructor arguments')
def __eq__(self, other):
"""Naive equality test. Haven't considered exp/coeff distribution among shells or AM"""
if isinstance(other, self.__class__):
if ((self.name == other.name) and
(self.puream == other.puream) and
(self.PYnao == other.PYnao) and
(self.PYnbf == other.PYnbf) and
(self.n_prim_per_shell == other.n_prim_per_shell) and
(self.ucoefficients == other.ucoefficients) and
(self.uexponents == other.uexponents)):
return True
else:
return False
return False
def __ne__(self, other):
return not self.__eq__(other)
def allclose(self, other, atol: float=1.e-8, verbose: int=1):
"""Equality test. Sorts the coefficients so handles different shell orderings. Print any failed exp/coeff differences if verbose > 1."""
sc, se = (list(t) for t in zip(*sorted(zip(self.uoriginal_coefficients, self.uexponents))))
oc, oe = (list(t) for t in zip(*sorted(zip(other.uoriginal_coefficients, other.uexponents))))
if isinstance(other, self.__class__):
if ((self.name == other.name) and
(self.puream == other.puream) and
(self.PYnao == other.PYnao) and
(self.PYnbf == other.PYnbf) and
(self.n_prim_per_shell == other.n_prim_per_shell) and
(all(abs(isc - ioc) < atol for isc, ioc in zip(sc, oc))) and
(all(abs(ise - ioe) < atol for ise, ioe in zip(se, oe)))):
return True
else:
if verbose > 1:
print("")
for idx in range(len(sc)):
if not((abs(sc[idx] - oc[idx]) < atol) and (abs(se[idx] - oe[idx]) < atol)):
print(f"{sc[idx]:20.12f} {oc[idx]:20.12f}\t\t{sc[idx] - oc[idx]:8.1E}\t\t\t{se[idx]:20.12f} {oe[idx]:20.12f}\t\t{se[idx] - oe[idx]:8.1E}")
return False
return False
# <<< Methods for Construction >>>
@classmethod
def initialize_singletons(cls):
"""Initialize singleton values that are shared by all basis set objects."""
# Populate the exp_ao arrays
for l in range(cls.LIBINT_MAX_AM):
for i in range(l + 1):
x = l - i
for j in range(i + 1):
y = i - j
z = j
cls.exp_ao[l].append([x, y, z])
cls.initialized_shared = True
def constructor_zero_ao_basis(self):
"""Constructs a zero AO basis set"""
if not self.initialized_shared:
self.initialize_singletons()
# Add a dummy atom at the origin, to hold this basis function
self.molecule = Molecule()
self.molecule.add_atom(0, 0.0, 0.0, 0.0, 'X')
# Fill with data representing a single S function, at the origin, with 0 exponent
self.n_uprimitive = 1
self.n_shells = 1
self.PYnprimitive = 1
self.PYnao = 1
self.PYnbf = 1
self.uerd_coefficients = [1.0]
self.n_prim_per_shell = [1]
self.uexponents = [0.0]
self.ucoefficients = [1.0]
self.uoriginal_coefficients = [1.0]
self.shell_first_ao = [0]
self.shell_first_basis_function = [0]
self.ao_to_shell = [0]
self.function_to_shell = [0]
self.function_center = [0]
self.shell_center = [0]
self.center_to_nshell = [0]
self.center_to_shell = [0]
self.puream = False
self.PYmax_am = 0
self.PYmax_nprimitive = 1
self.xyz = [0.0, 0.0, 0.0]
self.name = '(Empty Basis Set)'
self.shells = []
self.shells.append(ShellInfo(0, self.uoriginal_coefficients,
self.uexponents, 'Cartesian', 0, self.xyz, 0))
    def constructor_role_mol_shellmap(self, role, mol, shell_map, is_ecp = False):
        """The most commonly used constructor. Extracts basis set name for *role*
        from each atom of *mol*, looks up basis and role entries in the
        *shell_map* dictionary, retrieves the ShellInfo objects and returns
        the BasisSet.

        Parameters
        ----------
        role : str
            Basis-set role label (e.g. 'BASIS'); stored as this basis set's name.
        mol : Molecule
            Molecule whose atom entries carry a basis assignment for *role*.
        shell_map : collections.OrderedDict
            Nested mapping, atom label -> basis name -> list of ShellInfo.
        is_ecp : bool
            When True, ShellInfo objects are constructed with pt='Normalized'.
        """
        self.molecule = mol
        self.name = role
        self.xyz = self.molecule.geometry() # not used in libmints but this seems to be the intent
        self.atom_basis_shell = shell_map
        natom = self.molecule.natom()
        # Singletons
        if not self.initialized_shared:
            self.initialize_singletons()
        # These will tell us where the primitives for [basis][symbol] start and end in the compact array
        primitive_start = {}
        primitive_end = {}
        # First, loop over the unique primitives, and store them
        uexps = []
        ucoefs = []
        uoriginal_coefs = []
        uerd_coefs = []
        rpowers = []
        self.n_uprimitive = 0
        for symbolfirst, symbolsecond in shell_map.items():
            label = symbolfirst
            basis_map = symbolsecond
            primitive_start[label] = {}
            primitive_end[label] = {}
            for basisfirst, basissecond in basis_map.items():
                basis = basisfirst
                shells = basis_map[basis] # symbol --> label
                primitive_start[label][basis] = self.n_uprimitive # symbol --> label
                for i in range(len(shells)):
                    shell = shells[i]
                    # Flatten every primitive of every shell into the unique arrays
                    for prim in range(shell.nprimitive()):
                        rpowers.append(shell.rpower(prim))
                        uexps.append(shell.exp(prim))
                        ucoefs.append(shell.coef(prim))
                        uoriginal_coefs.append(shell.original_coef(prim))
                        uerd_coefs.append(shell.erd_coef(prim))
                        self.n_uprimitive += 1
                primitive_end[label][basis] = self.n_uprimitive # symbol --> label
        # Count basis functions, shells and primitives
        self.n_shells = 0
        self.PYnprimitive = 0
        self.PYnao = 0
        self.PYnbf = 0
        for n in range(natom):
            atom = self.molecule.atom_entry(n)
            basis = atom.basisset(role)
            label = atom.label() # symbol --> label
            shells = shell_map[label][basis] # symbol --> label
            for i in range(len(shells)):
                shell = shells[i]
                nprim = shell.nprimitive()
                self.PYnprimitive += nprim
                self.n_shells += 1
                self.PYnao += shell.ncartesian()
                self.PYnbf += shell.nfunction()
        # Allocate arrays
        self.n_prim_per_shell = [0] * self.n_shells
        # The unique primitives
        self.uexponents = [0.0] * self.n_uprimitive
        self.ucoefficients = [0.0] * self.n_uprimitive
        self.uoriginal_coefficients = [0.0] * self.n_uprimitive
        self.uerd_coefficients = [0.0] * self.n_uprimitive
        for i in range(self.n_uprimitive):
            self.uexponents[i] = uexps[i]
            self.ucoefficients[i] = ucoefs[i]
            self.uoriginal_coefficients[i] = uoriginal_coefs[i]
            self.uerd_coefficients[i] = uerd_coefs[i]
        self.shell_first_ao = [0] * self.n_shells
        self.shell_first_basis_function = [0] * self.n_shells
        self.shells = [None] * self.n_shells
        self.ao_to_shell = [0] * self.PYnao
        self.function_to_shell = [0] * self.PYnbf
        self.function_center = [0] * self.PYnbf
        self.shell_center = [0] * self.n_shells
        self.center_to_nshell = [0] * natom
        self.center_to_shell = [0] * natom
        # Now loop over all atoms, and point to the appropriate unique data
        shell_count = 0
        ao_count = 0
        bf_count = 0
        xyz_ptr = [0.0, 0.0, 0.0] # libmints seems to be always passing ShellInfo zeros, so following suit
        self.puream = False
        self.PYmax_am = 0
        self.PYmax_nprimitive = 0
        for n in range(natom):
            atom = self.molecule.atom_entry(n)
            basis = atom.basisset(role)
            label = atom.label() # symbol --> label
            shells = shell_map[label][basis] # symbol --> label
            ustart = primitive_start[label][basis] # symbol --> label
            uend = primitive_end[label][basis] # symbol --> label
            nshells = len(shells)
            self.center_to_nshell[n] = nshells
            self.center_to_shell[n] = shell_count
            atom_nprim = 0
            for i in range(nshells):
                thisshell = shells[i]
                self.shell_first_ao[shell_count] = ao_count
                self.shell_first_basis_function[shell_count] = bf_count
                shell_nprim = thisshell.nprimitive()
                am = thisshell.am()
                self.PYmax_nprimitive = max(shell_nprim, self.PYmax_nprimitive)
                self.PYmax_am = max(am, self.PYmax_am)
                self.shell_center[shell_count] = n
                self.puream = thisshell.is_pure()
                # [tst:tsp] is this shell's slice of the flattened unique-primitive arrays
                tst = ustart + atom_nprim
                tsp = ustart + atom_nprim + shell_nprim
                self.shells[shell_count] = ShellInfo(am,
                                                     self.uoriginal_coefficients[tst:tsp],
                                                     self.uexponents[tst:tsp],
                                                     'Pure' if self.puream else 'Cartesian',
                                                     n, xyz_ptr, bf_count, pt='Normalized' if is_ecp else 'Unnormalized',
                                                     rpowers=rpowers[tst:tsp])
                for thisbf in range(thisshell.nfunction()):
                    self.function_to_shell[bf_count] = shell_count
                    self.function_center[bf_count] = n
                    bf_count += 1
                for thisao in range(thisshell.ncartesian()):
                    self.ao_to_shell[ao_count] = shell_count
                    ao_count += 1
                atom_nprim += shell_nprim
                shell_count += 1
            # Sanity check: primitives consumed must match the [ustart, uend) slice recorded earlier
            if atom_nprim != uend - ustart:
                raise ValidationError("Problem with nprimitive in basis set construction!")
    def constructor_basisset_center(self, bs, center):
        """
        * Creates a new basis set object for an atom, from an existing basis set
        * bs: the basis set to copy data from
        * center: the atom in bs to copy over
        """
        # Singletons; these should've been initialized by this point, but just in case
        if not self.initialized_shared:
            self.initialize_singletons()
        # First, find the shells we need, and grab the data
        uexps = []
        ucoefs = []
        uoriginal_coefs = []
        uerd_coefs = []
        self.name = bs.name
        self.n_shells = 0
        self.n_uprimitive = 0
        self.PYnao = 0
        self.PYnbf = 0
        # First pass: collect primitive data and counts for shells on *center* only
        for shelln in range(bs.nshell()):
            shell = bs.shell(shelln)
            if shell.ncenter() == center:
                nprim = shell.nprimitive()
                for prim in range(nprim):
                    uexps.append(shell.exp(prim))
                    ucoefs.append(shell.coef(prim))
                    uoriginal_coefs.append(shell.original_coef(prim))
                    uerd_coefs.append(shell.erd_coef(prim))
                    self.n_uprimitive += 1
                self.n_shells += 1
                self.PYnao += shell.ncartesian()
                self.PYnbf += shell.nfunction()
        self.PYnprimitive = self.n_uprimitive
        # Create a "molecule", i.e., an atom, with 1 fragment
        mol = bs.molecule
        self.molecule = Molecule.from_arrays(elem=[mol.symbol(center)],
                                             geom=mol.xyz(center),
                                             units='Bohr',
                                             fix_com=True,
                                             verbose=0)
        # Allocate arrays
        self.n_prim_per_shell = [0] * self.n_shells
        # The unique primitives
        self.uexponents = [0.0] * self.n_uprimitive
        self.ucoefficients = [0.0] * self.n_uprimitive
        self.uoriginal_coefficients = [0.0] * self.n_uprimitive
        self.uerd_coefficients = [0.0] * self.n_uprimitive
        for i in range(self.n_uprimitive):
            self.uexponents[i] = uexps[i]
            self.ucoefficients[i] = ucoefs[i]
            self.uoriginal_coefficients[i] = uoriginal_coefs[i]
            self.uerd_coefficients[i] = uerd_coefs[i]
        self.shell_first_ao = [0] * self.n_shells
        self.shell_first_basis_function = [0] * self.n_shells
        self.shells = [None] * self.n_shells
        self.ao_to_shell = [0] * self.PYnao
        self.function_to_shell = [0] * self.PYnbf
        self.function_center = [0] * self.PYnbf
        self.shell_center = [0] * self.n_shells
        # Single-atom basis: one entry each, indexed by 0
        self.center_to_nshell = [0]
        self.center_to_shell = [0]
        self.xyz = mol.xyz(center)
        # Now loop over shell for this atom, and point to the appropriate unique data
        shell_count = 0
        ao_count = 0
        bf_count = 0
        self.puream = False
        self.PYmax_am = 0
        self.PYmax_nprimitive = 0
        prim_count = 0
        for shelln in range(bs.nshell()):
            shell = bs.shell(shelln)
            if shell.ncenter() == center:
                self.center_to_nshell[0] = self.n_shells
                #self.center_to_shell[0] = shell_count # diff from libmints
                self.shell_first_ao[shell_count] = ao_count
                self.shell_first_basis_function[shell_count] = bf_count
                shell_nprim = shell.nprimitive()
                am = shell.am()
                self.PYmax_nprimitive = shell_nprim if shell_nprim > self.PYmax_nprimitive else self.PYmax_nprimitive
                self.PYmax_am = max(self.PYmax_am, am)
                self.shell_center[shell_count] = center
                self.puream = shell.is_pure()
                # [tst:tsp] is this shell's slice of the flattened primitive arrays
                tst = prim_count
                tsp = prim_count + shell_nprim
                self.shells[shell_count] = ShellInfo(am,
                                                     self.uoriginal_coefficients[tst:tsp],
                                                     self.uexponents[tst:tsp],
                                                     'Pure' if self.puream else 'Cartesian',
                                                     center, self.xyz, bf_count, pt='Unnormalized', rpowers=None)
                # NOTE(review): prints every copied shell to stdout — looks like
                # leftover debugging; confirm intent before removing.
                self.shells[shell_count].pyprint()
                for thisbf in range(shell.nfunction()):
                    self.function_to_shell[bf_count] = shell_count
                    self.function_center[bf_count] = center
                    bf_count += 1
                for thisao in range(shell.ncartesian()):
                    self.ao_to_shell[ao_count] = shell_count
                    ao_count += 1
                shell_count += 1
                prim_count += shell_nprim
# <<< Methods for Construction by Another Name >>>
@staticmethod
def zero_ao_basis_set():
"""Returns an empty basis set object.
Returns a BasisSet object that actually has a single s-function
at the origin with an exponent of 0.0 and contraction of 1.0.
* @return A new empty BasisSet object.
"""
# In the new implementation, we simply call the default constructor
return BasisSet()
def atomic_basis_set(self, center):
"""Return a BasisSet object containing all shells at center i (0-index)
* Used for Atomic HF computations for SAD Guesses
* @param center Atomic center to provide a basis object for.
* @returns A new basis set object for the atomic center.
"""
return BasisSet(self, center)
@staticmethod
def build(molecule, shells):
"""Builder factory method
* @param molecule the molecule to build the BasisSet around
* @param shells array of *atom-numbered* ShellInfo to build the BasisSet from
* @return BasisSet corresponding to this molecule and set of shells
"""
raise FeatureNotImplemented('BasisSet::build')
#TRIAL# @staticmethod
#TRIAL# def pyconstruct_combined(mol, keys, targets, fitroles, others):
#TRIAL#
#TRIAL# # make sure the lengths are all the same
#TRIAL# if len(keys) != len(targets) or len(keys) != len(fitroles):
#TRIAL# raise ValidationError("""Lengths of keys, targets, and fitroles must be equal""")
#TRIAL#
#TRIAL# # Create (if necessary) and update qcdb.Molecule
#TRIAL# if isinstance(mol, basestring):
#TRIAL# mol = Molecule(mol)
#TRIAL# returnBasisSet = False
#TRIAL# elif isinstance(mol, Molecule):
#TRIAL# returnBasisSet = True
#TRIAL# else:
#TRIAL# raise ValidationError("""Argument mol must be psi4string or qcdb.Molecule""")
#TRIAL# mol.update_geometry()
#TRIAL#
#TRIAL# # load in the basis sets
#TRIAL# sets = []
#TRIAL# name = ""
#TRIAL# for at in range(len(keys)):
#TRIAL# bas = BasisSet.pyconstruct(mol, keys[at], targets[at], fitroles[at], others[at])
#TRIAL# name += targets[at] + " + "
#TRIAL# sets.append(bas)
#TRIAL#
#TRIAL# name = name[:-3].strip()
#TRIAL# # work our way through the sets merging them
#TRIAL# combined_atom_basis_shell = OrderedDict()
#TRIAL# for at in range(len(sets)):
#TRIAL# atom_basis_shell = sets[at].atom_basis_shell
#TRIAL#
#TRIAL# for label, basis_map in atom_basis_shell.items():
#TRIAL# if label not in combined_atom_basis_shell:
#TRIAL# combined_atom_basis_shell[label] = OrderedDict()
#TRIAL# combined_atom_basis_shell[label][name] = []
#TRIAL# for basis, shells in basis_map.items():
#TRIAL# combined_atom_basis_shell[label][name].extend(shells)
#TRIAL#
#TRIAL# #for label, basis_map in combined_atom_basis_shell.items():
#TRIAL# # # sort the shells by angular momentum
#TRIAL# # combined_atom_basis_shell[label][name] = sorted(combined_atom_basis_shell[label][name], key=lambda shell: she
#TRIAL#
#TRIAL# # Molecule and parser prepped, call the constructor
#TRIAL# mol.set_basis_all_atoms(name, "CABS")
#TRIAL#
#TRIAL# # Construct the grand BasisSet for mol
#TRIAL# basisset = BasisSet("CABS", mol, combined_atom_basis_shell)
#TRIAL#
#TRIAL# # Construct all the one-atom BasisSet-s for mol's CoordEntry-s
#TRIAL# for at in range(mol.natom()):
#TRIAL# oneatombasis = BasisSet(basisset, at)
#TRIAL# oneatombasishash = hashlib.sha1(oneatombasis.print_detail(numbersonly=True).encode('utf-8')).hexdigest()
#TRIAL# mol.set_shell_by_number(at, oneatombasishash, role="CABS")
#TRIAL# mol.update_geometry() # re-evaluate symmetry taking basissets into account
#TRIAL#
#TRIAL# text = """ => Creating Basis Set <=\n\n"""
#TRIAL# text += """ Role: %s\n""" % (fitroles)
#TRIAL# text += """ Keyword: %s\n""" % (keys)
#TRIAL# text += """ Name: %s\n""" % (name)
#TRIAL#
#TRIAL# if returnBasisSet:
#TRIAL# print(text)
#TRIAL# return basisset
#TRIAL# else:
#TRIAL# bsdict = {}
#TRIAL# bsdict['message'] = text
#TRIAL# bsdict['name'] = basisset.name
#TRIAL# bsdict['puream'] = int(basisset.has_puream())
#TRIAL# bsdict['shell_map'] = basisset.export_for_libmints("CABS")
#TRIAL# return bsdict
    @staticmethod
    def pyconstruct(mol, key, target, fitrole='ORBITAL', other=None, return_atomlist=False, return_dict=False, verbose=1):
        """Builds a BasisSet object for *mol* (either a qcdb.Molecule or
        a string that can be instantiated into one) from basis set
        specifications passed in as python functions or as a string that
        names a basis to be applied to all atoms. Always required is the
        keyword *key* and string/function *target* of the basis to be
        constructed. For orbital basis sets, *key* will likely be 'BASIS'
        and, together with *target*, these arguments suffice.
        ``pyconstruct(smol, "BASIS", basisspec_psi4_yo_631pg_d_p_)``
        ``pyconstruct(mol, "BASIS", "6-31+G**")``
        When building an auxiliary basis, *key* is again the keyword,
        *target* is the string or function for the fitting basis (this
        may also be an empty string). In case the fitting basis isn't
        fully specified, also provide a *fitrole* and the string/function
        of the orbital basis as *other*, so that orbital hints can be
        used to look up a suitable default basis in BasisFamily.
        ``pyconstruct(smol, "DF_BASIS_MP2", basisspec_psi4_yo_ccpvdzri, 'RIFIT', basisspec_psi4_yo_631pg_d_p_)``
        ``pyconstruct(mol, "DF_BASIS_MP2", "", "RIFIT", "6-31+G(d,p)")``
        Parameters
        ----------
        mol : :py:class:`qcdb.Molecule` or dict or str
            If not a :py:class:`qcdb.Molecule`, something that can be converted into one.
            If the latter, the basisset dict is returned rather than the BasisSet object.
            If you've got a psi4.core.Molecule, pass `qcdb.Molecule(psimol.to_dict())`.
        key : {'BASIS', 'DF_BASIS_SCF', 'DF_BASIS_MP2', 'DF_BASIS_CC'}
            Label (effectively Psi4 keyword) to append the basis on the molecule.
        target : str or function
            Defines the basis set to be constructed. Can be a string (naming a
            basis file) or a function (multiple files, shells).
            Required for `fitrole='ORBITAL'`. For auxiliary bases to be built
            entirely from orbital default, can be empty string.
        fitrole : {'ORBITAL', 'JKFIT', 'RIFIT'}
        other :
            Only used when building fitting bases. Like `target` only supplies
            the definitions for the orbital basis.
        return_atomlist : bool, optional
            Build one-atom basis sets (for SAD) rather than one whole-Mol basis set
        return_dict : bool, optional
            Additionally return the dictionary representation of built BasisSet
        Returns
        -------
        bas : :py:class:`qcdb.BasisSet`
            Built BasisSet object for `mol`.
        dbas : dict, optional
            Only returned if `return_dict=True`. Suitable for feeding to libmints.
        """
        # Orbital-only mode: *target* is the orbital basis and no aux basis is built
        orbonly = True if (fitrole == 'ORBITAL' and other is None) else False
        if orbonly:
            orb = target
            aux = None
        else:
            orb = other
            aux = target
        if verbose >= 2:
            print('BasisSet::pyconstructP', 'key =', key, 'aux =', aux, 'fitrole =', fitrole, 'orb =', orb, 'orbonly =', orbonly) #, mol)
        # Create (if necessary) and update qcdb.Molecule
        if not isinstance(mol, Molecule):
            mol = Molecule(mol)
        mol.update_geometry()
        # Apply requested basis set(s) to the molecule
        # - basstrings only a temp object so using fitrole as dict key instead of psi4 keyword
        # - error checking not needed since C-side already checked for NULL ptr
        mol.clear_basis_all_atoms()
        # TODO now need to clear shells, too
        basstrings = collections.defaultdict(dict)
        # Dispatch on how the orbital basis was specified: callable, known
        # basishorde entry, or plain basis-name string
        if orb is None or orb == '':
            raise ValidationError("""Orbital basis argument must not be empty.""")
        elif callable(orb):
            basstrings['BASIS'] = orb(mol, 'BASIS')
            callby = orb.__name__.replace('basisspec_psi4_yo__', '')
        elif orb in basishorde:
            basstrings['BASIS'] = basishorde[orb](mol, 'BASIS')
            callby = orb
        elif isinstance(orb, str):
            mol.set_basis_all_atoms(orb, role='BASIS')
            callby = orb
        else:
            raise ValidationError("""Orbital basis argument must be function that applies basis sets to Molecule or a string of the basis to be applied to all atoms.""")
        # Same dispatch for the auxiliary basis, when one is requested
        if not orbonly:
            if aux is None or aux == '':
                callby = '({} Aux)'.format(callby)
            elif callable(aux):
                basstrings[fitrole] = aux(mol, fitrole)
                callby = aux.__name__.replace('basisspec_psi4_yo__', '')
            elif isinstance(aux, str):
                mol.set_basis_all_atoms(aux, role=fitrole)
                callby = aux
            else:
                raise ValidationError("""Auxiliary basis argument must be function that applies basis sets to Molecule or a string of the basis to be applied to all atoms.""")
        # Not like we're ever using a non-G94 format
        parser = Gaussian94BasisSetParser()
        # Molecule and parser prepped, call the constructor
        bs, msg, ecp = BasisSet.construct(parser, mol,
                                          'BASIS' if fitrole == 'ORBITAL' else fitrole,
                                          None if fitrole == 'ORBITAL' else fitrole,
                                          basstrings['BASIS'] if fitrole == 'ORBITAL' else basstrings[fitrole],
                                          return_atomlist=return_atomlist,
                                          verbose=verbose)
        text = """ => Loading Basis Set <=\n\n"""
        text += """ Name: %s\n""" % (callby.upper())
        text += """ Role: %s\n""" % (fitrole)
        text += """ Keyword: %s\n""" % (key)
        text += msg
        if return_atomlist:
            if return_dict:
                # One dict per single-atom BasisSet, for feeding to libmints
                atom_basis_list = []
                for atbs in bs:
                    bsdict = {}
                    bsdict['message'] = text
                    bsdict['key'] = key
                    bsdict['name'] = callby.upper()
                    bsdict['blend'] = atbs.name.upper()
                    bsdict['puream'] = int(atbs.has_puream())
                    bsdict['shell_map'] = atbs.export_for_libmints('BASIS' if fitrole == 'ORBITAL' else fitrole)
                    if ecp:
                        bsdict['ecp_shell_map'] = ecp.export_for_libmints('BASIS')
                    bsdict['molecule'] = atbs.molecule.to_dict(force_c1=True)
                    atom_basis_list.append(bsdict)
                return bs, atom_basis_list
            else:
                return bs
        if return_dict:
            bsdict = {}
            bsdict['message'] = text
            bsdict['key'] = key
            bsdict['name'] = callby.upper()
            bsdict['blend'] = bs.name.upper()
            bsdict['puream'] = int(bs.has_puream())
            bsdict['shell_map'] = bs.export_for_libmints('BASIS' if fitrole == 'ORBITAL' else fitrole)
            if ecp:
                bsdict['ecp_shell_map'] = ecp.export_for_libmints('BASIS')
            return bs, bsdict
        else:
            return bs
    @classmethod
    def construct(cls, parser, mol, key, deffit=None, basstrings=None, return_atomlist=False, verbose=1):
        """Returns a new BasisSet object configured from the *mol*
        Molecule object for *key* (generally a Psi4 keyword: BASIS,
        DF_BASIS_SCF, etc.). Fails utterly if a basis has not been set for
        *key* for every atom in *mol*, unless *deffit* is set (JFIT,
        JKFIT, or RIFIT), whereupon empty atoms are assigned to *key*
        from the :py:class:`~BasisFamily`. This function is significantly
        re-worked from its libmints analog.
        Parameters
        ----------
        mol : qcdb.Molecule
            A molecule object for which every atom has had a basisset set for `key`
        basstrings : dict, optional
            Additional source for basis data. Keys are regularized basis names and values are gbs strings.
        return_atomlist
            Return list of one-atom BasisSet-s, rather than single whole-mol BasisSet.
        Returns
        -------
        tuple
            (whole-mol BasisSet or list of one-atom BasisSet-s,
             printable per-atom summary text,
             ECP BasisSet or None)
        """
        # Update geometry in molecule, if there is a problem an exception is thrown.
        mol.update_geometry()
        # Paths to search for gbs files: here + PSIPATH + library
        try:
            from psi4 import core
        except ImportError:
            libraryPath = ''
        else:
            psidatadir = core.get_datadir()
            libraryPath = os.pathsep + os.path.join(os.path.abspath(psidatadir), 'basis')
        #nolongerenvvar psidatadir = os.environ.get('PSIDATADIR', None)
        #nolongerpredicatble psidatadir = __file__ + '/../../..' if psidatadir is None else psidatadir
        basisPath = os.path.abspath('.') + os.pathsep
        basisPath += os.pathsep.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(os.pathsep)])
        basisPath += libraryPath
        # Validate deffit for key
        univdef_zeta = 4
        univdef = {'JFIT': ('def2-universal-jfit', 'def2-universal-jfit', None),
                   'JKFIT': ('def2-universal-jkfit', 'def2-universal-jkfit', None),
                   'RIFIT': ('def2-qzvpp-ri', 'def2-qzvpp-ri', None),
                   'DECON': (None, None, BasisSet.decontract),
                   'F12': ('def2-qzvpp-f12', 'def2-qzvpp-f12', None)}
        if deffit is not None:
            if deffit not in univdef.keys():
                raise ValidationError("""BasisSet::construct: deffit argument invalid: %s""" % (deffit))
        # Map of ShellInfo
        atom_basis_shell = collections.OrderedDict()
        ecp_atom_basis_shell = collections.OrderedDict()
        ecp_atom_basis_ncore = collections.OrderedDict()
        names = {}
        summary = []
        bastitles = []
        for at in range(mol.natom()):
            symbol = mol.atom_entry(at).symbol() # O, He
            label = mol.atom_entry(at).label() # O3, C_Drot, He
            basdict = mol.atom_entry(at).basissets() # {'BASIS': 'sto-3g', 'DF_BASIS_MP2': 'cc-pvtz-ri'}
            if label not in atom_basis_shell:
                atom_basis_shell[label] = collections.OrderedDict()
            if label not in ecp_atom_basis_shell:
                ecp_atom_basis_shell[label] = collections.OrderedDict()
            # Establish search parameters for what/where basis entries suitable for atom
            seek = {}
            try:
                requested_basname = basdict[key]
            except KeyError:
                if key == 'BASIS' or deffit is None:
                    raise BasisSetNotDefined("""BasisSet::construct: No basis set specified for %s and %s.""" %
                        (symbol, key))
                else:
                    # No auxiliary / decon basis set for atom, so try darnedest to find one.
                    # This involves querying the BasisFamily for default and
                    # default-default and finally the universal default (defined
                    # in this function). Since user hasn't indicated any specifics,
                    # look for symbol only, not label.
                    tmp = []
                    tmp.append(corresponding_basis(basdict['BASIS'], deffit))
                    #NYI#tmp.append(corresponding_basis(basdict['BASIS'], deffit + '-DEFAULT'))
                    orbital_zeta = corresponding_zeta(basdict['BASIS'])
                    if orbital_zeta is None or orbital_zeta <= univdef_zeta:
                        tmp.append(univdef[deffit])
                    seek['basis'] = [item for item in tmp if item != (None, None, None)]
                    seek['entry'] = [symbol]
                    seek['path'] = basisPath
                    seek['strings'] = ''
            else:
                # User (I hope ... dratted has_changed) has set basis for atom,
                # so look only for basis (don't try defaults), look for label (N88)
                # or symbol (N) (in that order; don't want to restrict use of atom
                # labels to basis set spec), look everywhere (don't just look
                # in library)
                if requested_basname.lower().endswith('-decon'):
                    bas_recipe = requested_basname, requested_basname[:-6], BasisSet.decontract
                else:
                    bas_recipe = requested_basname, requested_basname, None
                seek['basis'] = [bas_recipe]
                seek['entry'] = [symbol] if symbol == label else [label, symbol]
                seek['path'] = basisPath
                seek['strings'] = '' if basstrings is None else list(basstrings.keys())
            if verbose >= 2:
                print("""  Shell Entries: %s""" % (seek['entry']))
                print("""  Basis Sets: %s""" % (seek['basis']))
                print("""  File Path: %s""" % (', '.join(map(str, seek['path'].split(os.pathsep)))))
                print("""  Input Blocks: %s\n""" % (', '.join(seek['strings'])))
            # Search through paths, bases, entries
            # For each candidate basis: locate its data (input-block string first,
            # then gbs file on the search path), then look for this atom's entry.
            # The for/else clauses fire only when no break occurred, i.e. not found.
            for bas in seek['basis']:
                (bastitle, basgbs, postfunc) = bas
                filename = cls.make_filename(basgbs)
                # -- First seek bas string in input file strings
                if filename[:-4] in seek['strings']:
                    index = 'inputblock %s' % (filename[:-4])
                    # Store contents
                    if index not in names:
                        names[index] = basstrings[filename[:-4]].split('\n')
                else:
                    # -- Else seek bas.gbs file in path
                    fullfilename = search_file(_basis_file_warner_and_aliaser(filename), seek['path'])
                    if fullfilename is None:
                        # -- Else skip to next bas
                        continue
                    # Store contents so not reloading files
                    index = 'file %s' % (fullfilename)
                    if index not in names:
                        names[index] = parser.load_file(fullfilename)
                lines = names[index]
                for entry in seek['entry']:
                    # Seek entry in lines, else skip to next entry
                    shells, msg, ecp_shells, ecp_msg, ecp_ncore = parser.parse(entry, lines)
                    if shells is None:
                        continue
                    # Found!
                    # -- Post-process
                    if postfunc:
                        shells = postfunc(shells)
                        fmsg = 'func {}'.format(postfunc.__name__)
                    else:
                        fmsg = ''
                    # -- Assign to Molecule
                    atom_basis_shell[label][bastitle] = shells
                    ecp_atom_basis_shell[label][bastitle] = ecp_shells
                    if key == 'BASIS':
                        ecp_atom_basis_ncore[label] = ecp_ncore
                    mol.set_basis_by_number(at, bastitle, role=key)
                    bastitles.append(bastitle.upper())
                    if ecp_msg:
                        summary.append("""entry %-10s %s (ECP: %s) %s %s""" % (entry, msg, ecp_msg, index, fmsg))
                    else:
                        summary.append("""entry %-10s %s %s %s""" % (entry, msg, index, fmsg))
                    break
                # Break from outer loop if inner loop breaks
                else:
                    continue
                break
            else:
                # Ne'er found :-(
                text2 = """  Shell Entries: %s\n""" % (seek['entry'])
                text2 += """  Basis Sets: %s\n""" % (seek['basis'])
                text2 += """  File Path: %s\n""" % (', '.join(map(str, seek['path'].split(os.pathsep))))
                text2 += """  Input Blocks: %s\n""" % (', '.join(seek['strings']))
                raise BasisSetNotFound('BasisSet::construct: Unable to find a basis set for atom %d for key %s among:\n%s' % \
                    (at + 1, key, text2))
        # Construct the grand BasisSet for mol
        basisset = BasisSet(key, mol, atom_basis_shell)
        # If an ECP was detected, and we're building BASIS, process it now
        ecpbasisset = None
        ncore = 0
        for atom in ecp_atom_basis_ncore:
            ncore += ecp_atom_basis_ncore[atom]
        if ncore and key == 'BASIS':
            ecpbasisset = BasisSet(key, mol, ecp_atom_basis_shell, True)
            ecpbasisset.ecp_coreinfo = ecp_atom_basis_ncore
        # Construct all the one-atom BasisSet-s for mol's CoordEntry-s
        atom_basis_list = []
        for at in range(mol.natom()):
            oneatombasis = BasisSet(basisset, at)
            oneatombasishash = hashlib.sha1(oneatombasis.print_detail(numbersonly=True).encode('utf-8')).hexdigest()
            if return_atomlist:
                oneatombasis.molecule.set_shell_by_number(0, oneatombasishash, role=key)
                atom_basis_list.append(oneatombasis)
            mol.set_shell_by_number(at, oneatombasishash, role=key)
        mol.update_geometry() # re-evaluate symmetry taking basissets into account
        bastitles = list(set(bastitles))
        bastitles.sort()
        basisset.name = ' + '.join(bastitles)
        # Summary printing
        # Group atoms sharing an identical summary line, then compress each
        # group's atom numbers into runs like "1-3, 5" (consecutive-difference
        # groupby trick).
        tmp = collections.defaultdict(list)
        for at, v in enumerate(summary):
            tmp[v].append(at + 1)
        tmp2 = collections.OrderedDict()
        maxsats = 0
        for item in sorted(tmp.values()):
            for msg, ats in tmp.items():
                if item == ats:
                    G = (list(x) for _, x in itertools.groupby(ats, lambda x, c=itertools.count(): next(c) - x))
                    sats = ", ".join("-".join(map(str, (g[0], g[-1])[:len(g)])) for g in G)
                    maxsats = max(maxsats, len(sats))
                    tmp2[sats] = msg
        text = ''
        for ats, msg in tmp2.items():
            text += """   atoms %s %s\n""" % (ats.ljust(maxsats), msg)
        text += '\n'
        if return_atomlist:
            return atom_basis_list, text, ecpbasisset
        else:
            return basisset, text, ecpbasisset
# <<< Simple Methods for Basic BasisSet Information >>>
    def name(self):
        """Returns the name of this basis set.
        NOTE(review): every constructor assigns the instance attribute
        ``self.name`` (a str), which shadows this method on instances —
        ``bs.name`` yields the string directly and ``bs.name()`` would raise
        TypeError. Kept as-is for API parity with libmints.
        """
        return self.name
def set_name(self, name):
"""Sets the name of this basis set"""
self.name = name
# JET added but I think should fail
#+ def atom_shell_map(self):
#+ return self.atom_shell_map
def nprimitive(self):
"""Number of primitives.
* @return The total number of primitives in all contractions.
"""
return self.PYnprimitive
def max_nprimitive(self):
"""Maximum number of primitives in a shell.
* Examines each shell and find the shell with the maximum number of primitives returns that
* number of primitives.
* @return Maximum number of primitives.
"""
return self.PYmax_nprimitive
def nshell(self):
"""Number of shells.
* @return Number of shells.
"""
return self.n_shells
def nao(self):
"""Number of atomic orbitals (Cartesian).
* @return The number of atomic orbitals (Cartesian orbitals, always).
"""
return self.PYnao
def nbf(self):
"""Number of basis functions (Spherical).
* @return The number of basis functions (Spherical, if has_puream() == true).
"""
return self.PYnbf
def max_am(self):
"""Maximum angular momentum used in the basis set.
* @return Maximum angular momentum.
"""
return self.PYmax_am
def has_puream(self):
"""Spherical harmonics?
* @return true if using spherical harmonics
"""
return self.puream
def max_function_per_shell(self):
"""Compute the maximum number of basis functions contained in a shell.
* @return The max number of basis functions in a shell.
"""
return 2 * self.PYmax_am + 1 if self.puream else (self.PYmax_am + 1) * (self.PYmax_am + 2) / 2
    def molecule(self):
        """Molecule this basis is for.
        * @return Shared pointer to the molecule for this basis set.
        NOTE(review): the constructors assign the instance attribute
        ``self.molecule``, which shadows this method on instances; ``bs.molecule``
        is the Molecule itself and ``bs.molecule()`` would raise TypeError.
        """
        return self.molecule
def shell_to_ao_function(self, i):
"""Given a shell what is its first AO function
* @param i Shell number
* @return The function number for the first function for the i'th shell.
"""
return self.shell_first_ao[i]
def shell_to_center(self, i):
"""Given a shell what is its atomic center
* @param i Shell number
* @return The atomic center for the i'th shell.
"""
return self.shell_center[i]
def shell_to_basis_function(self, i):
"""Given a shell what is its first basis function (spherical) function
* @param i Shell number
* @return The function number for the first function for the i'th shell.
"""
return self.shell_first_basis_function[i]
    def function_to_shell(self, i):
        """Given a function number what shell does it correspond to.
        NOTE(review): the constructors assign the list attribute
        ``self.function_to_shell``, which shadows this method on instances —
        callers index the list (``bs.function_to_shell[i]``) directly.
        """
        return self.function_to_shell[i]
def function_to_center(self, i):
"""Given a function what is its atomic center
* @param i Function number
* @return The atomic center for the i'th function.
"""
return self.function_center[i]
    def ao_to_shell(self, i):
        """Given a Cartesian function (AO) number what shell does it correspond to.
        NOTE(review): the constructors assign the list attribute
        ``self.ao_to_shell``, which shadows this method on instances —
        callers index the list (``bs.ao_to_shell[i]``) directly.
        """
        return self.ao_to_shell[i]
def shell(self, si, center=None):
"""Return the si'th Gaussian shell on center
* @param i Shell number
* @return A shared pointer to the ShellInfo object for the i'th shell.
"""
if center is not None:
si += self.center_to_shell[center]
if si < 0 or si > self.nshell():
text = """BasisSet::shell(si = %d), requested a shell out-of-bound.\n Max shell size: %d\n Name: %s\n""" % \
(si, self.nshell(), self.name)
raise ValidationError("BasisSet::shell: requested shell is out-of-bounds:\n%s" % (text))
return self.shells[si]
    def nshell_on_center(self, i):
        """Return the number of shells on a given center.
        * @param i Atomic center index
        * @return Count of shells centered on atom i.
        """
        return self.center_to_nshell[i]
def shell_on_center(self, center, shell):
"""Return the overall shell number"""
return self.center_to_shell[center] + shell
# <<< Methods for Printing >>>
    def print_by_level(self, out=None, level=2):
        """Print basis set information according to the level of detail in print_level
        @param out The file stream to use for printing. Defaults to outfile.
        @param print_level: defaults to 2
        *  < 1: Nothing
        *    1: Brief summary
        *    2: Summary and contraction details
        *  > 2: Full details
        """
        if level < 1:
            # Nothing requested; *out* (if given) is left untouched.
            return
        elif level == 1:
            text = self.pyprint(out=None)
        elif level == 2:
            text = self.print_summary(out=None)
        elif level > 2:
            text = self.print_detail(out=None)
        # With out=None the text goes to stdout; otherwise it overwrites the
        # named file.
        if out is None:
            print(text)
        else:
            with open(out, mode='w') as handle:
                handle.write(text)
def pyprint(self, out=None):
"""Print the basis set.
* @param out The file stream to use for printing. Defaults to outfile.
"""
text = ''
text += """ Basis Set: %s\n""" % (self.name)
text += """ Number of shells: %d\n""" % (self.nshell())
text += """ Number of basis function: %d\n""" % (self.nbf())
text += """ Number of Cartesian functions: %d\n""" % (self.nao())
text += """ Spherical Harmonics?: %s\n""" % ('true' if self.has_puream() else 'false')
text += """ Max angular momentum: %d\n\n""" % (self.max_am())
#text += """ Source:\n%s\n""" % (self.source()) # TODO
if out is None:
return text
else:
with open(outfile, mode='w') as handle:
handle.write(text)
    def print_summary(self, out=None):
        """Prints a short string summarizing the basis set
        * @param out The file stream to use for printing. Defaults to outfile.
        * @return The summary text when *out* is None, else writes it to file.
        """
        text = ''
        text += """  -AO BASIS SET INFORMATION:\n"""
        text += """    Name                   = %s\n""" % (self.name)
        text += """    Total number of shells = %d\n""" % (self.nshell())
        text += """    Number of primitives   = %d\n""" % (self.nprimitive())
        text += """    Number of AO           = %d\n""" % (self.nao())
        text += """    Number of SO           = %d\n""" % (self.nbf())
        text += """    Maximum AM             = %d\n""" % (self.max_am())
        text += """    Spherical Harmonics    = %s\n""" % ('TRUE' if self.puream else 'FALSE')
        text += """\n"""
        text += """  -Contraction Scheme:\n"""
        text += """    Atom   Type   All Primitives // Shells:\n"""
        text += """   ------ ------ --------------------------\n"""
        # One row per atom: tally primitives and shells per angular momentum.
        for A in range(self.molecule.natom()):
            nprims = [0] * (self.PYmax_am + 1)
            nunique = [0] * (self.PYmax_am + 1)
            nshells = [0] * (self.PYmax_am + 1)
            amtypes = [None] * (self.PYmax_am + 1)
            text += """    %4d    """ % (A + 1)
            text += """%2s     """ % (self.molecule.symbol(A))
            first_shell = self.center_to_shell[A]
            n_shell = self.center_to_nshell[A]
            for Q in range(n_shell):
                shell = self.shells[Q + first_shell]
                nshells[shell.am()] += 1
                nunique[shell.am()] += shell.nprimitive()
                nprims[shell.am()] += shell.nprimitive()
                amtypes[shell.am()] = shell.amchar()
            # All Primitives
            for l in range(self.PYmax_am + 1):
                if nprims[l] == 0:
                    continue
                text += """%d%c """ % (nprims[l], amtypes[l])
            # Shells
            text += """// """
            for l in range(self.PYmax_am + 1):
                if nshells[l] == 0:
                    continue
                text += """%d%c """ % (nshells[l], amtypes[l])
            text += """\n"""
        text += """\n"""
        if out is None:
            return text
        else:
            with open(out, mode='w') as handle:
                handle.write(text)
    def print_detail(self, out=None, numbersonly=False):
        """Prints a detailed PSI3-style summary of the basis (per-atom)
        * @param out The file stream to use for printing. Defaults to outfile.
        * @param numbersonly When True, suppress the summary header and the
          per-atom symbol lines, emitting only the shell data.
        """
        text = ''
        if not numbersonly:
            text += self.print_summary(out=None)
            text += """  ==> AO Basis Functions <==\n"""
            text += '\n'
            text += """    [ %s ]\n""" % (self.name)
            text += """    spherical\n""" if self.has_puream() else """    cartesian\n"""
            text += """    ****\n"""
        # Emit shells only for symmetry-unique atoms.
        for uA in range(self.molecule.nunique()):
            A = self.molecule.unique(uA)
            if not numbersonly:
                text += """   %2s %3d\n""" % (self.molecule.symbol(A), A + 1)
            first_shell = self.center_to_shell[A]
            n_shell = self.center_to_nshell[A]
            for Q in range(n_shell):
                text += self.shells[Q + first_shell].pyprint(outfile=None)
            if not numbersonly:
                text += """    ****\n"""
        text += """\n"""
        if out is None:
            return text
        else:
            with open(out, mode='w') as handle:
                handle.write(text)
    def export_for_libmints(self, role):
        """From complete BasisSet object, returns array where
        triplets of elements are each unique atom label, the hash
        of the string shells entry in gbs format and the
        shells entry in gbs format for that label. This packaging is
        intended for return to libmints BasisSet::construct_from_pydict for
        instantiation of a libmints BasisSet clone of *self*.
        * @param role Key used to select the shell string on each atom.
        * @return List of per-atom lists: [label, shell-hash, (ncore,) shells...]
        """
        info = []
        for A in range(self.molecule.natom()):
            label = self.molecule.label(A)
            first_shell = self.center_to_shell[A]
            n_shell = self.center_to_nshell[A]
            atominfo = [label]
            atominfo.append(self.molecule.atoms[A].shell(key=role))
            if self.ecp_coreinfo:
                # If core information is present, this is an ECP so we add the
                # number of electrons this atom's ECP basis accounts for here.
                try:
                    atominfo.append(self.ecp_coreinfo[label])
                except KeyError:
                    raise ValidationError("Problem with number of cores in ECP constuction!")
            for Q in range(n_shell):
                atominfo.append(self.shells[Q + first_shell].aslist())
            info.append(atominfo)
        return info
    def print_detail_gamess(self, out=None, numbersonly=False):
        """Prints a detailed PSI3-style summary of the basis (per-atom)
        in GAMESS format.
        * @param out The file stream to use for printing. Defaults to outfile.
        * @param numbersonly When True, suppress header and element lines.
        """
        text = ''
        if not numbersonly:
            text += self.print_summary(out=None)
            text += """  ==> AO Basis Functions <==\n"""
            text += '\n'
            text += """    [ %s ]\n""" % (self.name)
            text += """    spherical\n""" if self.has_puream() else """    cartesian\n"""
            text += """    ****\n"""
        for uA in range(self.molecule.nunique()):
            A = self.molecule.unique(uA)
            if not numbersonly:
                # GAMESS wants the element name rather than symbol + index.
                text += """%s\n""" % (qcel.periodictable.to_element(self.molecule.Z(A)))
            first_shell = self.center_to_shell[A]
            n_shell = self.center_to_nshell[A]
            for Q in range(n_shell):
                text += self.shells[Q + first_shell].pyprint_gamess(outfile=None)
            #text += """    ****\n"""
        text += """\n"""
        if out is None:
            return text
        else:
            with open(out, mode='w') as handle:
                handle.write(text)
    def print_detail_cfour(self, out=None):
        """Returns a string in CFOUR-style of the basis (per-atom)
        * Format from http://slater.chemie.uni-mainz.de/cfour/index.php?n=Main.OldFormatOfAnEntryInTheGENBASFile
        * @param out Target file name; when None the text is returned.
        """
        text = ''
        for uA in range(self.molecule.nunique()):
            A = self.molecule.unique(uA)
            text += """%s:P4_%d\n""" % (self.molecule.symbol(A), A + 1)
            text += """Psi4 basis %s for element %s atom %d\n\n""" % \
                (self.name.upper(), self.molecule.symbol(A), A + 1)
            first_shell = self.center_to_shell[A]
            n_shell = self.center_to_nshell[A]
            max_am_center = 0
            # Highest angular momentum present on this center.
            for Q in range(n_shell):
                max_am_center = self.shells[Q + first_shell].am() if \
                    self.shells[Q + first_shell].am() > max_am_center else max_am_center
            # Bucket this center's shells by angular momentum.
            shell_per_am = [[] for i in range(max_am_center + 1)]
            for Q in range(n_shell):
                shell_per_am[self.shells[Q + first_shell].am()].append(Q)
            # Write number of shells in the basis set
            text += """%3d\n""" % (max_am_center + 1)
            # Write angular momentum for each shell
            for am in range(max_am_center + 1):
                text += """%5d""" % (am)
            text += '\n'
            # Write number of contracted basis functions for each shell
            for am in range(max_am_center + 1):
                text += """%5d""" % (len(shell_per_am[am]))
            text += '\n'
            exp_per_am = [[] for i in range(max_am_center + 1)]
            coef_per_am = [[] for i in range(max_am_center + 1)]
            for am in range(max_am_center + 1):
                # Collect unique exponents among all functions
                for Q in range(len(shell_per_am[am])):
                    for K in range(self.shells[shell_per_am[am][Q] + first_shell].nprimitive()):
                        if self.shells[shell_per_am[am][Q] + first_shell].exp(K) not in exp_per_am[am]:
                            exp_per_am[am].append(self.shells[shell_per_am[am][Q] + first_shell].exp(K))
                # Collect coefficients for each exp among all functions, zero otherwise
                for Q in range(len(shell_per_am[am])):
                    K = 0
                    for ep in range(len(exp_per_am[am])):
                        # Match on exponent within a tight tolerance; unmatched
                        # exponents contribute a zero coefficient.
                        if abs(exp_per_am[am][ep] - self.shells[shell_per_am[am][Q] + first_shell].exp(K)) < 1.0e-8:
                            coef_per_am[am].append(self.shells[shell_per_am[am][Q] + first_shell].original_coef(K))
                            if (K + 1) != self.shells[shell_per_am[am][Q] + first_shell].nprimitive():
                                K += 1
                        else:
                            coef_per_am[am].append(0.0)
            # Write number of exponents for each shell
            for am in range(max_am_center + 1):
                text += """%5d""" % (len(exp_per_am[am]))
            text += '\n\n'
            for am in range(max_am_center + 1):
                # Write exponents for each shell
                for ep in range(len(exp_per_am[am])):
                    text += """%14.7f""" % (exp_per_am[am][ep])
                    # Five exponents per line, plus a final newline.
                    if ((ep + 1) % 5 == 0) or ((ep + 1) == len(exp_per_am[am])):
                        text += '\n'
                text += '\n'
                # Write contraction coefficients for each shell
                for ep in range(len(exp_per_am[am])):
                    for bf in range(len(shell_per_am[am])):
                        text += """%10.7f """ % (coef_per_am[am][bf * len(exp_per_am[am]) + ep])
                    text += '\n'
                text += '\n'
        if out is None:
            return text
        else:
            with open(out, mode='w') as handle:
                handle.write(text)
# <<< Misc. Methods >>>
    def refresh(self):
        """Refresh internal basis set data. Useful if someone has pushed
        to shells. Pushing to shells happens in the BasisSetParsers, so
        the parsers will call refresh(). This function is now defunct.
        * @raises FeatureNotImplemented always; kept only for interface parity.
        """
        raise FeatureNotImplemented('BasisSet::refresh')
@staticmethod
def make_filename(name):
"""Converts basis set name to a compatible filename.
* @param basisname Basis name
* @return Compatible file name.
"""
# Modify the name of the basis set to generate a filename: STO-3G -> sto-3g
basisname = name
# First make it lower case
basisname = basisname.lower()
# Replace all '(' with '_'
basisname = basisname.replace('(', '_')
# Replace all ')' with '_'
basisname = basisname.replace(')', '_')
# Replace all ',' with '_'
basisname = basisname.replace(',', '_')
# Replace all '*' with 's'
basisname = basisname.replace('*', 's')
# Replace all '+' with 'p'
basisname = basisname.replace('+', 'p')
# Add file extension
basisname += '.gbs'
return basisname
    @staticmethod
    def decontract(shells):
        """Procedure applied to list to ShellInfo-s *shells* that returns
        another list of shells, one for every AM and exponent pair in the input
        list. Decontracts the shells.
        * @param shells Iterable of contracted ShellInfo objects.
        * @return List of single-primitive (uncontracted) ShellInfo objects.
        """
        # vector of uncontracted shells to return
        shell_list = []
        # map of AM to a vector of exponents for duplicate basis functions check
        exp_map = collections.defaultdict(list)
        for shell in shells:
            am = shell.am()
            pure = shell.is_pure()
            nc = shell.ncenter()
            center = shell.center
            start = shell.start
            for prim in range(shell.nprimitive()):
                exp = shell.exp(prim)
                # Skip exponents already emitted for this angular momentum
                # (within a loose 1e-6 tolerance).
                unique = True
                for _exp in exp_map[am]:
                    if abs(exp - _exp) < 1.0e-6:
                        unique = False
                if unique:
                    # Single primitive, unit coefficient, left unnormalized.
                    us = ShellInfo(am, [1.0], [exp],
                                   'Pure' if pure else 'Cartesian',
                                   nc, center, start, 'Unnormalized')
                    shell_list.append(us)
                    exp_map[am].append(exp)
        return shell_list
# <<< Methods not Implemented >>>
    def zero_so_basis_set(cls, factory):
        """ **NYI** Returns an empty SO basis set object.
        * Returns an SOBasis object that actually has a single s-function
        * at the origin with an exponent of 0.0 and contraction of 1.0.
        * @return A new empty SOBasis object.

        NOTE(review): takes *cls* but is not decorated ``@classmethod`` —
        presumably intentional since it only raises; confirm before enabling.
        """
        raise FeatureNotImplemented('BasisSet::zero_so_basis_set')  # FINAL
    @staticmethod
    def test_basis_set(max_am):
        """Returns a shell-labeled test basis set object
        * @param max_am maximum angular momentum to build
        * @return pair containing shell labels and four-center
        * test basis for use in benchmarking
        * See libmints/benchmark.cc for details
        The libmints version seems not to have been updated along with the classes.
        * @raises FeatureNotImplemented always; not yet ported.
        """
        raise FeatureNotImplemented('BasisSet::test_basis_set')
    def get_ao_sorted_shell(self, i):
        """Returns the value of the sorted shell list. Defunct
        * @raises FeatureNotImplemented always.
        """
        raise FeatureNotImplemented('BasisSet::get_ao_sorted_shell')
    def get_ao_sorted_list(self):
        """Returns the vector of sorted shell list. Defunct
        * @raises FeatureNotImplemented always.
        """
        raise FeatureNotImplemented('BasisSet::get_ao_sorted_list')
    def compute_phi(self, phi_ao, x, y, z):
        """Returns the values of the basis functions at a point

        NOTE(review): *phi_ao* is immediately rebound to a fresh local list,
        so the caller's buffer is never filled and nothing is returned —
        as written the results are discarded; confirm intended contract.
        NOTE(review): ``exp_ao`` is not defined in this scope — presumably a
        module-level table of Cartesian exponent triplets indexed by
        [am][component]; verify before use.
        """
        phi_ao = [0.0] * self.nao()
        ao = 0
        for ns in range(self.nshell()):
            shell = self.shells[ns]
            am = shell.am()
            nprim = shell.nprimitive()
            a = shell.exps()
            c = shell.coefs()
            xyz = shell.center()
            # Displacement of the evaluation point from the shell center.
            dx = x - xyz[0]
            dy = y - xyz[1]
            dz = z - xyz[2]
            rr = dx * dx + dy * dy + dz * dz
            # Contracted radial factor: sum of Gaussian primitives at rr.
            cexpr = 0
            for np in range(nprim):
                cexpr += c[np] * math.exp(-a[np] * rr)
            # Angular part: one Cartesian monomial per component of this am.
            for l in range(INT_NCART(am)):
                components = exp_ao[am][l]
                phi_ao[ao + l] += pow(dx, components[0]) * \
                    pow(dy, components[1]) * \
                    pow(dz, components[2]) * \
                    cexpr
            ao += INT_NCART(am)
    def concatenate(self, b):
        """Concatenates two basis sets together into a new basis without
        reordering anything. Unless you know what you're doing, you should
        use the '+' operator instead of this method. Appears defunct.
        * @raises FeatureNotImplemented always.
        """
        raise FeatureNotImplemented('BasisSet::concatenate')
    def add(self, b):
        """Adds this plus another basis set and returns the result.
        Equivalent to the '+' operator. Appears defunct.
        * @raises FeatureNotImplemented always.
        """
        raise FeatureNotImplemented('BasisSet::add')
    @staticmethod
    def shell_sorter_ncenter(d1, d2):
        # Comparator: order shells by their atomic center index.
        return d1.ncenter() < d2.ncenter()
    @staticmethod
    def shell_sorter_am(d1, d2):
        # Comparator: order shells by ascending angular momentum.
        return d1.am() < d2.am()
def _basis_file_warner_and_aliaser(filename):
aliased_in_1p4 = {
"def2-qzvp-jkfit": "def2-universal-jkfit",
"def2-qzvpp-jkfit": "def2-universal-jkfit",
"def2-sv_p_-jkfit": "def2-universal-jkfit",
"def2-svp-jkfit": "def2-universal-jkfit",
"def2-tzvp-jkfit": "def2-universal-jkfit",
"def2-tzvpp-jkfit": "def2-universal-jkfit",
"def2-qzvp-jfit": "def2-universal-jfit",
"def2-qzvpp-jfit": "def2-universal-jfit",
"def2-sv_p_-jfit": "def2-universal-jfit",
"def2-svp-jfit": "def2-universal-jfit",
"def2-tzvp-jfit": "def2-universal-jfit",
"def2-tzvpp-jfit": "def2-universal-jfit",
}
for k, v in aliased_in_1p4.items():
if filename.endswith(k + ".gbs"):
warnings.warn(
f"Using basis set `{k}` instead of its generic name `{v}` is deprecated, and in 1.5 it will stop working\n",
category=FutureWarning,
stacklevel=2)
return filename.replace(k, v)
else:
return filename
|
jturney/psi4
|
psi4/driver/qcdb/libmintsbasisset.py
|
Python
|
lgpl-3.0
| 65,736
|
[
"CFOUR",
"Gaussian",
"Psi4"
] |
584e5577fa119f2f5dbd7f576f662e3267ca53abbef11e4c65d558d8d662a5a5
|
"""Learn to estimate functions from examples. (Chapters 18-20)"""
from utils import (
removeall, unique, product, mode, argmax, argmax_random_tie, isclose,
dotproduct, vector_add, scalar_vector_product, weighted_sample_with_replacement,
weighted_sampler, num_or_str, normalize, clip, sigmoid, print_table, DataFile
)
import copy
import heapq
import math
import random
from statistics import mean
from collections import defaultdict
# ______________________________________________________________________________
def rms_error(predictions, targets):
    """Root-mean-square error between predictions and targets."""
    return ms_error(predictions, targets) ** 0.5
def ms_error(predictions, targets):
    """Mean-squared error between predictions and targets."""
    squared_diffs = [(p - t) ** 2 for p, t in zip(predictions, targets)]
    return mean(squared_diffs)
def mean_error(predictions, targets):
    """Mean absolute error between predictions and targets."""
    absolute_diffs = [abs(p - t) for p, t in zip(predictions, targets)]
    return mean(absolute_diffs)
def manhattan_distance(predictions, targets):
    """Sum of absolute coordinate differences (L1 distance)."""
    return sum(abs(p - t) for p, t in zip(predictions, targets))
def mean_boolean_error(predictions, targets):
    """Fraction of positions where prediction and target disagree."""
    mismatches = [int(p != t) for p, t in zip(predictions, targets)]
    return mean(mismatches)
def hamming_distance(predictions, targets):
    """Count of positions where prediction and target disagree."""
    return sum(1 for p, t in zip(predictions, targets) if p != t)
# ______________________________________________________________________________
class DataSet:
    """A data set for a machine learning problem. It has the following fields:

    d.examples   A list of examples. Each one is a list of attribute values.
    d.attrs      A list of integers to index into an example, so example[attr]
                 gives a value. Normally the same as range(len(d.examples[0])).
    d.attrnames  Optional list of mnemonic names for corresponding attrs.
    d.target     The attribute that a learning algorithm will try to predict.
                 By default the final attribute.
    d.inputs     The list of attrs without the target.
    d.values     A list of lists: each sublist is the set of possible
                 values for the corresponding attribute. If initially None,
                 it is computed from the known examples by self.setproblem.
                 If not None, an erroneous value raises ValueError.
    d.distance   A function from a pair of examples to a nonnegative number.
                 Should be symmetric, etc. Defaults to mean_boolean_error
                 since that can handle any field types.
    d.name       Name of the data set (for output display only).
    d.source     URL or other source where the data came from.
    d.exclude    A list of attribute indexes to exclude from d.inputs. Elements
                 of this list can either be integers (attrs) or attrnames.

    Normally, you call the constructor and you're done; then you just
    access fields like d.examples and d.target and d.inputs."""

    def __init__(self, examples=None, attrs=None, attrnames=None, target=-1,
                 inputs=None, values=None, distance=mean_boolean_error,
                 name='', source='', exclude=()):
        """Accepts any of DataSet's fields. Examples can also be a
        string or file from which to parse examples using parse_csv.
        Optional parameter: exclude, as documented in .setproblem().
        >>> DataSet(examples='1, 2, 3')
        <DataSet(): 1 examples, 3 attributes>
        """
        self.name = name
        self.source = source
        self.values = values
        self.distance = distance
        # Remember whether values were caller-supplied: only then are
        # examples validated against them (see check_me).
        if values is None:
            self.got_values_flag = False
        else:
            self.got_values_flag = True

        # Initialize .examples from string or list or data directory
        if isinstance(examples, str):
            self.examples = parse_csv(examples)
        elif examples is None:
            self.examples = parse_csv(DataFile(name + '.csv').read())
        else:
            self.examples = examples

        # Attrs are the indices of examples, unless otherwise stated.
        if attrs is None and self.examples is not None:
            attrs = list(range(len(self.examples[0])))

        self.attrs = attrs

        # Initialize .attrnames from string, list, or by default
        if isinstance(attrnames, str):
            self.attrnames = attrnames.split()
        else:
            self.attrnames = attrnames or attrs
        self.setproblem(target, inputs=inputs, exclude=exclude)

    def setproblem(self, target, inputs=None, exclude=()):
        """Set (or change) the target and/or inputs.
        This way, one DataSet can be used multiple ways. inputs, if specified,
        is a list of attributes, or specify exclude as a list of attributes
        to not use in inputs. Attributes can be -n .. n, or an attrname.
        Also computes the list of possible values, if that wasn't done yet."""
        self.target = self.attrnum(target)
        exclude = list(map(self.attrnum, exclude))
        if inputs:
            self.inputs = removeall(self.target, inputs)
        else:
            self.inputs = [a for a in self.attrs
                           if a != self.target and a not in exclude]
        if not self.values:
            self.update_values()
        self.check_me()

    def check_me(self):
        """Check that my fields make sense."""
        assert len(self.attrnames) == len(self.attrs)
        assert self.target in self.attrs
        assert self.target not in self.inputs
        assert set(self.inputs).issubset(set(self.attrs))
        if self.got_values_flag:
            # only check if values are provided while initializing DataSet
            list(map(self.check_example, self.examples))

    def add_example(self, example):
        """Add an example to the list of examples, checking it first."""
        self.check_example(example)
        self.examples.append(example)

    def check_example(self, example):
        """Raise ValueError if example has any invalid values."""
        if self.values:
            for a in self.attrs:
                if example[a] not in self.values[a]:
                    raise ValueError('Bad value {} for attribute {} in {}'
                                     .format(example[a], self.attrnames[a], example))

    def attrnum(self, attr):
        """Returns the number used for attr, which can be a name, or -n .. n-1."""
        if isinstance(attr, str):
            return self.attrnames.index(attr)
        elif attr < 0:
            # Negative indices count from the end, like list indexing.
            return len(self.attrs) + attr
        else:
            return attr

    def update_values(self):
        # Possible values per attribute = unique entries in each column.
        self.values = list(map(unique, zip(*self.examples)))

    def sanitize(self, example):
        """Return a copy of example, with non-input attributes replaced by None."""
        return [attr_i if i in self.inputs else None
                for i, attr_i in enumerate(example)]

    def classes_to_numbers(self, classes=None):
        """Converts class names to numbers."""
        if not classes:
            # If classes were not given, extract them from values
            classes = sorted(self.values[self.target])
        for item in self.examples:
            item[self.target] = classes.index(item[self.target])

    def remove_examples(self, value=""):
        """Remove examples that contain given value."""
        self.examples = [x for x in self.examples if value not in x]
        self.update_values()

    def __repr__(self):
        return '<DataSet({}): {:d} examples, {:d} attributes>'.format(
            self.name, len(self.examples), len(self.attrs))
# ______________________________________________________________________________
def parse_csv(input, delim=','):
    r"""Input is a string consisting of lines, each line has comma-delimited
    fields. Convert this into a list of lists. Blank lines are skipped.
    Fields that look like numbers are converted to numbers.
    The delim defaults to ',' but '\t' and None are also reasonable values.
    >>> parse_csv('1, 2, 3 \n 0, 2, na')
    [[1, 2, 3], [0, 2, 'na']]"""
    rows = []
    for line in input.splitlines():
        # Skip blank (or whitespace-only) lines.
        if not line.strip():
            continue
        rows.append([num_or_str(field) for field in line.split(delim)])
    return rows
# ______________________________________________________________________________
class CountingProbDist:
    """A probability distribution formed by observing and counting examples.
    If p is an instance of this class and o is an observed value, then
    there are 3 main operations:
    p.add(o) increments the count for observation o by 1.
    p.sample() returns a random element from the distribution.
    p[o] returns the probability for o (as in a regular ProbDist)."""

    def __init__(self, observations=[], default=0):
        """Create a distribution, and optionally add in some observations.
        By default this is an unsmoothed distribution, but saying default=1,
        for example, gives you add-one smoothing."""
        self.dictionary = {}
        self.n_obs = 0.0
        self.default = default
        self.sampler = None
        for observation in observations:
            self.add(observation)

    def add(self, o):
        """Record one occurrence of observation o."""
        self.smooth_for(o)
        self.dictionary[o] += 1
        self.n_obs += 1
        # Counts changed, so any cached sampler is stale.
        self.sampler = None

    def smooth_for(self, o):
        """Include o among the possible observations, whether or not
        it's been observed yet."""
        if o not in self.dictionary:
            self.dictionary[o] = self.default
            self.n_obs += self.default
            self.sampler = None

    def __getitem__(self, item):
        """Return an estimate of the probability of item."""
        self.smooth_for(item)
        return self.dictionary[item] / self.n_obs

    # (top() and sample() are not used in this module, but elsewhere.)

    def top(self, n):
        """Return (count, obs) tuples for the n most frequent observations."""
        pairs = [(count, obs) for obs, count in self.dictionary.items()]
        return heapq.nlargest(n, pairs)

    def sample(self):
        """Return a random sample from the distribution."""
        if self.sampler is None:
            keys = list(self.dictionary.keys())
            weights = list(self.dictionary.values())
            self.sampler = weighted_sampler(keys, weights)
        return self.sampler()
# ______________________________________________________________________________
def PluralityLearner(dataset):
    """A very dumb algorithm: always pick the result that was most popular
    in the training data. Makes a baseline for comparison."""
    # Most common target value over the whole training set, computed once.
    most_popular = mode([e[dataset.target] for e in dataset.examples])

    def predict(example):
        """Always return same result: the most popular from the training set."""
        return most_popular
    return predict
# ______________________________________________________________________________
def NaiveBayesLearner(dataset):
    """Just count how many times each value of each input attribute
    occurs, conditional on the target value. Count the different
    target values too."""

    targetvals = dataset.values[dataset.target]
    # Prior distribution P(target) and conditionals P(attr value | target).
    target_dist = CountingProbDist(targetvals)
    attr_dists = {(gv, attr): CountingProbDist(dataset.values[attr])
                  for gv in targetvals
                  for attr in dataset.inputs}
    # One pass over the data fills in all the counts.
    for example in dataset.examples:
        targetval = example[dataset.target]
        target_dist.add(targetval)
        for attr in dataset.inputs:
            attr_dists[targetval, attr].add(example[attr])

    def predict(example):
        """Predict the target value for example. Consider each possible value,
        and pick the most likely by looking at each attribute independently."""
        def class_probability(targetval):
            # Naive Bayes score: prior times product of per-attribute
            # likelihoods (conditional independence assumption).
            return (target_dist[targetval] *
                    product(attr_dists[targetval, attr][example[attr]]
                            for attr in dataset.inputs))
        return argmax(targetvals, key=class_probability)

    return predict
# ______________________________________________________________________________
def NearestNeighborLearner(dataset, k=1):
    """k-NearestNeighbor: the k nearest neighbors vote."""
    def predict(example):
        """Find the k closest items, and have them vote for the best."""
        # nsmallest on (distance, example) pairs ranks by distance first.
        best = heapq.nsmallest(k, ((dataset.distance(e, example), e)
                                   for e in dataset.examples))
        return mode(e[dataset.target] for (d, e) in best)
    return predict
# ______________________________________________________________________________
class DecisionFork:
    """A fork of a decision tree holds an attribute to test, and a dict
    of branches, one for each of the attribute's values."""

    def __init__(self, attr, attrname=None, branches=None):
        """Initialize by saying what attribute this node tests."""
        self.attr = attr
        self.attrname = attrname or attr
        self.branches = branches if branches is not None else {}

    def __call__(self, example):
        """Given an example, classify it using the attribute and the branches."""
        # Delegate to the sub-tree selected by this example's attribute value.
        value = example[self.attr]
        subtree = self.branches[value]
        return subtree(example)

    def add(self, val, subtree):
        """Add a branch. If self.attr = val, go to the given subtree."""
        self.branches[val] = subtree

    def display(self, indent=0):
        print('Test', self.attrname)
        for val, subtree in self.branches.items():
            print(' ' * 4 * indent, self.attrname, '=', val, '==>', end=' ')
            subtree.display(indent + 1)

    def __repr__(self):
        return ('DecisionFork({0!r}, {1!r}, {2!r})'
                .format(self.attr, self.attrname, self.branches))
class DecisionLeaf:
    """A leaf of a decision tree holds just a result."""

    def __init__(self, result):
        self.result = result

    def __call__(self, example):
        # A leaf ignores the example: the answer is already decided.
        return self.result

    def display(self, indent=0):
        print('RESULT =', self.result)

    def __repr__(self):
        return repr(self.result)
# ______________________________________________________________________________
def DecisionTreeLearner(dataset):
    """[Figure 18.5]
    Learn a decision tree from *dataset* by recursive greedy splitting on the
    attribute with the highest information gain. Returns a callable tree
    (DecisionFork/DecisionLeaf) mapping an example to a target value."""

    target, values = dataset.target, dataset.values

    def decision_tree_learning(examples, attrs, parent_examples=()):
        # Base cases: no data, pure node, or no attributes left to split on.
        if len(examples) == 0:
            return plurality_value(parent_examples)
        elif all_same_class(examples):
            return DecisionLeaf(examples[0][target])
        elif len(attrs) == 0:
            return plurality_value(examples)
        else:
            A = choose_attribute(attrs, examples)
            tree = DecisionFork(A, dataset.attrnames[A])
            for (v_k, exs) in split_by(A, examples):
                subtree = decision_tree_learning(
                    exs, removeall(A, attrs), examples)
                tree.add(v_k, subtree)
            return tree

    def plurality_value(examples):
        """Return the most popular target value for this set of examples.
        (If target is binary, this is the majority; otherwise plurality.)"""
        popular = argmax_random_tie(values[target],
                                    key=lambda v: count(target, v, examples))
        return DecisionLeaf(popular)

    def count(attr, val, examples):
        """Count the number of examples that have attr = val."""
        return sum(e[attr] == val for e in examples)

    def all_same_class(examples):
        """Are all these examples in the same target class?"""
        class0 = examples[0][target]
        return all(e[target] == class0 for e in examples)

    def choose_attribute(attrs, examples):
        """Choose the attribute with the highest information gain."""
        return argmax_random_tie(attrs,
                                 key=lambda a: information_gain(a, examples))

    def information_gain(attr, examples):
        """Return the expected reduction in entropy from splitting by attr."""
        def I(examples):
            # Entropy of the target-value distribution in these examples.
            return information_content([count(target, v, examples)
                                        for v in values[target]])
        N = float(len(examples))
        remainder = sum((len(examples_i) / N) * I(examples_i)
                        for (v, examples_i) in split_by(attr, examples))
        return I(examples) - remainder

    def split_by(attr, examples):
        """Return a list of (val, examples) pairs for each val of attr."""
        return [(v, [e for e in examples if e[attr] == v])
                for v in values[attr]]

    return decision_tree_learning(dataset.examples, dataset.inputs)
def information_content(values):
    """Number of bits to represent the probability distribution in values."""
    # Zero counts contribute nothing and would break log2, so drop them
    # before normalizing to probabilities.
    probabilities = normalize(removeall(0, values))
    return -sum(p * math.log2(p) for p in probabilities)
# ______________________________________________________________________________
# A decision list is implemented as a list of (test, value) pairs.
def DecisionListLearner(dataset):
    """[Figure 18.11]
    Learn a decision list: an ordered list of (test, outcome) pairs tried in
    sequence. NOTE(review): find_examples/passes are unimplemented stubs, so
    this learner raises NotImplementedError if actually trained."""

    def decision_list_learning(examples):
        if not examples:
            # Default rule: the always-true test with outcome False.
            return [(True, False)]
        t, o, examples_t = find_examples(examples)
        if not t:
            # No test covers any examples; a bare Exception signals failure.
            raise Exception
        return [(t, o)] + decision_list_learning(examples - examples_t)

    def find_examples(examples):
        """Find a set of examples that all have the same outcome under
        some test. Return a tuple of the test, outcome, and examples."""
        raise NotImplementedError

    def passes(example, test):
        """Does the example pass the test?"""
        raise NotImplementedError

    def predict(example):
        """Predict the outcome for the first passing test."""
        for test, outcome in predict.decision_list:
            if passes(example, test):
                return outcome

    # The learned list is stashed on the predict function itself.
    predict.decision_list = decision_list_learning(set(dataset.examples))

    return predict
# ______________________________________________________________________________
def NeuralNetLearner(dataset, hidden_layer_sizes=[3],
                     learning_rate=0.01, epochs=100):
    """Layered feed-forward network.
    hidden_layer_sizes: List of number of hidden units per hidden layer
    learning_rate: Learning rate of gradient descent
    epochs: Number of passes over the dataset

    NOTE(review): hidden_layer_sizes uses a mutable default; it is never
    mutated here (network() only concatenates), so behavior is safe, but a
    None sentinel would be the conventional form.
    """

    i_units = len(dataset.inputs)
    o_units = 1  # As of now, dataset.target gives only one index.

    # construct a network
    raw_net = network(i_units, hidden_layer_sizes, o_units)
    learned_net = BackPropagationLearner(dataset, raw_net,
                                         learning_rate, epochs)

    def predict(example):
        # Input nodes
        i_nodes = learned_net[0]

        # Activate input layer
        for v, n in zip(example, i_nodes):
            n.value = v

        # Forward pass
        for layer in learned_net[1:]:
            for node in layer:
                inc = [n.value for n in node.inputs]
                in_val = dotproduct(inc, node.weights)
                node.value = node.activation(in_val)

        # Hypothesis: threshold the single output unit at 0.5.
        o_nodes = learned_net[-1]
        pred = [o_nodes[i].value for i in range(o_units)]
        return 1 if pred[0] >= 0.5 else 0

    return predict
class NNUnit:
    """Single Unit of Multiple Layer Neural Network
    inputs: Incoming connections (NNUnit references)
    weights: Weights to incoming connections
    value: Last computed activation value
    activation: Activation function (sigmoid)
    """

    def __init__(self, weights=None, inputs=None):
        # Bug fix: the constructor previously ignored both arguments and
        # always assigned empty lists, making the parameters dead.
        self.weights = weights if weights is not None else []
        self.inputs = inputs if inputs is not None else []
        self.value = None
        self.activation = sigmoid
def network(input_units, hidden_layer_sizes, output_units):
    """Create Directed Acyclic Network of given number layers.
    hidden_layers_sizes : List number of neuron units in each hidden layer
    excluding input and output layers
    """
    # An empty hidden list gives a perceptron: input layer wired straight
    # to the output layer.
    if hidden_layer_sizes:
        sizes = [input_units] + hidden_layer_sizes + [output_units]
    else:
        sizes = [input_units, output_units]

    net = [[NNUnit() for _ in range(size)] for size in sizes]

    # Fully connect each layer to its predecessor with zero initial weights.
    for prev_layer, layer in zip(net, net[1:]):
        for node in layer:
            for source in prev_layer:
                node.inputs.append(source)
                node.weights.append(0)
    return net
def BackPropagationLearner(dataset, net, learning_rate, epochs):
    """[Figure 18.23] The back-propagation algorithm for multilayer network.
    Trains *net* in place by stochastic gradient descent (one update per
    example) and returns the trained network."""
    # Initialise weights
    for layer in net:
        for node in layer:
            node.weights = [random.uniform(-0.5, 0.5)
                            for i in range(len(node.weights))]

    examples = dataset.examples
    '''
    As of now dataset.target gives an int instead of list,
    Changing dataset class will have effect on all the learners.
    Will be taken care of later
    '''
    idx_t = [dataset.target]
    idx_i = dataset.inputs
    n_layers = len(net)
    o_nodes = net[-1]
    i_nodes = net[0]

    for epoch in range(epochs):
        # Iterate over each example
        for e in examples:
            i_val = [e[i] for i in idx_i]
            t_val = [e[i] for i in idx_t]
            # Activate input layer
            for v, n in zip(i_val, i_nodes):
                n.value = v

            # Forward pass
            for layer in net[1:]:
                for node in layer:
                    inc = [n.value for n in node.inputs]
                    in_val = dotproduct(inc, node.weights)
                    node.value = node.activation(in_val)

            # Initialize delta
            delta = [[] for i in range(n_layers)]

            # Compute outer layer delta: sigmoid derivative times error.
            o_units = len(o_nodes)
            err = [t_val[i] - o_nodes[i].value
                   for i in range(o_units)]
            delta[-1] = [(o_nodes[i].value) * (1 - o_nodes[i].value) *
                         (err[i]) for i in range(o_units)]

            # Backward pass: propagate deltas from the last hidden layer
            # toward the first.
            h_layers = n_layers - 2
            for i in range(h_layers, 0, -1):
                layer = net[i]
                h_units = len(layer)
                nx_layer = net[i+1]
                # weights from each ith layer node to each i + 1th layer node
                w = [[node.weights[k] for node in nx_layer]
                     for k in range(h_units)]

                delta[i] = [(layer[j].value) * (1 - layer[j].value) *
                            dotproduct(w[j], delta[i+1])
                            for j in range(h_units)]

            # Update weights using the previous layer's activations.
            for i in range(1, n_layers):
                layer = net[i]
                inc = [node.value for node in net[i-1]]
                units = len(layer)
                for j in range(units):
                    layer[j].weights = vector_add(layer[j].weights,
                                                  scalar_vector_product(
                                                      learning_rate * delta[i][j], inc))

    return net
def PerceptronLearner(dataset, learning_rate=0.01, epochs=100):
    """Logistic Regression, NO hidden layer.

    Builds a single-layer network, trains it with back-propagation, and
    returns a predict(example) -> 0/1 function.
    """
    num_inputs = len(dataset.inputs)
    num_outputs = 1  # As of now, dataset.target gives only one index.
    trained = BackPropagationLearner(dataset,
                                     network(num_inputs, [], num_outputs),
                                     learning_rate, epochs)

    def predict(example):
        # Feed the example into the input layer.
        for val, unit in zip(example, trained[0]):
            unit.value = val
        # Propagate activations forward through all later layers.
        for layer in trained[1:]:
            for unit in layer:
                weighted_input = dotproduct([u.value for u in unit.inputs],
                                            unit.weights)
                unit.value = unit.activation(weighted_input)
        # Threshold the (single) output unit at 0.5.
        outputs = [unit.value for unit in trained[-1][:num_outputs]]
        return 1 if outputs[0] >= 0.5 else 0

    return predict
# ______________________________________________________________________________
def LinearLearner(dataset, learning_rate=0.01, epochs=100):
    """Define with learner = LinearLearner(data); infer with learner(x).

    Batch gradient descent for linear regression; returns a
    predict(example) -> float function using the learned weights.
    """
    idx_i = dataset.inputs
    idx_t = dataset.target  # As of now, dataset.target gives only one index.
    examples = dataset.examples
    num_examples = len(examples)

    # X transpose
    # NOTE(review): dataset.values[i] is indexed here as a column of X —
    # verify against the DataSet class that values holds per-example columns
    # rather than the set of possible attribute values.
    X_col = [dataset.values[i] for i in idx_i]  # vertical columns of X

    # Add dummy
    ones = [1 for _ in range(len(examples))]
    X_col = [ones] + X_col

    # Initialize random weights
    w = [random.uniform(-0.5, 0.5) for _ in range(len(idx_i) + 1)]

    for epoch in range(epochs):
        err = []
        # Pass over all examples
        for example in examples:
            # NOTE(review): x is the whole example (target column included)
            # while w has len(idx_i) + 1 entries; dotproduct's zip truncates,
            # which is only correct when the target is the last column.
            x = [1] + example
            y = dotproduct(w, x)
            t = example[idx_t]
            err.append(t - y)

        # update weights
        for i in range(len(w)):
            w[i] = w[i] + learning_rate * (dotproduct(err, X_col[i]) / num_examples)

    def predict(example):
        x = [1] + example
        return dotproduct(w, x)
    return predict
# ______________________________________________________________________________
def EnsembleLearner(learners):
    """Given a list of learning algorithms, have them vote."""
    def train(dataset):
        # Train every algorithm on the same dataset up front.
        trained = [algorithm(dataset) for algorithm in learners]

        def predict(example):
            # Unweighted majority vote over the individual predictions.
            votes = (hypothesis(example) for hypothesis in trained)
            return mode(votes)
        return predict
    return train
# ______________________________________________________________________________
def AdaBoost(L, K):
    """[Figure 18.34]

    Boost the weighted learner L over K rounds; returns a trainer that
    produces a weighted-majority ensemble of the K hypotheses.
    """
    def train(dataset):
        examples, target = dataset.examples, dataset.target
        N = len(examples)
        # epsilon clamps the error strictly inside (0, 1) below.
        epsilon = 1. / (2 * N)
        # Start from uniform example weights.
        w = [1. / N] * N
        h, z = [], []
        for k in range(K):
            h_k = L(dataset, w)
            h.append(h_k)
            # Weighted error: total weight of misclassified examples.
            error = sum(weight for example, weight in zip(examples, w)
                        if example[target] != h_k(example))
            # Avoid divide-by-0 from either 0% or 100% error rates:
            error = clip(error, epsilon, 1 - epsilon)
            for j, example in enumerate(examples):
                # Down-weight the examples this hypothesis got right.
                if example[target] == h_k(example):
                    w[j] *= error / (1. - error)
            w = normalize(w)
            # Hypothesis weight: log-odds of being correct.
            z.append(math.log((1. - error) / error))
        return WeightedMajority(h, z)
    return train
def WeightedMajority(predictors, weights):
    """Return a predictor that takes a weighted vote."""
    def predict(example):
        votes = (predictor(example) for predictor in predictors)
        return weighted_mode(votes, weights)
    return predict
def weighted_mode(values, weights):
    """Return the value with the greatest total weight.
    >>> weighted_mode('abbaa', [1,2,3,1,2])
    'b'
    """
    tally = {}
    for value, weight in zip(values, weights):
        tally[value] = tally.get(value, 0) + weight
    # Ties break toward the value encountered first (insertion order).
    return max(tally, key=tally.get)
# _____________________________________________________________________________
# Adapting an unweighted learner for AdaBoost
def WeightedLearner(unweighted_learner):
    """Given a learner that takes just an unweighted dataset, return
    one that takes also a weight for each example. [p. 749 footnote 14]"""
    def train(dataset, weights):
        # Weighting is simulated by replicating examples per their weight.
        resampled = replicated_dataset(dataset, weights)
        return unweighted_learner(resampled)
    return train
def replicated_dataset(dataset, weights, n=None):
    """Copy dataset, replicating each example in proportion to its weight."""
    n = n or len(dataset.examples)  # default: keep the original size
    clone = copy.copy(dataset)
    clone.examples = weighted_replicate(dataset.examples, weights, n)
    return clone
def weighted_replicate(seq, weights, n):
    """Return n selections from seq, with the count of each element of
    seq proportional to the corresponding weight (filling in fractions
    randomly).
    >>> weighted_replicate('ABC', [1,2,1], 4)
    ['A', 'B', 'B', 'C']
    """
    assert len(seq) == len(weights)
    weights = normalize(weights)
    # Whole-number share of the n slots for each element ...
    wholes = [int(w * n) for w in weights]
    # ... and the leftover fractional shares, filled by weighted sampling.
    fractions = [(w * n) % 1 for w in weights]
    guaranteed = flatten([x] * nx for x, nx in zip(seq, wholes))
    extras = weighted_sample_with_replacement(n - sum(wholes), seq, fractions)
    return guaranteed + extras
def flatten(seqs): return sum(seqs, [])
# _____________________________________________________________________________
# Functions for testing learners on examples
def test(predict, dataset, examples=None, verbose=0):
    """Return the proportion of the examples that are NOT correctly predicted."""
    if examples is None:
        examples = dataset.examples
    if len(examples) == 0:
        return 0.0
    correct = 0.0
    for example in examples:
        desired = example[dataset.target]
        output = predict(dataset.sanitize(example))
        if output == desired:
            correct += 1
            if verbose >= 2:
                print(' OK: got {} for {}'.format(desired, example))
        elif verbose:
            print('WRONG: got {}, expected {} for {}'.format(
                output, desired, example))
    # Error rate = 1 - accuracy.
    return 1 - (correct / len(examples))
def train_and_test(dataset, start, end):
    """Reserve dataset.examples[start:end] for test; train on the remainder."""
    start, end = int(start), int(end)
    all_examples = dataset.examples
    training = all_examples[:start] + all_examples[end:]
    validation = all_examples[start:end]
    return training, validation
def cross_validation(learner, size, dataset, k=10, trials=1):
    """Do k-fold cross_validate and return their mean.
    That is, keep out 1/k of the examples for testing on each of k runs.
    Shuffle the examples first; if trials>1, average over several shuffles.
    Returns Training error, Validation error.

    Bug fix: the recursive per-trial call previously hard-coded k=10,
    silently discarding the caller's k (including the k=None
    leave-one-out case resolved above).
    """
    if k is None:
        # Leave-one-out: one fold per example.
        k = len(dataset.examples)
    if trials > 1:
        trial_errT = 0
        trial_errV = 0
        for t in range(trials):
            # Propagate the caller's k instead of a hard-coded 10.
            errT, errV = cross_validation(learner, size, dataset,
                                          k=k, trials=1)
            trial_errT += errT
            trial_errV += errV
        return trial_errT / trials, trial_errV / trials
    else:
        fold_errT = 0
        fold_errV = 0
        n = len(dataset.examples)
        examples = dataset.examples
        for fold in range(k):
            random.shuffle(dataset.examples)
            # train_and_test int()s the fold boundaries.
            train_data, val_data = train_and_test(dataset, fold * (n / k),
                                                  (fold + 1) * (n / k))
            dataset.examples = train_data
            h = learner(dataset, size)
            fold_errT += test(h, dataset, train_data)
            fold_errV += test(h, dataset, val_data)
            # Reverting back to original once test is completed
            dataset.examples = examples
        return fold_errT / k, fold_errV / k
def cross_validation_wrapper(learner, dataset, k=10, trials=1):
    """[Fig 18.8]
    Return the optimal value of size having minimum error
    on validation set.
    err_train: A training error array, indexed by size
    err_val: A validation error array, indexed by size
    """
    err_val = []
    err_train = []
    size = 1
    while True:
        errT, errV = cross_validation(learner, size, dataset, k)
        # Check for convergence provided err_val is not empty
        # NOTE(review): `isclose` presumably comes from math.isclose via a
        # module-level import not visible here — verify it is imported.
        # NOTE(review): on convergence this returns the *current* size, not
        # the size with minimum validation error as the docstring states,
        # and if errors never stabilise the loop does not terminate.
        if (err_val and isclose(err_val[-1], errV, rel_tol=1e-6)):
            best_size = size
            return learner(dataset, best_size)
        err_val.append(errV)
        err_train.append(errT)
        print(err_val)  # NOTE(review): looks like leftover debug output
        size += 1
def leave_one_out(learner, dataset, size=None):
    """Leave one out cross-validation over the dataset."""
    # One fold per example is exactly leave-one-out.
    n_examples = len(dataset.examples)
    return cross_validation(learner, size, dataset, k=n_examples)
def learningcurve(learner, dataset, trials=10, sizes=None):
    # Estimate mean score as a function of training-set size.
    if sizes is None:
        sizes = list(range(2, len(dataset.examples) - 10, 2))

    def score(learner, size):
        random.shuffle(dataset.examples)
        # NOTE(review): train_and_test takes (dataset, start, end); calling
        # it with (learner, dataset, 0, size) passes four arguments and
        # would raise TypeError. This looks broken as written — verify the
        # intended call before relying on this function.
        return train_and_test(learner, dataset, 0, size)
    return [(size, mean([score(learner, size) for t in range(trials)]))
            for size in sizes]
# ______________________________________________________________________________
# The rest of this file gives datasets for machine learning problems.
# Ready-made DataSet instances loaded from the corresponding data files.
orings = DataSet(name='orings', target='Distressed',
                 attrnames="Rings Distressed Temp Pressure Flightnum")

zoo = DataSet(name='zoo', target='type', exclude=['name'],
              attrnames="name hair feathers eggs milk airborne aquatic " +
              "predator toothed backbone breathes venomous fins legs tail " +
              "domestic catsize type")

iris = DataSet(name="iris", target="class",
               attrnames="sepal-len sepal-width petal-len petal-width class")
# ______________________________________________________________________________
# The Restaurant example from [Figure 18.2]
def RestaurantDataSet(examples=None):
    """Build a DataSet of Restaurant waiting examples. [Figure 18.3]"""
    attribute_names = ('Alternate Bar Fri/Sat Hungry Patrons Price '
                       'Raining Reservation Type WaitEstimate Wait')
    return DataSet(name='restaurant', target='Wait', examples=examples,
                   attrnames=attribute_names)
# Module-level instance shared by T() and the synthetic generator below.
restaurant = RestaurantDataSet()


def T(attrname, branches):
    # Helper for writing decision trees by hand: wraps raw branch values in
    # DecisionLeaf and keys the fork on the attribute's column number.
    branches = {value: (child if isinstance(child, DecisionFork)
                        else DecisionLeaf(child))
                for value, child in branches.items()}
    return DecisionFork(restaurant.attrnum(attrname), attrname, branches)
""" [Figure 18.2]
A decision tree for deciding whether to wait for a table at a hotel.
"""

# Hand-built reference tree over the restaurant attributes; used below as
# the ground truth when generating SyntheticRestaurant examples.
waiting_decision_tree = T('Patrons',
                          {'None': 'No', 'Some': 'Yes',
                           'Full': T('WaitEstimate',
                                     {'>60': 'No', '0-10': 'Yes',
                                      '30-60': T('Alternate',
                                                 {'No': T('Reservation',
                                                          {'Yes': 'Yes',
                                                           'No': T('Bar', {'No': 'No',
                                                                           'Yes': 'Yes'})}),
                                                  'Yes': T('Fri/Sat', {'No': 'No', 'Yes': 'Yes'})}
                                                 ),
                                      '10-30': T('Hungry',
                                                 {'No': 'Yes',
                                                  'Yes': T('Alternate',
                                                           {'No': 'Yes',
                                                            'Yes': T('Raining',
                                                                     {'No': 'No',
                                                                      'Yes': 'Yes'})})})})})
def SyntheticRestaurant(n=20):
    """Generate a DataSet with n examples."""
    def gen():
        # Draw each attribute uniformly, then label with the reference tree.
        example = [random.choice(values) for values in restaurant.values]
        example[restaurant.target] = waiting_decision_tree(example)
        return example
    return RestaurantDataSet([gen() for _ in range(n)])
# ______________________________________________________________________________
# Artificial, generated datasets.
def Majority(k, n):
    """Return a DataSet with n k-bit examples of the majority problem:
    k random bits followed by a 1 if more than half the bits are 1, else 0."""
    examples = []
    for _ in range(n):
        bits = [random.choice([0, 1]) for _ in range(k)]
        label = int(sum(bits) > k / 2)
        examples.append(bits + [label])
    return DataSet(name="majority", examples=examples)
def Parity(k, n, name="parity"):
    """Return a DataSet with n k-bit examples of the parity problem:
    k random bits followed by a 1 if an odd number of bits are 1, else 0."""
    examples = []
    for _ in range(n):
        bits = [random.choice([0, 1]) for _ in range(k)]
        examples.append(bits + [sum(bits) % 2])
    return DataSet(name=name, examples=examples)
def Xor(n):
    """Return a DataSet with n examples of 2-input xor."""
    # xor is exactly 2-bit parity.
    return Parity(2, n, name="xor")
def ContinuousXor(n):
    """2 inputs are chosen uniformly from (0.0 .. 2.0]; output is xor of ints."""
    examples = []
    for _ in range(n):
        x = random.uniform(0.0, 2.0)
        y = random.uniform(0.0, 2.0)
        examples.append([x, y, int(x) != int(y)])
    return DataSet(name="continuous xor", examples=examples)
# ______________________________________________________________________________
def compare(algorithms=[PluralityLearner, NaiveBayesLearner,
                        NearestNeighborLearner, DecisionTreeLearner],
            datasets=[iris, orings, zoo, restaurant, SyntheticRestaurant(20),
                      Majority(7, 100), Parity(7, 100), Xor(100)],
            k=10, trials=1):
    """Compare various learners on various datasets using cross-validation.
    Print results as a table."""
    # NOTE(review): cross_validation's signature in this file is
    # (learner, size, dataset, k, trials); this call passes the dataset in
    # the `size` slot — confirm against the intended cross_validation API.
    print_table([[a.__name__.replace('Learner', '')] +
                 [cross_validation(a, d, k, trials) for d in datasets]
                 for a in algorithms],
                header=[''] + [d.name[0:7] for d in datasets], numfmt='%.2f')
|
sofmonk/aima-python
|
learning.py
|
Python
|
mit
| 37,159
|
[
"NEURON"
] |
04917ad67b21ab38a421c4e4ff60736d397970a7b204262c4f08608d6b84402a
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Rsem(MakefilePackage):
    """RSEM is a software package for estimating gene and isoform expression
    levels from RNA-Seq data."""

    homepage = "http://deweylab.github.io/RSEM/"
    url = "https://github.com/deweylab/RSEM/archive/v1.3.0.tar.gz"

    version('1.3.0', '273fd755e23d349cc38a079b81bb03b6')

    # Interpreters RSEM's scripts need both to build and at run time.
    depends_on('r', type=('build', 'run'))
    depends_on('perl', type=('build', 'run'))
    depends_on('python', type=('build', 'run'))
    # Aligners RSEM can drive.
    depends_on('bowtie')
    depends_on('bowtie2')
    depends_on('star')

    def install(self, spec, prefix):
        # Route the install into Spack's prefix: DESTDIR supplies the root
        # and the Makefile's own `prefix` variable is blanked out.
        make('install', 'DESTDIR=%s' % prefix, 'prefix=')
|
krafczyk/spack
|
var/spack/repos/builtin/packages/rsem/package.py
|
Python
|
lgpl-2.1
| 1,887
|
[
"Bowtie"
] |
b397566980b89f6637356ab7ecf47b5f71b6c134545feba96ef38a7c317bcead
|
"""Cronjob like scheduling abstraction in Python.
This module contains the functionality used by the Kodi Alarm clock
addon and is largely based on a Stackoverflow answer by
Brian on http://stackoverflow.com/questions/
373335/suggestions-for-a-cron-like-scheduler-in-python/374207#374207"""
from datetime import datetime, timedelta
import time
class CronTab(object):
    """Simulates basic cron functionality by checking for firing jobs every
    minute."""

    def __init__(self, xbmc):
        # xbmc: Kodi interface used for logging, sleeping and abort checks;
        # may be None (plain time.sleep is used then).
        self.xbmc = xbmc
        self.jobs = []
        self.__enabled = True

    def stop(self):
        """Stops the crontab."""
        self.__enabled = False

    def start(self):
        """Starts to check every minute, if the registered jobs should run."""
        # Truncate "now" to whole minutes; this acts as the virtual clock.
        cron_time_tuple = datetime(*datetime.now().timetuple()[:5])
        while self.__enabled:
            if self.xbmc and not self.xbmc.abortRequested:
                for job in self.jobs:
                    self.xbmc.log("checking job %s against %s" %
                                  (str(job), str(cron_time_tuple)),
                                  self.xbmc.LOGDEBUG)
                    job.check(cron_time_tuple)
            # Advance the virtual clock one minute, then sleep until real
            # time catches up with it.
            cron_time_tuple += timedelta(minutes=1)
            if datetime.now() < cron_time_tuple:
                if self.xbmc:
                    # xbmc.sleep takes milliseconds.
                    self.xbmc.sleep((cron_time_tuple -
                                     datetime.now()).seconds * 1000)
                else:
                    time.sleep((cron_time_tuple - datetime.now()).seconds)
class AllMatch(set):
    """Universal set - match everything."""

    def __contains__(self, _item):
        # Membership is unconditional; stored contents are irrelevant.
        return True
class Job(object):
    # pylint: disable=too-many-instance-attributes,too-many-arguments
    """Cron job abstraction.

    Holds the schedule (minute/hour/day/month/day-of-week sets) plus the
    callable to run and its arguments.
    """

    @staticmethod
    def conv_to_set(obj):
        """Convert obj to a set containing obj if necessary."""
        # Bug fix: the unguarded reference to `long` raised NameError on
        # Python 3, where int covers all integer widths.
        try:
            integer_types = (int, long)  # noqa: F821  (Python 2)
        except NameError:
            integer_types = (int,)  # Python 3
        if isinstance(obj, integer_types):
            return set([obj])
        if not isinstance(obj, set):
            obj = set(obj)
        return obj

    def __init__(self, action, minute=AllMatch(), hour=AllMatch(),
                 day=AllMatch(), month=AllMatch(), dow=AllMatch(),
                 args=(), kwargs=None):
        # Each schedule field is normalized to a set of allowed values.
        self.mins = Job.conv_to_set(minute)
        self.hours = Job.conv_to_set(hour)
        self.days = Job.conv_to_set(day)
        self.months = Job.conv_to_set(month)
        self.dow = Job.conv_to_set(dow)
        self.action = action
        self.args = args
        if kwargs is None:
            kwargs = {}
        self.kwargs = kwargs

    def __str__(self):
        return str(self.mins) + ", " + str(self.hours) + ", "\
            + str(self.days) + ", " + str(self.months) + ", "\
            + str(self.dow) + ", " + str(self.action) + ", "\
            + str(self.args) + ", " + str(self.kwargs)

    def is_matchtime(self, cron_time_tuple):
        """Is it the job's scheduled time"""
        return ((cron_time_tuple.minute in self.mins) and
                (cron_time_tuple.hour in self.hours) and
                (cron_time_tuple.day in self.days) and
                (cron_time_tuple.month in self.months) and
                (cron_time_tuple.weekday() in self.dow))

    def check(self, cron_time_tuple):
        """Checks if it is the scheduled time and executes the job if so."""
        if self.is_matchtime(cron_time_tuple):
            self.action(*self.args, **self.kwargs)
|
remigius42/script.service.alarmclock
|
resources/lib/cronjobs.py
|
Python
|
gpl-3.0
| 3,455
|
[
"Brian"
] |
ff4bdbd6f732c5cffd44388a1e50092eb44e6b4f51ed4e5c6d2f82ef05d4ba60
|
from __future__ import print_function
import traceback
import sys
import threading
from contextlib import contextmanager
from stat import S_IXOTH
from os import pardir, stat, chmod, access, X_OK, pathsep, environ
from os import makedirs, listdir
from os.path import join, dirname, isfile, split
from os.path import exists
from tempfile import mkdtemp
from shutil import rmtree
import time
from sys import version_info
import webob
from webtest import TestApp
from webtest.http import StopableWSGIServer
import galaxy.util
from galaxy.util.bunch import Bunch
from galaxy.jobs.metrics import NULL_JOB_INSTRUMENTER
from pulsar.tools import ToolBox
from pulsar.managers.base import JobDirectory
from pulsar.web.framework import file_response
if version_info < (2, 7):
from unittest2 import TestCase, skip
else:
from unittest import TestCase, skip
try:
from nose.tools import nottest
except ImportError:
def nottest(x):
return x
TEST_DIR = dirname(__file__)
ROOT_DIR = join(TEST_DIR, pardir)
class TempDirectoryTestCase(TestCase):
    """TestCase base giving each test a private temp dir in self.temp_directory."""

    def setUp(self):
        self.temp_directory = mkdtemp()

    def tearDown(self):
        rmtree(self.temp_directory)
def get_test_toolbox():
    """Load the ToolBox defined by test_data/test_shed_toolbox.xml."""
    test_data_dir = join(dirname(__file__), pardir, "test_data")
    return ToolBox(join(test_data_dir, "test_shed_toolbox.xml"))
def get_test_tool():
    """Return tool1 from the test toolbox."""
    toolbox = get_test_toolbox()
    return toolbox.get_tool("tool1")
class TestManager(object):
    """Minimal manager double exposing a single shared JobDirectory."""

    def setup_temp_directory(self):
        self.temp_directory = mkdtemp()
        self.__job_directory = JobDirectory(self.temp_directory, '1')

    def cleanup_temp_directory(self):
        rmtree(self.temp_directory)

    def job_directory(self, job_id):
        # Same directory regardless of job_id.
        return self.__job_directory
@contextmanager
def test_job_directory():
    # Yield a JobDirectory rooted in a throwaway temp dir.
    with temp_directory() as directory:
        yield JobDirectory(directory, '1')
@contextmanager
def temp_directory():
    """Yield a fresh scratch directory, removed again on exit."""
    scratch = mkdtemp()
    try:
        yield scratch
    finally:
        rmtree(scratch)
@contextmanager
def test_manager():
    """Yield a TestManager with a temp job directory, cleaned up on exit.

    Bug fix: cleanup now runs in a finally block, so the temp directory is
    removed even when the with-body raises (previously it leaked).
    """
    manager = TestManager()
    manager.setup_temp_directory()
    try:
        yield manager
    finally:
        manager.cleanup_temp_directory()
class TestAuthorization(object):
    """Toggleable authorizer test double; each allow_* flag gates one check."""

    def __init__(self):
        self.allow_setup = True
        self.allow_tool_file = True
        self.allow_execution = True
        self.allow_config = True

    def _require(self, allowed):
        # Shared check: a cleared flag means the operation is denied.
        if not allowed:
            raise Exception

    def authorize_setup(self):
        self._require(self.allow_setup)

    def authorize_tool_file(self, name, contents):
        self._require(self.allow_tool_file)

    def authorize_execution(self, job_directory, command_line):
        self._require(self.allow_execution)

    def authorize_config_file(self, job_directory, name, path):
        self._require(self.allow_config)
class TestDependencyManager(object):
    """Dependency-manager stub that never injects shell commands."""

    def dependency_shell_commands(self, requirements, **kwds):
        # The stub ignores its inputs entirely.
        return list()
class BaseManagerTestCase(TestCase):
    """Shared fixture and assertions for manager implementations under test."""

    def setUp(self):
        self.app = minimal_app_for_managers()
        self.staging_directory = self.app.staging_directory
        self.authorizer = self.app.authorizer

    def tearDown(self):
        rmtree(self.staging_directory)

    @nottest
    def _test_simple_execution(self, manager):
        # Launch a trivial command and verify stdout/stderr/return code.
        command = """python -c "import sys; sys.stdout.write(\'Hello World!\'); sys.stderr.write(\'moo\')" """
        job_id = manager.setup_job("123", "tool1", "1.0.0")
        manager.launch(job_id, command)
        while manager.get_status(job_id) not in ['complete', 'cancelled']:
            pass
        # Bug fix: assertEquals is a deprecated alias removed in
        # Python 3.12; use assertEqual.
        self.assertEqual(manager.stderr_contents(job_id), b'moo')
        self.assertEqual(manager.stdout_contents(job_id), b'Hello World!')
        self.assertEqual(manager.return_code(job_id), 0)
        manager.clean(job_id)
        self.assertEqual(len(listdir(self.staging_directory)), 0)

    def _test_cancelling(self, manager):
        job_id = manager.setup_job("124", "tool1", "1.0.0")
        command = self._python_to_command("import time; time.sleep(1000)")
        manager.launch(job_id, command)
        time.sleep(0.05)
        manager.kill(job_id)
        manager.kill(job_id)  # Make sure kill doesn't choke if pid doesn't exist
        self._assert_status_becomes_cancelled(job_id, manager)
        manager.clean(job_id)

    def _python_to_command(self, code, quote='"'):
        assert '"' not in code
        return 'python -c "%s"' % "; ".join(code.split("\n"))

    def _assert_status_becomes_cancelled(self, job_id, manager):
        # Poll for up to ~1 second for the cancelled status.
        i = 0
        while True:
            i += 1
            status = manager.get_status(job_id)
            if status in ["complete", "failed"]:
                raise AssertionError("Expected cancelled status but got %s." % status)
            elif status == "cancelled":
                break
            time.sleep(0.01)
            if i > 100:  # Wait one second
                raise AssertionError("Job failed to cancel quickly.")
def minimal_app_for_managers():
    """ Minimal app description for consumption by managers.
    """
    # mkdtemp followed by rmtree reserves a unique path without leaving the
    # directory in place — presumably so the manager under test creates it
    # fresh itself; verify against the manager implementations.
    staging_directory = mkdtemp()
    rmtree(staging_directory)
    authorizer = TestAuthorizer()
    return Bunch(staging_directory=staging_directory,
                 authorizer=authorizer,
                 job_metrics=NullJobMetrics(),
                 dependency_manager=TestDependencyManager())
class NullJobMetrics(object):
    # Job-metrics stand-in whose default instrumenter records nothing.
    def __init__(self):
        self.default_job_instrumenter = NULL_JOB_INSTRUMENTER
@nottest
@contextmanager
def server_for_test_app(app):
    """Serve ``app`` over a real HTTP port for the duration of the context."""
    try:
        # Wrap with paste's error middleware when available.
        from paste.exceptions.errormiddleware import ErrorMiddleware
        error_app = ErrorMiddleware(app.app, debug=True, error_log="errors.log")
        server = StopableWSGIServer.create(error_app)
    except ImportError:
        # paste.exceptions not available for Python 3.
        error_app = app.app
        server = StopableWSGIServer.create(error_app)
    try:
        server.wait()
        yield server
    finally:
        server.shutdown()
        # There seem to be persistent transient problems with the testing, sleeping
        # between creation of test app instances for greater than .5 seconds seems
        # to help (async loop length in code is .5 so this maybe makes some sense?)
        if "TEST_WEBAPP_POST_SHUTDOWN_SLEEP" in environ:
            time.sleep(int(environ.get("TEST_WEBAPP_POST_SHUTDOWN_SLEEP")))
@nottest
@contextmanager
def test_pulsar_server(global_conf={}, app_conf={}, test_conf={}):
    """Yield a live HTTP server wrapping a freshly configured Pulsar app."""
    with test_pulsar_app(global_conf, app_conf, test_conf) as pulsar_app:
        with server_for_test_app(pulsar_app) as running_server:
            yield running_server
class RestartablePulsarAppProvider(object):
    """Creates Pulsar apps against one persistent staging directory,
    letting tests simulate an app restart via repeated new_app() calls."""

    def __init__(self, global_conf={}, app_conf={}, test_conf={}, web=True):
        # NOTE(review): the {} defaults are shared between calls; safe only
        # as long as they are never mutated downstream — confirm.
        self.staging_directory = mkdtemp()
        self.global_conf = global_conf
        self.app_conf = app_conf
        self.test_conf = test_conf
        self.web = web

    @contextmanager
    def new_app(self):
        # Reuse the provider's staging directory so state survives "restarts".
        with test_pulsar_app(
            self.global_conf,
            self.app_conf,
            self.test_conf,
            staging_directory=self.staging_directory,
            web=self.web,
        ) as app:
            yield app

    def cleanup(self):
        # Best effort; the directory may already be gone.
        try:
            rmtree(self.staging_directory)
        except Exception:
            pass
@contextmanager
def restartable_pulsar_app_provider(**kwds):
    """Yield a RestartablePulsarAppProvider, cleaning it up on exit.

    Bug fix: construction now happens before the try block. Previously a
    failing constructor left ``has_app`` unbound, so the finally clause
    raised NameError and masked the real error.
    """
    has_app = RestartablePulsarAppProvider(**kwds)
    try:
        yield has_app
    finally:
        has_app.cleanup()
@nottest
@contextmanager
def test_pulsar_app(
    global_conf={},
    app_conf={},
    test_conf={},
    staging_directory=None,
    web=True,
):
    """Yield a configured Pulsar (web)app with temp staging/cache dirs.

    Bug fix: ``app_conf`` is copied before modification. The original
    mutated the caller's dict — and, worse, the shared ``{}`` default
    argument — so configuration leaked between calls.
    """
    clean_staging_directory = False
    if staging_directory is None:
        staging_directory = mkdtemp()
        clean_staging_directory = True
    # Make staging directory world executable for run as user tests.
    mode = stat(staging_directory).st_mode
    chmod(staging_directory, mode | S_IXOTH)
    cache_directory = mkdtemp()
    app_conf = dict(app_conf)  # defensive copy; see docstring
    app_conf["staging_directory"] = staging_directory
    app_conf["file_cache_dir"] = cache_directory
    app_conf["ensure_cleanup"] = True
    try:
        with _yield_app(global_conf, app_conf, test_conf, web) as app:
            yield app
    finally:
        # Remove the directories we created ourselves, best effort.
        to_clean = [cache_directory]
        if clean_staging_directory:
            to_clean.append(staging_directory)
        for directory in to_clean:
            try:
                rmtree(directory)
            except Exception:
                pass
@contextmanager
def _yield_app(global_conf, app_conf, test_conf, web):
    # Yield either wsgi webapp of the underlying pulsar
    # app object if the web layer is not needed.
    try:
        if web:
            from pulsar.web.wsgi import app_factory
            app = app_factory(global_conf, **app_conf)
            yield TestApp(app, **test_conf)
        else:
            from pulsar.main import load_app_configuration
            from pulsar.core import PulsarApp
            app_conf = load_app_configuration(local_conf=app_conf)
            app = PulsarApp(**app_conf)
            yield app
    finally:
        # Best-effort shutdown; non-web apps get a timeout argument.
        # NOTE(review): if construction failed before ``app`` was bound,
        # this raises NameError, silently swallowed by the except below.
        try:
            shutdown_args = []
            if not web:
                shutdown_args.append(2)
            app.shutdown(*shutdown_args)
        except Exception:
            pass
def skip_unless_environ(var):
    """Skip-decorator factory: no-op when env var ``var`` is set, else skip."""
    if var not in environ:
        return skip("Environment variable %s not found, dependent test skipped." % var)
    return lambda func: func
def skip_unless_executable(executable):
    """Skip-decorator factory: no-op when ``executable`` is on PATH."""
    if not _which(executable):
        return skip("PATH doesn't contain executable %s" % executable)
    return lambda func: func
def skip_unless_module(module):
    """Skip-decorator factory: no-op when ``module`` imports cleanly."""
    try:
        __import__(module)
    except ImportError:
        return skip("Module %s could not be loaded, dependent test skipped." % module)
    return lambda func: func
def skip_unless_any_module(modules):
    """Skip-decorator factory: no-op when any of ``modules`` imports."""
    importable = []
    for module in modules:
        try:
            __import__(module)
        except ImportError:
            continue
        importable.append(module)
    if importable:
        return lambda func: func
    return skip("None of the modules %s could be loaded, dependent test skipped." % modules)
def _which(program):
def is_exe(fpath):
return isfile(fpath) and access(fpath, X_OK)
fpath, fname = split(program)
if fpath:
if is_exe(program):
return program
else:
for path in environ["PATH"].split(pathsep):
path = path.strip('"')
exe_file = join(path, program)
if is_exe(exe_file):
return exe_file
return None
class TestAuthorizer(object):
    """Authorizer double returning one shared TestAuthorization object."""

    def __init__(self):
        self.authorization = TestAuthorization()

    def get_authorization(self, tool_id):
        # Same (mutable) authorization regardless of tool_id, so tests can
        # flip its allow_* flags and affect every tool at once.
        return self.authorization
class JobFilesApp(object):
    """Tiny WSGI app mimicking a job-files endpoint for tests.

    GET ?path=... streams the file back; POST ?path=... stores the
    uploaded ``file`` field at path. Paths must stay inside root_directory.
    """

    def __init__(self, root_directory=None):
        self.root_directory = root_directory

    def __call__(self, environ, start_response):
        req = webob.Request(environ)
        params = req.params.mixed()
        method = req.method
        if method == "POST":
            resp = self._post(req, params)
        elif method == "GET":
            resp = self._get(req, params)
        else:
            raise Exception("Unhandled request method %s" % method)
        return resp(environ, start_response)

    def _post(self, request, params):
        path = params['path']
        # NOTE(review): asserts are stripped under `python -O`; acceptable
        # for a test double but not a real path-traversal guard.
        if not galaxy.util.in_directory(path, self.root_directory):
            assert False, "%s not in %s" % (path, self.root_directory)
        parent_directory = dirname(path)
        if not exists(parent_directory):
            makedirs(parent_directory)
        galaxy.util.copy_to_path(params["file"].file, path)
        return webob.Response(body='')

    def _get(self, request, params):
        path = params['path']
        if not galaxy.util.in_directory(path, self.root_directory):
            assert False, "%s not in %s" % (path, self.root_directory)
        return file_response(path)
@contextmanager
def files_server(directory=None):
    # NOTE(review): asymmetric contract — with no directory argument this
    # yields (server, directory); with one supplied it yields only the
    # server. Callers must match the form they used.
    if not directory:
        with temp_directory() as directory:
            app = TestApp(JobFilesApp(directory))
            with server_for_test_app(app) as server:
                yield server, directory
    else:
        app = TestApp(JobFilesApp(directory))
        with server_for_test_app(app) as server:
            yield server
def dump_other_threads():
    """Print the name and current stack of every thread but the caller's.

    Utility for debugging threads that aren't dying during tests.
    """
    main_thread = threading.current_thread()
    for t in threading.enumerate():
        if t is main_thread:
            continue
        # Thread.getName() is a deprecated alias (since Python 3.10);
        # use the name attribute directly.
        print(t.name)
        traceback.print_stack(sys._current_frames()[t.ident])
|
ssorgatem/pulsar
|
test/test_utils.py
|
Python
|
apache-2.0
| 12,876
|
[
"Galaxy"
] |
d8540ac419692ec35087b58a0b919b640c3e60d0ed7668a21db378a054e88bb8
|
import warnings
import hashlib
import io
import json
import jsonschema
import pandas as pd
from toolz.curried import pipe as _pipe
from .schema import core, channels, mixins, Undefined, SCHEMA_URL
from .data import data_transformers
from ... import utils, expr
from .display import renderers, VEGALITE_VERSION, VEGAEMBED_VERSION, VEGA_VERSION
from .theme import themes
# ------------------------------------------------------------------------
# Data Utilities
def _dataset_name(values):
    """Generate a unique hash of the data

    Parameters
    ----------
    values : list or dict
        A list/dict representation of data values.

    Returns
    -------
    name : string
        A unique name generated from the hash of the values.
    """
    if isinstance(values, core.InlineDataset):
        values = values.to_dict()
    if values == [{}]:
        return "empty"
    # Canonical JSON (sorted keys) so equal data hashes identically.
    serialized = json.dumps(values, sort_keys=True)
    digest = hashlib.md5(serialized.encode()).hexdigest()
    return "data-" + digest
def _consolidate_data(data, context):
    """If data is specified inline, then move it to context['datasets']

    This function will modify context in-place, and return a new version of data
    """
    values = Undefined
    kwds = {}

    if isinstance(data, core.InlineData):
        # Only unnamed inline data with actual values is consolidated.
        if data.name is Undefined and data.values is not Undefined:
            if isinstance(data.values, core.InlineDataset):
                values = data.to_dict()["values"]
            else:
                values = data.values
            kwds = {"format": data.format}
    elif isinstance(data, dict):
        if "name" not in data and "values" in data:
            values = data["values"]
            kwds = {k: v for k, v in data.items() if k != "values"}

    if values is not Undefined:
        # Store the values once under a content-derived name and replace
        # the inline data with a named reference to it.
        name = _dataset_name(values)
        data = core.NamedData(name=name, **kwds)
        context.setdefault("datasets", {})[name] = values

    return data
def _prepare_data(data, context=None):
    """Convert input data to data for use within schema

    Parameters
    ----------
    data :
        The input dataset in the form of a DataFrame, dictionary, altair data
        object, or other type that is recognized by the data transformers.
    context : dict (optional)
        The to_dict context in which the data is being prepared. This is used
        to keep track of information that needs to be passed up and down the
        recursive serialization routine, such as global named datasets.
    """
    if data is Undefined:
        return data

    # convert dataframes or objects with __geo_interface__ to dict
    if isinstance(data, pd.DataFrame) or hasattr(data, "__geo_interface__"):
        data = _pipe(data, data_transformers.get())

    # convert string input to a URLData
    if isinstance(data, str):
        data = core.UrlData(data)

    # consolidate inline data to top-level datasets
    if context is not None and data_transformers.consolidate_datasets:
        data = _consolidate_data(data, context)

    # if data is still not a recognized type, then return
    # (warn only — the unrecognized value is passed through unchanged).
    if not isinstance(data, (dict, core.Data)):
        warnings.warn("data of type {} not recognized".format(type(data)))

    return data
# ------------------------------------------------------------------------
# Aliases & specializations

# Public alias: Bin(...) constructs the schema's BinParams.
Bin = core.BinParams
@utils.use_signature(core.LookupData)
class LookupData(core.LookupData):
    def to_dict(self, *args, **kwargs):
        """Convert the chart to a dictionary suitable for JSON export."""
        copy = self.copy(deep=False)
        # Run the lookup's data through the standard preparation pipeline
        # (DataFrame conversion, URL wrapping, dataset consolidation).
        copy.data = _prepare_data(copy.data, kwargs.get("context"))
        return super(LookupData, copy).to_dict(*args, **kwargs)
@utils.use_signature(core.FacetMapping)
class FacetMapping(core.FacetMapping):
    _class_is_valid_at_instantiation = False

    def to_dict(self, *args, **kwargs):
        # Expand shorthand strings (e.g. "field:Q") into FacetFieldDefs
        # before delegating to the schema serializer.
        copy = self.copy(deep=False)
        context = kwargs.get("context", {})
        data = context.get("data", None)
        if isinstance(self.row, str):
            copy.row = core.FacetFieldDef(**utils.parse_shorthand(self.row, data))
        if isinstance(self.column, str):
            copy.column = core.FacetFieldDef(**utils.parse_shorthand(self.column, data))
        return super(FacetMapping, copy).to_dict(*args, **kwargs)
# ------------------------------------------------------------------------
# Encoding will contain channel objects that aren't valid at instantiation
core.FacetedEncoding._class_is_valid_at_instantiation = False

# ------------------------------------------------------------------------
# These are parameters that are valid at the top level, but are not valid
# for specs that are within a composite chart
# (layer, hconcat, vconcat, facet, repeat)
TOPLEVEL_ONLY_KEYS = {"background", "config", "autosize", "padding", "$schema"}
def _get_channels_mapping():
    """Map channel classes to their lowercase encoding-channel names."""
    mapping = {}
    for attr_name in dir(channels):
        obj = getattr(channels, attr_name)
        is_channel = isinstance(obj, type) and issubclass(obj, core.SchemaBase)
        if is_channel:
            # XValue -> "x", ColorValue -> "color", etc.
            mapping[obj] = attr_name.replace("Value", "").lower()
    return mapping
# -------------------------------------------------------------------------
# Tools for working with selections
class Selection(object):
    """A Selection object"""

    # Counter used to auto-generate unique selection names.
    _counter = 0

    @classmethod
    def _get_name(cls):
        cls._counter += 1
        return "selector{:03d}".format(cls._counter)

    def __init__(self, name, selection):
        if name is None:
            name = self._get_name()
        self.name = name
        self.selection = selection

    def __repr__(self):
        return "Selection({0!r}, {1})".format(self.name, self.selection)

    def ref(self):
        # Reference form usable inside conditions/filter expressions.
        return self.to_dict()

    def to_dict(self):
        # name may itself be a composed selection object (see operators
        # below), in which case its to_dict form is used.
        return {
            "selection": self.name.to_dict()
            if hasattr(self.name, "to_dict")
            else self.name
        }

    def __invert__(self):
        # ~sel : logical negation of the selection.
        return Selection(core.SelectionNot(**{"not": self.name}), self.selection)

    def __and__(self, other):
        # sel1 & sel2 : intersection of two selections.
        if isinstance(other, Selection):
            other = other.name
        return Selection(
            core.SelectionAnd(**{"and": [self.name, other]}), self.selection
        )

    def __or__(self, other):
        # sel1 | sel2 : union of two selections.
        if isinstance(other, Selection):
            other = other.name
        return Selection(core.SelectionOr(**{"or": [self.name, other]}), self.selection)

    def __getattr__(self, field_name):
        # sel.field : expression referencing a field within the selection.
        # Dunder lookups must fail normally (copy/pickle rely on this).
        if field_name.startswith("__") and field_name.endswith("__"):
            raise AttributeError(field_name)
        return expr.core.GetAttrExpression(self.name, field_name)

    def __getitem__(self, field_name):
        return expr.core.GetItemExpression(self.name, field_name)
# ------------------------------------------------------------------------
# Top-Level Functions
def value(value, **kwargs):
    """Specify a constant value for use in an encoding."""
    return {"value": value, **kwargs}
def selection(name=None, type=Undefined, **kwds):
    """Create a named selection.

    Parameters
    ----------
    name : string (optional)
        The selection name; a unique name is generated when omitted.
    type : string
        One of ["interval", "single", "multi"].
    **kwds :
        Additional keywords forwarded to the underlying SelectionDef.

    Returns
    -------
    selection: Selection
        The selection object, usable in chart creation.
    """
    definition = core.SelectionDef(type=type, **kwds)
    return Selection(name, definition)
@utils.use_signature(core.IntervalSelection)
def selection_interval(**kwargs):
    """Create a selection with type='interval' (drag to select a region)."""
    return selection(type="interval", **kwargs)
@utils.use_signature(core.MultiSelection)
def selection_multi(**kwargs):
    """Create a selection with type='multi' (select multiple discrete values)."""
    return selection(type="multi", **kwargs)
@utils.use_signature(core.SingleSelection)
def selection_single(**kwargs):
    """Create a selection with type='single' (select one discrete value)."""
    return selection(type="single", **kwargs)
@utils.use_signature(core.Binding)
def binding(input, **kwargs):
    """A generic input-element binding for a selection."""
    return core.Binding(input=input, **kwargs)
@utils.use_signature(core.BindCheckbox)
def binding_checkbox(**kwargs):
    """A checkbox binding"""
    return core.BindCheckbox(input="checkbox", **kwargs)
@utils.use_signature(core.BindRadioSelect)
def binding_radio(**kwargs):
    """A radio button binding"""
    return core.BindRadioSelect(input="radio", **kwargs)
@utils.use_signature(core.BindRadioSelect)
def binding_select(**kwargs):
    """A select (drop-down) binding.

    Note: the schema models select and radio bindings with the same
    BindRadioSelect class, distinguished by the ``input`` value.
    """
    return core.BindRadioSelect(input="select", **kwargs)
@utils.use_signature(core.BindRange)
def binding_range(**kwargs):
    """A range (slider) binding"""
    return core.BindRange(input="range", **kwargs)
def condition(predicate, if_true, if_false, **kwargs):
    """A conditional attribute or encoding

    Parameters
    ----------
    predicate: Selection, PredicateComposition, expr.Expression, dict, or string
        the selection predicate or test predicate for the condition.
        if a string is passed, it will be treated as a test operand.
    if_true:
        the spec or object to use if the selection predicate is true
    if_false:
        the spec or object to use if the selection predicate is false
    **kwargs:
        additional keyword args are added to the resulting dict

    Returns
    -------
    spec: dict or VegaLiteSchema
        the spec that describes the condition
    """
    # Normalize the predicate into a condition dict.
    if isinstance(predicate, Selection):
        cond = {"selection": predicate.name}
    elif isinstance(predicate, core.SelectionComposition):
        cond = {"selection": predicate}
    elif isinstance(predicate, (str, expr.Expression, core.PredicateComposition)):
        cond = {"test": predicate}
    elif isinstance(predicate, dict):
        cond = predicate
    else:
        raise NotImplementedError(
            "condition predicate of type {}".format(type(predicate))
        )

    # Fold the if_true spec (plus extra kwargs) into the condition dict.
    if isinstance(if_true, core.SchemaBase):
        # convert to dict for now; the from_dict call below will wrap this
        # dict in the appropriate schema
        if_true = if_true.to_dict()
    elif isinstance(if_true, str):
        if_true = {"shorthand": if_true}
    if_true.update(kwargs)
    cond.update(if_true)

    # Attach the condition to the if_false spec.
    if isinstance(if_false, core.SchemaBase):
        # For the selection, the channel definitions all allow selections
        # already. So use this SchemaBase wrapper if possible.
        selection = if_false.copy()
        selection.condition = cond
    elif isinstance(if_false, str):
        selection = {"condition": cond, "shorthand": if_false}
        selection.update(kwargs)
    else:
        selection = dict(condition=cond, **if_false)

    return selection
# --------------------------------------------------------------------
# Top-level objects
class TopLevelMixin(mixins.ConfigMethodMixin):
"""Mixin for top-level chart objects such as Chart, LayeredChart, etc."""
_class_is_valid_at_instantiation = False
def to_dict(self, *args, **kwargs):
    """Convert the chart to a dictionary suitable for JSON export"""
    # We make use of three context markers:
    # - 'data' points to the data that should be referenced for column type
    # inference.
    # - 'top_level' is a boolean flag that is assumed to be true; if it's
    # true then a "$schema" arg is added to the dict.
    # - 'datasets' is a dict of named datasets that should be inserted
    # in the top-level object
    # note: not a deep copy because we want datasets and data arguments to
    # be passed by reference
    context = kwargs.get("context", {}).copy()
    context.setdefault("datasets", {})
    is_top_level = context.get("top_level", True)

    copy = self.copy(deep=False)
    original_data = getattr(copy, "data", Undefined)
    copy.data = _prepare_data(original_data, context)

    # Keep the raw data visible to nested to_dict calls for type inference.
    if original_data is not Undefined:
        context["data"] = original_data

    # remaining to_dict calls are not at top level
    context["top_level"] = False
    kwargs["context"] = context

    try:
        dct = super(TopLevelMixin, copy).to_dict(*args, **kwargs)
    except jsonschema.ValidationError:
        dct = None

    # If we hit an error, then re-convert with validate='deep' to get
    # a more useful traceback. We don't do this by default because it's
    # much slower in the case that there are no errors.
    if dct is None:
        kwargs["validate"] = "deep"
        dct = super(TopLevelMixin, copy).to_dict(*args, **kwargs)

    # TODO: following entries are added after validation. Should they be validated?
    if is_top_level:
        # since this is top-level we add $schema if it's missing
        if "$schema" not in dct:
            dct["$schema"] = SCHEMA_URL

        # apply theme from theme registry
        the_theme = themes.get()
        dct = utils.update_nested(the_theme(), dct, copy=True)

        # update datasets
        if context["datasets"]:
            dct.setdefault("datasets", {}).update(context["datasets"])

    return dct
def to_html(
    self,
    base_url="https://cdn.jsdelivr.net/npm/",
    output_div="vis",
    embed_options=None,
    json_kwds=None,
    fullhtml=True,
    requirejs=False,
):
    """Embed the chart's spec in an HTML document string.

    Parameters
    ----------
    base_url : string
        CDN base URL from which the vega/vega-lite/vega-embed scripts
        are loaded.
    output_div : string
        The id of the div element in which the chart is rendered.
    embed_options : dict, optional
        Options passed to vega-embed.
    json_kwds : dict, optional
        Keywords passed to ``json.dumps`` when serializing the spec.
    fullhtml : boolean
        If True, emit a complete HTML page; if False, only a fragment.
    requirejs : boolean
        If True, load scripts via RequireJS rather than script tags.

    Returns
    -------
    string
        The HTML document (or fragment) as a string.
    """
    return utils.spec_to_html(
        self.to_dict(),
        mode="vega-lite",
        vegalite_version=VEGALITE_VERSION,
        vegaembed_version=VEGAEMBED_VERSION,
        vega_version=VEGA_VERSION,
        base_url=base_url,
        output_div=output_div,
        embed_options=embed_options,
        json_kwds=json_kwds,
        fullhtml=fullhtml,
        requirejs=requirejs,
    )
def save(
    self,
    fp,
    format=None,
    override_data_transformer=True,
    scale_factor=1.0,
    vegalite_version=VEGALITE_VERSION,
    vega_version=VEGA_VERSION,
    vegaembed_version=VEGAEMBED_VERSION,
    **kwargs,
):
    """Save a chart to file in a variety of formats

    Supported formats are json, html, png, svg, pdf; the last three require
    the altair_saver package to be installed.

    Parameters
    ----------
    fp : string filename or file-like object
        file in which to write the chart.
    format : string (optional)
        the format to write: one of ['json', 'html', 'png', 'svg', 'pdf'].
        If not specified, the format will be determined from the filename.
    override_data_transformer : boolean (optional)
        If True (default), then the save action will be done with
        the MaxRowsError disabled. If False, then do not change the data
        transformer.
    scale_factor : float
        For svg or png formats, scale the image by this factor when saving.
        This can be used to control the size or resolution of the output.
        Default is 1.0
    **kwargs :
        Additional keyword arguments are passed to the output method
        associated with the specified format.
    """
    from ...utils.save import save

    kwds = dict(
        chart=self,
        fp=fp,
        format=format,
        scale_factor=scale_factor,
        vegalite_version=vegalite_version,
        vega_version=vega_version,
        vegaembed_version=vegaembed_version,
        **kwargs,
    )

    if not override_data_transformer:
        return save(**kwds)
    # By default disable the max-rows check so that save() succeeds even
    # for large datasets that would normally trigger a MaxRowsError.
    with data_transformers.disable_max_rows():
        return save(**kwds)
# Fallback for when rendering fails; the full repr is too long to be
# useful in nearly all cases.
def __repr__(self):
    """Short placeholder repr; the full spec repr would be unreadably long."""
    return "alt.{}(...)".format(type(self).__name__)
# Layering and stacking
def __add__(self, other):
    """Layer charts with the ``+`` operator."""
    if isinstance(other, TopLevelMixin):
        return layer(self, other)
    raise ValueError("Only Chart objects can be layered.")
def __and__(self, other):
    """Vertically concatenate charts with the ``&`` operator."""
    if isinstance(other, TopLevelMixin):
        return vconcat(self, other)
    raise ValueError("Only Chart objects can be concatenated.")
def __or__(self, other):
    """Horizontally concatenate charts with the ``|`` operator."""
    if isinstance(other, TopLevelMixin):
        return hconcat(self, other)
    raise ValueError("Only Chart objects can be concatenated.")
def repeat(
    self,
    repeat=Undefined,
    row=Undefined,
    column=Undefined,
    layer=Undefined,
    columns=Undefined,
    **kwargs,
):
    """Return a RepeatChart built from the chart

    Fields within the chart can be set to correspond to the row or
    column using `alt.repeat('row')` and `alt.repeat('column')`.

    Parameters
    ----------
    repeat : list
        data column names to be repeated; mutually exclusive with
        ``row``/``column`` and with ``layer``.
    row : list
        data column names mapped to the row facet.
    column : list
        data column names mapped to the column facet.
    layer : list
        data column names to be layered; mutually exclusive with
        ``row``/``column`` and with ``repeat``.
    columns : int
        maximum number of columns before wrapping; only used with ``repeat``.
    **kwargs :
        additional keywords passed to RepeatChart.

    Returns
    -------
    chart : RepeatChart
        a repeated chart.
    """
    has_repeat = repeat is not Undefined
    has_rowcol = row is not Undefined or column is not Undefined
    has_layer = layer is not Undefined

    # The three repeat styles are mutually exclusive.
    if has_repeat and has_rowcol:
        raise ValueError(
            "repeat argument cannot be combined with row/column argument."
        )
    if has_repeat and has_layer:
        raise ValueError("repeat argument cannot be combined with layer argument.")
    if has_layer and has_rowcol:
        raise ValueError(
            "layer argument cannot be combined with row/column argument."
        )

    if has_layer:
        repeat = core.LayerRepeatMapping(layer=layer)
    elif not has_repeat:
        repeat = core.RepeatMapping(row=row, column=column)
    return RepeatChart(spec=self, repeat=repeat, columns=columns, **kwargs)
def properties(self, **kwargs):
    """Set top-level properties of the Chart.

    Argument names and types are the same as class initialization.
    """
    copy = self.copy(deep=False)
    for key, val in kwargs.items():
        if key == "selection" and isinstance(val, Selection):
            # For backward compatibility with old selection interface.
            setattr(copy, key, {val.name: val.selection})
            continue
        # Don't validate data, because it hasn't been processed.
        if key != "data":
            self.validate_property(key, val)
        setattr(copy, key, val)
    return copy
def project(
    self,
    type="mercator",
    center=Undefined,
    clipAngle=Undefined,
    clipExtent=Undefined,
    coefficient=Undefined,
    distance=Undefined,
    fraction=Undefined,
    lobes=Undefined,
    parallel=Undefined,
    precision=Undefined,
    radius=Undefined,
    ratio=Undefined,
    reflectX=Undefined,
    reflectY=Undefined,
    rotate=Undefined,
    scale=Undefined,
    spacing=Undefined,
    tilt=Undefined,
    translate=Undefined,
    **kwds,
):
    """Add a geographic projection to the chart.

    Generally used either with ``mark_geoshape`` or with the
    ``latitude``/``longitude`` encodings.

    ``type`` names the cartographic projection (case-insensitive); valid
    values include 'albers', 'albersUsa', 'azimuthalEqualArea',
    'azimuthalEquidistant', 'conicConformal', 'conicEqualArea',
    'conicEquidistant', 'equalEarth', 'equirectangular', 'gnomonic',
    'identity', 'mercator', 'orthographic', 'stereographic',
    'transverseMercator'. Default: 'mercator'.

    The remaining arguments tune the projection: ``center`` is a
    [longitude, latitude] pair in degrees (default ``[0, 0]``);
    ``clipAngle`` sets small-circle clipping (null selects antimeridian
    cutting); ``clipExtent`` is a ``[[x0, y0], [x1, y1]]`` pixel viewport
    clip; ``rotate`` is a two- or three-element ``[lambda, phi, gamma]``
    rotation in degrees (default ``[0, 0, 0]``); ``scale`` (zoom) and
    ``translate`` (pan) override automatic fitting; ``precision`` sets the
    adaptive-resampling threshold in pixels (default ``sqrt(0.5)``). The
    remaining parameters (coefficient, distance, fraction, lobes, parallel,
    radius, ratio, reflectX, reflectY, spacing, tilt) are
    projection-specific numeric/boolean options.
    """
    params = dict(
        type=type,
        center=center,
        clipAngle=clipAngle,
        clipExtent=clipExtent,
        coefficient=coefficient,
        distance=distance,
        fraction=fraction,
        lobes=lobes,
        parallel=parallel,
        precision=precision,
        radius=radius,
        ratio=ratio,
        reflectX=reflectX,
        reflectY=reflectY,
        rotate=rotate,
        scale=scale,
        spacing=spacing,
        tilt=tilt,
        translate=translate,
    )
    params.update(kwds)
    return self.properties(projection=core.Projection(**params))
def _add_transform(self, *transforms):
    """Copy the chart and append the given transforms to chart.transform."""
    copy = self.copy(deep=["transform"])
    existing = [] if copy.transform is Undefined else copy.transform
    copy.transform = existing + list(transforms)
    return copy
def transform_aggregate(self, aggregate=Undefined, groupby=Undefined, **kwds):
    """
    Add an AggregateTransform to the schema.

    Parameters
    ----------
    aggregate : List(:class:`AggregatedFieldDef`)
        Array of objects that define fields to aggregate.
    groupby : List(string)
        The data fields to group by. If not specified, a single group
        containing all data objects will be used.
    **kwds :
        additional keywords are converted to aggregates using standard
        shorthand parsing, e.g. ``mean_acc='mean(Acceleration)'`` becomes
        an AggregatedFieldDef with op='mean', field='Acceleration',
        as='mean_acc'.

    Returns
    -------
    self : Chart object
        returns chart to allow for chaining

    See Also
    --------
    alt.AggregateTransform : underlying transform object
    """
    fields = [] if aggregate is Undefined else aggregate
    for name, shorthand in kwds.items():
        parsed = utils.parse_shorthand(shorthand)
        fields.append(
            core.AggregatedFieldDef(
                **{
                    "as": name,
                    "field": parsed.get("field", Undefined),
                    "op": parsed.get("aggregate", Undefined),
                }
            )
        )
    return self._add_transform(
        core.AggregateTransform(aggregate=fields, groupby=groupby)
    )
def transform_bin(self, as_=Undefined, field=Undefined, bin=True, **kwargs):
    """
    Add a BinTransform to the schema.

    Parameters
    ----------
    as_ : anyOf(string, List(string))
        The output fields at which to write the start and end bin values.
    field : string
        The data field to bin.
    bin : anyOf(boolean, :class:`BinParams`)
        An object indicating bin properties, or simply ``true`` for default
        bin parameters (e.g. ``bin=alt.Bin(maxbins=10)``).

    Returns
    -------
    self : Chart object
        returns chart to allow for chaining

    See Also
    --------
    alt.BinTransform : underlying transform object
    """
    if as_ is not Undefined:
        # Reject the ambiguous case where both spellings are supplied.
        if "as" in kwargs:
            raise ValueError(
                "transform_bin: both 'as_' and 'as' passed as arguments."
            )
        kwargs["as"] = as_
    kwargs.update(bin=bin, field=field)
    return self._add_transform(core.BinTransform(**kwargs))
def transform_calculate(self, as_=Undefined, calculate=Undefined, **kwargs):
    """
    Add a CalculateTransform to the schema.

    Parameters
    ----------
    as_ : string
        The field for storing the computed formula value.
    calculate : string or alt.expr expression
        A `expression <https://vega.github.io/vega-lite/docs/types.html#expression>`__
        string. Use the variable ``datum`` to refer to the current data object.
    **kwargs
        additional transforms can be passed as ``name=expression`` keyword
        arguments; each adds another CalculateTransform.

    Returns
    -------
    self : Chart object
        returns chart to allow for chaining

    See Also
    --------
    alt.CalculateTransform : underlying transform object
    """
    if as_ is Undefined:
        as_ = kwargs.pop("as", Undefined)
    elif "as" in kwargs:
        raise ValueError(
            "transform_calculate: both 'as_' and 'as' passed as arguments."
        )

    chart = self
    if as_ is not Undefined or calculate is not Undefined:
        chart = chart._add_transform(
            core.CalculateTransform(**{"as": as_, "calculate": calculate})
        )
    # Each remaining keyword becomes its own calculate transform.
    for alias, formula in kwargs.items():
        chart = chart._add_transform(
            core.CalculateTransform(**{"as": alias, "calculate": formula})
        )
    return chart
def transform_density(
    self,
    density,
    as_=Undefined,
    bandwidth=Undefined,
    counts=Undefined,
    cumulative=Undefined,
    extent=Undefined,
    groupby=Undefined,
    maxsteps=Undefined,
    minsteps=Undefined,
    steps=Undefined,
):
    """Add a DensityTransform to the spec.

    ``density`` names the data field to estimate. ``as_`` gives the output
    [sample, density] field names (default ``["value", "density"]``).
    ``bandwidth`` is the Gaussian kernel standard deviation (zero/unset =
    estimated by Scott's rule); ``counts`` switches output from probability
    estimates to smoothed counts; ``cumulative`` produces cumulative rather
    than point estimates; ``extent`` is a [min, max] sampling domain;
    ``groupby`` computes one estimate per group; ``minsteps`` (default 25),
    ``maxsteps`` (default 200) and ``steps`` (exact count, overrides both)
    control how many uniform samples are taken over the extent.

    Returns the chart to allow for chaining.
    """
    spec = {
        "density": density,
        "bandwidth": bandwidth,
        "counts": counts,
        "cumulative": cumulative,
        "extent": extent,
        "groupby": groupby,
        "maxsteps": maxsteps,
        "minsteps": minsteps,
        "steps": steps,
        "as": as_,
    }
    return self._add_transform(core.DensityTransform(**spec))
def transform_impute(
    self,
    impute,
    key,
    frame=Undefined,
    groupby=Undefined,
    keyvals=Undefined,
    method=Undefined,
    value=Undefined,
):
    """
    Add an ImputeTransform to the schema.

    ``impute`` names the field whose missing values are filled in; ``key``
    uniquely identifies data objects within a group (missing key values are
    imputed). ``frame`` is a two-element window spec ``[before, after]``
    with null meaning unbounded (default ``[null, null]``); ``groupby``
    imputes per group; ``keyvals`` supplies extra key values (list or
    ImputeSequence) to consider, and is required when there is no impute
    grouping; ``method`` is one of 'value', 'mean', 'median', 'max', 'min'
    (default 'value'); ``value`` is the fill value used when method is
    'value'.

    Returns the chart to allow for chaining.

    See Also
    --------
    alt.ImputeTransform : underlying transform object
    """
    spec = {
        "impute": impute,
        "key": key,
        "frame": frame,
        "groupby": groupby,
        "keyvals": keyvals,
        "method": method,
        "value": value,
    }
    return self._add_transform(core.ImputeTransform(**spec))
def transform_joinaggregate(
    self, joinaggregate=Undefined, groupby=Undefined, **kwargs
):
    """
    Add a JoinAggregateTransform to the schema.

    Parameters
    ----------
    joinaggregate : List(:class:`JoinAggregateFieldDef`)
        The definition of the fields in the join aggregate, and what
        calculations to use.
    groupby : List(string)
        The data fields for partitioning the data objects into separate
        groups. If unspecified, all data points will be in a single group.
    **kwargs
        joinaggregates can also be passed as shorthand keyword arguments,
        e.g. ``x='sum(y)'``.

    Returns
    -------
    self : Chart object
        returns chart to allow for chaining

    See Also
    --------
    alt.JoinAggregateTransform : underlying transform object
    """
    fields = [] if joinaggregate is Undefined else joinaggregate
    for name, shorthand in kwargs.items():
        parsed = utils.parse_shorthand(shorthand)
        fields.append(
            core.JoinAggregateFieldDef(
                **{
                    "as": name,
                    "field": parsed.get("field", Undefined),
                    "op": parsed.get("aggregate", Undefined),
                }
            )
        )
    return self._add_transform(
        core.JoinAggregateTransform(joinaggregate=fields, groupby=groupby)
    )
def transform_filter(self, filter, **kwargs):
    """
    Add a FilterTransform to the schema.

    Parameters
    ----------
    filter : a filter expression or :class:`PredicateComposition`
        The `filter` property must be one of the predicate definitions:
        (1) a string or alt.expr expression
        (2) a range predicate
        (3) a selection predicate
        (4) a logical operand combining (1)-(3)
        (5) a Selection object

    Returns
    -------
    self : Chart object
        returns chart to allow for chaining

    See Also
    --------
    alt.FilterTransform : underlying transform object
    """
    predicate = filter
    # Selection objects and compositions are wrapped as selection predicates.
    if isinstance(predicate, Selection):
        predicate = {"selection": predicate.name}
    elif isinstance(predicate, core.SelectionComposition):
        predicate = {"selection": predicate}
    return self._add_transform(core.FilterTransform(filter=predicate, **kwargs))
def transform_flatten(self, flatten, as_=Undefined):
    """Add a FlattenTransform to the schema.

    Parameters
    ----------
    flatten : List(string)
        One or more data fields containing arrays to flatten. Multiple
        fields should have a parallel structure; when lengths differ the
        longest array wins and ``null`` fills missing entries.
    as_ : List(string)
        Output field names for the extracted values (default: the names of
        the corresponding array fields).

    Returns
    -------
    self : Chart object
        returns chart to allow for chaining

    See Also
    --------
    alt.FlattenTransform : underlying transform object
    """
    return self._add_transform(
        core.FlattenTransform(**{"flatten": flatten, "as": as_})
    )
def transform_fold(self, fold, as_=Undefined):
    """Add a FoldTransform to the spec.

    Parameters
    ----------
    fold : List(string)
        The data fields (properties) to fold into key/value pairs.
    as_ : [string, string]
        Output field names for the key and value properties
        (default ``["key", "value"]``).

    Returns
    -------
    self : Chart object
        returns chart to allow for chaining

    See Also
    --------
    Chart.transform_pivot : pivot transform - opposite of fold.
    alt.FoldTransform : underlying transform object
    """
    return self._add_transform(core.FoldTransform(**{"fold": fold, "as": as_}))
def transform_loess(
    self, on, loess, as_=Undefined, bandwidth=Undefined, groupby=Undefined
):
    """Add a LoessTransform to the spec.

    ``on`` is the independent (predictor) field and ``loess`` the dependent
    field to smooth. ``as_`` gives the output field names for the smoothed
    points (default: the input x/y field names); ``bandwidth`` in [0, 1]
    controls the amount of smoothing (default 0.3); ``groupby`` fits one
    curve per group.

    Returns the chart to allow for chaining.

    See Also
    --------
    Chart.transform_regression: regression transform
    alt.LoessTransform : underlying transform object
    """
    spec = {
        "loess": loess,
        "on": on,
        "bandwidth": bandwidth,
        "groupby": groupby,
        "as": as_,
    }
    return self._add_transform(core.LoessTransform(**spec))
def transform_lookup(
    self,
    lookup=Undefined,
    from_=Undefined,
    as_=Undefined,
    default=Undefined,
    **kwargs,
):
    """Add a DataLookupTransform or SelectionLookupTransform to the chart

    ``lookup`` is the key in the primary data source; ``from_`` is the
    secondary data reference (LookupData or LookupSelection); ``as_`` names
    the output fields for the looked-up values (may be omitted for data
    lookups when ``from_.fields`` is given); ``default`` is the value used
    when the lookup fails (default ``null``).

    Returns the chart to allow for chaining.

    See Also
    --------
    alt.DataLookupTransform : underlying transform object
    alt.SelectionLookupTransform : underlying transform object
    """
    if as_ is not Undefined:
        if "as" in kwargs:
            raise ValueError(
                "transform_lookup: both 'as_' and 'as' passed as arguments."
            )
        kwargs["as"] = as_
    if from_ is not Undefined:
        if "from" in kwargs:
            raise ValueError(
                "transform_lookup: both 'from_' and 'from' passed as arguments."
            )
        kwargs["from"] = from_
    kwargs.update(lookup=lookup, default=default)
    return self._add_transform(core.LookupTransform(**kwargs))
def transform_pivot(
    self, pivot, value, groupby=Undefined, limit=Undefined, op=Undefined
):
    """Add a pivot transform to the chart.

    ``pivot`` is the field whose unique values become new field names in
    the output; ``value`` is the field whose aggregated values populate the
    new pivoted fields. ``groupby`` pivots per group; ``limit`` caps the
    number of pivoted fields (0 = no limit, names sorted ascending before
    enforcement); ``op`` is the aggregation applied to grouped values
    (default 'sum').

    Returns the chart to allow for chaining.

    See Also
    --------
    Chart.transform_fold : fold transform - opposite of pivot.
    alt.PivotTransform : underlying transform object
    """
    spec = {
        "pivot": pivot,
        "value": value,
        "groupby": groupby,
        "limit": limit,
        "op": op,
    }
    return self._add_transform(core.PivotTransform(**spec))
def transform_quantile(
self,
quantile,
as_=Undefined,
groupby=Undefined,
probs=Undefined,
step=Undefined,
):
"""Add a quantile transform to the chart
Parameters
----------
quantile : str
The data field for which to perform quantile estimation.
as : [str, str]
The output field names for the probability and quantile values.
groupby : List(str)
The data fields to group by. If not specified, a single group containing all data
objects will be used.
probs : List(float)
An array of probabilities in the range (0, 1) for which to compute quantile values.
If not specified, the *step* parameter will be used.
step : float
A probability step size (default 0.01) for sampling quantile values. All values from
one-half the step size up to 1 (exclusive) will be sampled. This parameter is only
used if the *probs* parameter is not provided. **Default value:** ``["prob", "value"]``
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.QuantileTransform : underlying transform object
"""
return self._add_transform(
core.QuantileTransform(
quantile=quantile,
groupby=groupby,
probs=probs,
step=step,
**{"as": as_},
)
)
def transform_regression(
self,
on,
regression,
as_=Undefined,
extent=Undefined,
groupby=Undefined,
method=Undefined,
order=Undefined,
params=Undefined,
):
"""Add a RegressionTransform to the chart.
Parameters
----------
on : str
The data field of the independent variable to use a predictor.
regression : str
The data field of the dependent variable to predict.
as_ : [str, str]
The output field names for the smoothed points generated by the regression
transform. **Default value:** The field names of the input x and y values.
extent : [float, float]
A [min, max] domain over the independent (x) field for the starting and ending
points of the generated trend line.
groupby : List(str)
The data fields to group by. If not specified, a single group containing all data
objects will be used.
method : enum('linear', 'log', 'exp', 'pow', 'quad', 'poly')
The functional form of the regression model. One of ``"linear"``, ``"log"``,
``"exp"``, ``"pow"``, ``"quad"``, or ``"poly"``. **Default value:** ``"linear"``
order : float
The polynomial order (number of coefficients) for the 'poly' method.
**Default value:** ``3``
params : boolean
A boolean flag indicating if the transform should return the regression model
parameters (one object per group), rather than trend line points.
The resulting objects include a ``coef`` array of fitted coefficient values
(starting with the intercept term and then including terms of increasing order)
and an ``rSquared`` value (indicating the total variance explained by the model).
**Default value:** ``false``
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
Chart.transform_loess : LOESS transform
alt.RegressionTransform : underlying transform object
"""
return self._add_transform(
core.RegressionTransform(
regression=regression,
on=on,
extent=extent,
groupby=groupby,
method=method,
order=order,
params=params,
**{"as": as_},
)
)
def transform_sample(self, sample=1000):
"""
Add a SampleTransform to the schema.
Parameters
----------
sample : float
The maximum number of data objects to include in the sample. Default: 1000.
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.SampleTransform : underlying transform object
"""
return self._add_transform(core.SampleTransform(sample))
def transform_stack(self, as_, stack, groupby, offset=Undefined, sort=Undefined):
"""
Add a StackTransform to the schema.
Parameters
----------
as_ : anyOf(string, List(string))
Output field names. This can be either a string or an array of strings with
two elements denoting the name for the fields for stack start and stack end
respectively.
If a single string(eg."val") is provided, the end field will be "val_end".
stack : string
The field which is stacked.
groupby : List(string)
The data fields to group by.
offset : enum('zero', 'center', 'normalize')
Mode for stacking marks. Default: 'zero'.
sort : List(:class:`SortField`)
Field that determines the order of leaves in the stacked charts.
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.StackTransform : underlying transform object
"""
return self._add_transform(
core.StackTransform(
stack=stack, groupby=groupby, offset=offset, sort=sort, **{"as": as_}
)
)
def transform_timeunit(
self, as_=Undefined, field=Undefined, timeUnit=Undefined, **kwargs
):
"""
Add a TimeUnitTransform to the schema.
Parameters
----------
as_ : string
The output field to write the timeUnit value.
field : string
The data field to apply time unit.
timeUnit : :class:`TimeUnit`
The timeUnit.
**kwargs
transforms can also be passed by keyword argument; see Examples
Returns
-------
self : Chart object
returns chart to allow for chaining
Examples
--------
>>> import altair as alt
>>> from altair import datum, expr
>>> chart = alt.Chart().transform_timeunit(month='month(date)')
>>> chart.transform[0]
TimeUnitTransform({
as: 'month',
field: 'date',
timeUnit: 'month'
})
It's also possible to pass the ``TimeUnitTransform`` arguments directly;
this is most useful in cases where the desired field name is not a
valid python identifier:
>>> kwds = {'as': 'month', 'timeUnit': 'month', 'field': 'The Month'}
>>> chart = alt.Chart().transform_timeunit(**kwds)
>>> chart.transform[0]
TimeUnitTransform({
as: 'month',
field: 'The Month',
timeUnit: 'month'
})
As the first form is easier to write and understand, that is the
recommended method.
See Also
--------
alt.TimeUnitTransform : underlying transform object
"""
if as_ is Undefined:
as_ = kwargs.pop("as", Undefined)
else:
if "as" in kwargs:
raise ValueError(
"transform_timeunit: both 'as_' and 'as' passed as arguments."
)
if as_ is not Undefined:
dct = {"as": as_, "timeUnit": timeUnit, "field": field}
self = self._add_transform(core.TimeUnitTransform(**dct))
for as_, shorthand in kwargs.items():
dct = utils.parse_shorthand(
shorthand,
parse_timeunits=True,
parse_aggregates=False,
parse_types=False,
)
dct.pop("type", None)
dct["as"] = as_
if "timeUnit" not in dct:
raise ValueError("'{}' must include a valid timeUnit".format(shorthand))
self = self._add_transform(core.TimeUnitTransform(**dct))
return self
def transform_window(
self,
window=Undefined,
frame=Undefined,
groupby=Undefined,
ignorePeers=Undefined,
sort=Undefined,
**kwargs,
):
"""Add a WindowTransform to the schema
Parameters
----------
window : List(:class:`WindowFieldDef`)
The definition of the fields in the window, and what calculations to use.
frame : List(anyOf(None, float))
A frame specification as a two-element array indicating how the sliding window
should proceed. The array entries should either be a number indicating the offset
from the current data object, or null to indicate unbounded rows preceding or
following the current data object. The default value is ``[null, 0]``, indicating
that the sliding window includes the current object and all preceding objects. The
value ``[-5, 5]`` indicates that the window should include five objects preceding
and five objects following the current object. Finally, ``[null, null]`` indicates
that the window frame should always include all data objects. The only operators
affected are the aggregation operations and the ``first_value``, ``last_value``, and
``nth_value`` window operations. The other window operations are not affected by
this.
**Default value:** : ``[null, 0]`` (includes the current object and all preceding
objects)
groupby : List(string)
The data fields for partitioning the data objects into separate windows. If
unspecified, all data points will be in a single group.
ignorePeers : boolean
Indicates if the sliding window frame should ignore peer values. (Peer values are
those considered identical by the sort criteria). The default is false, causing the
window frame to expand to include all peer values. If set to true, the window frame
will be defined by offset values only. This setting only affects those operations
that depend on the window frame, namely aggregation operations and the first_value,
last_value, and nth_value window operations.
**Default value:** ``false``
sort : List(:class:`SortField`)
A sort field definition for sorting data objects within a window. If two data
objects are considered equal by the comparator, they are considered “peer” values of
equal rank. If sort is not specified, the order is undefined: data objects are
processed in the order they are observed and none are considered peers (the
ignorePeers parameter is ignored and treated as if set to ``true`` ).
**kwargs
transforms can also be passed by keyword argument; see Examples
Examples
--------
A cumulative line chart
>>> import altair as alt
>>> import numpy as np
>>> import pandas as pd
>>> data = pd.DataFrame({'x': np.arange(100),
... 'y': np.random.randn(100)})
>>> chart = alt.Chart(data).mark_line().encode(
... x='x:Q',
... y='ycuml:Q'
... ).transform_window(
... ycuml='sum(y)'
... )
>>> chart.transform[0]
WindowTransform({
window: [WindowFieldDef({
as: 'ycuml',
field: 'y',
op: 'sum'
})]
})
"""
if kwargs:
if window is Undefined:
window = []
for as_, shorthand in kwargs.items():
kwds = {"as": as_}
kwds.update(
utils.parse_shorthand(
shorthand,
parse_aggregates=False,
parse_window_ops=True,
parse_timeunits=False,
parse_types=False,
)
)
window.append(core.WindowFieldDef(**kwds))
return self._add_transform(
core.WindowTransform(
window=window,
frame=frame,
groupby=groupby,
ignorePeers=ignorePeers,
sort=sort,
)
)
# Display-related methods
def _repr_mimebundle_(self, include=None, exclude=None):
"""Return a MIME bundle for display in Jupyter frontends."""
# Catch errors explicitly to get around issues in Jupyter frontend
# see https://github.com/ipython/ipython/issues/11038
try:
dct = self.to_dict()
except Exception:
utils.display_traceback(in_ipython=True)
return {}
else:
return renderers.get()(dct)
def display(self, renderer=Undefined, theme=Undefined, actions=Undefined, **kwargs):
"""Display chart in Jupyter notebook or JupyterLab
Parameters are passed as options to vega-embed within supported frontends.
See https://github.com/vega/vega-embed#options for details.
Parameters
----------
renderer : string ('canvas' or 'svg')
The renderer to use
theme : string
The Vega theme name to use; see https://github.com/vega/vega-themes
actions : bool or dict
Specify whether action links ("Open In Vega Editor", etc.) are
included in the view.
**kwargs :
Additional parameters are also passed to vega-embed as options.
"""
from IPython.display import display
if renderer is not Undefined:
kwargs["renderer"] = renderer
if theme is not Undefined:
kwargs["theme"] = theme
if actions is not Undefined:
kwargs["actions"] = actions
if kwargs:
options = renderers.options.copy()
options["embed_options"] = options.get("embed_options", {}).copy()
options["embed_options"].update(kwargs)
with renderers.enable(**options):
display(self)
else:
display(self)
@utils.deprecation.deprecated(message="serve() is deprecated. Use show() instead.")
def serve(
self,
ip="127.0.0.1",
port=8888,
n_retries=50,
files=None,
jupyter_warning=True,
open_browser=True,
http_server=None,
**kwargs,
):
"""Open a browser window and display a rendering of the chart
Parameters
----------
html : string
HTML to serve
ip : string (default = '127.0.0.1')
ip address at which the HTML will be served.
port : int (default = 8888)
the port at which to serve the HTML
n_retries : int (default = 50)
the number of nearby ports to search if the specified port
is already in use.
files : dictionary (optional)
dictionary of extra content to serve
jupyter_warning : bool (optional)
if True (default), then print a warning if this is used
within the Jupyter notebook
open_browser : bool (optional)
if True (default), then open a web browser to the given HTML
http_server : class (optional)
optionally specify an HTTPServer class to use for showing the
figure. The default is Python's basic HTTPServer.
**kwargs :
additional keyword arguments passed to the save() method
"""
from ...utils.server import serve
html = io.StringIO()
self.save(html, format="html", **kwargs)
html.seek(0)
serve(
html.read(),
ip=ip,
port=port,
n_retries=n_retries,
files=files,
jupyter_warning=jupyter_warning,
open_browser=open_browser,
http_server=http_server,
)
def show(self, embed_opt=None, open_browser=None):
"""Show the chart in an external browser window.
This requires a recent version of the altair_viewer package.
Parameters
----------
embed_opt : dict (optional)
The Vega embed options that control the dispay of the chart.
open_browser : bool (optional)
Specify whether a browser window should be opened. If not specified,
a browser window will be opened only if the server is not already
connected to a browser.
"""
try:
import altair_viewer # type: ignore
except ImportError:
raise ValueError(
"show() method requires the altair_viewer package. "
"See http://github.com/altair-viz/altair_viewer"
)
altair_viewer.show(self, embed_opt=embed_opt, open_browser=open_browser)
@utils.use_signature(core.Resolve)
def _set_resolve(self, **kwargs):
"""Copy the chart and update the resolve property with kwargs"""
if not hasattr(self, "resolve"):
raise ValueError(
"{} object has no attribute " "'resolve'".format(self.__class__)
)
copy = self.copy(deep=["resolve"])
if copy.resolve is Undefined:
copy.resolve = core.Resolve()
for key, val in kwargs.items():
copy.resolve[key] = val
return copy
@utils.use_signature(core.AxisResolveMap)
def resolve_axis(self, *args, **kwargs):
return self._set_resolve(axis=core.AxisResolveMap(*args, **kwargs))
@utils.use_signature(core.LegendResolveMap)
def resolve_legend(self, *args, **kwargs):
return self._set_resolve(legend=core.LegendResolveMap(*args, **kwargs))
@utils.use_signature(core.ScaleResolveMap)
def resolve_scale(self, *args, **kwargs):
return self._set_resolve(scale=core.ScaleResolveMap(*args, **kwargs))
class _EncodingMixin(object):
    """Mixin providing the ``encode`` and ``facet`` methods for chart classes."""

    @utils.use_signature(core.FacetedEncoding)
    def encode(self, *args, **kwargs):
        """Return a copy of the chart with the given encodings merged in."""
        # Positional channel arguments are mapped to keywords by their types.
        kwargs = utils.infer_encoding_types(args, kwargs, channels)

        # Extract the previous encoding as a plain dict of defined channels.
        copy = self.copy(deep=["encoding"])
        encoding = copy._get("encoding", {})
        if isinstance(encoding, core.VegaLiteSchema):
            encoding = {
                name: value
                for name, value in encoding._kwds.items()
                if value is not Undefined
            }

        # Merge the new channel definitions on top and rebuild the schema.
        encoding.update(kwargs)
        copy.encoding = core.FacetedEncoding(**encoding)
        return copy

    def facet(
        self,
        facet=Undefined,
        row=Undefined,
        column=Undefined,
        data=Undefined,
        columns=Undefined,
        **kwargs,
    ):
        """Create a facet chart from the current chart.

        Faceted charts require data to be specified at the top level; if data
        is not specified, the data from the current chart will be used at the
        top level.

        Parameters
        ----------
        facet : string or alt.Facet (optional)
            The data column to use as an encoding for a wrapped facet.
            If specified, then neither row nor column may be specified.
        column : string or alt.Column (optional)
            The data column to use as an encoding for a column facet.
            May be combined with row argument, but not with facet argument.
        row : string or alt.Column (optional)
            The data column to use as an encoding for a row facet.
            May be combined with column argument, but not with facet argument.
        data : string or dataframe (optional)
            The dataset to use for faceting. If not supplied, then data must
            be specified in the top-level chart that calls this method.
        columns : integer
            the maximum number of columns for a wrapped facet.

        Returns
        -------
        self :
            for chaining
        """
        has_facet = facet is not Undefined
        has_row_or_column = row is not Undefined or column is not Undefined
        if has_facet and has_row_or_column:
            raise ValueError(
                "facet argument cannot be combined with row/column argument."
            )

        if data is Undefined:
            if self.data is Undefined:
                raise ValueError(
                    "Facet charts require data to be specified at the top level."
                )
            # Lift this chart's data up to the top level of the facet spec.
            self = self.copy(deep=False)
            data, self.data = self.data, Undefined

        if has_facet:
            if isinstance(facet, str):
                facet = channels.Facet(facet)
        else:
            facet = FacetMapping(row=row, column=column)

        return FacetChart(spec=self, facet=facet, data=data, columns=columns, **kwargs)
class Chart(
    TopLevelMixin, _EncodingMixin, mixins.MarkMethodMixin, core.TopLevelUnitSpec
):
    """Create a basic Altair/Vega-Lite chart.

    Although it is possible to set all Chart properties as constructor attributes,
    it is more idiomatic to use methods such as ``mark_point()``, ``encode()``,
    ``transform_filter()``, ``properties()``, etc. See Altair's documentation
    for details and examples: http://altair-viz.github.io/.

    Attributes
    ----------
    data : Data
        An object describing the data source
    mark : AnyMark
        A string describing the mark type (one of `"bar"`, `"circle"`, `"square"`, `"tick"`,
        `"line"`, * `"area"`, `"point"`, `"rule"`, `"geoshape"`, and `"text"`) or a
        MarkDef object.
    encoding : FacetedEncoding
        A key-value mapping between encoding channels and definition of fields.
    autosize : anyOf(AutosizeType, AutoSizeParams)
        Sets how the visualization size should be determined. If a string, should be one of
        `"pad"`, `"fit"` or `"none"`. Object values can additionally specify parameters for
        content sizing and automatic resizing. `"fit"` is only supported for single and
        layered views that don't use `rangeStep`. __Default value__: `pad`
    background : string
        CSS color property to use as the background of visualization.
        **Default value:** none (transparent)
    config : Config
        Vega-Lite configuration object. This property can only be defined at the top-level
        of a specification.
    description : string
        Description of this mark for commenting purpose.
    height : float
        The height of a visualization.
    name : string
        Name of the visualization for later reference.
    padding : Padding
        The default visualization padding, in pixels, from the edge of the visualization
        canvas to the data rectangle. If a number, specifies padding for all sides. If an
        object, the value should have the format `{"left": 5, "top": 5, "right": 5,
        "bottom": 5}` to specify padding for each side of the visualization. __Default
        value__: `5`
    projection : Projection
        An object defining properties of geographic projection. Works with `"geoshape"`
        marks and `"point"` or `"line"` marks that have a channel (one or more of `"X"`,
        `"X2"`, `"Y"`, `"Y2"`) with type `"latitude"`, or `"longitude"`.
    selection : Mapping(required=[])
        A key-value mapping between selection names and definitions.
    title : anyOf(string, TitleParams)
        Title for the plot.
    transform : List(Transform)
        An array of data transformations such as filter and new field calculation.
    width : float
        The width of a visualization.
    """

    def __init__(
        self,
        data=Undefined,
        encoding=Undefined,
        mark=Undefined,
        width=Undefined,
        height=Undefined,
        **kwargs,
    ):
        # Common properties are exposed as explicit keywords for a friendly
        # signature; everything else is forwarded to the schema base class.
        super(Chart, self).__init__(
            data=data,
            encoding=encoding,
            mark=mark,
            width=width,
            height=height,
            **kwargs,
        )

    @classmethod
    def from_dict(cls, dct, validate=True):
        """Construct class from a dictionary representation

        Parameters
        ----------
        dct : dictionary
            The dict from which to construct the class
        validate : boolean
            If True (default), then validate the input against the schema.

        Returns
        -------
        obj : Chart object
            The wrapped schema

        Raises
        ------
        jsonschema.ValidationError :
            if validate=True and dct does not conform to the schema
        """
        # Try each top-level chart class in turn; the dict will normally only
        # validate against the schema of the matching class.
        for class_ in TopLevelMixin.__subclasses__():
            if class_ is Chart:
                # Avoid infinite recursion: for Chart itself, call the
                # superclass implementation rather than this override.
                class_ = super(Chart, cls)
            try:
                return class_.from_dict(dct, validate=validate)
            except jsonschema.ValidationError:
                pass
        # As a last resort, try using the Root vegalite object
        return core.Root.from_dict(dct, validate)

    def to_dict(self, *args, **kwargs):
        """Convert the chart to a dictionary suitable for JSON export."""
        context = kwargs.get("context", {})
        if self.data is Undefined and "data" not in context:
            # No data specified here or in parent: inject empty data
            # for easier specification of datum encodings.
            copy = self.copy(deep=False)
            copy.data = core.InlineData(values=[{}])
            return super(Chart, copy).to_dict(*args, **kwargs)
        return super().to_dict(*args, **kwargs)

    def add_selection(self, *selections):
        """Add one or more selections to the chart."""
        if not selections:
            return self
        copy = self.copy(deep=["selection"])
        if copy.selection is Undefined:
            copy.selection = {}
        # Selections are stored keyed by name on the chart copy.
        for s in selections:
            copy.selection[s.name] = s.selection
        return copy

    def interactive(self, name=None, bind_x=True, bind_y=True):
        """Make chart axes scales interactive

        Parameters
        ----------
        name : string
            The selection name to use for the axes scales. This name should be
            unique among all selections within the chart.
        bind_x : boolean, default True
            If true, then bind the interactive scales to the x-axis
        bind_y : boolean, default True
            If true, then bind the interactive scales to the y-axis

        Returns
        -------
        chart :
            copy of self, with interactive axes added
        """
        # An interval selection bound to "scales" makes pan/zoom interactive
        # on the chosen encodings.
        encodings = []
        if bind_x:
            encodings.append("x")
        if bind_y:
            encodings.append("y")
        return self.add_selection(
            selection_interval(bind="scales", encodings=encodings)
        )
def _check_if_valid_subspec(spec, classname):
    """Raise a ValueError if *spec* cannot be used as a sub-spec of *classname*.

    A valid sub-spec must be a chart object (SchemaBase or dict) and must not
    define any top-level-only attributes.
    """
    err = (
        'Objects with "{0}" attribute cannot be used within {1}. '
        "Consider defining the {0} attribute in the {1} object instead."
    )

    if not isinstance(spec, (core.SchemaBase, dict)):
        raise ValueError("Only chart objects can be used in {0}.".format(classname))
    for attr in TOPLEVEL_ONLY_KEYS:
        # Read the attribute from either a schema object or a plain dict.
        value = (
            getattr(spec, attr, Undefined)
            if isinstance(spec, core.SchemaBase)
            else spec.get(attr, Undefined)
        )
        if value is not Undefined:
            raise ValueError(err.format(attr, classname))
def _check_if_can_be_layered(spec):
    """Raise a ValueError if *spec* cannot be used as a layer.

    Faceted, repeated, and concatenated charts (whether expressed as chart
    objects, as spec attributes, or as faceting encoding channels) cannot
    appear inside a LayerChart.
    """

    def _get(spec, attr):
        # Read *attr* from either a SchemaBase object or a plain dict.
        if isinstance(spec, core.SchemaBase):
            return spec._get(attr)
        else:
            return spec.get(attr, Undefined)

    # Faceting via encoding channels is disallowed for all spec types.
    encoding = _get(spec, "encoding")
    if encoding is not Undefined:
        for channel in ["row", "column", "facet"]:
            if _get(encoding, channel) is not Undefined:
                raise ValueError("Faceted charts cannot be layered.")
    if isinstance(spec, (Chart, LayerChart)):
        return

    if not isinstance(spec, (core.SchemaBase, dict)):
        raise ValueError("Only chart objects can be layered.")
    # NOTE: the original code checked _get(spec, "facet") twice in a row;
    # the duplicate (unreachable) check has been removed.
    if isinstance(spec, FacetChart) or _get(spec, "facet") is not Undefined:
        raise ValueError("Faceted charts cannot be layered.")
    if isinstance(spec, RepeatChart) or _get(spec, "repeat") is not Undefined:
        raise ValueError("Repeat charts cannot be layered.")
    if isinstance(spec, ConcatChart) or _get(spec, "concat") is not Undefined:
        raise ValueError("Concatenated charts cannot be layered.")
    if isinstance(spec, HConcatChart) or _get(spec, "hconcat") is not Undefined:
        raise ValueError("Concatenated charts cannot be layered.")
    if isinstance(spec, VConcatChart) or _get(spec, "vconcat") is not Undefined:
        raise ValueError("Concatenated charts cannot be layered.")
@utils.use_signature(core.TopLevelRepeatSpec)
class RepeatChart(TopLevelMixin, core.TopLevelRepeatSpec):
    """A chart repeated across rows and columns with small changes"""

    # Because TopLevelRepeatSpec is defined as a union as of Vega-Lite schema 4.9,
    # we set the arguments explicitly here.
    # TODO: Should we instead use tools/schemapi/codegen._get_args?
    def __init__(
        self,
        repeat=Undefined,
        spec=Undefined,
        align=Undefined,
        autosize=Undefined,
        background=Undefined,
        bounds=Undefined,
        center=Undefined,
        columns=Undefined,
        config=Undefined,
        data=Undefined,
        datasets=Undefined,
        description=Undefined,
        name=Undefined,
        padding=Undefined,
        params=Undefined,
        resolve=Undefined,
        spacing=Undefined,
        title=Undefined,
        transform=Undefined,
        usermeta=Undefined,
        **kwds,
    ):
        # Validate the sub-spec before constructing the repeat spec around it.
        _check_if_valid_subspec(spec, "RepeatChart")
        super(RepeatChart, self).__init__(
            repeat=repeat,
            spec=spec,
            align=align,
            autosize=autosize,
            background=background,
            bounds=bounds,
            center=center,
            columns=columns,
            config=config,
            data=data,
            datasets=datasets,
            description=description,
            name=name,
            padding=padding,
            params=params,
            resolve=resolve,
            spacing=spacing,
            title=title,
            transform=transform,
            usermeta=usermeta,
            **kwds,
        )

    def interactive(self, name=None, bind_x=True, bind_y=True):
        """Make chart axes scales interactive

        Parameters
        ----------
        name : string
            The selection name to use for the axes scales. This name should be
            unique among all selections within the chart.
        bind_x : boolean, default True
            If true, then bind the interactive scales to the x-axis
        bind_y : boolean, default True
            If true, then bind the interactive scales to the y-axis

        Returns
        -------
        chart :
            copy of self, with interactive axes added
        """
        # Interactivity is delegated to the repeated sub-spec.
        copy = self.copy(deep=False)
        copy.spec = copy.spec.interactive(name=name, bind_x=bind_x, bind_y=bind_y)
        return copy

    def add_selection(self, *selections):
        """Add one or more selections to the chart."""
        # Selections are attached to the repeated sub-spec, not to the
        # top-level repeat container.
        if not selections or self.spec is Undefined:
            return self
        copy = self.copy()
        copy.spec = copy.spec.add_selection(*selections)
        return copy
def repeat(repeater="repeat"):
    """Tie a channel to the row or column within a repeated chart.

    The output of this should be passed to the ``field`` attribute of
    a channel.

    Parameters
    ----------
    repeater : {'row'|'column'|'repeat'|'layer'}
        The repeater to tie the field to. Default is 'repeat'.

    Returns
    -------
    repeat : RepeatRef object
    """
    valid_repeaters = ("row", "column", "repeat", "layer")
    if repeater not in valid_repeaters:
        raise ValueError("repeater must be one of ['row', 'column', 'repeat', 'layer']")
    return core.RepeatRef(repeat=repeater)
@utils.use_signature(core.TopLevelNormalizedConcatSpecGenericSpec)
class ConcatChart(TopLevelMixin, core.TopLevelNormalizedConcatSpecGenericSpec):
    """A chart with concatenated facets arranged in a column-wrapped grid"""

    def __init__(self, data=Undefined, concat=(), columns=Undefined, **kwargs):
        # NOTE: common data is not currently lifted to the top level here;
        # _combine_subchart_data handles sharing after construction.
        for subchart in concat:
            _check_if_valid_subspec(subchart, "ConcatChart")
        super().__init__(data=data, concat=list(concat), columns=columns, **kwargs)
        self.data, self.concat = _combine_subchart_data(self.data, self.concat)

    def __ior__(self, other):
        """Append *other* in place via the ``|=`` operator."""
        _check_if_valid_subspec(other, "ConcatChart")
        self.concat.append(other)
        self.data, self.concat = _combine_subchart_data(self.data, self.concat)
        return self

    def __or__(self, other):
        """Return a new chart with *other* appended, via the ``|`` operator."""
        result = self.copy(deep=["concat"])
        result |= other
        return result

    def add_selection(self, *selections):
        """Add one or more selections to all subcharts."""
        if not selections or not self.concat:
            return self
        result = self.copy()
        result.concat = [chart.add_selection(*selections) for chart in result.concat]
        return result
def concat(*charts, **kwargs):
    """Concatenate charts into a column-wrapped grid."""
    chart = ConcatChart(concat=charts, **kwargs)
    return chart
@utils.use_signature(core.TopLevelNormalizedHConcatSpecGenericSpec)
class HConcatChart(TopLevelMixin, core.TopLevelNormalizedHConcatSpecGenericSpec):
    """A chart with horizontally-concatenated facets"""

    def __init__(self, data=Undefined, hconcat=(), **kwargs):
        # NOTE: common data is not currently lifted to the top level here;
        # _combine_subchart_data handles sharing after construction.
        for subchart in hconcat:
            _check_if_valid_subspec(subchart, "HConcatChart")
        super().__init__(data=data, hconcat=list(hconcat), **kwargs)
        self.data, self.hconcat = _combine_subchart_data(self.data, self.hconcat)

    def __ior__(self, other):
        """Append *other* in place via the ``|=`` operator."""
        _check_if_valid_subspec(other, "HConcatChart")
        self.hconcat.append(other)
        self.data, self.hconcat = _combine_subchart_data(self.data, self.hconcat)
        return self

    def __or__(self, other):
        """Return a new chart with *other* appended, via the ``|`` operator."""
        result = self.copy(deep=["hconcat"])
        result |= other
        return result

    def add_selection(self, *selections):
        """Add one or more selections to all subcharts."""
        if not selections or not self.hconcat:
            return self
        result = self.copy()
        result.hconcat = [chart.add_selection(*selections) for chart in result.hconcat]
        return result
def hconcat(*charts, **kwargs):
    """Concatenate charts horizontally."""
    chart = HConcatChart(hconcat=charts, **kwargs)
    return chart
@utils.use_signature(core.TopLevelNormalizedVConcatSpecGenericSpec)
class VConcatChart(TopLevelMixin, core.TopLevelNormalizedVConcatSpecGenericSpec):
    """A chart with vertically-concatenated facets"""

    def __init__(self, data=Undefined, vconcat=(), **kwargs):
        # NOTE: common data is not currently lifted to the top level here;
        # _combine_subchart_data handles sharing after construction.
        for subchart in vconcat:
            _check_if_valid_subspec(subchart, "VConcatChart")
        super().__init__(data=data, vconcat=list(vconcat), **kwargs)
        self.data, self.vconcat = _combine_subchart_data(self.data, self.vconcat)

    def __iand__(self, other):
        """Append *other* in place via the ``&=`` operator."""
        _check_if_valid_subspec(other, "VConcatChart")
        self.vconcat.append(other)
        self.data, self.vconcat = _combine_subchart_data(self.data, self.vconcat)
        return self

    def __and__(self, other):
        """Return a new chart with *other* appended, via the ``&`` operator."""
        result = self.copy(deep=["vconcat"])
        result &= other
        return result

    def add_selection(self, *selections):
        """Add one or more selections to all subcharts."""
        if not selections or not self.vconcat:
            return self
        result = self.copy()
        result.vconcat = [chart.add_selection(*selections) for chart in result.vconcat]
        return result
def vconcat(*charts, **kwargs):
    """Concatenate charts vertically."""
    chart = VConcatChart(vconcat=charts, **kwargs)
    return chart
@utils.use_signature(core.TopLevelLayerSpec)
class LayerChart(TopLevelMixin, _EncodingMixin, core.TopLevelLayerSpec):
    """A Chart with layers within a single panel"""

    def __init__(self, data=Undefined, layer=(), **kwargs):
        # NOTE: common data is not currently lifted to the top level here,
        # and conflicting interactions between layers are not yet detected.
        for subchart in layer:
            _check_if_valid_subspec(subchart, "LayerChart")
            _check_if_can_be_layered(subchart)
        super().__init__(data=data, layer=list(layer), **kwargs)
        self.data, self.layer = _combine_subchart_data(self.data, self.layer)

    def __iadd__(self, other):
        """Append *other* as a new layer in place via the ``+=`` operator."""
        _check_if_valid_subspec(other, "LayerChart")
        _check_if_can_be_layered(other)
        self.layer.append(other)
        self.data, self.layer = _combine_subchart_data(self.data, self.layer)
        return self

    def __add__(self, other):
        """Return a new chart with *other* layered on top, via ``+``."""
        result = self.copy(deep=["layer"])
        result += other
        return result

    def add_layers(self, *layers):
        """Return a copy of the chart with the given layers appended."""
        result = self.copy(deep=["layer"])
        for new_layer in layers:
            result += new_layer
        return result

    def interactive(self, name=None, bind_x=True, bind_y=True):
        """Make chart axes scales interactive

        Parameters
        ----------
        name : string
            The selection name to use for the axes scales. This name should be
            unique among all selections within the chart.
        bind_x : boolean, default True
            If true, then bind the interactive scales to the x-axis
        bind_y : boolean, default True
            If true, then bind the interactive scales to the y-axis

        Returns
        -------
        chart :
            copy of self, with interactive axes added
        """
        if not self.layer:
            raise ValueError(
                "LayerChart: cannot call interactive() until a layer is defined"
            )
        # Interactivity is attached to the first layer only.
        result = self.copy(deep=["layer"])
        result.layer[0] = result.layer[0].interactive(
            name=name, bind_x=bind_x, bind_y=bind_y
        )
        return result

    def add_selection(self, *selections):
        """Add one or more selections to all subcharts."""
        if not selections or not self.layer:
            return self
        # Selections are attached to the first layer only.
        result = self.copy()
        result.layer[0] = result.layer[0].add_selection(*selections)
        return result
def layer(*charts, **kwargs):
    """Layer multiple charts within a single panel."""
    chart = LayerChart(layer=charts, **kwargs)
    return chart
@utils.use_signature(core.TopLevelFacetSpec)
class FacetChart(TopLevelMixin, core.TopLevelFacetSpec):
    """A Chart repeated across facets of the data"""

    def __init__(self, data=Undefined, spec=Undefined, facet=Undefined, **kwargs):
        # Validate the sub-spec before constructing the facet spec around it.
        _check_if_valid_subspec(spec, "FacetChart")
        super().__init__(data=data, spec=spec, facet=facet, **kwargs)

    def interactive(self, name=None, bind_x=True, bind_y=True):
        """Make chart axes scales interactive

        Parameters
        ----------
        name : string
            The selection name to use for the axes scales. This name should be
            unique among all selections within the chart.
        bind_x : boolean, default True
            If true, then bind the interactive scales to the x-axis
        bind_y : boolean, default True
            If true, then bind the interactive scales to the y-axis

        Returns
        -------
        chart :
            copy of self, with interactive axes added
        """
        # Interactivity is delegated to the faceted sub-spec.
        result = self.copy(deep=False)
        result.spec = result.spec.interactive(name=name, bind_x=bind_x, bind_y=bind_y)
        return result

    def add_selection(self, *selections):
        """Add one or more selections to the chart."""
        if not selections or self.spec is Undefined:
            return self
        result = self.copy()
        result.spec = result.spec.add_selection(*selections)
        return result
def topo_feature(url, feature, **kwargs):
    """A convenience function for extracting features from a topojson url

    Parameters
    ----------
    url : string
        An URL from which to load the data set.
    feature : string
        The name of the TopoJSON object set to convert to a GeoJSON feature collection. For
        example, in a map of the world, there may be an object set named `"countries"`.
        Using the feature property, we can extract this set and generate a GeoJSON feature
        object for each country.
    **kwargs :
        additional keywords passed to TopoDataFormat
    """
    fmt = core.TopoDataFormat(type="topojson", feature=feature, **kwargs)
    return core.UrlData(url=url, format=fmt)
def _combine_subchart_data(data, subcharts):
    """Hoist data shared by all subcharts to the top level, when possible.

    Returns the (possibly updated) top-level data together with the
    (possibly stripped) subcharts.
    """

    def _strip(chart):
        # Return *chart* with its data removed (copy-on-write; no-op when
        # the data is already unset).
        if chart.data is not Undefined:
            chart = chart.copy()
            chart.data = Undefined
        return chart

    if subcharts:
        if data is Undefined:
            # No top-level data: hoist only if every subchart shares the
            # exact same (identity-equal) defined data object.
            candidate = subcharts[0].data
            shared = candidate is not Undefined and all(
                c.data is candidate for c in subcharts
            )
            if shared:
                data = candidate
                subcharts = [_strip(c) for c in subcharts]
        elif all(c.data is Undefined or c.data is data for c in subcharts):
            # Top level has data; strip subcharts whose data is undefined or
            # identical to it.
            subcharts = [_strip(c) for c in subcharts]
    return data, subcharts
@utils.use_signature(core.SequenceParams)
def sequence(start, stop=None, step=Undefined, as_=Undefined, **kwds):
    """Sequence generator."""
    if stop is None:
        # Single-argument form: sequence(stop) counts from zero.
        stop = start
        start = 0
    # 'as' is a Python keyword, so it must be passed via dict expansion.
    extra = {"as": as_}
    params = core.SequenceParams(start=start, stop=stop, step=step, **extra)
    return core.SequenceGenerator(sequence=params, **kwds)
@utils.use_signature(core.GraticuleParams)
def graticule(**kwds):
    """Graticule generator."""
    # A bare ``graticule: True`` requests the default graticule parameters.
    spec = core.GraticuleParams(**kwds) if kwds else True
    return core.GraticuleGenerator(graticule=spec)
def sphere():
    """Sphere generator.

    Returns
    -------
    SphereGenerator
        A generator with ``sphere`` set to ``True``, for use as a data source.
    """
    return core.SphereGenerator(sphere=True)
|
altair-viz/altair
|
altair/vegalite/v4/api.py
|
Python
|
bsd-3-clause
| 89,700
|
[
"Gaussian"
] |
945f6823c07b7ffc4bc637444e70b57a2d336a5d5805c98048e7c11feedc43b1
|
# This file is part of OpenDrift.
#
# OpenDrift is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2
#
# OpenDrift is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenDrift. If not, see <https://www.gnu.org/licenses/>.
#
# Copyright 2015, Knut-Frode Dagestad, MET Norway
# Copyright 2020, Gaute Hope, MET Norway
import sys
import os
import types
import traceback
import inspect
import logging
logging.captureWarnings(True)
logger = logging.getLogger(__name__)
from datetime import datetime, timedelta
from collections import OrderedDict
from abc import ABCMeta, abstractmethod, abstractproperty
import geojson
import xarray as xr
import numpy as np
import scipy
import pyproj
try:
import matplotlib
matplotlib.rcParams['legend.numpoints'] = 1
matplotlib.rcParams['legend.scatterpoints'] = 1
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib.patches import Polygon
from matplotlib.path import Path
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
except ImportError:
print('matplotlib and/or cartopy is not available, can not make plots')
import opendrift
from opendrift.timer import Timeable
from opendrift.errors import NotCoveredError
from opendrift.readers.basereader import BaseReader, standard_names
from opendrift.readers import reader_from_url, reader_global_landmask
from opendrift.models.physics_methods import PhysicsMethods
class OpenDriftSimulation(PhysicsMethods, Timeable):
"""Generic trajectory model class, to be extended (subclassed).
This as an Abstract Base Class, meaning that only subclasses can
be initiated and used.
Any specific subclass ('model') must contain its own (or shared)
specific type of particles (ElementType), whose properties are
updated at each time_step using method update() on basis of model
physics/chemistry/biology and 'required_variables' (environment)
which are provided by one or more Reader objects.
Attributes:
ElementType: the type (class) of particles to be used by this model
elements: object of the class ElementType, storing the specific
particle properties (ndarrays and scalars) of all active particles
as named attributes. Elements are added by seeding-functions
(presently only one implemented: seed_elements).
elements_deactivated: ElementType object containing particles which
have been deactivated (and removed from 'elements')
elements_scheduled: ElementType object containing particles which
have been scheduled, but not yet activated
required_variables: list of strings of CF standard_names which is
needed by this model (update function) to update properties of
particles ('elements') at each time_step. This core class has
no required_elements, this is implemented by subclasses/modules.
environment: recarray storing environment variables (wind, waves,
current etc) as named attributes. Attribute names follow
standard_name from CF-convention, allowing any OpenDriftSimulation
module/subclass using environment data from any readers which
can provide the requested variables. Used in method 'update'
to update properties of elements every time_step.
time_step: timedelta object, time interval at which element properties
are updated (including advection).
time_step_output: timedelta object, time interval at which element
properties are stored in memory and eventually written to file
readers: Dictionary where values are Reader objects, and names are
unique reference keywords used to access a given reader (typically
filename or URL)
priority_list: OrderedDict where names are variable names,
and values are lists of names (kewywords) of the reader, in the
order of priority (user defined) of which the readers shall be
called to retrieve environmental data.
"""
__metaclass__ = ABCMeta
status_categories = ['active'] # Particles are active by default
# Default plotting colors of trajectory endpoints
status_colors_default = {
'initial': 'green',
'active': 'blue',
'missing_data': 'gray'
}
CONFIG_LEVEL_ESSENTIAL = 1
CONFIG_LEVEL_BASIC = 2
CONFIG_LEVEL_ADVANCED = 3
max_speed = 1 # Assumed max average speed of any element
required_profiles_z_range = None # [min_depth, max_depth]
plot_comparison_colors = [
'k', 'r', 'g', 'b', 'm', 'c', 'y', 'crimson', 'indigo', 'lightcoral',
'grey', 'sandybrown', 'palegreen', 'gold', 'yellowgreen', 'lime',
'steelblue', 'navy', 'darkviolet'
]
plot_comparison_colors = plot_comparison_colors + plot_comparison_colors
proj_latlon = pyproj.Proj('+proj=latlong')
@classmethod
def SRS(cls):
return cls.proj_latlon
def __init__(self,
seed=0,
iomodule='netcdf',
loglevel=logging.DEBUG,
logtime='%H:%M:%S',
logfile=None):
"""Initialise OpenDriftSimulation
Args:
seed: integer or None. A given integer will yield identical
random numbers drawn each simulation. Random numbers are
e.g. used to distribute particles spatially when seeding,
and may be used by modules (subclasses) for e.g. diffusion.
Specifying a fixed value (default: 0) is useful for sensitivity
tests. With seed = None, different random numbers will be drawn
for subsequent runs, even with identical configuration/input.
iomodule: name of module used to export data
default: netcdf, see :py:mod:`opendrift.io` for more alternatives.
`iomodule` is module/filename without preceeding `io_`
loglevel: set to 0 (default) to retrieve all debug information.
Provide a higher value (e.g. 20) to receive less output.
Use the string 'custom' to configure logging from outside.
logtime: if True, a time stamp is given for each logging line.
logtime can also be given as a python time specifier
(e.g. '%H:%M:%S')
"""
self.show_continuous_performance = False
self.origin_marker = None # Dictionary to store named seeding locations
self.minvals = {
} # Dicionaries to store minimum and maximum values of variables
self.maxvals = {}
# List to store GeoJSON dicts of seeding commands
self.seed_geojson = []
# Dict to store readers
self.readers = OrderedDict(
) # Dictionary, key=name, value=reader object
self.priority_list = OrderedDict()
# Make copies of dictionaries so that they are private to each instance
self.status_categories = ['active'] # Particles are active by default
self.status_colors_default = self.status_colors_default.copy()
if hasattr(self, 'status_colors'):
# Append model specific colors to (and override) default colors
self.status_colors_default.update(self.status_colors)
self.status_colors = self.status_colors_default
else:
self.status_colors = self.status_colors_default
# Using a fixed seed will generate the same random numbers
# each run, useful for sensitivity tests
# Use seed = None to get different random numbers each time
np.random.seed(seed)
self.steps_calculation = 0 # Increase for each simulation step
self.steps_output = 0
self.elements_deactivated = self.ElementType() # Empty array
self.elements = self.ElementType() # Empty array
if loglevel != 'custom':
format = '%(levelname)-7s %(name)s: %(message)s'
datefmt = None
if logtime is not False:
format = '%(asctime)s ' + format
if logtime is not True:
datefmt = logtime
formatter = logging.Formatter(format, datefmt=datefmt)
if loglevel < 10: # 0 is NOTSET, giving no output
loglevel = 10
od_loggers = [
logging.getLogger('opendrift'),
logging.getLogger('opendrift_landmask_data')
]
if logfile is not None:
handler = logging.FileHandler(logfile, mode='w')
handler.setFormatter(formatter)
for l in od_loggers:
l.setLevel(loglevel)
l.handlers = []
l.addHandler(handler)
else:
import coloredlogs
fields = coloredlogs.DEFAULT_FIELD_STYLES
fields['levelname']['color'] = 'magenta'
# coloredlogs does not create duplicate handlers
for l in od_loggers:
coloredlogs.install(level=loglevel,
logger=l,
fmt=format,
datefmt=datefmt,
field_styles=fields)
# Prepare outfile
try:
io_module = __import__(
'opendrift.export.io_' + iomodule,
fromlist=['init', 'write_buffer', 'close', 'import_file'])
except ImportError:
logger.info('Could not import iomodule ' + iomodule)
self.io_init = types.MethodType(io_module.init, self)
self.io_write_buffer = types.MethodType(io_module.write_buffer, self)
self.io_close = types.MethodType(io_module.close, self)
self.io_import_file = types.MethodType(io_module.import_file, self)
self.io_import_file_xarray = types.MethodType(
io_module.import_file_xarray, self)
# Set configuration options
self._add_config({
# type, default, min, max, enum, important, value, units, description
'general:use_auto_landmask': {
'type':
'bool',
'default':
True,
'description':
'A built-in GSHHG global landmask is used if True, '
'otherwise landmask is taken from reader or fallback value.',
'level':
self.CONFIG_LEVEL_ADVANCED
},
'general:coastline_action': {
'type':
'enum',
'enum': ['none', 'stranding', 'previous'],
'default':
'stranding',
'level':
self.CONFIG_LEVEL_BASIC,
'description':
'None means that objects may also move over land. '
'stranding means that objects are deactivated if they hit land. '
'previous means that objects will move back to the previous location '
'if they hit land'
},
'general:time_step_minutes': {
'type':
'float',
'min':
.01,
'max':
1440,
'default':
60,
'units':
'minutes',
'level':
self.CONFIG_LEVEL_BASIC,
'description':
'Calculation time step used for the simulation. The output time step may '
'be equal or larger than this.'
},
'general:time_step_output_minutes': {
'type':
'float',
'min':
1,
'max':
1440,
'default':
None,
'units':
'minutes',
'level':
self.CONFIG_LEVEL_BASIC,
'description':
'Output time step, i.e. the interval at which output is saved. This must be larger than '
'the calculation time step, and be an integer multiple of this.'
},
'seed:ocean_only': {
'type':
'bool',
'default':
True,
'description':
'If True, elements seeded on land will be moved to the closest '
'position in ocean',
'level':
self.CONFIG_LEVEL_ADVANCED
},
'seed:number': {
'type': 'int',
'default': 1,
'min': 1,
'max': 100000000,
'units': 1,
'description': 'The number of elements for the simulation.',
'level': self.CONFIG_LEVEL_BASIC
},
'drift:max_age_seconds': {
'type': 'float',
'default': None,
'min': 0,
'max': np.inf,
'units': 'seconds',
'description':
'Elements will be deactivated when this age is reached',
'level': self.CONFIG_LEVEL_ADVANCED
},
'drift:advection_scheme': {
'type':
'enum',
'enum': ['euler', 'runge-kutta', 'runge-kutta4'],
'default':
'euler',
'level':
self.CONFIG_LEVEL_ADVANCED,
'description':
'Numerical advection scheme for ocean current advection'
},
'drift:current_uncertainty': {
'type': 'float',
'default': 0,
'min': 0,
'max': 5,
'units': 'm/s',
'description':
'Add gaussian perturbation with this standard deviation to current components at each time step',
'level': self.CONFIG_LEVEL_ADVANCED
},
'drift:current_uncertainty_uniform': {
'type': 'float',
'default': 0,
'min': 0,
'max': 5,
'units': 'm/s',
'description':
'Add gaussian perturbation with this standard deviation to current components at each time step',
'level': self.CONFIG_LEVEL_ADVANCED
},
'drift:horizontal_diffusivity': {
'type': 'float',
'default': 0,
'min': 0,
'max': 100000,
'units': 'm2/s',
'description': 'Add horizontal diffusivity (random walk)',
'level': self.CONFIG_LEVEL_BASIC
},
'drift:wind_uncertainty': {
'type': 'float',
'default': 0,
'min': 0,
'max': 5,
'units': 'm/s',
'description':
'Add gaussian perturbation with this standard deviation to wind components at each time step.',
'level': self.CONFIG_LEVEL_ADVANCED
},
'drift:relative_wind': {
'type': 'bool',
'default': False,
'description':
'If True, wind drift is calculated for absolute wind (wind vector minus ocean surface current vector).',
'level': self.CONFIG_LEVEL_ADVANCED
},
'drift:deactivate_north_of': {
'type': 'float',
'default': None,
'min': -90,
'max': 90,
'units': 'degrees',
'description':
'Elements are deactivated if the move further north than this limit',
'level': self.CONFIG_LEVEL_ADVANCED
},
'drift:deactivate_south_of': {
'type': 'float',
'default': None,
'min': -90,
'max': 90,
'units': 'degrees',
'description':
'Elements are deactivated if the move further south than this limit',
'level': self.CONFIG_LEVEL_ADVANCED
},
'drift:deactivate_east_of': {
'type': 'float',
'default': None,
'min': -360,
'max': 360,
'units': 'degrees',
'description':
'Elements are deactivated if the move further east than this limit',
'level': self.CONFIG_LEVEL_ADVANCED
},
'drift:deactivate_west_of': {
'type': 'float',
'default': None,
'min': -360,
'max': 360,
'units': 'degrees',
'description':
'Elements are deactivated if the move further west than this limit',
'level': self.CONFIG_LEVEL_ADVANCED
},
})
# Add default element properties to config
c = {}
for p in self.ElementType.variables:
v = self.ElementType.variables[p]
if 'seed' in v and v['seed'] is False:
continue # Properties which may not be provided by user
minval = v['min'] if 'min' in v else None
maxval = v['max'] if 'max' in v else None
units = v['units'] if 'units' in v else None
c['seed:%s' % p] = {
'type':
v['type'] if 'type' in v else 'float',
'min':
v['min'] if 'min' in v else None,
'max':
v['max'] if 'max' in v else None,
'units':
v['units'] if 'units' in v else None,
'default':
v['default'] if 'default' in v else None,
'description':
v['description']
if 'description' in v else 'Seeding value of %s' % p,
'level':
v['level'] if 'level' in v else self.CONFIG_LEVEL_ADVANCED
}
self._add_config(c)
# Add constant and fallback environment variables to config
c = {}
for v in self.required_variables:
minval = maxval = units = None
description_constant = 'Use constant value for %s' % v
description_fallback = 'Fallback value for %s if not available from any reader' % v
if v in standard_names:
if 'valid_min' in standard_names[v]:
minval = standard_names[v]['valid_min']
if 'valid_max' in standard_names[v]:
maxval = standard_names[v]['valid_max']
if 'long_name' in standard_names[v]:
description_constant = description_fallback = standard_names[
v]['long_name']
if 'units' in standard_names[v]:
units = standard_names[v]['units']
c['environment:constant:%s' % v] = {
'type': 'float',
'min': minval,
'max': maxval,
'units': units,
'default': None,
'level': OpenDriftSimulation.CONFIG_LEVEL_BASIC,
'description': description_constant
}
c['environment:fallback:%s' % v] = {
'type':
'float',
'min':
minval,
'max':
maxval,
'units':
units,
'default':
self.required_variables[v]['fallback']
if 'fallback' in self.required_variables[v] else None,
'level':
OpenDriftSimulation.CONFIG_LEVEL_BASIC,
'description':
description_fallback
}
self._add_config(c)
self.history = None # Recarray to store trajectories and properties
# Find variables which require profiles
self.required_profiles = [
var for var in self.required_variables
if 'profiles' in self.required_variables[var]
and self.required_variables[var]['profiles'] is True
]
# Find variables which are desired, but not required
self.desired_variables = [
var for var in self.required_variables
if 'important' in self.required_variables[var]
and self.required_variables[var]['important'] is False
]
self.timer_start('total time')
self.timer_start('configuration')
self.add_metadata('opendrift_version', opendrift.__version__)
logger.info('OpenDriftSimulation initialised (version %s)' %
opendrift.version.version_or_git())
# Check if dependencies are outdated
import importlib
if importlib.util.find_spec("cmocean") is None:
logger.warning('#' * 82)
logger.warning(
'Dependencies are outdated, please update with: conda env update -f environment.yml'
)
logger.warning('#' * 82)
def list_config(self, prefix=''):
"""List all possible configuration settings with values"""
str = '\n=============================================\n'
for key in self._config:
if key.startswith(prefix):
str += '%s [%s]\n' % (key, self.get_config(key))
str += '=============================================\n'
logger.info(str)
    def list_configspec(self, prefix=''):
        """Readable formatting of config specification

        Prints one line per config item whose key starts with *prefix*,
        showing current value, type, allowed range/choices and the first
        20 characters of the description.
        """
        for c, i in self._config.items():
            if c.startswith(prefix):
                val = i['value'] if 'value' in i else None
                # A callable value is lazily evaluated against this instance.
                val = val(self) if callable(val) else val
                if i['type'] == 'bool':
                    rang = ''
                elif i['type'] in ['float', 'int']:
                    rang = 'min: %s, max: %s [%s]' % (i['min'], i['max'],
                                                      i['units'])
                elif i['type'] == 'enum':
                    rang = i['enum']
                # NOTE(review): if a config item had a type other than
                # bool/float/int/enum, 'rang' would be unbound here;
                # _add_config validates types, so this should not occur.
                print('%-35s [%s] %-5s %s %s...' %
                      (c, val, i['type'], rang, i['description'][0:20]))
def get_configspec(self, prefix='', level=[1, 2, 3]):
if not isinstance(level, list):
level = [level]
configspec = {
k: v
for (k, v) in self._config.items()
if k.startswith(prefix) and self._config[k]['level'] in level
}
return configspec
    def _add_config(self, config, overwrite=True):
        """Add configuration settings

        config is a dictionary where keys are configuration keywords,
        and values are dictionaries with the following contents:

        type (string): 'float', 'int', 'bool' or 'enum'

        min, max (float/int/None): (only when type is 'float' or 'int')
            The minimum and maximum allowed values for this setting.
            May also be None if there are no upper/lowe limits.

        units (string): (only when type is 'float' or 'int')
            The units of this config setting.

        enum (list): (only when type is 'enum')
            A list of possible values for this setting.

        default (number/bool/string/None):
            The default value for this setting.

        value (number/bool/string/None): The actual value for this setting.
            This is updated with self.set_config(key, value) and retrieved
            with self.get_config(key)

        description (string):
            A description of this config setting, for users/documentation/GUIs.

        level (int): A parameter to determine the level of exposure in GUIs
            1 self.CONFIG_LEVEL_ESSENTIAL: important setting which user has to consider
            2 self.CONFIG_LEVEL_BASIC: setting which many users may consider
            3 self.CONFIG_LEVEL_ADVANCED: setting relevant only to advanced users
        """
        # Identify the calling module (for debug logging only).
        caller = inspect.stack()[1]
        caller = os.path.splitext(os.path.basename(caller.filename))[0]
        logger.debug('Adding %i config items from %s' % (len(config), caller))
        if not hasattr(self, '_config'):
            self._config = {}
        remove = []
        for c, i in config.items():  # Check that provided config is conistent
            if c in self._config:
                if overwrite is False:
                    logger.debug(
                        '  Config item %s is already specified, not overwriting'
                        % c)
                    remove.append(c)
                else:
                    logger.debug('  Overwriting config item %s' % c)
            for p in ['type', 'description', 'level']:
                if p not in i:
                    raise ValueError(
                        '"%s" must be specified for config item %s' % (p, c))
            # Only essential-level items may omit a default.
            if i['level'] != self.CONFIG_LEVEL_ESSENTIAL and 'default' not in i:  #or i['default'] is None:
                raise ValueError(
                    'A default value must be provided for config item %s' % c)
            if i['type'] == 'enum':
                if 'enum' not in i or not isinstance(i['enum'], list):
                    raise ValueError(
                        '"enum" of type list must be provided for config item %s'
                        % (c))
            elif i['type'] in ['float', 'int']:
                for p in ['min', 'max', 'units']:
                    if p not in i:
                        raise ValueError(
                            '"%s" not provided for config item %s' % (p, c))
            elif i['type'] == 'bool':
                pass  # no check for bool
            else:
                raise ValueError(
                    'Config type "%s" (%s) is not defined. Valid options are: '
                    'float, int, enum, bool' % (i['type'], c))
            # Initialise the live value from the default.
            if 'default' in i:
                i['value'] = i['default']
        # NOTE(review): items skipped due to overwrite=False are deleted from
        # the *caller's* dict before merging, i.e. the argument is mutated.
        for r in remove:
            del config[r]
        self._config.update(config)
    def set_config(self, key, value):
        """Set config setting *key* to *value*, validating against its spec.

        Raises:
            ValueError: if *key* is unknown, or *value* is outside the
                allowed range / not among the allowed choices.
        """
        if not key in self._config:
            self.list_config()
            raise ValueError('No config setting named %s' % key)
        i = self._config[key]
        if i['type'] == 'bool':
            if value not in [True, False]:
                raise ValueError('Config value %s must be True or False' % key)
        elif i['type'] in ['float', 'int'] and value is not None:
            # Range check first, then cast to the declared numeric type.
            if (i['min'] is not None
                    and value < i['min']) or (i['max'] is not None
                                              and value > i['max']):
                raise ValueError('Config value %s must be between %s and %s' %
                                 (key, i['min'], i['max']))
            if i['type'] == 'float' and value is not None:
                value = float(value)
            elif i['type'] == 'int' and value is not None:
                value = int(value)
        elif i['type'] == 'enum':
            if value not in i['enum']:
                # For large enums, suggest close or containing matches.
                suggestion = ''
                if len(i['enum']) > 5:
                    import difflib
                    matches = difflib.get_close_matches(value,
                                                        i['enum'],
                                                        n=20,
                                                        cutoff=.3)
                    containing = [e for e in i['enum'] if value in e]
                    matches = list(set(matches) | set(containing))
                    if len(matches) > 0:
                        matches.sort()
                        suggestion = '\nDid you mean any of these?\n%s' % str(
                            matches)
                raise ValueError(
                    'Wrong configuration (%s=%s), possible values are:\n\t%s\n%s' %
                    (key, value, i['enum'], suggestion))
        self._config[key]['value'] = value
    def _set_config_default(self, key, value):
        """Update both default and actual value of a config setting

        Goes through set_config() so that the value is validated/cast, then
        copies the resulting value into the item's default.
        """
        self.set_config(key, value)
        self._config[key]['default'] = self.get_config(key)
def get_config(self, key):
if not key in self._config:
raise ValueError('No config setting named %s' % key)
return (self._config[key]['value'])
def add_metadata(self, key, value):
"""Add item to metadata dictionary, for export as netCDF global attributes"""
if not hasattr(self, 'metadata_dict'):
from collections import OrderedDict
self.metadata_dict = OrderedDict()
self.metadata_dict[key] = value
    def prepare_run(self):
        """Hook called before a simulation run; subclasses may override."""
        pass  # to be overloaded when needed
    def store_present_positions(self, IDs=None, lons=None, lats=None):
        """Store present element positions, in case they shall be moved back

        Only active when coastline_action or seafloor_action is 'previous'.
        With no arguments, the positions of all active elements are stored;
        with explicit IDs/lons/lats, positions of (newly seeded) elements
        are stored and remembered in self.newly_seeded_IDs.
        """
        if self.get_config('general:coastline_action') == 'previous' or (
                'general:seafloor_action' in self._config
                and self.get_config('general:seafloor_action') == 'previous'):
            if not hasattr(self, 'previous_lon'):
                # Lazily allocate masked arrays sized for all elements.
                self.previous_lon = np.ma.masked_all(self.num_elements_total())
                self.previous_lat = np.ma.masked_all(self.num_elements_total())
            if IDs is None:
                IDs = self.elements.ID
                lons = self.elements.lon
                lats = self.elements.lat
                self.newly_seeded_IDs = None
            else:
                # to check if seeded on land
                if len(IDs) > 0:
                    self.newly_seeded_IDs = np.copy(IDs)
                else:
                    self.newly_seeded_IDs = None
            # Element IDs are 1-based, hence the -1 when indexing.
            self.previous_lon[IDs - 1] = np.copy(lons)
            self.previous_lat[IDs - 1] = np.copy(lats)
    def store_previous_variables(self):
        """Store some environment variables, for access at next time step

        Only active if the subclass defines self.store_previous (a list of
        variable names). Exposes last-step values via
        self.environment_previous, a recarray indexed like self.elements.
        """
        if not hasattr(self, 'store_previous'):
            return
        if not hasattr(self, 'variables_previous'):
            # Create ndarray to store previous variables
            dtype = [(var, np.float32) for var in self.store_previous]
            self.variables_previous = np.array(np.full(
                self.num_elements_total(), np.nan),
                                               dtype=dtype)
        # Copying variables_previous to environment_previous
        # (element IDs are 1-based, hence the -1)
        self.environment_previous = self.variables_previous[self.elements.ID -
                                                            1]
        # Use new values for new elements which have no previous value
        for var in self.store_previous:
            undefined = np.isnan(self.environment_previous[var])
            self.environment_previous[var][undefined] = getattr(
                self.environment, var)[undefined]
        self.environment_previous = self.environment_previous.view(np.recarray)
        # Finally, overwrite the stored values with the current environment,
        # ready for the next time step.
        for var in self.store_previous:
            self.variables_previous[var][self.elements.ID - 1] = getattr(
                self.environment, var)
    def interact_with_coastline(self, final=False):
        """Coastline interaction according to configuration setting

        Args:
            final: if True, land_binary_mask is re-fetched for the final
                element positions before applying the action.
        """
        if self.num_elements_active() == 0:
            return
        i = self.get_config('general:coastline_action')
        if not hasattr(self, 'environment') or not hasattr(
                self.environment, 'land_binary_mask'):
            return
        if i == 'none':  # Do nothing
            return
        if final is True:  # Get land_binary_mask for final location
            en, en_prof, missing = \
                self.get_environment(['land_binary_mask'],
                                     self.time,
                                     self.elements.lon,
                                     self.elements.lat,
                                     self.elements.z,
                                     None)
            self.environment.land_binary_mask = en.land_binary_mask
        if i == 'stranding':  # Deactivate elements on land
            self.deactivate_elements(self.environment.land_binary_mask == 1,
                                     reason='stranded')
        elif i == 'previous':  # Go back to previous position (in water)
            if self.newly_seeded_IDs is not None:
                # Elements whose age equals one time step were seeded this
                # step; if they are on land they are deactivated rather than
                # moved back (they have no valid previous position).
                self.deactivate_elements(
                    (self.environment.land_binary_mask == 1) &
                    (self.elements.age_seconds
                     == self.time_step.total_seconds()),
                    reason='seeded_on_land')
            on_land = np.where(self.environment.land_binary_mask == 1)[0]
            if len(on_land) == 0:
                logger.debug('No elements hit coastline.')
            else:
                logger.debug('%s elements hit coastline, '
                             'moving back to water' % len(on_land))
                on_land_ID = self.elements.ID[on_land]
                # IDs are 1-based indices into the previous-position arrays.
                self.elements.lon[on_land] = \
                    np.copy(self.previous_lon[on_land_ID - 1])
                self.elements.lat[on_land] = \
                    np.copy(self.previous_lat[on_land_ID - 1])
                # Mark as water again so they are not re-processed this step.
                self.environment.land_binary_mask[on_land] = 0
    def interact_with_seafloor(self):
        """Seafloor interaction according to configuration setting

        Elements below the seafloor are lifted, deactivated or moved back,
        depending on the 'general:seafloor_action' config setting.
        """
        if self.num_elements_active() == 0:
            return
        if 'sea_floor_depth_below_sea_level' not in self.priority_list:
            # No reader provides bathymetry; nothing to do.
            return
        sea_floor_depth = self.sea_floor_depth()
        # z is negative below the surface; below seafloor when z < -depth.
        below = np.where(self.elements.z < -sea_floor_depth)[0]
        if len(below) == 0:
            logger.debug('No elements hit seafloor.')
            return
        i = self.get_config('general:seafloor_action')
        if i == 'lift_to_seafloor':
            logger.debug('Lifting %s elements to seafloor.' % len(below))
            self.elements.z[below] = -sea_floor_depth[below]
        elif i == 'deactivate':
            self.deactivate_elements(self.elements.z < -sea_floor_depth,
                                     reason='seafloor')
            # Clamp z so deactivated elements are recorded at the seafloor.
            self.elements.z[below] = -sea_floor_depth[below]
        elif i == 'previous':  # Go back to previous position (in water)
            logger.warning('%s elements hit seafloor, '
                           'moving back ' % len(below))
            below_ID = self.elements.ID[below]
            # IDs are 1-based indices into the previous-position arrays.
            self.elements.lon[below] = \
                np.copy(self.previous_lon[below_ID - 1])
            self.elements.lat[below] = \
                np.copy(self.previous_lat[below_ID - 1])
    @abstractmethod
    def update(self):
        """Any trajectory model implementation must define an update method.
        This method must/can use environment data (self.environment) to
        update properties (including position) of its particles (self.elements)
        """
    # NOTE(review): abstractproperty is deprecated since Python 3.3; the
    # modern spelling is @property on top of @abstractmethod.
    @abstractproperty
    def ElementType(self):
        """Any trajectory model implementation must define an ElementType."""
    # NOTE(review): abstractproperty is deprecated since Python 3.3; the
    # modern spelling is @property on top of @abstractmethod.
    @abstractproperty
    def required_variables(self):
        """Any trajectory model implementation must list needed variables."""
def test_data_folder(self):
import opendrift
return os.path.abspath(
os.path.join(os.path.dirname(opendrift.__file__), '..', 'tests',
'test_data')) + os.path.sep
    def performance(self):
        '''Report the time spent on various tasks

        Returns a human-readable multi-line string with per-reader
        performance followed by the timing of each simulation phase.
        '''
        outStr = '--------------------\n'
        outStr += 'Reader performance:\n'
        for r in self.readers:
            reader = self.readers[r]
            if reader.is_lazy:
                # Lazy readers have not been initialised, nothing to report.
                continue
            outStr += '--------------------\n'
            outStr += r + '\n'
            outStr += reader.performance()
        outStr += '--------------------\n'
        outStr += 'Performance:\n'
        for category, time in self.timing.items():
            # Truncate the timedelta string to one decimal of seconds.
            timestr = str(time)[0:str(time).find('.') + 2]
            for i, c in enumerate(timestr):
                if c in '123456789.':
                    timestr = timestr[i:]  # Strip leading 0 and :
                    if c == '.':
                        timestr = '0' + timestr
                    break
            # Nested categories are encoded as 'parent:child'; indent by depth.
            parts = category.split(':')
            indent = '  ' * (len(parts) - 1)
            category = parts[-1]
            # '<colon>' is an escape for literal ':' inside a category name.
            category = category.replace('<colon>', ':')
            outStr += '%s%7s %s\n' % (indent, timestr, category)
        outStr += '--------------------\n'
        return outStr
    def add_reader(self, readers, variables=None, first=False):
        """Add one or more readers providing variables used by this model.

        Method may be called subsequently to add more readers
        for other variables.

        Args:
            readers: one or more (list) Reader objects.

            variables (optional): list of strings of standard_name of variables to be provided by this/these reader(s).

            first: Set to True if this reader should be set as first option
        """
        # Convert any strings to lists, for looping
        if isinstance(variables, str):
            variables = [variables]
        if isinstance(readers, BaseReader):
            readers = [readers]
        for reader in readers:
            # Check if input class is of correct type
            if not isinstance(reader, BaseReader) and \
                    not hasattr(reader, '_lazyname'):
                raise TypeError('Please provide Reader object')
            # Check that reader class contains the requested variables
            if variables is not None:
                missingVariables = set(variables) - set(reader.variables)
                if missingVariables:
                    raise ValueError(
                        'Reader %s does not provide variables: %s' %
                        (reader.name, list(missingVariables)))
            # Finally add new reader to list
            if reader.name in self.readers:
                # Reader names must be unique, adding integer
                for n in range(99999):
                    tmp_name = reader.name + '_%d' % n
                    if tmp_name not in self.readers:
                        reader.name = tmp_name
                        break
            # Horizontal buffer of reader must be large enough to cover
            # the distance possibly covered by elements within a time step
            if not reader.is_lazy:
                reader.set_buffer_size(max_speed=self.max_speed)
            self.readers[reader.name] = reader
            logger.debug('Added reader ' + reader.name)
            # Add this reader for each of the given variables
            # (lazy readers are registered in priority_list only after
            # they are initialised)
            if reader.is_lazy is False:
                for variable in variables if variables else reader.variables:
                    if variable in list(self.priority_list):
                        if reader.name not in self.priority_list[variable]:
                            if first is True:
                                self.priority_list[variable].insert(
                                    0, reader.name)
                            else:
                                self.priority_list[variable].append(
                                    reader.name)
                    else:
                        self.priority_list[variable] = [reader.name]
        # Remove/hide variables not needed by the current trajectory model
        for variable in list(self.priority_list):
            if variable not in self.required_variables:
                del self.priority_list[variable]
def add_readers_from_list(self, urls, timeout=10, lazy=True):
'''Make readers from a list of URLs or paths to netCDF datasets'''
if isinstance(urls, str):
urls = [urls]
if lazy is True:
from opendrift.readers.reader_lazy import Reader
readers = [Reader(u) for u in urls]
self.add_reader(readers)
return
readers = [reader_from_url(u, timeout) for u in urls]
self.add_reader([r for r in readers if r is not None])
def add_readers_from_file(self, filename, timeout=10, lazy=True):
fp = open(filename, 'r')
sources = fp.readlines()
sources = [line.strip() for line in sources if line[0] != '#']
self.add_readers_from_list(sources, timeout, lazy=lazy)
def list_environment_variables(self):
"""Return list of all variables provided by the added readers."""
variables = []
for reader in self.readers:
variables.extend(self.readers[reader].variables)
return variables
def missing_variables(self):
"""Return list of all variables for which no reader has been added."""
return [
var for var in self.required_variables
if var not in self.priority_list
]
def get_reader_groups(self, variables=None):
"""Find which groups of variables are provided by the same readers.
This function loops through 'priority_list' (see above) and groups
all variables returned by the same readers in the same order. This
allows asking readers for several variables simultaneously,
improving performance. Used by method 'get_environment'.
Returns:
variable_groups: list of lists of (environment) variables.
reader_groups: list of list of reader names, corresponding to
each of the variable_groups.
"""
if variables is None:
variables = list(self.required_variables)
reader_groups = []
# Find all unique reader groups
for variable, readers in self.priority_list.items():
if (variable in variables) and (readers not in reader_groups):
reader_groups.append(readers)
# Find all variables returned by the same reader group
variable_groups = [None] * len(reader_groups)
for variable, readers in self.priority_list.items():
if variable not in variables:
continue
for i, readerGroup in enumerate(reader_groups):
if readers == readerGroup:
if variable_groups[i]:
variable_groups[i].append(variable)
else:
variable_groups[i] = [variable]
missing_variables = list(
set(variables) - set(self.priority_list.keys()))
return variable_groups, reader_groups, missing_variables
def _lazy_readers(self):
return [r for r in self.readers if self.readers[r].is_lazy is True]
def _unlazy_readers(self):
return [r for r in self.readers if self.readers[r].is_lazy is False]
    def _initialise_next_lazy_reader(self):
        '''Returns reader if successful and None if no more readers'''
        # Pick the first remaining lazy reader, if any
        lazy_readers = self._lazy_readers()
        if len(lazy_readers) == 0:
            return None
        lazyname = lazy_readers[0]
        reader = self.readers[lazyname]
        try:
            reader.initialise()
        except Exception as e:
            # Initialisation failed (e.g. unreachable source): discard this
            # reader and recurse to try the next lazy one.
            logger.debug(e)
            logger.warning('Reader could not be initialised, and is'
                           ' discarded: ' + lazyname)
            self.discard_reader(reader, reason='could not be initialized')
            return self._initialise_next_lazy_reader()  # Call self
        reader.set_buffer_size(max_speed=self.max_speed)
        # Update reader lazy name with actual name
        self.readers[reader.name] = \
            self.readers.pop(lazyname)
        # Register the now-known variables in the priority list
        for var in reader.variables:
            if var in list(self.priority_list):
                self.priority_list[var].append(reader.name)
            else:
                self.priority_list[var] = [reader.name]
        # Remove variables not needed
        for variable in list(self.priority_list):
            if variable not in self.required_variables:
                del self.priority_list[variable]
        return reader
def discard_reader_if_not_relevant(self, reader):
if reader.is_lazy:
return False
if reader.start_time is not None:
if hasattr(self, 'expected_end_time') and reader.start_time > self.expected_end_time:
self.discard_reader(reader, 'starts after simulation end')
return True
if hasattr(self, 'start_time') and reader.end_time < self.start_time:
self.discard_reader(reader, 'ends before simuation start')
return True
if hasattr(self, 'time') and reader.end_time < self.time:
self.discard_reader(reader, 'ends before simuation is finished')
return True
if len(set(self.required_variables) & set(reader.variables)) == 0:
self.discard_reader(reader, reason='does not contain any relevant variables')
return True
if not hasattr(reader, 'checked_for_overlap'):
if not reader.global_coverage():
if not hasattr(self, 'simulation_extent'):
logger.warning('Simulation has no simulation_extent, cannot check reader coverage')
return False
# TODO
# need a better coverage/overlap check below
corners = reader.xy2lonlat([reader.xmin, reader.xmin, reader.xmax, reader.xmax],
[reader.ymax, reader.ymin, reader.ymax, reader.ymin])
rlonmin = np.min(corners[0])
rlonmax = np.max(corners[0])
rlatmin = np.min(corners[1])
rlatmax = np.max(corners[1])
if hasattr(reader, 'proj4') and 'stere' in reader.proj4 and 'lat_0=90' in reader.proj4:
rlatmax = 90
if hasattr(reader, 'proj4') and 'stere' in reader.proj4 and 'lat_0=-90' in reader.proj4:
rlatmin = -90
if rlatmin > self.simulation_extent[3]:
self.discard_reader(reader, reason='too far north')
return True
if rlatmax < self.simulation_extent[1]:
self.discard_reader(reader, reason='too far south')
return True
if rlonmax < self.simulation_extent[0]:
self.discard_reader(reader, reason='too far west')
return True
if rlonmin > self.simulation_extent[2]:
self.discard_reader(reader, reason='too far east')
return True
reader.checked_for_overlap = True
return False # Reader is not discarded
def discard_reader(self, reader, reason):
readername = reader.name
logger.debug('Discarding reader (%s): %s' % (reason, readername))
del self.readers[readername]
if not hasattr(self, 'discarded_readers'):
self.discarded_readers = {readername: reason}
else:
self.discarded_readers[readername] = reason
# Remove from priority list
for var in self.priority_list.copy():
self.priority_list[var] = [
r for r in self.priority_list[var] if r != readername
]
if len(self.priority_list[var]) == 0:
del self.priority_list[var]
    def get_environment(self, variables, time, lon, lat, z, profiles):
        '''Retrieve environmental variables at requested positions.

        Args:
            variables: list of variable names to retrieve.
            time: the time at which values are requested.
            lon, lat, z: arrays of element positions (z negative below surface).
            profiles: list of variables for which vertical profiles are
                requested, or None.

        Updates:
            Buffer (raw data blocks) for each reader stored for performance:
                [readers].var_block_before (last before requested time)
                [readers].var_block_after (first after requested time)
                - lists of one ReaderBlock per variable group: time, x, y, [vars]

        Returns:
            environment: recarray with variables as named attributes,
                interpolated to requested positions/time.
            env_profiles: dict of vertical profiles (or None).
            missing: boolean mask of elements with any invalid value.
        '''
        self.timer_start('main loop:readers')
        # Initialise ndarray to hold environment variables
        dtype = [(var, np.float32) for var in variables]
        env = np.ma.array(np.zeros(len(lon)) * np.nan, dtype=dtype)
        if not hasattr(self, 'fallback_values'):
            self.set_fallback_values(refresh=False)
        # Discard any existing readers which are not relevant
        for readername, reader in self.readers.copy().items():
            self.discard_reader_if_not_relevant(reader)
        if 'drift:truncate_ocean_model_below_m' in self._config:
            truncate_depth = self.get_config(
                'drift:truncate_ocean_model_below_m')
            if truncate_depth is not None:
                logger.debug('Truncating ocean models below %s m' %
                             truncate_depth)
                # Copy z so the caller's array is not mutated
                z = z.copy()
                z[z < -truncate_depth] = -truncate_depth
                if self.required_profiles_z_range is not None:
                    self.required_profiles_z_range = np.array(
                        self.required_profiles_z_range)
                    self.required_profiles_z_range[
                        self.required_profiles_z_range <
                        -truncate_depth] = -truncate_depth
        # Initialise more lazy readers if necessary
        # Sentinel value forces at least one pass through the loop
        missing_variables = ['missingvar']
        while (len(missing_variables) > 0 and len(self._lazy_readers()) > 0):
            variable_groups, reader_groups, missing_variables = \
                self.get_reader_groups(variables)
            if hasattr(self, 'desired_variables'):
                missing_variables = list(
                    set(missing_variables) - set(self.desired_variables))
            if len(missing_variables) > 0:
                logger.debug('Variables not covered by any reader: ' +
                             str(missing_variables))
                reader = 'NotNone'
                while reader is not None:
                    reader = self._initialise_next_lazy_reader()
                    if reader is not None:
                        if self.discard_reader_if_not_relevant(reader):
                            reader = None
                    if reader is not None:
                        if (reader.covers_time(self.time) and
                                len(reader.covers_positions(lon, lat)[0]) > 0):
                            missing_variables = list(
                                set(missing_variables) - set(reader.variables))
                            if len(missing_variables) == 0:
                                break  # We cover now all variables
        # For each variable/reader group:
        variable_groups, reader_groups, missing_variables = \
            self.get_reader_groups(variables)
        for variable in variables:  # Fill with fallback value if no reader
            co = self.get_config('environment:fallback:%s' % variable)
            if co is not None:
                env[variable] = np.ma.ones(env[variable].shape) * co
        for i, variable_group in enumerate(variable_groups):
            logger.debug('----------------------------------------')
            logger.debug('Variable group %s' % (str(variable_group)))
            logger.debug('----------------------------------------')
            reader_group = reader_groups[i]
            # missing_indices tracks elements not yet served by any reader
            missing_indices = np.array(range(len(lon)))
            # For each reader:
            for reader_name in reader_group:
                logger.debug('Calling reader ' + reader_name)
                logger.debug('----------------------------------------')
                self.timer_start('main loop:readers:' +
                                 reader_name.replace(':', '<colon>'))
                reader = self.readers[reader_name]
                if reader.is_lazy:
                    logger.warning('Reader is lazy, should not happen')
                    import sys
                    sys.exit('Should not happen')
                if not reader.covers_time(time):
                    logger.debug('\tOutside time coverage of reader.')
                    if reader_name == reader_group[-1]:
                        # Last reader in group failed: try to bring in a new
                        # lazy reader and restart the whole lookup
                        if self._initialise_next_lazy_reader() is not None:
                            logger.debug(
                                'Missing variables: calling get_environment recursively'
                            )
                            return self.get_environment(
                                variables, time, lon, lat, z, profiles)
                    continue
                # Fetch given variables at given positions from current reader
                try:
                    logger.debug('Data needed for %i elements' %
                                 len(missing_indices))
                    # Check if vertical profiles are requested from reader
                    if profiles is not None:
                        profiles_from_reader = list(
                            set(variable_group) & set(profiles))
                        if profiles_from_reader == []:
                            profiles_from_reader = None
                    else:
                        profiles_from_reader = None
                    env_tmp, env_profiles_tmp = \
                        reader.get_variables_interpolated(
                            variable_group, profiles_from_reader,
                            self.required_profiles_z_range, time,
                            lon[missing_indices], lat[missing_indices],
                            z[missing_indices], self.proj_latlon)
                except NotCoveredError as e:
                    logger.info(e)
                    self.timer_end('main loop:readers:' +
                                   reader_name.replace(':', '<colon>'))
                    if reader_name == reader_group[-1]:
                        if self._initialise_next_lazy_reader() is not None:
                            logger.debug(
                                'Missing variables: calling get_environment recursively'
                            )
                            return self.get_environment(
                                variables, time, lon, lat, z, profiles)
                    continue
                except Exception as e:  # Unknown error
                    # TODO:
                    # This could e.g. be due to corrupted files or
                    # hanging thredds-servers. A reader could be discarded
                    # after e.g. 3 such failed attempts
                    logger.info('========================')
                    logger.warning(e)
                    logger.debug(traceback.format_exc())
                    logger.info('========================')
                    self.timer_end('main loop:readers:' +
                                   reader_name.replace(':', '<colon>'))
                    if reader_name == reader_group[-1]:
                        if self._initialise_next_lazy_reader() is not None:
                            logger.debug(
                                'Missing variables: calling get_environment recursively'
                            )
                            return self.get_environment(
                                variables, time, lon, lat, z, profiles)
                    continue
                # Copy retrieved variables to env array, and mask nan-values
                for var in variable_group:
                    if var not in self.required_variables:
                        logger.debug('Not returning env-variable: ' + var)
                        continue
                    if var not in env.dtype.names:
                        continue  # Skipping variables that are only used to derive needed variables
                    env[var][missing_indices] = np.ma.masked_invalid(
                        env_tmp[var][0:len(missing_indices)]).astype('float32')
                    if profiles_from_reader is not None and var in profiles_from_reader:
                        if 'env_profiles' not in locals():
                            env_profiles = env_profiles_tmp
                        # TODO: fix to be checked
                        if var in env_profiles and var in env_profiles_tmp:
                            # If one profile has fewer vertical layers than
                            # the other, we use only the overlapping part
                            if len(env_profiles['z']) != len(
                                    env_profiles_tmp['z']):
                                logger.debug('Warning: different number of '
                                             ' vertical layers: %s and %s' %
                                             (len(env_profiles['z']),
                                              len(env_profiles_tmp['z'])))
                            z_ind = np.arange(
                                np.minimum(
                                    len(env_profiles['z']) - 1,
                                    len(env_profiles_tmp['z']) - 1))
                            # len(missing_indices) since 2 points might have been added and not removed
                            env_profiles_tmp[var] = np.ma.atleast_2d(
                                env_profiles_tmp[var])
                            env_profiles[var][np.ix_(z_ind, missing_indices)] = \
                                np.ma.masked_invalid(env_profiles_tmp[var][z_ind,0:len(missing_indices)]).astype('float32')
                            # For profiles with different numbers of layers, we extrapolate
                            if env_profiles[var].shape[0] > 1:
                                missingbottom = np.isnan(
                                    env_profiles[var][-1, :])
                                env_profiles[var][
                                    -1, missingbottom] = env_profiles[var][
                                        -2, missingbottom]
                # Detect elements with missing data, for present reader group
                if hasattr(env_tmp[variable_group[0]], 'mask'):
                    try:
                        del combined_mask
                    except:
                        pass
                    for var in variable_group:
                        tmp_var = np.ma.masked_invalid(env_tmp[var])
                        # Changed 13 Oct 2016, but uncertain of effect
                        # TODO: to be checked
                        #tmp_var = env_tmp[var]
                        if 'combined_mask' not in locals():
                            combined_mask = np.ma.getmask(tmp_var)
                        else:
                            combined_mask = \
                                np.ma.mask_or(combined_mask,
                                              np.ma.getmask(tmp_var),
                                              shrink=False)
                    try:
                        if len(missing_indices) != len(combined_mask):
                            # TODO: mask mismatch due to 2 added points
                            raise ValueError('Mismatch of masks')
                        missing_indices = missing_indices[combined_mask]
                    except Exception as ex:  # Not sure what is happening here
                        logger.info(
                            'Problems setting mask on missing_indices!')
                        logger.exception(ex)
                else:
                    missing_indices = []  # temporary workaround
                if (type(missing_indices)
                        == np.int64) or (type(missing_indices) == np.int32):
                    missing_indices = []
                self.timer_end('main loop:readers:' +
                               reader_name.replace(':', '<colon>'))
                if len(missing_indices) == 0:
                    logger.debug('Obtained data for all elements.')
                    break
                else:
                    logger.debug('Data missing for %i elements.' %
                                 (len(missing_indices)))
            if len(self._lazy_readers()) > 0:
                if self._initialise_next_lazy_reader() is not None:
                    logger.warning(
                        'Missing variables: calling get_environment recursively'
                    )
                    return self.get_environment(
                        variables, time, lon, lat, z, profiles)
        logger.debug('---------------------------------------')
        logger.debug('Finished processing all variable groups')
        self.timer_start('main loop:readers:postprocessing')
        # Fill remaining masked values with configured fallback values
        for var in self.fallback_values:
            if (var not in variables) and (profiles is None
                                           or var not in profiles):
                continue
            mask = env[var].mask
            if any(mask == True):
                logger.debug(
                    ' Using fallback value %s for %s for %s elements' %
                    (self.fallback_values[var], var, np.sum(mask == True)))
                env[var][mask] = self.fallback_values[var]
            # Profiles
            if profiles is not None and var in profiles:
                if 'env_profiles' not in locals():
                    logger.debug('Creating empty dictionary for profiles not '
                                 'profided by any reader: ' +
                                 str(self.required_profiles))
                    env_profiles = {}
                    env_profiles['z'] = \
                        np.array(self.required_profiles_z_range)[::-1]
                if var not in env_profiles:
                    logger.debug(
                        ' Using fallback value %s for %s for all profiles'
                        % (self.fallback_values[var], var))
                    env_profiles[var] = self.fallback_values[var]*\
                        np.ma.ones((len(env_profiles['z']), self.num_elements_active()))
                else:
                    mask = env_profiles[var].mask
                    num_masked_values_per_element = np.sum(mask == True)
                    num_missing_profiles = np.sum(num_masked_values_per_element
                                                  == len(env_profiles['z']))
                    env_profiles[var][mask] = self.fallback_values[var]
                    logger.debug(
                        ' Using fallback value %s for %s for %s profiles'
                        % (
                            self.fallback_values[var],
                            var,
                            num_missing_profiles,
                        ))
                    num_missing_individual = np.sum(
                        num_masked_values_per_element > 0
                    ) - num_missing_profiles
                    if num_missing_individual > 0:
                        logger.debug(
                            ' ...plus %s individual points in other profiles'
                            % num_missing_individual)
        #######################################################
        # Some extra checks of units and realistic magnitude
        #######################################################
        # Values above 100 are assumed to be Kelvin, not Celsius
        if 'sea_water_temperature' in variables:
            t_kelvin = np.where(env['sea_water_temperature'] > 100)[0]
            if len(t_kelvin) > 0:
                logger.warning(
                    'Converting temperatures from Kelvin to Celcius')
                env['sea_water_temperature'][
                    t_kelvin] = env['sea_water_temperature'][t_kelvin] - 273.15
                if 'env_profiles' in locals(
                ) and 'sea_water_temperature' in env_profiles.keys():
                    env_profiles['sea_water_temperature'][:,t_kelvin] = \
                        env_profiles['sea_water_temperature'][:,t_kelvin] - 273.15
        #######################################################
        # Parameterisation of unavailable variables
        #######################################################
        if 'drift:use_tabularised_stokes_drift' in self._config and self.get_config(
                'drift:use_tabularised_stokes_drift') is True:
            if 'x_wind' not in variables:
                logger.debug('No wind available to calculate Stokes drift')
            else:
                # Parameterise only when Stokes drift is absent or all-zero
                if 'sea_surface_wave_stokes_drift_x_velocity' not in variables or (
                        env['sea_surface_wave_stokes_drift_x_velocity'].max()
                        == 0 and
                        env['sea_surface_wave_stokes_drift_y_velocity'].max()
                        == 0):
                    logger.debug('Calculating parameterised stokes drift')
                    env['sea_surface_wave_stokes_drift_x_velocity'], \
                    env['sea_surface_wave_stokes_drift_y_velocity'] = \
                        self.wave_stokes_drift_parameterised((env['x_wind'], env['y_wind']),
                        self.get_config('drift:tabularised_stokes_drift_fetch'))
                    if (env['sea_surface_wave_significant_height'].max() == 0):
                        logger.debug(
                            'Calculating parameterised significant wave height')
                        env['sea_surface_wave_significant_height'] = \
                            self.wave_significant_height_parameterised((env['x_wind'], env['y_wind']),
                            self.get_config('drift:tabularised_stokes_drift_fetch'))
        #############################
        # Add uncertainty/diffusion
        #############################
        # Current
        if 'x_sea_water_velocity' in variables and \
                'y_sea_water_velocity' in variables:
            std = self.get_config('drift:current_uncertainty')
            if std > 0:
                logger.debug('Adding uncertainty for current: %s m/s' % std)
                env['x_sea_water_velocity'] += np.random.normal(
                    0, std, self.num_elements_active())
                env['y_sea_water_velocity'] += np.random.normal(
                    0, std, self.num_elements_active())
            std = self.get_config('drift:current_uncertainty_uniform')
            if std > 0:
                logger.debug('Adding uncertainty for current: %s m/s' % std)
                env['x_sea_water_velocity'] += np.random.uniform(
                    -std, std, self.num_elements_active())
                env['y_sea_water_velocity'] += np.random.uniform(
                    -std, std, self.num_elements_active())
        # Wind
        if 'x_wind' in variables and 'y_wind' in variables:
            std = self.get_config('drift:wind_uncertainty')
            if std > 0:
                logger.debug('Adding uncertainty for wind: %s m/s' % std)
                env['x_wind'] += np.random.normal(0, std,
                                                  self.num_elements_active())
                env['y_wind'] += np.random.normal(0, std,
                                                  self.num_elements_active())
        #####################
        # Diagnostic output
        #####################
        if len(env) > 0:
            logger.debug('------------ SUMMARY -------------')
            for var in variables:
                logger.debug(' %s: %g (min) %g (max)' %
                             (var, env[var].min(), env[var].max()))
            logger.debug('---------------------------------')
            logger.debug('\t\t%s active elements' % self.num_elements_active())
            if self.num_elements_active() > 0:
                lonmin = self.elements.lon.min()
                lonmax = self.elements.lon.max()
                latmin = self.elements.lat.min()
                latmax = self.elements.lat.max()
                zmin = self.elements.z.min()
                zmax = self.elements.z.max()
                if latmin == latmax:
                    logger.debug('\t\tlatitude = %s' % (latmin))
                else:
                    logger.debug('\t\t%s <- latitude -> %s' %
                                 (latmin, latmax))
                if lonmin == lonmax:
                    logger.debug('\t\tlongitude = %s' % (lonmin))
                else:
                    logger.debug('\t\t%s <- longitude -> %s' %
                                 (lonmin, lonmax))
                if zmin == zmax:
                    logger.debug('\t\tz = %s' % (zmin))
                else:
                    logger.debug('\t\t%s <- z -> %s' % (zmin, zmax))
                logger.debug('---------------------------------')
        # Prepare array indicating which elements contain any invalid values
        missing = np.ma.masked_invalid(env[variables[0]]).mask
        for var in variables[1:]:
            missing = np.ma.mask_or(missing,
                                    np.ma.masked_invalid(env[var]).mask,
                                    shrink=False)
        # Convert dictionary to recarray and return
        if 'env_profiles' not in locals():
            env_profiles = None
        # Convert masked arrays to regular arrays for increased performance
        env = np.array(env)
        if env_profiles is not None:
            for var in env_profiles:
                env_profiles[var] = np.array(env_profiles[var])
        self.timer_end('main loop:readers:postprocessing')
        self.timer_end('main loop:readers')
        return env.view(np.recarray), env_profiles, missing
def get_variables_along_trajectory(self, variables, lons, lats, times):
data = {'time': times, 'lon': lons, 'lat': lats}
for var in variables:
data[var] = np.zeros(len(times))
for i, time in enumerate(times):
self.time = time
d = self.get_environment(lon=np.atleast_1d(lons[i]),
lat=np.atleast_1d(lats[i]),
z=np.atleast_1d(0),
time=time,
variables=variables,
profiles=None)
for var in variables:
data[var][i] = d[0][var][0]
return data
def num_elements_active(self):
"""The number of active elements."""
if hasattr(self, 'elements'):
return len(self.elements)
else:
return 0
def num_elements_deactivated(self):
"""The number of deactivated elements."""
if hasattr(self, 'elements_deactivated'):
return len(self.elements_deactivated)
else:
return 0
def num_elements_scheduled(self):
if hasattr(self, 'elements_scheduled'):
return len(self.elements_scheduled)
else:
return 0
def num_elements_total(self):
"""The total number of scheduled, active and deactivated elements."""
return self.num_elements_activated() + self.num_elements_scheduled()
def num_elements_activated(self):
"""The total number of active and deactivated elements."""
return self.num_elements_active() + self.num_elements_deactivated()
def schedule_elements(self, elements, time):
"""Schedule elements to be seeded during runtime.
Also assigns a unique ID to each particle, monotonically increasing."""
# prepare time
if isinstance(time, np.ndarray):
time = list(time)
if not isinstance(time, list):
time = [time]
if len(time) == 1 and len(elements) > 1:
time = time * len(elements)
if not hasattr(self, 'elements_scheduled'):
self.elements_scheduled = elements
self.elements_scheduled_time = np.array(time)
# We start simulation at time of release of first element:
self.start_time = time[0]
self.elements_scheduled.ID = np.arange(1, len(elements) + 1)
else:
elements.ID = np.arange(self.num_elements_scheduled() + 1,
self.num_elements_scheduled() + 1 +
len(elements)) # Increase ID successively
self.elements_scheduled.extend(elements)
self.elements_scheduled_time = np.append(
self.elements_scheduled_time, np.array(time))
min_time = np.min(time)
if hasattr(self, 'start_time'):
if min_time < self.start_time:
self.start_time = min_time
logger.debug('Setting simulation start time to %s' %
str(min_time))
else:
self.start_time = min_time
logger.debug('Setting simulation start time to %s' % str(min_time))
    def release_elements(self):
        """Activate elements which are scheduled within following timestep.

        Elements due within [self.time, self.time + time_step) are moved
        from the scheduled pool into self.elements; their initial
        positions are recorded first. Works in both time directions.
        """
        logger.debug(
            'to be seeded: %s, already seeded %s' %
            (len(self.elements_scheduled), self.num_elements_activated()))
        if len(self.elements_scheduled) == 0:
            return
        if self.time_step.days >= 0:
            # Forward simulation: release elements scheduled in the
            # coming (positive) timestep
            indices = (self.elements_scheduled_time >= self.time) & \
                (self.elements_scheduled_time <
                 self.time + self.time_step)
        else:
            # Backward simulation: time_step is negative, so the window
            # extends backwards from self.time
            indices = (self.elements_scheduled_time <= self.time) & \
                (self.elements_scheduled_time >
                 self.time + self.time_step)
        # Record initial positions before activation
        self.store_present_positions(self.elements_scheduled.ID[indices],
                                     self.elements_scheduled.lon[indices],
                                     self.elements_scheduled.lat[indices])
        self.elements_scheduled.move_elements(self.elements, indices)
        self.elements_scheduled_time = self.elements_scheduled_time[~indices]
        logger.debug('Released %i new elements.' % np.sum(indices))
def closest_ocean_points(self, lon, lat):
"""Return the closest ocean points for given lon, lat"""
deltalon = 0.01 # grid
deltalat = 0.01
numbuffer = 10
lonmin = lon.min() - deltalon * numbuffer
lonmax = lon.max() + deltalon * numbuffer
latmin = lat.min() - deltalat * numbuffer
latmax = lat.max() + deltalat * numbuffer
if not 'land_binary_mask' in self.priority_list:
logger.info('No land reader added, '
'making a temporary landmask reader')
from opendrift.models.oceandrift import OceanDrift
reader_landmask = reader_global_landmask.Reader(extent=[
np.maximum(-360,
self.elements_scheduled.lon.min() - deltalon),
np.maximum(-89,
self.elements_scheduled.lat.min() - deltalat),
np.minimum(720,
self.elements_scheduled.lon.max() + deltalon),
np.minimum(89,
self.elements_scheduled.lat.max() + deltalat)
])
o = OceanDrift(loglevel='custom')
if hasattr(self, 'simulation_extent'):
o.simulation_extent = self.simulation_extent
o.add_reader(reader_landmask)
land_reader = reader_landmask
else:
logger.info('Using existing reader for land_binary_mask')
land_reader_name = self.priority_list['land_binary_mask'][0]
land_reader = self.readers[land_reader_name]
o = self
land = o.get_environment(['land_binary_mask'],
lon=lon,
lat=lat,
z=0 * lon,
time=land_reader.start_time,
profiles=None)[0]['land_binary_mask']
if land.max() == 0:
logger.info('All points are in ocean')
return lon, lat
logger.info('Moving %i out of %i points from land to water' %
(np.sum(land != 0), len(lon)))
landlons = lon[land != 0]
landlats = lat[land != 0]
longrid = np.arange(lonmin, lonmax, deltalon)
latgrid = np.arange(latmin, latmax, deltalat)
longrid, latgrid = np.meshgrid(longrid, latgrid)
longrid = longrid.ravel()
latgrid = latgrid.ravel()
# Remove grid-points not covered by this reader
latgrid_covered = land_reader.covers_positions(longrid, latgrid)[0]
longrid = longrid[latgrid_covered]
latgrid = latgrid[latgrid_covered]
landgrid = o.get_environment(['land_binary_mask'],
lon=longrid,
lat=latgrid,
z=0 * longrid,
time=land_reader.start_time,
profiles=None)[0]['land_binary_mask']
if landgrid.min() == 1 or np.isnan(landgrid.min()):
logger.warning('No ocean pixels nearby, cannot move elements.')
return lon, lat
oceangridlons = longrid[landgrid == 0]
oceangridlats = latgrid[landgrid == 0]
from scipy import spatial
tree = scipy.spatial.cKDTree(
np.dstack([oceangridlons, oceangridlats])[0])
landpoints = np.dstack([landlons, landlats])
dist, indices = tree.query(landpoints)
indices = indices.ravel()
lon[land != 0] = oceangridlons[indices]
lat[land != 0] = oceangridlats[indices]
return lon, lat
    def seed_elements(self,
                      lon,
                      lat,
                      time,
                      radius=0,
                      number=None,
                      radius_type='gaussian',
                      **kwargs):
        """Seed elements with given position(s), time and properties.

        Arguments:
            lon: scalar or array
                central longitude(s).
            lat: scalar or array
                central latitude(s).
            radius: scalar or array
                radius in meters around each lon-lat pair,
                within which particles will be randomly seeded.
            number: integer, total number of particles to be seeded
                If number is None, the number of elements is the
                length of lon/lat or time if these are arrays. Otherwise
                the number of elements are obtained from the config-default.
            time: datenum or list
                The time at which particles are seeded/released.
                If time is a list with two elements, elements are seeded
                continously from start/first to end/last time.
                If time is a list with more than two elements, the number of elements
                is equal to len(time) and are seeded as a time series.
            radius_type: string
                If 'gaussian' (default), the radius is the standard deviation in
                x-y-directions. If 'uniform', elements are spread evenly and
                always inside a circle with the given radius.
            kwargs:
                keyword arguments containing properties/attributes and
                values corresponding to the actual particle type (ElementType).
                These are forwarded to the ElementType class. All properties
                for which there are no default value must be specified.
        """
        if 'cone' in kwargs:
            raise ValueError(
                'Keyword *cone* for seed_elements is deprecated, use seed_cone() instead.'
            )
        # Bookkeeping of named seeds ("origin markers")
        if self.origin_marker is None:
            self.origin_marker = {}
        if 'origin_marker' in kwargs:
            origin_marker = kwargs['origin_marker']
        else:
            origin_marker = len(self.origin_marker)
        if 'origin_marker_name' in kwargs:
            origin_marker_name = kwargs['origin_marker_name']
            del kwargs['origin_marker_name']
        else:
            origin_marker_name = 'Seed %d' % len(self.origin_marker)
        if not 'origin_marker' in kwargs:
            kwargs['origin_marker'] = origin_marker
        if '_' in origin_marker_name:
            raise ValueError(
                'Underscore (_) not allowed in origin_marker_name')
        self.origin_marker[str(origin_marker)] = origin_marker_name.replace(
            ' ', '_')
        # Normalise inputs to flat arrays
        lon = np.atleast_1d(lon).ravel()
        lat = np.atleast_1d(lat).ravel()
        radius = np.atleast_1d(radius).ravel()
        time = np.atleast_1d(time)
        if lat.max() > 90 or lat.min() < -90:
            raise ValueError('Latitude must be between -90 and 90 degrees')
        if len(lon) != len(lat):
            raise ValueError('Lon and lat must have same lengths')
        if len(lon) > 1:
            if number is not None and number != len(lon):
                raise ValueError(
                    'Lon and lat have length %s, but number is %s' %
                    (len(lon), number))
            number = len(lon)
        else:
            if number is None:
                if len(time) > 2:
                    number = len(time)  # Interpreting as time series
                else:
                    number = self.get_config('seed:number')
            lon = lon * np.ones(number)
            lat = lat * np.ones(number)
        if len(time) != number and len(time) > 1:
            if len(time) == 2:  # start -> end
                td = (time[1] - time[0]) / (number - 1
                                            )  # timestep between points
                time = [time[0] + i * td for i in range(number)]
            else:
                raise ValueError(
                    'Time array has length %s, must be 1, 2 or %s' %
                    (len(time), number))
        # Add radius / perturbation
        if radius.max() > 0:
            geod = pyproj.Geod(ellps='WGS84')
            # NOTE(review): 'ones' appears unused below — candidate for removal
            ones = np.ones(np.sum(number))
            if radius_type == 'gaussian':
                x = np.random.randn(np.sum(number)) * radius
                y = np.random.randn(np.sum(number)) * radius
                az = np.degrees(np.arctan2(x, y))
                dist = np.sqrt(x * x + y * y)
            elif radius_type == 'uniform':
                # NOTE(review): azimuth drawn with randn*360 (normal), which
                # only wraps to approximately uniform — confirm intended
                az = np.random.randn(np.sum(number)) * 360
                dist = np.sqrt(np.random.uniform(0, 1,
                                                 np.sum(number))) * radius
            # NOTE(review): an unknown radius_type would leave az/dist
            # undefined and raise NameError here
            lon, lat, az = geod.fwd(lon, lat, az, dist, radians=False)
        # If z is 'seafloor'
        if not 'z' in kwargs or kwargs['z'] is None:
            if 'seed:seafloor' in self._config:
                if self.get_config('seed:seafloor') is True:
                    kwargs['z'] = 'seafloor'
                    logger.debug('Seafloor is selected, neglecting z')
        if 'z' in kwargs and isinstance(kwargs['z'], str) \
                and kwargs['z'][0:8] == 'seafloor':
            # We need to fetch seafloor depth from reader
            seafloor_constant = self.get_config(
                'environment:constant:sea_floor_depth_below_sea_level')
            seafloor_fallback = self.get_config(
                'environment:fallback:sea_floor_depth_below_sea_level')
            if seafloor_constant is not None:
                env = {
                    'sea_floor_depth_below_sea_level':
                    np.array(seafloor_constant)
                }
            elif ('sea_floor_depth_below_sea_level'
                  in self.priority_list) or len(self._lazy_readers()):
                if not hasattr(self, 'time'):
                    self.time = time[0]
                env, env_profiles, missing = \
                    self.get_environment(['sea_floor_depth_below_sea_level'],
                                         time=time[0], lon=lon, lat=lat,
                                         z=0*lon, profiles=None)
            elif seafloor_fallback is not None:
                env = {
                    'sea_floor_depth_below_sea_level':
                    np.array(seafloor_fallback)
                }
            else:
                raise ValueError('A reader providing the variable '
                                 'sea_floor_depth_below_sea_level must be '
                                 'added before seeding elements at seafloor.')
            # Add M meters if given as 'seafloor+M'
            if len(kwargs['z']) > 8 and kwargs['z'][8] == '+':
                meters_above_seafloor = float(kwargs['z'][9::])
                logger.info('Seeding elements %f meters above seafloor' %
                            meters_above_seafloor)
            else:
                meters_above_seafloor = 0
            kwargs['z'] = \
                -env['sea_floor_depth_below_sea_level'].astype('float32') + meters_above_seafloor
        # Creating and scheduling elements
        elements = self.ElementType(lon=lon, lat=lat, **kwargs)
        # NOTE(review): 'time_array' appears unused — candidate for removal
        time_array = np.array(time)
        self.schedule_elements(elements, time)
def seed_cone(self, lon, lat, time, radius=0, number=None, **kwargs):
    """Seed elements along a transect/cone between two points/times

    Arguments:
        lon: scalar or list with 2 elements [lon0, lon1]

        lat: scalar or list with 2 elements [lat0, lat1]

        time: datetime or list with 2 elements [t0, t1]

        radius: scalar or list with 2 elements [r0, r1] Unit: meters

        number (int): The number of elements. If this is None, the number of
        elements is taken from configuration.

    Elements are seeded along a transect from
    (lon0, lat0) with uncertainty radius r0 at time t0, towards
    (lon1, lat1) with uncertainty radius r1 at time t1.
    If r0 != r1, the uncertainty radius is linearly changed along
    the transect, thus outlining a "cone".
    """
    # Fall back to the configured number of elements if not given
    if number is None:
        number = self.get_config('seed:number')
    if number == 1:
        raise ValueError(
            'For a cone, the number of elements must be at least 2 or more, given is 1'
        )
    lon = np.atleast_1d(lon).ravel()
    lat = np.atleast_1d(lat).ravel()
    radius = np.atleast_1d(radius).ravel()
    if len(lon) != len(lat):
        raise ValueError('Lon and lat must have same length (1 or 2)')
    elif len(lon) > 2:
        raise ValueError(
            'Lon and lat must have length 1 or 2, given length is %s' %
            (len(lon)))
    elif len(lon) == 1:
        # Degenerate cone: all elements at the same point
        lon = lon * np.ones(number)
        lat = lat * np.ones(number)
    elif len(lon) == 2:  # Segment from (lon0, lat0) to (lon1, lat1)
        geod = pyproj.Geod(ellps='WGS84')
        # Keep original endpoints for metadata (see seed_cone_arguments below)
        lonin = lon
        latin = lat
        # Note that npts places points in-between start and end, and does not include these
        conelonlats = geod.npts(lon[0],
                                lat[0],
                                lon[1],
                                lat[1],
                                number,
                                radians=False)
        lon, lat = zip(*conelonlats)
    if len(radius) > 2:
        raise ValueError('Seed radius must have length 1 or 2')
    elif len(radius) == 2:  # Linear increase from r0 to r1
        radius = np.linspace(radius[0], radius[1], number)
    if isinstance(time, list) and len(time) == 1:
        time = time[0]
    # timespan is only used for metadata (GeoJSON / seed_cone_arguments)
    if hasattr(time, '__len__'):
        timespan = [time[0], time[-1]]
    else:
        timespan = [time, time]
    radius = radius.astype(np.float32)
    # If lonin/latin were not set above (single-point case), derive the
    # endpoints from the seeded positions; the locals() check detects
    # whether the two-point branch was taken
    lonin = lonin if 'lonin' in locals() else [lon.min(), lon.max()]
    latin = latin if 'latin' in locals() else [lat.min(), lat.max()]
    # Store the cone specification for later reference (e.g. output metadata)
    self.seed_cone_arguments = {
        'lon': lonin,
        'lat': latin,
        'radius': [float(radius[0]), float(radius[-1])],
        'time': timespan,
        'number': number
    }
    # Make GeoJson seeding dict to be saved in netCDF metadata
    geo = geojson.LineString([(float(lonin[0]), float(latin[0])),
                              (float(lonin[1]), float(latin[1]))])
    seed_defaults = self.get_configspec('seed')
    default_seed = {
        k.split(':')[-1]: seed_defaults[k]['value']
        for k in seed_defaults
    }
    if 'seafloor' in default_seed and default_seed['seafloor'] is True:
        default_seed['z'] = 'seafloor'
    default_seed = {
        **default_seed,
        **kwargs
    }  # Overwrite with explicitly provided values
    properties = {
        **default_seed, 'time': [str(timespan[0]),
                                 str(timespan[1])],
        'radius': [float(radius[0]), float(radius[-1])],
        'number': number
    }
    f = geojson.Feature(geometry=geo, properties=properties)
    self.seed_geojson.append(f)
    # Forwarding calculated cone points/radii to seed_elements
    self.seed_elements(lon=lon,
                       lat=lat,
                       time=time,
                       radius=radius,
                       number=number,
                       **kwargs)
def seed_from_geojson(self, gjson):
    """Seed elements from a GeoJSON Feature string. Under development.

    The feature's 'time' property (ISO 8601, 'Z' accepted) is required;
    all other properties are forwarded as keyword arguments to the
    relevant seeding method, chosen from the geometry type:
    Polygon -> seed_within_polygon, LineString -> seed_cone,
    Point -> seed_elements.
    """
    try:
        gj = geojson.loads(gjson)
    except Exception as e:
        # Narrowed from a bare 'except:', which would also swallow
        # KeyboardInterrupt/SystemExit; chain the original cause
        raise ValueError('Could not load GeoJSON string: %s' % gjson) from e
    if not gj.is_valid:
        raise ValueError('GeoJSON string is not valid: %s' % gj.errors())
    # Assuming temporarily that gj is a Feature, and not a FeatureCollection
    properties = gj['properties']
    if 'time' not in properties:
        raise ValueError('Property "time" is not available')
    kwargs = {}
    for prop in properties:
        if prop == 'time':
            t = properties['time']
            if isinstance(t, list):
                # Time interval [start, end]; 'Z' suffix is not understood
                # by fromisoformat before Python 3.11, so map it to +00:00
                time = [
                    datetime.fromisoformat(t[0].replace("Z", "+00:00")),
                    datetime.fromisoformat(t[1].replace("Z", "+00:00"))
                ]
            else:
                time = datetime.fromisoformat(t.replace("Z", "+00:00"))
        else:
            kwargs[prop] = properties[prop]
    geometry = gj['geometry']
    if geometry['type'] == 'Polygon':
        coords = list(geojson.utils.coords(gj))
        lon, lat = zip(*[(c[0], c[1]) for c in coords])
        self.seed_within_polygon(lons=lon, lats=lat, time=time, **kwargs)
    elif geometry['type'] == 'LineString':
        coords = list(geojson.utils.coords(gj))
        lon, lat = zip(*[(c[0], c[1]) for c in coords])
        self.seed_cone(lon=lon, lat=lat, time=time, **kwargs)
    elif geometry['type'] == 'Point':
        coords = list(geojson.utils.coords(gj))
        lon, lat = zip(*[(c[0], c[1]) for c in coords])
        self.seed_elements(lon=lon, lat=lat, time=time, **kwargs)
    else:
        raise ValueError('Not yet implemented')
def seed_repeated_segment(self,
                          lons,
                          lats,
                          start_time,
                          end_time,
                          time_interval=None,
                          number_per_segment=None,
                          total_number=None,
                          **kwargs):
    """Seed elements repeatedly in time along a segment.

    The segment goes from lon[0],lat[0] to lon[1],lat[1].

    The number of elements should be provided as either:

    1) number_per_segment, in which case the total number of elements
       is number_per_segment * len(times), or

    2) total_number, in which case the number of elements per segment
       is total_number / len(times). Any extra elements are duplicated
       along at the first segment.
    """
    # time_interval has no usable default; fail with a clear message
    # instead of an AttributeError on None.total_seconds()
    if time_interval is None:
        raise ValueError('time_interval must be provided')
    numtimes = int((end_time - start_time).total_seconds() /
                   time_interval.total_seconds() + 1)
    times = [start_time + i * time_interval for i in range(numtimes)]
    geod = pyproj.Geod(ellps='WGS84')
    if number_per_segment is None:
        number_per_segment = int(np.floor(total_number / numtimes))
    # npts returns points strictly between the two endpoints
    s_lonlats = geod.npts(lons[0],
                          lats[0],
                          lons[1],
                          lats[1],
                          number_per_segment,
                          radians=False)
    slon, slat = list(zip(*s_lonlats))
    slon = np.atleast_1d(slon)
    slat = np.atleast_1d(slat)
    # Cross positions with times: one full segment per seeding time
    lon, time = np.meshgrid(slon, times)
    lat, time = np.meshgrid(slat, times)
    lon = lon.ravel()
    lat = lat.ravel()
    time = time.ravel()
    if total_number is not None:
        additional_elements = total_number - len(lon)
        # BUGFIX: previously additional_elements == 0 duplicated the WHOLE
        # array, since lon[-0:] is the full array; only pad when positive
        if additional_elements > 0:
            logger.info('Repeating the %d last points, to obtain %d elements' %
                        (additional_elements, total_number))
            lon = np.concatenate((lon, lon[-additional_elements:]))
            lat = np.concatenate((lat, lat[-additional_elements:]))
            time = np.concatenate((time, time[-additional_elements:]))
    self.seed_elements(lon=lon, lat=lat, time=time, **kwargs)
def seed_within_polygon(self, lons, lats, number=None, **kwargs):
    """Seed a number of elements within given polygon.

    Arguments:
        lon: array of longitudes

        lat: array of latitudes

        number: int, number of elements to be seeded

        kwargs: keyword arguments containing properties/attributes and
        values corresponding to the actual particle type (ElementType).
        These are forwarded to method seed_elements(). All properties
        for which there are no default value must be specified.
    """
    if number == 0:
        return
    if number is None:
        number = self.get_config('seed:number')
    lons = np.asarray(lons)
    lats = np.asarray(lats)
    if len(lons) < 3:
        logger.info('At least three points needed to make a polygon')
        return
    if len(lons) != len(lats):
        raise ValueError('lon and lat arrays must have same length.')
    poly = Polygon(list(zip(lons, lats)), closed=True)
    # Equal-area projection centered on the polygon, used both for the
    # area estimate and for laying out an even grid of candidate points
    proj = pyproj.Proj('+proj=aea +lat_1=%f +lat_2=%f +lat_0=%f '
                       '+lon_0=%f +R=6370997.0 +units=m +ellps=WGS84' %
                       (lats.min(), lats.max(),
                        (lats.min() + lats.max()) / 2,
                        (lons.min() + lons.max()) / 2))
    # Project polygon vertices once (previously this block was computed
    # twice, identically, before and after the area calculation)
    lonlat = poly.get_xy()
    lon = lonlat[:, 0]
    lat = lonlat[:, 1]
    x, y = proj(lon, lat)
    # Shoelace formula for the polygon area in projected coordinates
    area = 0.0
    for i in range(-1, len(x) - 1):
        area += x[i] * (y[i + 1] - y[i - 1])
    area = abs(area) / 2
    # Make points, evenly distributed
    deltax = np.sqrt(area / number)
    lonpoints = np.array([])
    latpoints = np.array([])
    xvec = np.linspace(x.min() + deltax / 2,
                       x.max() - deltax / 2,
                       int((x.max() - x.min()) / deltax))
    yvec = np.linspace(y.min() + deltax / 2,
                       y.max() - deltax / 2,
                       int((y.max() - y.min()) / deltax))
    x, y = np.meshgrid(xvec, yvec)
    lon, lat = proj(x, y, inverse=True)
    lon = lon.ravel()
    lat = lat.ravel()
    points = np.c_[lon, lat]
    ind = Path(poly.xy).contains_points(points)
    if not any(ind):  # No elements are inside, we seed on border
        lonpoints = np.append(lonpoints, lons[0:number])
        latpoints = np.append(latpoints, lats[0:number])
    else:
        lonpoints = np.append(lonpoints, lon[ind])
        latpoints = np.append(latpoints, lat[ind])
    if len(ind) == 0:
        logger.info('Small or irregular polygon, using center point.')
        lonpoints = np.atleast_1d(np.mean(lons))
        latpoints = np.atleast_1d(np.mean(lats))
    # Truncate if too many
    # NB: should also repeat some points, if too few
    lonpoints = lonpoints[0:number]
    latpoints = latpoints[0:number]
    if len(lonpoints) < number:
        # If number of positions is smaller than requested,
        # we duplicate the first ones
        missing = number - len(lonpoints)
        lonpoints = np.append(lonpoints, lonpoints[0:missing])
        latpoints = np.append(latpoints, latpoints[0:missing])
    # Finally seed at calculated positions
    self.seed_elements(lonpoints, latpoints, number=number, **kwargs)
def seed_from_wkt(self, wkt, number=None, **kwargs):
    """Seeds elements within (multi)polygons from WKT.

    Elements are distributed among the sub-geometries proportionally
    to their area; any rounding remainder is assigned to the last one.
    """
    try:
        from osgeo import ogr, osr
    except Exception as e:
        logger.warning(e)
        raise ValueError('OGR library is needed to parse WKT') from e
    if number is None:
        number = self.get_config('seed:number')
    geom = ogr.CreateGeometryFromWkt(wkt)
    # Total area, used to distribute elements proportionally per polygon
    total_area = 0
    for i in range(0, geom.GetGeometryCount()):
        g = geom.GetGeometryRef(i)
        total_area += g.GetArea()
    logger.info('Total area of all polygons: %s m2' % total_area)
    num_seeded = 0
    for i in range(0, geom.GetGeometryCount()):
        g = geom.GetGeometryRef(i)
        num_elements = int(number * g.GetArea() / total_area)
        if i == geom.GetGeometryCount() - 1:
            # For the last feature we seed the remaining number,
            # avoiding difference due to rounding:
            num_elements = number - num_seeded
        logger.info('\tSeeding %s elements within polygon number %s' %
                    (num_elements, str(i)))
        try:
            # NOTE(review): coordTrans is not defined in this method; unless
            # it exists as a module-level name, this always raises NameError
            # and the transform is silently skipped — confirm intent
            g.Transform(coordTrans)
        except Exception:
            # Narrowed from a bare 'except:' which would also swallow
            # KeyboardInterrupt/SystemExit
            pass
        b = g.GetBoundary()
        if b is not None:
            points = b.GetPoints()
            lons = [p[0] for p in points]
            lats = [p[1] for p in points]
        else:
            # Alternative if OGR is not built with GEOS support
            r = g.GetGeometryRef(0)
            lons = [r.GetX(j) for j in range(r.GetPointCount())]
            lats = [r.GetY(j) for j in range(r.GetPointCount())]
        self.seed_within_polygon(lons=lons,
                                 lats=lats,
                                 number=num_elements,
                                 **kwargs)
        num_seeded += num_elements
def seed_from_shapefile(self,
                        shapefile,
                        number,
                        layername=None,
                        featurenum=None,
                        **kwargs):
    """Seeds elements within contours read from a shapefile.

    Arguments:
        shapefile: filename, or an already opened OGR datasource

        number: total number of elements, distributed among the features
        proportionally to their (EPSG:3857) area

        layername: if given, only the layer with this name is used

        featurenum: optional sequence of 1-indexed feature numbers to
        seed within; default is all features of the layer

        kwargs: forwarded to seed_within_polygon()/seed_elements().
        If 'timeformat' is present, the seeding time is reconstructed
        from the file name via datetime.strptime.
    """
    try:
        from osgeo import ogr, osr
    except Exception as e:
        logger.warning(e)
        raise ValueError('OGR library is needed to read shapefiles.')
    if 'timeformat' in kwargs:
        # Reconstructing time from filename, where 'timeformat'
        # is forwarded to datetime.strptime()
        kwargs['time'] = datetime.strptime(os.path.basename(shapefile),
                                           kwargs['timeformat'])
        del kwargs['timeformat']
    # NOTE(review): num_seeded_before is not used below in this method —
    # possibly leftover bookkeeping; confirm before removing
    num_seeded_before = self.num_elements_scheduled()
    # All coordinates are transformed to WGS84 (EPSG:4326) for seeding
    targetSRS = osr.SpatialReference()
    targetSRS.ImportFromEPSG(4326)
    try:
        # Accept a filename ...
        s = ogr.Open(shapefile)
    except:
        # ... or an already opened datasource
        s = shapefile
    for layer in s:
        if layername is not None and layer.GetName() != layername:
            logger.info('Skipping layer: ' + layer.GetName())
            continue
        else:
            logger.info('Seeding for layer: %s (%s features)' %
                        (layer.GetDescription(), layer.GetFeatureCount()))
        coordTrans = osr.CoordinateTransformation(layer.GetSpatialRef(),
                                                  targetSRS)
        # NOTE(review): featurenum is (re)assigned here and therefore
        # carries over to subsequent layers of a multi-layer source —
        # confirm this is intended
        if featurenum is None:
            featurenum = range(1, layer.GetFeatureCount() + 1)
        else:
            featurenum = np.atleast_1d(featurenum)
            if max(featurenum) > layer.GetFeatureCount():
                raise ValueError('Only %s features in layer.' %
                                 layer.GetFeatureCount())
        # Loop first through all features to determine total area
        layer.ResetReading()
        # Areas are measured in Web Mercator (EPSG:3857) projected meters
        area_srs = osr.SpatialReference()
        area_srs.ImportFromEPSG(3857)
        areaTransform = osr.CoordinateTransformation(
            layer.GetSpatialRef(), area_srs)
        areas = np.zeros(len(featurenum))
        for i, f in enumerate(featurenum):
            feature = layer.GetFeature(f - 1)  # Note 1-indexing, not 0
            if feature is not None:
                # Clone before transforming, to keep the original geometry
                gom = feature.GetGeometryRef().Clone()
                gom.Transform(areaTransform)
                areas[i] = gom.GetArea()
        total_area = np.sum(areas)
        layer.ResetReading()  # Rewind to first layer
        logger.info('Total area of all polygons: %s m2' % total_area)
        # Find number of points per polygon
        numbers = np.round(number * areas / total_area).astype(int)
        # Assign the rounding remainder to the largest polygon
        numbers[numbers.argmax()] += int(number - sum(numbers))
        for i, f in enumerate(featurenum):
            feature = layer.GetFeature(f - 1)
            if feature is None:
                continue
            num_elements = numbers[i]
            geom = feature.GetGeometryRef()
            logger.info('\tSeeding %s elements within polygon number %s' %
                        (num_elements, featurenum[i]))
            try:
                geom.Transform(coordTrans)
            except Exception as e:
                logger.warning('Could not transform coordinates:')
                logger.warning(e)
                pass
            #b = geom.GetBoundary()
            #if b is not None:
            #    points = b.GetPoints()
            #    lons = [p[0] for p in points]
            #    lats = [p[1] for p in points]
            #else:
            # Alternative if OGR is not built with GEOS support
            r = geom.GetGeometryRef(0)
            # NOTE(review): lon read from GetY and lat from GetX — this
            # looks like compensation for GDAL >= 3 axis order (lat,lon)
            # after transformation to EPSG:4326; confirm against the GDAL
            # version in use
            lons = [r.GetY(j) for j in range(r.GetPointCount())]
            lats = [r.GetX(j) for j in range(r.GetPointCount())]
            self.seed_within_polygon(lons=lons,
                                     lats=lats,
                                     number=num_elements,
                                     **kwargs)
def seed_letters(self, text, lon, lat, time, number, scale=1.2):
    """Seed elements within the outline polygons of the given text."""
    from matplotlib.font_manager import FontProperties
    font = FontProperties(family='Bitstream Vera Sans', weight='bold')
    # Render the text as a vector path anchored at (lon, lat)
    text_path = matplotlib.textpath.TextPath((lon, lat),
                                             text,
                                             size=1 * scale,
                                             prop=font)
    outline = matplotlib.patches.PathPatch(text_path,
                                           facecolor='none',
                                           edgecolor='black',
                                           transform=ccrs.PlateCarree())
    # Seed within each closed polygon making up the letters
    for polygon in outline.get_path().to_polygons():
        self.seed_within_polygon(lons=polygon[:, 0],
                                 lats=polygon[:, 1],
                                 number=number,
                                 time=time)
def seed_from_ladim(self, ladimfile, roms):
    """Seed elements from ladim \\*.rls text file: [time, x, y, z, name]

    Arguments:
        ladimfile: path to the .rls release file

        roms: reader providing xy2lonlat() for the grid coordinates
    """
    data = np.loadtxt(ladimfile,
                      dtype={
                          'names': ('time', 'x', 'y', 'z'),
                          'formats': ('S20', 'f4', 'f4', 'f4')
                      },
                      usecols=(0, 1, 2, 3))
    # BUGFIX: np.loadtxt with 'S20' dtype yields bytes on Python 3;
    # datetime.strptime requires str, so decode first
    time = [
        datetime.strptime(
            t.decode('ascii') if isinstance(t, bytes) else t,
            "%Y-%m-%dT%H") for t in data['time']
    ]
    time = np.array(time)
    # Convert grid coordinates to geographic positions
    lon, lat = roms.xy2lonlat(data['x'], data['y'])
    # Release file gives depth as positive-down; model z is negative below surface
    z = -data['z']
    logger.info('Seeding %i elements from %s:' % (len(lon), ladimfile))
    logger.info('    Lons: %f to %f' % (lon.min(), lon.max()))
    logger.info('    Lats: %f to %f' % (lat.min(), lat.max()))
    logger.info('    Depths: %f to %f' % (z.min(), z.max()))
    logger.info('    Time: %s to %s' % (time.min(), time.max()))
    elements = self.ElementType(lon=lon, lat=lat, z=-z)
    self.schedule_elements(elements, time)
def horizontal_diffusion(self):
    """Apply a random-walk displacement from the configured horizontal diffusivity."""
    D = self.get_config('drift:horizontal_diffusivity')
    if D == 0:
        logger.debug('Horizontal diffusivity is 0, no random walk.')
        return
    dt = np.abs(self.time_step.total_seconds())
    # Standard deviation of the random-walk velocity for diffusivity D
    velocity_std = np.sqrt(2 * D / dt)
    num_active = self.num_elements_active()
    moving = self.elements.moving
    x_vel = moving * velocity_std * np.random.normal(scale=1,
                                                     size=num_active)
    y_vel = moving * velocity_std * np.random.normal(scale=1,
                                                     size=num_active)
    speed = np.sqrt(x_vel * x_vel + y_vel * y_vel)
    logger.debug(
        'Moving elements according to horizontal diffusivity of %s, with speeds between %s and %s m/s'
        % (D, speed.min(), speed.max()))
    self.update_positions(x_vel, y_vel)
def deactivate_elements(self, indices, reason='deactivated'):
    """Schedule deactivated particles for deletion (at end of step).

    Arguments:
        indices: boolean array, True for elements to be deactivated

        reason: status label under which the elements are deactivated
    """
    # Nothing to do if no elements are flagged. np.any is robust for both
    # arrays and scalar booleans, unlike builtin any() which raises
    # TypeError on a non-iterable scalar.
    if not np.any(indices):
        return
    # Register the reason as a new status category if not seen before
    if reason not in self.status_categories:
        self.status_categories.append(reason)
        logger.debug('Added status %s' % (reason))
    reason_number = self.status_categories.index(reason)
    #if not hasattr(self.elements.status, "__len__"):
    # Expand a scalar status to a per-element array before indexing
    if len(np.atleast_1d(self.elements.status)) == 1:
        status = self.elements.status.item()
        self.elements.status = np.zeros(self.num_elements_active())
        self.elements.status.fill(status)
    # Deactivate elements, if they have not already been deactivated
    self.elements.status[indices & (self.elements.status == 0)] = \
        reason_number
    self.elements.moving[indices] = 0
    logger.debug('%s elements scheduled for deactivation (%s)' %
                 (np.sum(indices), reason))
    logger.debug(
        '\t(z: %f to %f)' %
        (self.elements.z[indices].min(), self.elements.z[indices].max()))
def remove_deactivated_elements(self):
    """Moving deactivated elements from self.elements
    to self.elements_deactivated.

    Also removes the corresponding entries from self.environment and
    self.environment_profiles, so they stay aligned with the active
    element arrays.
    """
    # All particles scheduled for deletion (non-zero status)
    indices = (self.elements.status != 0)
    #try:
    #    len(indices)
    #except:
    if len(indices) == 0 or np.sum(indices) == 0:
        logger.debug('No elements to deactivate')
        return  # No elements scheduled for deactivation
    # Basic, but some more housekeeping will be required later
    self.elements.move_elements(self.elements_deactivated, indices)
    logger.debug('Removed %i elements.' % (np.sum(indices)))
    if hasattr(self, 'environment'):
        # Keep only environment rows for still-active elements
        self.environment = self.environment[~indices]
        logger.debug('Removed %i values from environment.' %
                     (np.sum(indices)))
    if hasattr(self, 'environment_profiles') and \
            self.environment_profiles is not None:
        for varname, profiles in self.environment_profiles.items():
            logger.debug('remove items from profile for ' + varname)
            # 'z' holds the shared vertical levels, not per-element data
            if varname != 'z':
                self.environment_profiles[varname] = \
                    profiles[:, ~indices]
        logger.debug('Removed %i values from environment_profiles.' %
                     (np.sum(indices)))
    #if self.num_elements_active() == 0:
    #    raise ValueError('No more active elements.')  # End simulation
def set_fallback_values(self, refresh=False):
    """Collect configured environment:fallback:* values into self.fallback_values.

    Raises ValueError if the dict already exists and refresh is False,
    to discourage manual editing of fallback_values.
    """
    if hasattr(self, 'fallback_values') and refresh is False:
        raise ValueError(
            'Manually editing fallback_values dict is deprecated, please use set_config()'
        )
    spec = self.get_configspec('environment:fallback:')
    # Keep only entries with a configured value, keyed by short variable name
    self.fallback_values = {
        key.split(':')[-1]: entry['value']
        for key, entry in spec.items() if entry['value'] is not None
    }
def run(self,
        time_step=None,
        steps=None,
        time_step_output=None,
        duration=None,
        end_time=None,
        outfile=None,
        export_variables=None,
        export_buffer_length=100,
        stop_on_error=False):
    """Start a trajectory simulation, after initial configuration.

    Performs the main loop:
        - Obtain environment data for positions of all particles.
        - Call method 'update' to update (incl advect) particle properties.
    until one of the following conditions are met:
        - Maximum number of steps are reached
        - A needed variable can not be obtained by any reader
          (outside spatial/temporal domain) and has no fallback
          (default) value.
        - All particles have been deactivated (e.g. by stranding)
        - Occurrence of any error, whose trace will be output to terminal.

    Before starting a model run, readers must be added for all
    required variables, unless fallback values have been specified.
    Some particles/elements must have been scheduled for seeding, and the
    run will start at the time when the first element has been scheduled.

    Arguments:
        time_step: interval between particles updates, in seconds or as
            timedelta. Default: 3600 seconds (1 hour)
        time_step_output: Time step at which element properties are stored
            and eventually written to file.
            Timedelta object or seconds.
            Default: same as time_step, meaning that all steps are stored
        The length of the simulation is specified by defining ONE (and
        only one) of the following parameters:
            - steps: integer, maximum number of steps. End of simulation
              will be self.start_time + steps*self.time_step
            - duration: timedelta defining the length of the simulation
            - end_time: datetime object defining the end of the simulation
        outfile: file name where output (history) is written
        export_variables: list of variables and parameter names to be
            saved to file. Default is None (all variables are saved)
        export_buffer_length: number of output time steps buffered in
            memory before flushing to outfile
        stop_on_error: if True, abort (sys.exit) on any error instead of
            truncating the simulation at the failing step
    """
    # Exporting software and hardware specification, for possible debugging
    logger.debug(opendrift.versions())
    self.timer_end('configuration')
    self.timer_start('preparing main loop')
    if self.num_elements_scheduled() == 0:
        raise ValueError('Please seed elements before starting a run.')
    self.elements = self.ElementType()
    # Export seed_geojson as FeatureCollection string
    self.add_metadata('seed_geojson',
                      geojson.FeatureCollection(self.seed_geojson))
    # Collect fallback values from config into dict
    self.set_fallback_values(refresh=True)
    if outfile is None and export_buffer_length is not None:
        logger.debug('No output file is specified, '
                     'neglecting export_buffer_length')
        export_buffer_length = None
    # Make constant readers if config environment:constant:<var> is set
    c = self.get_configspec('environment:constant:')
    mr = {}
    for var in list(c):
        if c[var]['value'] is not None:
            mr[var.split(':')[-1]] = c[var]['value']
    if len(mr) > 0:
        from opendrift.readers import reader_constant
        rc = reader_constant.Reader(mr)
        self.add_reader(rc, first=True)
    # land_binary_mask is handled separately (auto landmask, below)
    missing_variables = self.missing_variables()
    missing_variables = [
        m for m in missing_variables if m != 'land_binary_mask'
    ]
    if len(missing_variables) > 0:
        has_fallback = [
            var for var in missing_variables if var in self.fallback_values
        ]
        has_no_fallback = [
            var for var in missing_variables
            if var not in self.fallback_values
        ]
        #if has_fallback == missing_variables:
        if len(has_fallback) > 0:  # == missing_variables:
            logger.info('Fallback values will be used for the following '
                        'variables which have no readers: ')
            for var in has_fallback:
                logger.info('\t%s: %f' % (var, self.fallback_values[var]))
        #else:
        if len(has_no_fallback) > 0 and len(
                self._lazy_readers()) == 0:  # == missing_variables:
            logger.warning(
                'No readers added for the following variables: ' +
                str(has_no_fallback))
            raise ValueError('Readers must be added for the '
                             'following required variables: ' +
                             str(has_no_fallback))
    # Some cleanup needed if starting from imported state
    if self.steps_calculation >= 1:
        self.steps_calculation = 0
    if self.history is not None:
        # Delete history matrix before new run
        self.history = None
        # Renumbering elements from 0 to num_elements, necessary fix when
        # importing from file, where elements may have been deactivated
        # TODO: should start from 1?
        self.elements.ID = np.arange(0, self.num_elements_active())
    ########################
    # Simulation time step
    ########################
    if time_step is None:
        time_step = timedelta(
            minutes=self.get_config('general:time_step_minutes'))
    if type(time_step) is not timedelta:
        # Time step may be given in seconds, as alternative to timedelta
        time_step = timedelta(seconds=time_step)
    self.time_step = time_step
    if time_step_output is None:
        time_step_output = self.get_config(
            'general:time_step_output_minutes')
        if time_step_output is None:
            self.time_step_output = self.time_step
        else:
            self.time_step_output = timedelta(minutes=time_step_output)
    else:
        if type(time_step_output) is timedelta:
            self.time_step_output = time_step_output
        else:
            self.time_step_output = timedelta(seconds=time_step_output)
    # For a backwards run, the output step must also be negative
    if self.time_step_output.days >= 0 and self.time_step.days < 0:
        self.time_step_output = -self.time_step_output
    time_step_ratio = self.time_step_output.total_seconds() / \
        self.time_step.total_seconds()
    if time_step_ratio < 1:
        raise ValueError('Output time step must be equal or larger '
                         'than calculation time step.')
    if not time_step_ratio.is_integer():
        raise ValueError('Ratio of calculation and output time steps '
                         'must be an integer - given ratio is %s' %
                         time_step_ratio)
    ########################
    # Simulation duration
    ########################
    if time_step.days < 0:
        logger.info(
            'Backwards simulation, starting from last seeded element')
        self.start_time = self.elements_scheduled_time.max()
    if (duration is not None and end_time is not None) or \
            (duration is not None and steps is not None) or \
            (steps is not None and end_time is not None):
        raise ValueError('Only one of "steps", "duration" and "end_time" '
                         'may be provided simultaneously')
    if duration is None and end_time is None:
        if steps is not None:
            duration = steps * self.time_step
        else:
            # Use the earliest end time among the added readers
            for reader in self.readers.values():
                if reader.end_time is not None:
                    if end_time is None:
                        end_time = reader.end_time
                    else:
                        end_time = min(end_time, reader.end_time)
                # NOTE(review): this logs once per reader — presumably
                # intended to be outside the loop; confirm
                logger.info('Duration, steps or end time not specified, '
                            'running until end of first reader: %s' %
                            (end_time))
    if duration is None:
        duration = end_time - self.start_time
    if time_step.days < 0 and duration.days >= 0:
        # Duration shall also be negative for backwards run
        duration = -duration
    if np.sign(duration.total_seconds()) * np.sign(
            time_step.total_seconds()) < 0:
        raise ValueError(
            "Time step must be negative if duration is negative.")
    self.expected_steps_output = duration.total_seconds() / \
        self.time_step_output.total_seconds() + 1  # Includes start and end
    self.expected_steps_calculation = duration.total_seconds() / \
        self.time_step.total_seconds()
    self.expected_steps_output = int(self.expected_steps_output)
    self.expected_steps_calculation = int(self.expected_steps_calculation)
    self.expected_end_time = self.start_time + self.expected_steps_calculation * self.time_step
    ##############################################################
    # Prepare readers for the requested simulation domain/time
    ##############################################################
    # Worst-case travel distance at max speed, to pad the reader extent
    max_distance = \
        self.max_speed*self.expected_steps_calculation * \
        np.abs(self.time_step.total_seconds())
    deltalat = max_distance / 111000.
    deltalon = deltalat / np.cos(
        np.radians(np.mean(self.elements_scheduled.lat)))
    # TODO: extent should ideally be a general polygon, not only lon/lat-min/max
    # TODO: Should also take into account eventual lifetime of elements
    simulation_extent = [
        np.maximum(-360,
                   self.elements_scheduled.lon.min() - deltalon),
        np.maximum(-89,
                   self.elements_scheduled.lat.min() - deltalat),
        np.minimum(360,
                   self.elements_scheduled.lon.max() + deltalon),
        np.minimum(89,
                   self.elements_scheduled.lat.max() + deltalat)
    ]
    if simulation_extent[2] == 360 and simulation_extent[0] < 0:
        simulation_extent[0] = 0
    logger.debug(
        'Preparing readers for simulation coverage (%s) and time (%s to %s)'
        % (simulation_extent, self.start_time, self.expected_end_time))
    for reader in self.readers.values():
        logger.debug('\tPreparing %s' % reader.name)
        reader.prepare(extent=simulation_extent,
                       start_time=self.start_time,
                       end_time=self.expected_end_time,
                       max_speed=self.max_speed)
    # Store expected simulation extent, to check if new readers have coverage
    self.simulation_extent = simulation_extent
    ##############################################################
    # If no landmask has been added, we determine it dynamically
    ##############################################################
    # TODO: some more error checking here
    # If landmask is requested, it shall not be obtained from other readers
    if self.get_config('general:use_auto_landmask') is True:
        if 'land_binary_mask' in self.priority_list:
            if 'basemap_landmask' in self.priority_list[
                    'land_binary_mask']:
                self.priority_list['land_binary_mask'] = [
                    'basemap_landmask'
                ]
            elif 'global_landmask' in self.priority_list[
                    'land_binary_mask']:
                self.priority_list['land_binary_mask'] = [
                    'global_landmask'
                ]
            else:
                del self.priority_list['land_binary_mask']
    if self.get_config('general:use_auto_landmask') is True and \
            ('land_binary_mask' in self.required_variables and \
             'land_binary_mask' not in self.priority_list \
             and 'land_binary_mask' not in self.fallback_values):
        logger.info(
            'Adding a dynamical landmask with max. priority based on '
            'assumed maximum speed of %s m/s. '
            'Adding a customised landmask may be faster...' %
            self.max_speed)
        self.timer_start('preparing main loop:making dynamical landmask')
        reader_landmask = reader_global_landmask.Reader(
            extent=simulation_extent)
        self.add_reader(reader_landmask)
        self.timer_end('preparing main loop:making dynamical landmask')
    ####################################################################
    # Preparing history array for storage in memory and eventually file
    ####################################################################
    if export_buffer_length is None:
        # Keep the whole run in memory
        self.export_buffer_length = self.expected_steps_output
    else:
        self.export_buffer_length = export_buffer_length
    if self.time_step.days < 0:
        # For backwards simulation, we start at last seeded element
        logger.info('Backwards simulation, starting at '
                    'time of last seeded element')
        self.time = self.elements_scheduled_time.max()
        # Flipping ID array, so that lowest IDs are released first
        self.elements_scheduled.ID = \
            np.flipud(self.elements_scheduled.ID)
    else:
        # Forward simulation, start time has been set when seeding
        self.time = self.start_time
    # Add the output variables which are always required
    if export_variables is not None:
        export_variables = list(
            set(export_variables + ['lon', 'lat', 'ID', 'status']))
    self.export_variables = export_variables
    # Initialise array to hold history (element properties and environment)
    # for export to file.
    history_dtype_fields = [(name,
                             self.ElementType.variables[name]['dtype'])
                            for name in self.ElementType.variables]
    # Add environment variables
    self.history_metadata = self.ElementType.variables.copy()
    for env_var in self.required_variables:
        history_dtype_fields.append((env_var, np.dtype('float32')))
        self.history_metadata[env_var] = {}
    # Remove variables from output array, if only subset is requested
    if self.export_variables is not None:
        history_dtype_fields = [
            f for f in history_dtype_fields
            if f[0] in self.export_variables
        ]
        for m in list(self.history_metadata):
            if m not in self.export_variables:
                del self.history_metadata[m]
    history_dtype = np.dtype(history_dtype_fields)
    # Masked array: entries stay masked until a value is actually stored
    self.history = np.ma.array(np.zeros(
        (len(self.elements_scheduled), self.export_buffer_length)),
                               dtype=history_dtype)
    self.history.mask = True
    self.steps_exported = 0
    if outfile is not None:
        self.io_init(outfile)
    else:
        self.outfile = None
    # Move point seeded on land to ocean
    if self.get_config('seed:ocean_only') is True and \
            ('land_binary_mask' in self.required_variables):
        #('land_binary_mask' not in self.fallback_values) and \
        self.timer_start('preparing main loop:moving elements to ocean')
        self.elements_scheduled.lon, self.elements_scheduled.lat = \
            self.closest_ocean_points(self.elements_scheduled.lon,
                                      self.elements_scheduled.lat)
        self.timer_end('preparing main loop:moving elements to ocean')
    #############################
    # Check validity domain
    #############################
    validity_domain = [
        self.get_config('drift:deactivate_west_of'),
        self.get_config('drift:deactivate_east_of'),
        self.get_config('drift:deactivate_south_of'),
        self.get_config('drift:deactivate_north_of')
    ]
    if validity_domain == [None, None, None, None]:
        self.validity_domain = None
    else:
        self.validity_domain = validity_domain
    #############################
    # Model specific preparation
    #############################
    self.prepare_run()
    ##########################
    # Main loop
    ##########################
    # NOTE(review): datetime.now() is naive local time — confirm UTC is
    # not intended here
    self.add_metadata('simulation_time', datetime.now())
    self.timer_end('preparing main loop')
    self.timer_start('main loop')
    for i in range(self.expected_steps_calculation):
        try:
            # Release elements
            self.release_elements()
            if self.num_elements_active(
            ) == 0 and self.num_elements_scheduled() > 0:
                # No elements released yet; advance time without update
                self.steps_calculation += 1
                logger.info(
                    'No active but %s scheduled elements, skipping timestep %s (%s)'
                    % (self.num_elements_scheduled(),
                       self.steps_calculation, self.time))
                self.state_to_buffer()  # Append status to history array
                if self.time is not None:
                    self.time = self.time + self.time_step
                continue
            self.increase_age_and_retire()
            self.interact_with_seafloor()
            if self.show_continuous_performance is True:
                logger.info(self.performance())
            # Display time to terminal
            logger.debug('===================================' * 2)
            logger.info('%s - step %i of %i - %i active elements '
                        '(%i deactivated)' %
                        (self.time, self.steps_calculation + 1,
                         self.expected_steps_calculation,
                         self.num_elements_active(),
                         self.num_elements_deactivated()))
            logger.debug('%s elements scheduled.' %
                         self.num_elements_scheduled())
            logger.debug('===================================' * 2)
            # Fetch environment data at present element positions
            self.environment, self.environment_profiles, missing = \
                self.get_environment(list(self.required_variables),
                                     self.time,
                                     self.elements.lon,
                                     self.elements.lat,
                                     self.elements.z,
                                     self.required_profiles)
            self.store_previous_variables()
            self.calculate_missing_environment_variables()
            if any(missing):
                self.report_missing_variables()
            self.interact_with_coastline()
            self.interact_with_seafloor()
            self.deactivate_elements(missing, reason='missing_data')
            self.state_to_buffer()  # Append status to history array
            self.remove_deactivated_elements()
            # Propagate one timestep forwards
            self.steps_calculation += 1
            if self.num_elements_active(
            ) == 0 and self.num_elements_scheduled() == 0:
                raise ValueError(
                    'No more active or scheduled elements, quitting.')
            # Store location, in case elements shall be moved back
            self.store_present_positions()
            #####################################################
            if self.num_elements_active() > 0:
                logger.debug('Calling %s.update()' % type(self).__name__)
                self.timer_start('main loop:updating elements')
                self.update()
                self.timer_end('main loop:updating elements')
            else:
                logger.info('No active elements, skipping update() method')
            #####################################################
            self.horizontal_diffusion()
            if self.num_elements_active(
            ) == 0 and self.num_elements_scheduled() == 0:
                raise ValueError(
                    'No active or scheduled elements, quitting simulation')
            logger.debug('%s active elements (%s deactivated)' %
                         (self.num_elements_active(),
                          self.num_elements_deactivated()))
            # Updating time
            if self.time is not None:
                self.time = self.time + self.time_step
        except Exception as e:
            # Any error truncates the simulation (unless stop_on_error);
            # partial results up to this step are kept
            message = ('The simulation stopped before requested '
                       'end time was reached.')
            logger.warning(message)
            self.store_message(message)
            logger.info('========================')
            logger.info('End of simulation:')
            logger.info(e)
            logger.info(traceback.format_exc())
            logger.info(self.get_messages())
            if not hasattr(self, 'environment'):
                sys.exit('Simulation aborted. ' + self.get_messages())
            logger.info('========================')
            if stop_on_error is True:
                sys.exit('Stopping on error. ' + self.get_messages())
            if self.steps_calculation <= 1:
                raise ValueError('Simulation stopped within '
                                 'first timestep. ' + self.get_messages())
            break
    self.timer_end('main loop')
    self.timer_start('cleaning up')
    logger.debug('Cleaning up')
    self.interact_with_coastline(final=True)
    self.state_to_buffer()  # Append final status to buffer
    #############################
    # Add some metadata
    #############################
    for var in self.required_variables:
        keyword = 'reader_' + var
        if var not in self.priority_list:
            self.add_metadata(keyword, self.fallback_values[var])
        else:
            readers = self.priority_list[var]
            if readers[0].startswith(
                    'constant_reader') and var in self.readers[
                        readers[0]]._parameter_value_map:
                self.add_metadata(
                    keyword,
                    self.readers[readers[0]]._parameter_value_map[var][0])
            else:
                self.add_metadata(keyword, self.priority_list[var])
    if outfile is not None:
        logger.debug('Writing and closing output file: %s' % outfile)
        # Write buffer to outfile, and close
        if self.steps_output >= self.steps_exported:
            # Write last lines, if needed
            self.io_write_buffer()
        self.io_close()
    # Remove any elements scheduled for deactivation during last step
    self.remove_deactivated_elements()
    if export_buffer_length is None:
        # Remove columns for unseeded elements in history array
        if self.num_elements_scheduled() > 0:
            logger.info(
                'Removing %i unseeded elements from history array' %
                self.num_elements_scheduled())
            mask = np.ones(self.history.shape[0], dtype=bool)
            mask[self.elements_scheduled.ID - 1] = False
            self.history = self.history[mask, :]
        # Remove rows for unreached timsteps in history array
        self.history = self.history[:, range(self.steps_output)]
    else:  # If output has been flushed to file during run, we
        # need to reimport from file to get all data in memory
        del self.environment
        if hasattr(self, 'environment_profiles'):
            del self.environment_profiles
        self.io_import_file(outfile)
    self.timer_end('cleaning up')
    self.timer_end('total time')
def increase_age_and_retire(self):
"""Increase age of elements, and retire if older than config setting."""
# Increase age of elements
self.elements.age_seconds += self.time_step.total_seconds()
# Deactivate elements that exceed a certain age
if self.get_config('drift:max_age_seconds') is not None:
self.deactivate_elements(self.elements.age_seconds >=
self.get_config('drift:max_age_seconds'),
reason='retired')
# Deacticate any elements outside validity domain set by user
if self.validity_domain is not None:
W, E, S, N = self.validity_domain
if W is not None:
self.deactivate_elements(self.elements.lon < W,
reason='outside')
if E is not None:
self.deactivate_elements(self.elements.lon > E,
reason='outside')
if S is not None:
self.deactivate_elements(self.elements.lat < S,
reason='outside')
if N is not None:
self.deactivate_elements(self.elements.lat > N,
reason='outside')
def state_to_buffer(self):
"""Append present state (elements and environment) to recarray."""
steps_calculation_float = \
(self.steps_calculation * self.time_step.total_seconds() /
self.time_step_output.total_seconds()) + 1
if self.time_step <= timedelta(seconds=1):
self.steps_output = int(np.round(steps_calculation_float))
else:
self.steps_output = int(np.floor(steps_calculation_float))
ID_ind = self.elements.ID - 1
time_ind = self.steps_output - 1 - self.steps_exported
if self.steps_calculation == self.expected_steps_calculation:
final_time_step = True
else:
final_time_step = False
if steps_calculation_float.is_integer() or self.time_step < timedelta(
seconds=1) or final_time_step is True:
element_ind = range(len(ID_ind)) # We write all elements
else:
deactivated = np.where(self.elements.status != 0)[0]
if len(deactivated) == 0:
return # No deactivated elements this sub-timestep
# We write history for deactivated elements only:
logger.debug('Writing history for %s deactivated elements' %
len(deactivated))
ID_ind = ID_ind[deactivated]
element_ind = deactivated
time_ind = np.minimum(time_ind + 1, self.history.shape[1] - 1)
# TODO: storing of variables and environment below should be collected in a single loop
# Store present state in history recarray
for i, var in enumerate(self.elements.variables):
if self.export_variables is not None and \
var not in self.export_variables:
continue
# Temporarily assuming elements numbered
# from 0 to num_elements_active()
# Does not hold when importing ID from a saved file, where
# some elements have been deactivated
self.history[var][ID_ind, time_ind] = \
getattr(self.elements, var)[element_ind]
if len(ID_ind) > 0:
newmin = np.min(self.history[var][ID_ind, time_ind])
newmax = np.max(self.history[var][ID_ind, time_ind])
if var not in self.minvals:
self.minvals[var] = newmin
self.maxvals[var] = newmax
else:
self.minvals[var] = np.minimum(self.minvals[var], newmin)
self.maxvals[var] = np.maximum(self.maxvals[var], newmax)
# Copy environment data to history array
for i, var in enumerate(self.environment.dtype.names):
if self.export_variables is not None and \
var not in self.export_variables:
continue
self.history[var][ID_ind, time_ind] = \
getattr(self.environment, var)[element_ind]
if len(ID_ind) > 0:
newmin = np.min(self.history[var][ID_ind, time_ind])
newmax = np.max(self.history[var][ID_ind, time_ind])
if var not in self.minvals:
self.minvals[var] = newmin
self.maxvals[var] = newmax
else:
self.minvals[var] = np.minimum(self.minvals[var], newmin)
self.maxvals[var] = np.maximum(self.maxvals[var], newmax)
# Call writer if buffer is full
if (self.outfile is not None) and \
((self.steps_output - self.steps_exported) ==
self.export_buffer_length):
self.io_write_buffer()
def report_missing_variables(self):
"""Issue warning if some environment variables missing."""
missing_variables = []
for var in self.required_variables:
if np.isnan(getattr(self.environment, var).min()):
missing_variables.append(var)
if len(missing_variables) > 0:
logger.warning('Missing variables: ' + str(missing_variables))
self.store_message('Missing variables: ' + str(missing_variables))
def index_of_activation_and_deactivation(self):
"""Return the indices when elements were seeded and deactivated."""
firstlast = np.ma.notmasked_edges(self.history['lon'], axis=1)
index_of_activation = firstlast[0][1]
index_of_deactivation = firstlast[1][1]
if len(index_of_deactivation) < self.history['lon'].shape[0]:
missingind = np.setdiff1d(
np.arange(0, self.history['lon'].shape[0]), firstlast[0][0])
logger.warning(
'%s elements were never seeded, removing from history array (this is probably caused by importing an old file)'
% len(missingind))
self.history = self.history[firstlast[0][0], :]
return index_of_activation, index_of_deactivation
def set_up_map(self,
corners=None,
buffer=.1,
delta_lat=None,
lscale=None,
fast=False,
hide_landmask=False,
**kwargs):
"""
Generate Figure instance on which trajectories are plotted.
:param hide_landmask: do not plot landmask (default False)
:type hide_landmask: bool
provide corners=[lonmin, lonmax, latmin, latmax] for specific map selection
"""
# Initialise map
if hasattr(self, 'ds'): # If dataset is lazily imported
lons = self.ds.lon
lats = self.ds.lat
if not hasattr(self, 'lonmin'):
logger.debug('Finding min longitude...')
self.lonmin = np.nanmin(self.ds.lon)
logger.debug('Finding max longitude...')
self.lonmax = np.nanmax(self.ds.lon)
logger.debug('Finding min latitude...')
self.latmin = np.nanmin(self.ds.lat)
logger.debug('Finding max latitude...')
self.latmax = np.nanmax(self.ds.lat)
else:
lons, lats = self.get_lonlats() # TODO: to be removed
if corners is not None: # User provided map corners
lonmin = corners[0]
lonmax = corners[1]
latmin = corners[2]
latmax = corners[3]
elif hasattr(self, 'lonmin'): # if dataset is lazily imported
lonmin = self.lonmin - buffer * 2
lonmax = self.lonmax + buffer * 2
latmin = self.latmin - buffer
latmax = self.latmax + buffer
else:
lons, lats = self.get_lonlats()
if 'compare_lonmin' in kwargs: # checking min/max lon/lat of other simulations
lonmin = np.minimum(kwargs['compare_lonmin'], np.nanmin(lons))
lonmax = np.maximum(kwargs['compare_lonmax'], np.nanmax(lons))
latmin = np.minimum(kwargs['compare_latmin'], np.nanmin(lats))
latmax = np.maximum(kwargs['compare_latmax'], np.nanmax(lats))
else:
lonmin = np.nanmin(lons)
lonmax = np.nanmax(lons)
latmin = np.nanmin(lats)
latmax = np.nanmax(lats)
lonmin = lonmin - buffer * 2
lonmax = lonmax + buffer * 2
latmin = latmin - buffer
latmax = latmax + buffer
if fast is True:
logger.warning(
'Plotting fast. This will make your plots less accurate.')
import matplotlib.style as mplstyle
mplstyle.use(['fast'])
# use a spherical earth
axis = 57.29577951308232 # something to do with pi
globe = ccrs.Globe(ellipse=None,
semimajor_axis=axis,
semiminor_axis=axis)
crs = ccrs.Mercator(globe=globe)
if lscale is None:
lscale = 'c'
else:
crs = ccrs.Mercator()
if lscale is None:
lscale = 'auto'
meanlat = (latmin + latmax) / 2
aspect_ratio = float(latmax - latmin) / (float(lonmax - lonmin))
aspect_ratio = aspect_ratio / np.cos(np.radians(meanlat))
if aspect_ratio > 1:
fig = plt.figure(figsize=(11. / aspect_ratio, 11.))
else:
fig = plt.figure(figsize=(11., 11. * aspect_ratio))
ax = fig.add_subplot(111, projection=crs) # need '111' for Python 2
ax.set_extent([lonmin, lonmax, latmin, latmax], crs=ccrs.PlateCarree())
if 'ocean_color' in kwargs:
ax.patch.set_facecolor(kwargs['ocean_color'])
ocean_color = kwargs['ocean_color']
else:
ocean_color = 'white'
if 'land_color' in kwargs:
land_color = kwargs['land_color']
else:
if fast is True:
land_color = 'gray'
else:
land_color = cfeature.COLORS['land']
if 'text' in kwargs:
if not isinstance(kwargs['text'], list):
text = list(kwargs['text'])
else:
text = kwargs['text']
for te in text:
plt.text(transform=ccrs.Geodetic(), **te)
if 'box' in kwargs:
if not isinstance(kwargs['box'], list):
box = list(kwargs['box'])
else:
box = kwargs['box']
for bx in box:
lonmn = bx['lon'][0]
lonmx = bx['lon'][1]
latmn = bx['lat'][0]
latmx = bx['lat'][1]
del bx['lon']
del bx['lat']
if 'text' in bx:
plt.text(x=lonmn,
y=latmx,
s=bx['text'],
transform=ccrs.Geodetic())
del bx['text']
patch = matplotlib.patches.Rectangle(xy=[lonmn, latmn],
width=lonmx - lonmn,
height=latmx - latmn,
transform=ccrs.Geodetic(),
zorder=10,
**bx)
ax.add_patch(patch)
if not hide_landmask:
if 'land_binary_mask' in self.priority_list and self.priority_list[
'land_binary_mask'][0] == 'shape':
logger.debug('Using custom shapes for plotting land..')
ax.add_geometries(self.readers['shape'].polys,
ccrs.PlateCarree(),
facecolor=land_color,
edgecolor='black')
else:
reader_global_landmask.plot_land(ax, lonmin, latmin, lonmax,
latmax, fast, ocean_color,
land_color, lscale)
gl = ax.gridlines(ccrs.PlateCarree(), draw_labels=True)
if cartopy.__version__ < '0.18.0':
gl.xlabels_top = False # Cartopy < 0.18
else:
gl.top_labels = None # Cartopy >= 0.18
fig.canvas.draw()
fig.set_tight_layout(True)
if not hasattr(self, 'ds'):
try:
firstlast = np.ma.notmasked_edges(lons, axis=1)
index_of_first = firstlast[0][1]
index_of_last = firstlast[1][1]
except:
index_of_last = 0
else:
index_of_first = None
index_of_last = None
try: # Activate figure zooming
mng = plt.get_current_fig_manager()
mng.toolbar.zoom()
except:
pass
try: # Maximise figure window size
mng.resize(*mng.window.maxsize())
except:
pass
return fig, ax, crs, lons.T, lats.T, index_of_first, index_of_last
def get_lonlats(self):
if self.history is not None:
lons = self.history['lon']
lats = self.history['lat']
else:
if self.steps_output > 0:
lons = np.ma.array(np.reshape(self.elements.lon, (1, -1))).T
lats = np.ma.array(np.reshape(self.elements.lat, (1, -1))).T
else:
lons = np.ma.array(
np.reshape(self.elements_scheduled.lon, (1, -1))).T
lats = np.ma.array(
np.reshape(self.elements_scheduled.lat, (1, -1))).T
return lons, lats
    def animation(self,
                  buffer=.2,
                  corners=None,
                  filename=None,
                  compare=None,
                  compare_marker='o',
                  background=None,
                  bgalpha=.5,
                  vmin=None,
                  vmax=None,
                  drifter=None,
                  skip=None,
                  scale=None,
                  color=False,
                  clabel=None,
                  colorbar=True,
                  cmap=None,
                  density=False,
                  show_elements=True,
                  show_trajectories=False,
                  trajectory_alpha=.1,
                  hide_landmask=False,
                  density_pixelsize_m=1000,
                  unitfactor=1,
                  lcs=None,
                  surface_only=False,
                  markersize=20,
                  origin_marker=None,
                  legend=None,
                  legend_loc='best',
                  fps=8,
                  lscale=None,
                  fast=False,
                  blit=False,
                  **kwargs):
        """Animate last run.

        Shows (or saves to ``filename``) an animation of element
        positions per output time step, optionally colored by a
        variable, on top of a background/density/LCS field, and
        optionally compared with other simulations and/or observed
        drifter positions.
        """
        filename = str(filename) if filename is not None else None

        # A history array (or lazy dataset) with elements must exist
        if self.history is not None and self.num_elements_total(
        ) == 0 and not hasattr(self, 'ds'):
            raise ValueError('Please run simulation before animating')

        if compare is not None:
            # Collect coordinates of comparison runs and extend map extent
            compare_list, compare_args = self._get_comparison_xy_for_plots(
                compare)
            kwargs.update(compare_args)

        # markersize='mass' scales marker size by remaining mass fraction
        # NOTE(review): this assumes the model has 'mass',
        # 'mass_degraded' and 'mass_volatilized' history variables -
        # verify for the model subclass in use
        markersizebymass = False
        if isinstance(markersize, str):
            if markersize == 'mass':
                markersizebymass = True
                markersize = 20

        start_time = datetime.now()
        if cmap is None:
            cmap = 'jet'
        if isinstance(cmap, str):
            cmap = matplotlib.cm.get_cmap(cmap)

        # No scalar field to map -> no colorbar
        if color is False and background is None and lcs is None and density is False:
            colorbar = False

        markercolor = self.plot_comparison_colors[0]

        if isinstance(density, str):
            # Density field is weighted by this variable
            # TODO: not yet implemented!
            density_weight = density
            density = True
        else:
            if density is True:
                density_weight = None
            elif density is not False:
                density_weight = density
                density = True
        if density is True:  # Get density arrays
            if hasattr(self, 'ds'):  # opened with Xarray
                if origin_marker is None:
                    origin_marker = 0
                    per_origin_marker = False
                else:
                    per_origin_marker = True
                H, H_om, lon_array, lat_array = self.get_density_xarray(
                    pixelsize_m=density_pixelsize_m, weights=density_weight)
                if per_origin_marker is True:
                    H = H_om[:, :, :, origin_marker]
            else:
                if origin_marker is not None:
                    raise ValueError(
                        'Separation by origin_marker is only active when imported from file with '
                        'open_xarray: https://opendrift.github.io/gallery/example_huge_output.html'
                    )
                H, H_submerged, H_stranded, lon_array, lat_array = \
                    self.get_density_array(pixelsize_m=density_pixelsize_m,
                                           weight=density_weight)
                # Total density: surface + submerged + stranded
                H = H + H_submerged + H_stranded

        # x, y are longitude, latitude -> i.e. in a PlateCarree CRS
        gcrs = ccrs.PlateCarree()

        def plot_timestep(i):
            """Sub function needed for matplotlib animation."""
            ret = [points, points_deactivated]  # list of elements to return for blitting
            ax.set_title('%s\n%s UTC' % (self._figure_title(), times[i]))
            if background is not None:
                ret.append(bg)
                if isinstance(background, xr.DataArray):
                    scalar = background[i, :, :].values
                else:
                    map_x, map_y, scalar, u_component, v_component = \
                        self.get_map_background(ax, background,
                                                time=times[i])
                # https://stackoverflow.com/questions/18797175/animation-with-pcolormesh-routine-in-matplotlib-how-do-i-initialize-the-data
                bg.set_array(scalar[:-1, :-1].ravel())
                if type(background) is list:
                    ret.append(bg_quiv)
                    bg_quiv.set_UVC(u_component[::skip, ::skip],
                                    v_component[::skip, ::skip])

            if lcs is not None:
                ax.pcolormesh(lcs['lon'],
                              lcs['lat'],
                              lcs['ALCS'][i, :, :],
                              alpha=bgalpha,
                              vmin=vmin,
                              vmax=vmax,
                              cmap=cmap,
                              transform=gcrs)

            if density is True:
                # Update density plot
                pm.set_array(H[i, :, :].ravel())
                ret.append(pm)

            # Move points
            if show_elements is True:
                points.set_offsets(np.c_[x[i, range(x.shape[1])],
                                         y[i, range(x.shape[1])]])
                # Elements deactivated before this frame are shown
                # (faded) at their final position
                points_deactivated.set_offsets(
                    np.c_[x_deactive[index_of_last_deactivated < i],
                          y_deactive[index_of_last_deactivated < i]])
                if markersizebymass:
                    # Scale marker area by remaining mass fraction
                    points.set_sizes(
                        100 * (self.history['mass'][:, i] /
                               (self.history['mass'][:, i] +
                                self.history['mass_degraded'][:, i] +
                                self.history['mass_volatilized'][:, i])))

                if color is not False:  # Update colors
                    points.set_array(colorarray[:, i])
                    if compare is not None:
                        for cd in compare_list:
                            cd['points_other'].set_array(colorarray[:, i])
                    if isinstance(color, str) or hasattr(color, '__len__'):
                        points_deactivated.set_array(colorarray_deactivated[
                            index_of_last_deactivated < i])

            if drifter is not None:
                drifter_pos.set_offsets(np.c_[drifter['x'][i],
                                              drifter['y'][i]])
                ret.append(drifter_pos)

            if show_elements is True:
                if compare is not None:
                    for cd in compare_list:
                        cd['points_other'].set_offsets(
                            np.c_[cd['x_other'][range(cd['x_other'].shape[0]),
                                                i],
                                  cd['y_other'][range(cd['x_other'].shape[0]),
                                                i]])
                        cd['points_other_deactivated'].set_offsets(np.c_[
                            cd['x_other_deactive'][
                                cd['index_of_last_deactivated_other'] < i],
                            cd['y_other_deactive'][
                                cd['index_of_last_deactivated_other'] < i]])
                        ret.append(cd['points_other'])
                        ret.append(cd['points_other_deactivated'])

            return ret

        # Find map coordinates and plot points with empty data
        fig, ax, crs, x, y, index_of_first, index_of_last = \
            self.set_up_map(buffer=buffer, corners=corners, lscale=lscale,
                            fast=fast, hide_landmask=hide_landmask, **kwargs)

        if surface_only is True:
            # Hide submerged (z < 0) positions
            z = self.get_property('z')[0]
            x[z < 0] = np.nan
            y[z < 0] = np.nan

        if show_trajectories is True:
            ax.plot(x, y, color='gray', alpha=trajectory_alpha, transform=gcrs)

        if color is not False and show_elements is True:
            if isinstance(color, str):
                # Color by a named history variable
                colorarray = self.get_property(color)[0].T
                colorarray = colorarray * unitfactor
                colorarray_deactivated = \
                    self.get_property(color)[0][
                        index_of_last[self.elements_deactivated.ID-1],
                        self.elements_deactivated.ID-1].T
            elif hasattr(color,
                         '__len__'):  # E.g. array/list of ensemble numbers
                colorarray_deactivated = color[self.elements_deactivated.ID -
                                               1]
                colorarray = np.tile(color, (self.steps_output, 1)).T
            else:
                colorarray = color
            if vmin is None:
                vmin = colorarray.min()
                vmax = colorarray.max()

        if background is not None:
            if isinstance(background, xr.DataArray):
                map_x = background.coords['lon_bin']
                map_y = background.coords['lat_bin']
                scalar = background[0, :, :]
                map_y, map_x = np.meshgrid(map_y, map_x)
            else:
                map_x, map_y, scalar, u_component, v_component = \
                    self.get_map_background(ax, background,
                                            time=self.start_time)
            bg = ax.pcolormesh(map_x,
                               map_y,
                               scalar[:-1, :-1],
                               alpha=bgalpha,
                               zorder=1,
                               antialiased=True,
                               linewidth=0.0,
                               rasterized=True,
                               vmin=vmin,
                               vmax=vmax,
                               cmap=cmap,
                               transform=gcrs)
            if type(background) is list:
                # Vector background: overlay quiver arrows
                bg_quiv = ax.quiver(map_x[::skip, ::skip],
                                    map_y[::skip, ::skip],
                                    u_component[::skip, ::skip],
                                    v_component[::skip, ::skip],
                                    scale=scale,
                                    zorder=1,
                                    transform=gcrs)

        if lcs is not None:
            if vmin is None:
                vmin = lcs['ALCS'].min()
                vmax = lcs['ALCS'].max()
            lcsh = ax.pcolormesh(lcs['lon'],
                                 lcs['lat'],
                                 lcs['ALCS'][0, :, :],
                                 vmin=vmin,
                                 vmax=vmax,
                                 cmap=cmap,
                                 transform=gcrs)

        times = self.get_time_array()[0]
        if show_elements is True:
            index_of_last_deactivated = \
                index_of_last[self.elements_deactivated.ID-1]
        if legend is None:
            legend = ['']

        if color is False:
            c = markercolor
        else:
            c = []

        # Initial (empty) scatter collections; data set per frame in
        # plot_timestep
        if markersizebymass:
            points = ax.scatter([], [],
                                c=c,
                                zorder=10,
                                edgecolor=[],
                                cmap=cmap,
                                alpha=.4,
                                vmin=vmin,
                                vmax=vmax,
                                label=legend[0],
                                transform=gcrs)
        else:
            points = ax.scatter([], [],
                                c=c,
                                zorder=10,
                                edgecolor=[],
                                cmap=cmap,
                                s=markersize,
                                vmin=vmin,
                                vmax=vmax,
                                label=legend[0],
                                transform=gcrs)

        if (compare is None) and (legend != ['']):
            # Build proxy legend markers colored from the colormap
            # NOTE(review): divides by len(legend) - 1; assumes at least
            # two legend entries - verify for single-entry legends
            markers = []
            for legend_index in np.arange(len(legend)):
                markers.append(
                    matplotlib.lines.Line2D(
                        [0], [0],
                        marker='o',
                        color='w',
                        markerfacecolor=cmap(legend_index / (len(legend) - 1)),
                        markersize=10,
                        label=legend[legend_index]))
            ax.legend(markers, legend, loc=legend_loc)

        # Plot deactivated elements, with transparency
        if markersizebymass:
            points_deactivated = ax.scatter([], [],
                                            c=c,
                                            zorder=9,
                                            vmin=vmin,
                                            vmax=vmax,
                                            s=markersize,
                                            cmap=cmap,
                                            edgecolor=[],
                                            alpha=0,
                                            transform=gcrs)
        else:
            points_deactivated = ax.scatter([], [],
                                            c=c,
                                            zorder=9,
                                            vmin=vmin,
                                            vmax=vmax,
                                            s=markersize,
                                            cmap=cmap,
                                            edgecolor=[],
                                            alpha=.3,
                                            transform=gcrs)

        x_deactive, y_deactive = (self.elements_deactivated.lon,
                                  self.elements_deactivated.lat)

        if compare is not None:
            for cn, cd in enumerate(compare_list):
                if legend != ['']:
                    legstr = legend[cn + 1]
                else:
                    legstr = None
                if color is False:
                    c = self.plot_comparison_colors[cn+1]
                else:
                    c = []
                cd['points_other'] = \
                    ax.scatter([], [], c=c, marker=compare_marker, cmap=cmap,
                               s=markersize, label=legstr, zorder=10, transform = gcrs)
                # Plot deactivated elements, with transparency
                cd['points_other_deactivated'] = \
                    ax.scatter([], [], alpha=.3, zorder=9, marker=compare_marker, cmap=cmap,
                               c=c, s=markersize, transform = gcrs)

            if legend != ['', '']:
                plt.legend(markerscale=2, loc=legend_loc)

        if density is True:
            cmap.set_under('w')
            H = np.ma.masked_where(H == 0, H)
            lat_array, lon_array = np.meshgrid(lat_array, lon_array)
            if vmax is None:
                vmax = H.max()
            pm = ax.pcolormesh(lon_array,
                               lat_array,
                               H[0, :, :],
                               vmin=0.1,
                               vmax=vmax,
                               cmap=cmap,
                               transform=gcrs)

        if drifter is not None:
            # Interpolate drifter time series onto simulation times
            sts = np.array(
                [t.total_seconds() for t in np.array(times) - times[0]])
            dts = np.array([
                t.total_seconds() for t in np.array(drifter['time']) - times[0]
            ])
            drifter['x'] = np.interp(sts, dts, drifter['lon'])
            drifter['y'] = np.interp(sts, dts, drifter['lat'])
            # Hide drifter positions outside the observed time span
            drifter['x'][sts < dts[0]] = np.nan
            drifter['x'][sts > dts[-1]] = np.nan
            drifter['y'][sts < dts[0]] = np.nan
            drifter['y'][sts > dts[-1]] = np.nan
            dlabel = drifter['label'] if 'label' in drifter else 'Drifter'
            dcolor = drifter['color'] if 'color' in drifter else 'r'
            dlinewidth = drifter['linewidth'] if 'linewidth' in drifter else 2
            dmarkersize = drifter[
                'markersize'] if 'markersize' in drifter else 20
            drifter_pos = ax.scatter([], [],
                                     c=dcolor,
                                     zorder=15,
                                     s=dmarkersize,
                                     label=dlabel,
                                     transform=gcrs)
            ax.plot(drifter['x'],
                    drifter['y'],
                    color=dcolor,
                    linewidth=dlinewidth,
                    zorder=14,
                    transform=gcrs)
            plt.legend()

        fig.canvas.draw()
        fig.set_tight_layout(True)

        # Attach colorbar for whichever scalar mappable is shown
        if colorbar is True:
            if color is not False:
                if isinstance(color, str) or clabel is not None:
                    if clabel is None:
                        clabel = color
                item = points
            elif density is not False:
                item = pm
                if clabel is None:
                    clabel = 'density'
            elif lcs is not None:
                item = lcsh
                if clabel is None:
                    clabel = 'LCS'
            elif background is not None:
                item = bg
                if clabel is None:
                    if isinstance(background, xr.DataArray):
                        clabel = background.name
                    else:
                        clabel = background

            cb = fig.colorbar(item,
                              orientation='horizontal',
                              pad=.05,
                              aspect=30,
                              shrink=.8,
                              drawedges=False)
            cb.set_label(clabel)

        frames = x.shape[0]
        if compare is not None:
            # Animate only the overlapping time span
            frames = min(x.shape[0], cd['x_other'].shape[1])

        # blit is now provided to animation()
        #blit = sys.platform != 'darwin'  # blitting does not work on mac os

        self.__save_or_plot_animation__(plt.gcf(),
                                        plot_timestep,
                                        filename,
                                        frames,
                                        fps,
                                        interval=50,
                                        blit = blit)

        logger.info('Time to make animation: %s' %
                    (datetime.now() - start_time))
def __save_or_plot_animation__(self, figure, plot_timestep, filename,
frames, fps, interval, blit):
if filename is not None or 'sphinx_gallery' in sys.modules:
logger.debug("Saving animation..")
self.__save_animation__(figure,
plot_timestep,
filename,
frames=frames,
fps=fps,
blit=blit,
interval=interval)
else:
logger.debug("Showing animation..")
anim=animation.FuncAnimation(figure,
plot_timestep,
blit=blit,
frames=frames,
interval=interval)
try:
plt.show()
except AttributeError as e:
logger.exception(e)
pass
    def animation_profile(self,
                          filename=None,
                          compare=None,
                          legend=['', ''],
                          markersize=5,
                          fps=20,
                          color=None,
                          cmap=None,
                          vmin=None,
                          vmax=None,
                          legend_loc=None):
        """Animate vertical profile of the last run.

        Plots element longitude vs depth per output time step,
        optionally colored by a history variable, and optionally
        compared with another simulation (object or filename).
        """
        start_time = datetime.now()

        def plot_timestep(i):
            """Sub function needed for matplotlib animation."""
            #plt.gcf().gca().set_title(str(i))
            ax.set_title('%s UTC' % times[i])
            if PlotColors:
                # Scatter collection: update positions and colors
                points.set_offsets(
                    np.array(
                        [x[range(x.shape[0]), i].T, z[range(x.shape[0]),
                                                      i].T]).T)
                points.set_array(colorarray[:, i])
            else:
                points.set_data(x[range(x.shape[0]), i], z[range(x.shape[0]),
                                                           i])
            points_deactivated.set_data(
                x_deactive[index_of_last_deactivated < i],
                z_deactive[index_of_last_deactivated < i])

            if compare is not None:
                points_other.set_data(x_other[range(x_other.shape[0]), i],
                                      z_other[range(x_other.shape[0]), i])
                points_other_deactivated.set_data(
                    x_other_deactive[index_of_last_deactivated_other < i],
                    z_other_deactive[index_of_last_deactivated_other < i])
                return points, points_deactivated, points_other,
            else:
                return points, points_deactivated,

        # Colored scatter mode only when not comparing and a legend is given
        PlotColors = (compare is None) and (legend != ['', ''])
        if PlotColors:
            if cmap is None:
                cmap = 'jet'
            if isinstance(cmap, str):
                cmap = matplotlib.cm.get_cmap(cmap)
            # NOTE(review): colorarray is only defined when color is a
            # string; other truthy color values would leave it unset and
            # fail in plot_timestep - verify intended usage
            if color is not False:
                if isinstance(color, str):
                    colorarray = self.get_property(color)[0].T

        # Set up plot
        index_of_first, index_of_last = \
            self.index_of_activation_and_deactivation()
        z = self.get_property('z')[0].T
        x = self.get_property('lon')[0].T
        #seafloor_depth = \
        #    -self.get_property('sea_floor_depth_below_sea_level')[0].T
        fig = plt.figure(figsize=(10, 6.))  # Suitable aspect ratio
        ax = fig.gca()
        plt.xlabel('Longitude [degrees]')
        plt.ylabel('Depth [m]')
        times = self.get_time_array()[0]
        index_of_last_deactivated = \
            index_of_last[self.elements_deactivated.ID-1]
        if PlotColors:
            points = ax.scatter([], [],
                                c=[],
                                zorder=10,
                                edgecolor=[],
                                cmap=cmap,
                                s=markersize,
                                vmin=vmin,
                                vmax=vmax)
            # Proxy legend markers colored from the colormap
            markers = []
            for legend_index in np.arange(len(legend)):
                markers.append(
                    matplotlib.lines.Line2D(
                        [0], [0],
                        marker='o',
                        linewidth=0,
                        markeredgewidth=0,
                        markerfacecolor=cmap(legend_index / (len(legend) - 1)),
                        markersize=10,
                        label=legend[legend_index]))
            leg = ax.legend(markers, legend, loc=legend_loc)
            leg.set_zorder(20)
        else:
            points = plt.plot([], [],
                              '.k',
                              label=legend[0],
                              markersize=markersize)[0]
            # Plot deactivated elements, with transparency
        points_deactivated = plt.plot([], [], '.k', alpha=.3)[0]
        x_deactive = self.elements_deactivated.lon
        z_deactive = self.elements_deactivated.z

        if compare is not None:
            if type(compare) is str:
                # Other is given as filename
                other = self.__class__(loglevel=0)
                other.io_import_file(compare)
            else:
                # Other is given as an OpenDrift object
                other = compare
            z_other = other.get_property('z')[0].T
            # NOTE(review): x_other is taken from self, not other -
            # looks inconsistent with z_other above; verify intent
            x_other = self.get_property('lon')[0].T
            points_other = plt.plot(x_other[0, 0],
                                    z_other[0, 0],
                                    '.r',
                                    label=legend[1],
                                    markersize=markersize)[0]
            # Plot deactivated elements, with transparency
            points_other_deactivated = plt.plot([], [], '.r', alpha=.3)[0]
            x_other_deactive = other.elements_deactivated.lon
            z_other_deactive = other.elements_deactivated.z
            firstlast = np.ma.notmasked_edges(x_other, axis=1)
            index_of_last_other = firstlast[1][1]
            index_of_last_deactivated_other = \
                index_of_last_other[other.elements_deactivated.ID-1]
            xmax = np.maximum(x.max(), x_other.max())
            xmin = np.minimum(x.min(), x_other.min())
            zmax = np.maximum(z.max(), z_other.max())
            zmin = np.minimum(z.min(), z_other.min())
        else:
            xmin = x.min()
            xmax = x.max()
            zmin = z.min()
            zmax = z.max()

        # Set figure limits
        sky = (zmax - zmin) * .1  # Sky height is 10% of water depth
        plt.xlim([xmin, xmax])
        plt.ylim([zmin, sky])
        # Draw sky and water as background rectangles
        ax.add_patch(
            plt.Rectangle((xmin, 0), xmax - xmin, sky, color='lightsteelblue'))
        ax.add_patch(
            plt.Rectangle((xmin, zmin),
                          xmax - xmin,
                          -zmin,
                          color='cornflowerblue'))

        if legend != ['', ''] and PlotColors is False:
            plt.legend(loc=4)

        self.__save_or_plot_animation__(plt.gcf(),
                                        plot_timestep,
                                        filename,
                                        x.shape[1],
                                        fps,
                                        interval=150,
                                        blit=False)

        logger.info('Time to make animation: %s' %
                    (datetime.now() - start_time))
def _get_comparison_xy_for_plots(self, compare):
if not type(compare) is list:
compare = [compare]
compare_list = [{}] * len(compare)
lonmin = 1000
lonmax = -1000
latmin = 1000
latmax = -1000
for cn, comp in enumerate(compare):
compare_list[cn] = {}
cd = compare_list[cn] # pointer to dict with data
if type(comp) is str:
# Other is given as filename
other = self.__class__(loglevel=0)
other.io_import_file(comp)
else:
# Other is given as an OpenDrift object
other = comp
lonmin = np.minimum(lonmin, np.nanmin(other.history['lon']))
lonmax = np.maximum(lonmax, np.nanmax(other.history['lon']))
latmin = np.minimum(latmin, np.nanmin(other.history['lat']))
latmax = np.maximum(latmax, np.nanmax(other.history['lat']))
# Find map coordinates of comparison simulations
cd['x_other'], cd['y_other'] = \
(other.history['lon'].copy(), other.history['lat'].copy())
cd['x_other_deactive'], cd['y_other_deactive'] = \
(other.elements_deactivated.lon.copy(),
other.elements_deactivated.lat.copy())
cd['firstlast'] = np.ma.notmasked_edges(cd['x_other'], axis=1)
cd['index_of_last_other'] = cd['firstlast'][1][1]
cd['index_of_last_deactivated_other'] = \
cd['index_of_last_other'][other.elements_deactivated.ID-1]
compare_args = {
'compare_lonmin': lonmin,
'compare_lonmax': lonmax,
'compare_latmin': latmin,
'compare_latmax': latmax
}
return compare_list, compare_args
def plot(self,
background=None,
buffer=.2,
corners=None,
linecolor=None,
filename=None,
show=True,
vmin=None,
vmax=None,
compare=None,
cmap='jet',
lvmin=None,
lvmax=None,
skip=None,
scale=None,
show_scalar=True,
contourlines=False,
trajectory_dict=None,
colorbar=True,
linewidth=1,
lcs=None,
show_elements=True,
show_initial=True,
density_pixelsize_m=1000,
bgalpha=1,
clabel=None,
surface_color=None,
submerged_color=None,
markersize=20,
title='auto',
legend=True,
legend_loc='best',
lscale=None,
fast=False,
hide_landmask=False,
**kwargs):
"""Basic built-in plotting function intended for developing/debugging.
Plots trajectories of all particles.
Positions marked with colored stars:
- green: all start positions
- red: deactivated particles
- blue: particles still active at end of simulation
Requires availability of Cartopy.
Arguments:
background: string, name of variable (standard_name) which will
be plotted as background of trajectories, provided that it
can be read with one of the available readers.
buffer: float; spatial buffer of plot in degrees of
longitude/latitude around particle collection.
background: name of variable to be plotted as background field.
Use two element list for vector fields, e.g. ['x_wind', 'y_wind']
vmin, vmax: minimum and maximum values for colors of background.
linecolor: name of variable to be used for coloring trajectories, or matplotlib color string.
lvmin, lvmax: minimum and maximum values for colors of trajectories.
lscale (string): resolution of land feature ('c', 'l', 'i', 'h', 'f', 'auto'). default is 'auto'.
fast (bool): use some optimizations to speed up plotting at the cost of accuracy
:param hide_landmask: do not plot landmask (default False).
:type hide_landmask: bool
"""
mappable = None
if self.history is not None and self.num_elements_total(
) == 0 and not hasattr(self, 'ds'):
raise ValueError('Please run simulation before animating')
start_time = datetime.now()
# x, y are longitude, latitude -> i.e. in a PlateCarree CRS
gcrs = ccrs.PlateCarree()
if compare is not None:
# Extend map coverage to cover comparison simulations
cd, compare_args = self._get_comparison_xy_for_plots(compare)
kwargs.update(compare_args)
if trajectory_dict is not None:
# Extend map coverage to cover provided trajectory
ttime = np.array(trajectory_dict['time'])
i = np.where((ttime >= self.start_time) & (ttime <= self.time))[0]
trajectory_dict['lon'] = np.atleast_1d(trajectory_dict['lon'])
trajectory_dict['lat'] = np.atleast_1d(trajectory_dict['lat'])
tlonmin = trajectory_dict['lon'][i].min()
tlonmax = trajectory_dict['lon'][i].max()
tlatmin = trajectory_dict['lat'][i].min()
tlatmax = trajectory_dict['lat'][i].max()
if 'compare_lonmin' not in kwargs:
kwargs['compare_lonmin'] = tlonmin
kwargs['compare_lonmax'] = tlonmax
kwargs['compare_latmin'] = tlatmin
kwargs['compare_latmax'] = tlatmax
else:
kwargs['compare_lonmin'] = np.minimum(kwargs['compare_lonmin'],
tlonmin)
kwargs['compare_lonmax'] = np.maximum(kwargs['compare_lonmax'],
tlonmax)
kwargs['compare_latmin'] = np.minimum(kwargs['compare_latmin'],
tlatmin)
kwargs['compare_latmax'] = np.maximum(kwargs['compare_latmax'],
tlatmax)
fig, ax, crs, x, y, index_of_first, index_of_last = \
self.set_up_map(buffer=buffer, corners=corners, lscale=lscale, fast=fast, hide_landmask=hide_landmask, **kwargs)
markercolor = self.plot_comparison_colors[0]
# The more elements, the more transparent we make the lines
min_alpha = 0.1
max_elements = 5000.0
alpha = min_alpha**(2 * (self.num_elements_total() - 1) /
(max_elements - 1))
alpha = np.max((min_alpha, alpha))
if legend is False:
legend = None
if self.history is not None and linewidth != 0:
# Plot trajectories
from matplotlib.colors import is_color_like
if linecolor is None or is_color_like(linecolor) is True:
if is_color_like(linecolor):
linecolor = linecolor
else:
linecolor = 'gray'
if compare is not None and legend is not None:
if legend is True:
if hasattr(compare, 'len'):
numleg = len(compare)
else:
numleg = 2
legend = [
'Simulation %d' % (i + 1) for i in range(numleg)
]
ax.plot(x[:, 0],
y[:, 0],
color=linecolor,
alpha=alpha,
label=legend[0],
linewidth=linewidth,
transform=gcrs)
ax.plot(x,
y,
color=linecolor,
alpha=alpha,
label='_nolegend_',
linewidth=linewidth,
transform=gcrs)
else:
ax.plot(x,
y,
color=linecolor,
alpha=alpha,
linewidth=linewidth,
transform=gcrs)
else:
#colorbar = True
# Color lines according to given parameter
try:
if isinstance(linecolor, str):
param = self.history[linecolor]
elif hasattr(linecolor, '__len__'):
param = np.tile(linecolor, (self.steps_output, 1)).T
else:
param = linecolor
except:
raise ValueError(
'Available parameters to be used for linecolors: ' +
str(self.history.dtype.fields))
from matplotlib.collections import LineCollection
for i in range(x.shape[1]):
vind = np.arange(index_of_first[i], index_of_last[i] + 1)
points = np.array([x[vind, i].T,
y[vind, i].T]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]],
axis=1)
if lvmin is None:
lvmin = param.min()
lvmax = param.max()
lc = LineCollection(
segments,
#cmap=plt.get_cmap('Spectral'),
cmap=cmap,
norm=plt.Normalize(lvmin, lvmax),
transform=gcrs)
#lc.set_linewidth(3)
lc.set_array(param.T[vind, i])
mappable = ax.add_collection(lc)
#axcb = fig.colorbar(lc, ax = ax, orientation = 'horizontal')
#try: # Add unit to colorbar if available
# colorbarstring = linecolor + ' [%s]' % \
# (self.history_metadata[linecolor]['units'])
#except:
# colorbarstring = linecolor
##axcb.set_label(colorbarstring)
#axcb.set_label(colorbarstring, size=14)
#axcb.ax.tick_params(labelsize=14)
if compare is None:
label_initial = 'initial (%i)' % x.shape[1]
label_active = 'active (%i)' % (x.shape[1] -
self.num_elements_deactivated())
color_initial = self.status_colors['initial']
color_active = self.status_colors['active']
else:
label_initial = None
label_active = None
color_initial = 'gray'
color_active = 'gray'
if show_elements is True:
if show_initial is True:
ax.scatter(x[index_of_first, range(x.shape[1])],
y[index_of_first, range(x.shape[1])],
s=markersize,
zorder=10,
edgecolor=markercolor,
linewidths=.2,
c=color_initial,
label=label_initial,
transform=gcrs)
if surface_color is not None:
color_active = surface_color
label_active = 'surface'
ax.scatter(x[index_of_last, range(x.shape[1])],
y[index_of_last, range(x.shape[1])],
s=markersize,
zorder=3,
edgecolor=markercolor,
linewidths=.2,
c=color_active,
label=label_active,
transform=gcrs)
#if submerged_color is not None:
# map.scatter(x[range(x.shape[0]), index_of_last],
# y[range(x.shape[0]), index_of_last], s=markersize,
# zorder=3, edgecolor=markercolor, linewidths=.2,
# c=submerged_color, label='submerged')
x_deactivated, y_deactivated = (self.elements_deactivated.lon,
self.elements_deactivated.lat)
# Plot deactivated elements, labeled by deactivation reason
for statusnum, status in enumerate(self.status_categories):
if status == 'active':
continue # plotted above
if status not in self.status_colors:
# If no color specified, pick an unused one
for color in [
'red', 'blue', 'green', 'black', 'gray', 'cyan',
'DarkSeaGreen', 'brown'
]:
if color not in self.status_colors.values():
self.status_colors[status] = color
break
indices = np.where(
self.elements_deactivated.status == statusnum)
if len(indices[0]) > 0:
if (status == 'seeded_on_land'
or status == 'seeded_at_nodata_position'):
zorder = 11
else:
zorder = 3
if compare is not None:
legstr = None
else:
legstr = '%s (%i)' % (status, len(indices[0]))
if compare is None:
color_status = self.status_colors[status]
else:
color_status = 'gray'
ax.scatter(x_deactivated[indices],
y_deactivated[indices],
s=markersize,
zorder=zorder,
edgecolor=markercolor,
linewidths=.1,
c=color_status,
label=legstr,
transform=gcrs)
if compare is not None:
for i, c in enumerate(cd):
if legend != None:
legstr = legend[i + 1]
else:
legstr = None
ax.plot(c['x_other'].T[:, 0],
c['y_other'].T[:, 0],
color=self.plot_comparison_colors[i + 1],
linestyle='-',
label=legstr,
linewidth=linewidth,
transform=gcrs)
ax.plot(c['x_other'].T,
c['y_other'].T,
color=self.plot_comparison_colors[i + 1],
linestyle='-',
label='_nolegend_',
linewidth=linewidth,
transform=gcrs)
ax.scatter(c['x_other'][range(c['x_other'].shape[0]),
c['index_of_last_other']],
c['y_other'][range(c['y_other'].shape[0]),
c['index_of_last_other']],
s=markersize,
zorder=3,
edgecolor=markercolor,
linewidths=.2,
c=self.plot_comparison_colors[i + 1],
transform=gcrs)
if background is not None:
if hasattr(self, 'time'):
time = self.time - self.time_step_output
else:
time = None
if isinstance(background, xr.DataArray):
map_x = background.coords['lon_bin']
map_y = background.coords['lat_bin']
scalar = background
map_y, map_x = np.meshgrid(map_y, map_x)
elif background == 'residence':
scalar, lon_res, lat_res = self.get_residence_time(
pixelsize_m=density_pixelsize_m)
scalar[scalar == 0] = np.nan
lon_res, lat_res = np.meshgrid(lon_res[0:-1], lat_res[0:-1])
lon_res = lon_res.T
lat_res = lat_res.T
map_x, map_y = (lon_res, lat_res)
else:
map_x, map_y, scalar, u_component, v_component = \
self.get_map_background(ax, background, time=time)
#self.time_step_output)
if show_scalar is True:
if contourlines is False:
scalar = np.ma.masked_invalid(scalar)
mappable = ax.pcolormesh(map_x,
map_y,
scalar,
alpha=bgalpha,
zorder=1,
vmin=vmin,
vmax=vmax,
cmap=cmap,
transform=gcrs)
else:
if contourlines is True:
CS = ax.contour(map_x,
map_y,
scalar,
colors='gray',
transform=gcrs)
else:
# contourlines is an array of values
CS = ax.contour(map_x,
map_y,
scalar,
contourlines,
colors='gray',
transform=gcrs)
plt.clabel(CS, fmt='%g')
if mappable is not None and colorbar is True:
cb = fig.colorbar(mappable,
orientation='horizontal',
pad=.05,
aspect=30,
shrink=.8,
drawedges=False)
# TODO: need better control of colorbar content
if clabel is not None:
cb.set_label(clabel)
elif isinstance(linecolor, str) and linecolor != 'gray':
cb.set_label(str(linecolor))
if background is not None and clabel is None:
if isinstance(background, xr.DataArray):
cb.set_label(background.name)
else:
cb.set_label(str(background))
if type(background) is list:
ax.quiver(map_x[::skip, ::skip],
map_y[::skip, ::skip],
u_component[::skip, ::skip],
v_component[::skip, ::skip],
scale=scale,
transform=gcrs, zorder=1)
if lcs is not None:
map_x_lcs, map_y_lcs = (lcs['lon'], lcs['lat'])
ax.pcolormesh(map_x_lcs,
map_y_lcs,
lcs['ALCS'][0, :, :],
alpha=1,
vmin=vmin,
vmax=vmax,
zorder=0,
cmap=cmap,
transform=gcrs)
if title is not None:
if title == 'auto':
if hasattr(self, 'time'):
plt.title('%s\n%s to %s UTC (%i steps)' %
(self._figure_title(),
self.start_time.strftime('%Y-%m-%d %H:%M'),
self.time.strftime('%Y-%m-%d %H:%M'),
self.steps_output))
else:
plt.title(
'%s\n%i elements seeded at %s UTC' %
(self._figure_title(), self.num_elements_scheduled(),
self.elements_scheduled_time[0].strftime(
'%Y-%m-%d %H:%M')))
else:
plt.title(title)
if trajectory_dict is not None:
self._plot_trajectory_dict(ax, trajectory_dict)
try:
handles, labels = ax.get_legend_handles_labels()
if legend is not None and len(handles) > 0:
plt.legend(loc=legend_loc, markerscale=2)
except Exception as e:
logger.warning('Cannot plot legend, due to bug in matplotlib:')
logger.warning(traceback.format_exc())
#plt.gca().tick_params(labelsize=14)
#fig.canvas.draw()
#fig.set_tight_layout(True)
if filename is not None:
plt.savefig(filename)
logger.info('Time to make plot: ' +
str(datetime.now() - start_time))
else:
if show is True:
plt.show()
return ax, plt
def _substance_name(self):
return None
def _figure_title(self):
if self._substance_name() is None:
return 'OpenDrift - ' + type(self).__name__
else:
return 'OpenDrift - ' + type(
self).__name__ + ' (%s)' % self._substance_name()
    def _plot_trajectory_dict(self, ax, trajectory_dict):
        '''Plot provided trajectory along with simulated.

        trajectory_dict must contain keys 'time', 'lon', 'lat' and
        'linestyle'; an optional 'label' is used for the legend entry.
        Only the part of the trajectory overlapping the simulation time
        span is drawn; the start is marked with a filled black circle
        and the end with a black cross.
        '''
        time = trajectory_dict['time']
        time = np.array(time)
        # Indices of positions within the simulation time span
        i = np.where((time >= self.start_time) & (time <= self.time))[0]
        x, y = (np.atleast_1d(trajectory_dict['lon'])[i],
                np.atleast_1d(trajectory_dict['lat'])[i])
        ls = trajectory_dict['linestyle']
        if 'label' in trajectory_dict:
            label = trajectory_dict['label']
        else:
            label = None
        gcrs = ccrs.PlateCarree()  # positions are given as lon/lat
        ax.plot(x, y, ls, linewidth=2, transform=gcrs, label=label)
        ax.plot(x[0], y[0], 'ok', transform=gcrs)  # start marker
        ax.plot(x[-1], y[-1], 'xk', transform=gcrs)  # end marker
    def get_map_background(self, ax, background, time=None):
        """Get a background field for plotting on a map or animation.

        Parameters:
            ax: cartopy GeoAxes whose extent defines the area of interest.
            background: variable name (scalar field) or a list of two
                variable names (vector field, e.g. velocity components).
            time: time at which the field is requested; if None, the time
                of the first seeded element is used when available.

        Returns:
            map_x, map_y: lon/lat coordinate arrays of the field.
            scalar: field values (vector magnitude for a vector request),
                with invalid values masked.
            u_component, v_component: vector components, or None for a
                scalar request.
        """
        # TODO: this method should be made more robust
        if type(background) is list:
            variable = background[0]  # A vector is requested
        else:
            variable = background  # A scalar is requested
        # Pick the first reader providing the variable and covering 'time'.
        # NOTE(review): if no reader matches, 'reader' is simply the last
        # one iterated (or unbound if there are no readers) — confirm
        # callers guarantee at least one suitable reader.
        for readerName in self.readers:
            reader = self.readers[readerName]
            if variable in reader.variables:
                if time is None or reader.start_time is None or (
                        time >= reader.start_time and time <= reader.end_time
                ) or (reader.always_valid is True):
                    break
        if time is None:
            if hasattr(self, 'elements_scheduled_time'):
                # Using time of first seeded element
                time = self.elements_scheduled_time[0]
        # Get reader coordinates covering given map area
        axisproj = pyproj.Proj(ax.projection.proj4_params)  # NOTE(review): unused
        xmin, xmax, ymin, ymax = ax.get_extent(ccrs.PlateCarree())
        cornerlons = np.array([xmin, xmin, xmax, xmax])
        cornerlats = np.array([ymin, ymax, ymin, ymax])
        reader_x, reader_y = reader.lonlat2xy(cornerlons, cornerlats)
        if sum(~np.isfinite(reader_x + reader_y)) > 0:
            # Axis corner points are not within reader domain; fall back
            # to the full reader extent
            reader_x = np.array([reader.xmin, reader.xmax])
            reader_y = np.array([reader.ymin, reader.ymax])
        else:
            reader_x = np.linspace(reader_x.min(), reader_x.max(), 10)
            reader_y = np.linspace(reader_y.min(), reader_y.max(), 10)
        data = reader.get_variables(background, time, reader_x, reader_y, None)
        reader_x, reader_y = np.meshgrid(data['x'], data['y'])
        if type(background) is list:  # Vector field requested
            u_component = data[background[0]]
            v_component = data[background[1]]
            if isinstance(u_component, list):  # Ensemble reader, use first member
                u_component = u_component[0]
                v_component = v_component[0]
            # Magnitude; sqrt of masked/NaN values may warn, hence errstate
            with np.errstate(invalid='ignore'):
                scalar = np.sqrt(u_component**2 + v_component**2)
            # NB: rotation not completed!
            u_component, v_component = reader.rotate_vectors(
                reader_x, reader_y, u_component, v_component, reader.proj,
                ccrs.PlateCarree(globe=ccrs.Globe(datum='WGS84',
                                                  ellipse='WGS84')).proj4_init)
        else:
            scalar = data[background]
            if isinstance(scalar, list):  # Ensemble reader, using first member
                scalar = scalar[0]
            u_component = v_component = None
        if reader.projected is False:
            # For unprojected readers x/y are indices; clip negatives to 0
            reader_y[reader_y < 0] = 0
            reader_x[reader_x < 0] = 0
        rlons, rlats = reader.xy2lonlat(reader_x, reader_y)
        if rlons.max() > 360:
            rlons = rlons - 360
        map_x, map_y = (rlons, rlats)
        scalar = np.ma.masked_invalid(scalar)
        return map_x, map_y, scalar, u_component, v_component
def get_lonlat_bins(self, pixelsize_m):
latmin = self.latmin
latmax = self.latmax
lonmin = self.lonmin
lonmax = self.lonmax
deltalat = pixelsize_m / 111000.0 # m to degrees
deltalon = deltalat / np.cos(np.radians((latmin + latmax) / 2))
latbin = np.arange(latmin - deltalat, latmax + deltalat, deltalat)
lonbin = np.arange(lonmin - deltalon, lonmax + deltalon, deltalon)
return lonbin, latbin
    def get_histogram(self, pixelsize_m, **kwargs):
        """Compute lon/lat density histograms per origin_marker.

        Requires the simulation data to be available as self.ds (xarray
        Dataset) with variables lon, lat and origin_marker.  Extra
        keyword arguments (e.g. 'weights') are passed on to
        xhistogram.xarray.histogram.

        Returns an xr.DataArray named 'density_origin_marker' with
        dimensions (time, lon_bin, lat_bin, origin_marker).
        """
        from xhistogram.xarray import histogram
        lonbin, latbin = self.get_lonlat_bins(pixelsize_m)
        max_om = int(self.ds.origin_marker.max().compute().values)
        origin_marker = range(max_om + 1)
        # 1-D weights are broadcast along the trajectory dimension
        if 'weights' in kwargs and kwargs[
                'weights'] is not None and kwargs['weights'].ndim < 2:
            kwargs['weights'] = xr.DataArray(
                kwargs['weights'],
                dims=['trajectory'],
                coords={'trajectory': self.ds.coords['trajectory']})
        # Xarray DataArray to store one histogram per origin_marker
        h_om = xr.DataArray(np.zeros(
            (len(self.ds.coords['time']), len(lonbin) - 1, len(latbin) - 1,
             max_om + 1)),
                            name='density_origin_marker',
                            dims=('time', 'lon_bin', 'lat_bin',
                                  'origin_marker'))
        h_om.coords['time'] = self.ds.coords['time']
        h_om.coords['origin_marker'] = origin_marker
        for om in origin_marker:
            logger.info('\tcalculating for origin_marker %s...' % om)
            # Mask out elements with a different origin_marker
            h = histogram(self.ds.lon.where(self.ds.origin_marker == om),
                          self.ds.lat.where(self.ds.origin_marker == om),
                          bins=[lonbin, latbin],
                          dim=['trajectory'],
                          **kwargs)
            if om == 0:
                # Bin-center coordinates are identical for all markers
                h_om.coords['lon_bin'] = h.coords['lon_bin']
                h_om.coords['lat_bin'] = h.coords['lat_bin']
            h_om[:, :, :, om] = h  #.copy()
        return h_om
def get_density_array(self, pixelsize_m, weight=None):
lon = self.get_property('lon')[0]
lat = self.get_property('lat')[0]
times = self.get_time_array()[0]
deltalat = pixelsize_m / 111000.0 # m to degrees
deltalon = deltalat / np.cos(
np.radians((np.nanmin(lat) + np.nanmax(lat)) / 2))
lat_array = np.arange(
np.nanmin(lat) - deltalat,
np.nanmax(lat) + deltalat, deltalat)
lon_array = np.arange(
np.nanmin(lon) - deltalat,
np.nanmax(lon) + deltalon, deltalon)
bins = (lon_array, lat_array)
z = self.get_property('z')[0]
if weight is not None:
weight_array = self.get_property(weight)[0]
status = self.get_property('status')[0]
lon_submerged = lon.copy()
lat_submerged = lat.copy()
lon_stranded = lon.copy()
lat_stranded = lat.copy()
lon_submerged[z >= 0] = 1000
lat_submerged[z >= 0] = 1000
lon[z < 0] = 1000
lat[z < 0] = 1000
H = np.zeros((len(times), len(lon_array) - 1,
len(lat_array) - 1)) #.astype(int)
H_submerged = H.copy()
H_stranded = H.copy()
try:
strandnum = self.status_categories.index('stranded')
lon_stranded[status != strandnum] = 1000
lat_stranded[status != strandnum] = 1000
contains_stranded = True
except ValueError:
contains_stranded = False
for i in range(len(times)):
if weight is not None:
weights = weight_array[i, :]
else:
weights = None
H[i,:,:], dummy, dummy = \
np.histogram2d(lon[i,:], lat[i,:],
weights=weights, bins=bins)
H_submerged[i,:,:], dummy, dummy = \
np.histogram2d(lon_submerged[i,:], lat_submerged[i,:],
weights=weights, bins=bins)
if contains_stranded is True:
H_stranded[i,:,:], dummy, dummy = \
np.histogram2d(lon_stranded[i,:], lat_stranded[i,:],
weights=weights, bins=bins)
return H, H_submerged, H_stranded, lon_array, lat_array
    def get_density_array_proj(self,
                               pixelsize_m,
                               density_proj=None,
                               llcrnrlon=None,
                               llcrnrlat=None,
                               urcrnrlon=None,
                               urcrnrlat=None,
                               weight=None):
        """Compute gridded element densities on a given projection.

        Like get_density_array, but the grid is regular in the given
        pyproj projection (density_proj) rather than in lon/lat, and the
        area may be constrained by lower-left/upper-right corners
        (llcrnr*/urcrnr*, given in lon/lat).

        Returns (H, H_submerged, H_stranded, lon_array, lat_array) where
        the H arrays have shape (ntimes, nx-1, ny-1) and lon/lat arrays
        give the grid-node positions in geographic coordinates.
        """
        #
        # TODO: should be merged with get_density_array
        # KFD Jan 2021
        #
        lon = self.get_property('lon')[0]
        lat = self.get_property('lat')[0]
        times = self.get_time_array()[0]
        #deltalat = pixelsize_m/111000.0 # m to degrees
        #deltalon = deltalat/np.cos(np.radians((np.nanmin(lat) +
        # np.nanmax(lat))/2))
        #lat_array = np.arange(np.nanmin(lat)-deltalat,
        # np.nanmax(lat)+deltalat, deltalat)
        #lon_array = np.arange(np.nanmin(lon)-deltalat,
        # np.nanmax(lon)+deltalon, deltalon)
        #bins=(lon_array, lat_array)
        if density_proj is None:  # add default projection with equal-area property
            # NOTE(review): the second assignment overwrites the Mollweide
            # (equal-area) default with a plain lon/lat projection —
            # confirm whether this is intentional.
            density_proj = pyproj.Proj('+proj=moll +ellps=WGS84 +lon_0=0.0')
            density_proj = pyproj.Proj('+proj=longlat +a=6371229 +no_defs')
        # create a grid in the specified projection
        x, y = density_proj(lon, lat)
        if llcrnrlon is not None:
            llcrnrx, llcrnry = density_proj(llcrnrlon, llcrnrlat)
            urcrnrx, urcrnry = density_proj(urcrnrlon, urcrnrlat)
        else:
            # Default extent: element bounding box padded by one pixel
            llcrnrx, llcrnry = x.min() - pixelsize_m, y.min() - pixelsize_m
            urcrnrx, urcrnry = x.max() + pixelsize_m, y.max() + pixelsize_m
        x_array = np.arange(llcrnrx, urcrnrx, pixelsize_m)
        y_array = np.arange(llcrnry, urcrnry, pixelsize_m)
        bins = (x_array, y_array)
        # Sentinel position outside the grid; elements moved here are
        # silently dropped by histogram2d
        outsidex, outsidey = max(x_array) * 1.5, max(y_array) * 1.5
        z = self.get_property('z')[0]
        if weight is not None:
            weight_array = self.get_property(weight)[0]
        status = self.get_property('status')[0]
        #lon_submerged = lon.copy()
        #lat_submerged = lat.copy()
        #lon_stranded = lon.copy()
        #lat_stranded = lat.copy()
        #lon_submerged[z>=0] = 1000
        #lat_submerged[z>=0] = 1000
        #lon[z<0] = 1000
        #lat[z<0] = 1000
        #H = np.zeros((len(times), len(lon_array) - 1,
        # len(lat_array) - 1))#.astype(int)
        # Route elements to surface/submerged/stranded histograms via the
        # sentinel positions
        x_submerged = x.copy()
        y_submerged = y.copy()
        x_stranded = x.copy()
        y_stranded = y.copy()
        x_submerged[z >= 0] = outsidex
        y_submerged[z >= 0] = outsidey
        x[z < 0] = outsidex
        y[z < 0] = outsidey
        H = np.zeros(
            (len(times), len(x_array) - 1, len(y_array) - 1))  #.astype(int)
        H_submerged = H.copy()
        H_stranded = H.copy()
        try:
            strandnum = self.status_categories.index('stranded')
            #lon_stranded[status!=strandnum] = 1000
            #lat_stranded[status!=strandnum] = 1000
            x_stranded[status != strandnum] = outsidex
            y_stranded[status != strandnum] = outsidey
            contains_stranded = True
        except ValueError:  # no 'stranded' status in this simulation
            contains_stranded = False
        for i in range(len(times)):
            if weight is not None:
                weights = weight_array[i, :]
            else:
                weights = None
            H[i,:,:], dummy, dummy = \
                np.histogram2d(x[i,:], y[i,:],
                               weights=weights, bins=bins)
            H_submerged[i,:,:], dummy, dummy = \
                np.histogram2d(x_submerged[i,:], y_submerged[i,:],
                               weights=weights, bins=bins)
            if contains_stranded is True:
                H_stranded[i,:,:], dummy, dummy = \
                    np.histogram2d(x_stranded[i,:], y_stranded[i,:],
                                   weights=weights, bins=bins)
        # Convert grid nodes back to geographic coordinates
        if density_proj is not None:
            Y, X = np.meshgrid(y_array, x_array)
            lon_array, lat_array = density_proj(X, Y, inverse=True)
        return H, H_submerged, H_stranded, lon_array, lat_array
def get_residence_time(self, pixelsize_m):
H,H_sub, H_str,lon_array,lat_array = \
self.get_density_array(pixelsize_m)
residence = np.sum(H, axis=0)
return residence, lon_array, lat_array
    def write_netcdf_density_map(self, filename, pixelsize_m='auto'):
        '''Write netCDF file with map of particles densities.

        The file contains density_surface, density_submerged and
        density_stranded variables on a regular lon/lat grid, one field
        per output time step.  With pixelsize_m='auto' the pixel size is
        chosen from the latitude span of the simulation.
        '''
        if pixelsize_m == 'auto':
            # Coarser pixels for larger domains (latspan in degrees,
            # pixelsize in meters)
            lon, lat = self.get_lonlats()
            latspan = lat.max() - lat.min()
            pixelsize_m = 30
            if latspan > .05:
                pixelsize_m = 50
            if latspan > .1:
                pixelsize_m = 300
            if latspan > .3:
                pixelsize_m = 500
            if latspan > .7:
                pixelsize_m = 1000
            if latspan > 2:
                pixelsize_m = 2000
            if latspan > 5:
                pixelsize_m = 4000
        H, H_submerged, H_stranded, lon_array, lat_array = \
            self.get_density_array(pixelsize_m)
        # Bin edges -> bin centers
        lon_array = (lon_array[0:-1] + lon_array[1::]) / 2
        lat_array = (lat_array[0:-1] + lat_array[1::]) / 2
        from netCDF4 import Dataset, date2num
        nc = Dataset(filename, 'w')
        nc.createDimension('lon', len(lon_array))
        nc.createDimension('lat', len(lat_array))
        nc.createDimension('time', H.shape[0])
        times = self.get_time_array()[0]
        timestr = 'seconds since 1970-01-01 00:00:00'
        nc.createVariable('time', 'f8', ('time', ))
        nc.variables['time'][:] = date2num(times, timestr)
        nc.variables['time'].units = timestr
        nc.variables['time'].standard_name = 'time'
        # Projection (CF grid mapping for lon/lat data)
        nc.createVariable('projection_lonlat', 'i8')
        nc.variables['projection_lonlat'].grid_mapping_name = \
            'latitude_longitude'
        nc.variables['projection_lonlat'].earth_radius = 6371229.
        nc.variables['projection_lonlat'].proj4 = \
            '+proj=longlat +a=6371229 +no_defs'
        # Coordinates
        nc.createVariable('lon', 'f8', ('lon', ))
        nc.createVariable('lat', 'f8', ('lat', ))
        nc.variables['lon'][:] = lon_array
        nc.variables['lon'].long_name = 'longitude'
        nc.variables['lon'].short_name = 'longitude'
        nc.variables['lon'].units = 'degrees_east'
        nc.variables['lat'][:] = lat_array
        nc.variables['lat'].long_name = 'latitude'
        nc.variables['lat'].short_name = 'latitude'
        nc.variables['lat'].units = 'degrees_north'
        # Density at surface; stored as uint8 with zeros masked out
        nc.createVariable('density_surface', 'u1', ('time', 'lat', 'lon'))
        H = np.swapaxes(H, 1, 2).astype('uint8')
        H = np.ma.masked_where(H == 0, H)
        nc.variables['density_surface'][:] = H
        nc.variables['density_surface'].long_name = 'Detection probability'
        nc.variables['density_surface'].grid_mapping = 'projection_lonlat'
        nc.variables['density_surface'].units = '1'
        # Density submerged
        nc.createVariable('density_submerged', 'u1', ('time', 'lat', 'lon'))
        H_sub = np.swapaxes(H_submerged, 1, 2).astype('uint8')
        H_sub = np.ma.masked_where(H_sub == 0, H_sub)
        nc.variables['density_submerged'][:] = H_sub
        nc.variables[
            'density_submerged'].long_name = 'Detection probability submerged'
        nc.variables['density_submerged'].grid_mapping = 'projection_lonlat'
        nc.variables['density_submerged'].units = '1'
        # Density stranded
        nc.createVariable('density_stranded', 'u1', ('time', 'lat', 'lon'))
        H_stranded = np.swapaxes(H_stranded, 1, 2).astype('uint8')
        H_stranded = np.ma.masked_where(H_stranded == 0, H_stranded)
        nc.variables['density_stranded'][:] = H_stranded
        nc.variables[
            'density_stranded'].long_name = 'Detection probability stranded'
        nc.variables['density_stranded'].grid_mapping = 'projection_lonlat'
        nc.variables['density_stranded'].units = '1'
        nc.close()
    def write_netcdf_density_map_proj(self,
                                      filename,
                                      pixelsize_m='auto',
                                      density_proj=None,
                                      llcrnrlon=None,
                                      llcrnrlat=None,
                                      urcrnrlon=None,
                                      urcrnrlat=None):
        '''Write netCDF file with map of particles densities for a given projection or area'''
        #
        # TODO: should be merged with write_netcdf_density_map
        # KFD Jan 2021
        #
        if pixelsize_m == 'auto':
            # Coarser pixels for larger domains (latspan in degrees,
            # pixelsize in meters)
            lon, lat = self.get_lonlats()
            latspan = lat.max() - lat.min()
            pixelsize_m = 30
            if latspan > .05:
                pixelsize_m = 50
            if latspan > .1:
                pixelsize_m = 300
            if latspan > .3:
                pixelsize_m = 500
            if latspan > .7:
                pixelsize_m = 1000
            if latspan > 2:
                pixelsize_m = 2000
            if latspan > 5:
                pixelsize_m = 4000
        if density_proj is None:  # add default projection with equal-area property
            density_proj = pyproj.Proj('+proj=moll +ellps=WGS84 +lon_0=0.0')
        H, H_submerged, H_stranded, lon_array, lat_array = \
            self.get_density_array_proj(pixelsize_m=pixelsize_m,
                                        density_proj=density_proj,
                                        llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat,
                                        urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat)
        # calculate center coordinates
        print(lon_array.shape, lat_array.shape)
        lon_array = (lon_array[:-1, :-1] + lon_array[1:, 1:]) / 2.
        lat_array = (lat_array[:-1, :-1] + lat_array[1:, 1:]) / 2.
        from netCDF4 import Dataset, date2num
        nc = Dataset(filename, 'w')
        nc.createDimension('x', lon_array.shape[0])
        nc.createDimension('y', lon_array.shape[1])
        nc.createDimension('time', H.shape[0])
        times = self.get_time_array()[0]
        timestr = 'seconds since 1970-01-01 00:00:00'
        nc.createVariable('time', 'f8', ('time', ))
        nc.variables['time'][:] = date2num(times, timestr)
        nc.variables['time'].units = timestr
        nc.variables['time'].standard_name = 'time'
        # Projection
        nc.createVariable('projection', 'i8')
        nc.variables['projection'].proj4 = density_proj.definition_string()
        # Coordinates (2-D lon/lat of grid cell centers)
        nc.createVariable('lon', 'f8', ('y', 'x'))
        nc.createVariable('lat', 'f8', ('y', 'x'))
        nc.variables['lon'][:] = lon_array.T
        nc.variables['lon'].long_name = 'longitude'
        nc.variables['lon'].short_name = 'longitude'
        nc.variables['lon'].units = 'degrees_east'
        nc.variables['lat'][:] = lat_array.T
        nc.variables['lat'].long_name = 'latitude'
        nc.variables['lat'].short_name = 'latitude'
        nc.variables['lat'].units = 'degrees_north'
        # Density at surface; stored as uint8 with zeros masked out
        nc.createVariable('density_surface', 'u1', ('time', 'y', 'x'))
        H = np.swapaxes(H, 1, 2).astype('uint8')
        H = np.ma.masked_where(H == 0, H)
        nc.variables['density_surface'][:] = H
        nc.variables['density_surface'].long_name = 'Detection probability'
        nc.variables['density_surface'].grid_mapping = 'projection'
        nc.variables['density_surface'].units = '1'
        # Density submerged
        nc.createVariable('density_submerged', 'u1', ('time', 'y', 'x'))
        H_sub = np.swapaxes(H_submerged, 1, 2).astype('uint8')
        H_sub = np.ma.masked_where(H_sub == 0, H_sub)
        nc.variables['density_submerged'][:] = H_sub
        nc.variables[
            'density_submerged'].long_name = 'Detection probability submerged'
        nc.variables['density_submerged'].grid_mapping = 'projection'
        nc.variables['density_submerged'].units = '1'
        # Density stranded
        nc.createVariable('density_stranded', 'u1', ('time', 'y', 'x'))
        H_stranded = np.swapaxes(H_stranded, 1, 2).astype('uint8')
        H_stranded = np.ma.masked_where(H_stranded == 0, H_stranded)
        nc.variables['density_stranded'][:] = H_stranded
        nc.variables[
            'density_stranded'].long_name = 'Detection probability stranded'
        nc.variables['density_stranded'].grid_mapping = 'projection'
        nc.variables['density_stranded'].units = '1'
        nc.close()
    def write_geotiff(self, filename, pixelsize_km=.2):
        '''Write one GeoTiff image per timestep.

        filename should contain date identifiers, e.g. 'img_%Y%m%d_%H%M.tif'
        https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior

        Each image is a byte raster where pixel value = status code + 1
        (0 is the transparent no-data value), colored via a fixed
        color table.
        '''
        try:
            from osgeo import gdal, osr
        except:
            raise ValueError('GDAL is needed to write geotiff images.')
        import matplotlib.pyplot as plt
        driver = gdal.GetDriverByName('GTiff')
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(4326)  # WGS84 lon/lat
        # Color table: index 0 transparent, then black/red/green/blue
        # for status codes 0..3 (stored as value status+1)
        colortable = gdal.ColorTable()
        colortable.SetColorEntry(0, (255, 255, 255, 0))
        colortable.SetColorEntry(1, (0, 0, 0, 255))
        colortable.SetColorEntry(2, (255, 0, 0, 255))
        colortable.SetColorEntry(3, (0, 255, 0, 255))
        colortable.SetColorEntry(4, (0, 0, 255, 255))
        lon = self.get_property('lon')[0]
        lat = self.get_property('lat')[0]
        status = self.get_property('status')[0]
        times = self.get_time_array()[0]
        deltalat = pixelsize_km / 111.0  # km to degrees
        deltalon = deltalat / np.cos(np.radians((lat.min() + lat.max()) / 2))
        lat_array = np.arange(lat.min() - deltalat,
                              lat.max() + deltalat, deltalat)
        # NOTE(review): lower lon bound padded with deltalat — looks like
        # the same typo as in get_density_array (deltalon intended)
        lon_array = np.arange(lon.min() - deltalat,
                              lon.max() + deltalon, deltalon)
        # Pixel indices of each element
        ilon = (np.round((lon - lon.min()) / deltalon)).astype(int)
        ilat = (np.round((lat - lat.min()) / deltalat)).astype(int)
        # Setting masked values to zero, for use as indices
        ilon[ilon.mask] = 0
        ilat[ilat.mask] = 0
        status[ilon.mask] = 0
        image = np.zeros(
            (len(times), len(lon_array), len(lat_array))).astype(int)
        # GDAL geotransform: (x origin, x res, 0, y origin, 0, -y res)
        geotransform = [
            lon_array.min(), deltalon, 0,
            lat_array.max(), 0, -deltalat
        ]
        for i, t in enumerate(times):
            image[i, ilon[i, :], ilat[i, :]] = status[i, :] + 1
            filename_i = t.strftime(filename)  # expand date identifiers
            ds = driver.Create(
                filename_i,
                len(lon_array),
                len(lat_array),
                1,
                gdal.GDT_Byte,
            )
            ds.SetProjection(srs.ExportToWkt())
            ds.SetGeoTransform(geotransform)
            outband = ds.GetRasterBand(1)
            outband.SetNoDataValue(0)
            outband.WriteArray(np.fliplr(image[i, :, :]).transpose())
            outband.SetColorTable(colortable)
            ds = None  # closes/flushes the GDAL dataset
def get_time_array(self):
"""Return a list of output times of last run."""
# Making sure start_time is datetime, and not cftime object
self.start_time = datetime(self.start_time.year, self.start_time.month,
self.start_time.day, self.start_time.hour,
self.start_time.minute,
self.start_time.second)
td = self.time_step_output
time_array = [
self.start_time + td * i for i in range(self.steps_output)
]
time_array_relative = [td * i for i in range(self.steps_output)]
return time_array, time_array_relative
    def plot_environment(self, filename=None, ax=None, show=True):
        """Plot mean wind and current velocities of element of last run.

        Wind speed is drawn in blue on the left axis and current speed
        in red on a twin right axis, both averaged over all elements per
        time step.  If filename is given the figure is saved instead of
        shown (show controls display when no filename is given).
        """
        x_wind = self.get_property('x_wind')[0]
        y_wind = self.get_property('y_wind')[0]
        wind = np.sqrt(x_wind**2 + y_wind**2)
        x_sea_water_velocity = self.get_property('x_sea_water_velocity')[0]
        y_sea_water_velocity = self.get_property('y_sea_water_velocity')[0]
        current = np.sqrt(x_sea_water_velocity**2 + y_sea_water_velocity**2)
        # Mean over elements, per time step
        wind = np.ma.mean(wind, axis=1)
        current = np.ma.mean(current, axis=1)
        time, time_relative = self.get_time_array()
        # Hours since simulation start
        time = np.array([t.total_seconds() / 3600. for t in time_relative])
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        ax.plot(time, wind, 'b', label='Wind speed')
        ax.set_ylabel('Wind speed [m/s]', color='b')
        ax.set_xlim([0, time[-1]])
        ax.set_ylim([0, wind.max() * 1.1])
        ax2 = ax.twinx()  # second y-axis for current speed
        ax2.plot(time, current, 'r', label='Current speed')
        ax2.set_ylabel('Current speed [m/s]', color='r')
        ax2.set_xlim([0, time[-1]])
        ax2.set_ylim([0, current.max() * 1.1])
        # Color tick labels to match the corresponding curve
        for tl in ax.get_yticklabels():
            tl.set_color('b')
        for tl in ax2.get_yticklabels():
            tl.set_color('r')
        ax.set_xlabel('Time [hours]')
        ax.legend(loc='upper left')
        ax2.legend(loc='lower right')
        if filename is None:
            if show is True:
                plt.show()
        else:
            plt.savefig(filename)
    def plot_property(self, prop, filename=None, mean=False):
        """Basic function to plot time series of any element properties.

        Parameters:
            prop: name of the history property to plot (one line per
                element, unless mean is True).
            filename: if given, save the figure to this file instead of
                showing it.
            mean: if True, plot the average over all elements.
        """
        import matplotlib.pyplot as plt
        from matplotlib import dates
        hfmt = dates.DateFormatter('%d %b %Y %H:%M')
        fig = plt.figure()
        ax = fig.gca()
        ax.xaxis.set_major_formatter(hfmt)
        plt.xticks(rotation='vertical')
        start_time = self.start_time
        # In case start_time is unsupported cftime
        start_time = datetime(start_time.year, start_time.month,
                              start_time.day, start_time.hour,
                              start_time.minute, start_time.second)
        times = [
            start_time + n * self.time_step_output
            for n in range(self.steps_output)
        ]
        data = self.history[prop].T[0:len(times), :]
        if mean is True:  # Taking average over elements
            data = np.mean(data, axis=1)
        plt.plot(times, data)
        plt.title(prop)
        plt.xlabel('Time [UTC]')
        # Add units to y-label when declared for this property
        try:
            plt.ylabel('%s [%s]' %
                       (prop, self.elements.variables[prop]['units']))
        except:
            plt.ylabel(prop)
        plt.subplots_adjust(bottom=.3)
        plt.grid()
        if filename is None:
            plt.show()
        else:
            plt.savefig(filename)
def get_property(self, propname):
"""Get property from history, sorted by status."""
index_of_first, index_of_last = \
self.index_of_activation_and_deactivation()
prop = self.history[propname].copy()
status = self.history['status'].copy()
j = np.arange(status.shape[1])
# Fill arrays with last value before deactivation
for i in range(status.shape[0]):
status[i, j > index_of_last[i]] = status[i, index_of_last[i]]
prop[i, j > index_of_last[i]] = prop[i, index_of_last[i]]
return prop.T, status.T
    def get_trajectory_lengths(self):
        """Calculate lengths and speeds along trajectories.

        Returns:
            total_length: total distance traveled per element [m].
            distances: step-wise distances, shape (ntimes-1, nelements).
            speeds: step-wise speeds [m/s], same shape as distances.
        """
        lons = self.get_property('lon')[0]
        lats = self.get_property('lat')[0]
        geod = pyproj.Geod(ellps='WGS84')
        # Geodesic distance between consecutive positions of each element
        a1, a2, distances = geod.inv(lons[0:-1, :], lats[0:-1, :],
                                     lons[1::, :], lats[1::, :])
        distances[np.isnan(distances)] = 0
        speeds = distances / self.time_step_output.total_seconds()
        distances[speeds >
                  100] = 0  # TODO: need better way to mask invalid distances
        speeds[speeds > 100] = 0  # due to masked lons/lats arrays
        total_length = np.cumsum(distances, 0)[-1, :]
        return total_length, distances, speeds
def update_positions(self, x_vel, y_vel):
"""Move particles according to given velocity components.
This method shall account for projection metrics (a distance
on a map projection does not necessarily correspond to the same
distance over true ground (not yet implemented).
Arguments:
x_vel and v_vel: floats, velocities in m/s of particle along
x- and y-axes of the inherit SRS (proj4).
"""
geod = pyproj.Geod(ellps='WGS84')
azimuth = np.degrees(np.arctan2(x_vel, y_vel)) # Direction of motion
velocity = np.sqrt(x_vel**2 + y_vel**2) # Velocity in m/s
velocity = velocity * self.elements.moving # Do not move frosen elements
# Calculate new positions
self.elements.lon, self.elements.lat, back_az = geod.fwd(
self.elements.lon, self.elements.lat, azimuth,
velocity * self.time_step.total_seconds())
# Check that new positions are valid
if (self.elements.lon.min() < -180) or (
self.elements.lon.min() > 360
) or (self.elements.lat.min() < -90) or (self.elements.lat.max() > 90):
logger.info('Invalid new coordinates:')
logger.info(self.elements)
sys.exit('Quitting')
    def __repr__(self):
        """String representation providing overview of model status.

        Sections: performance (if the run has history), model name and
        element counts, environment variables per reader group, missing
        variables, lazy and discarded readers, timing, and any stored
        messages.
        """
        outStr = '===========================\n'
        if self.history is not None:
            outStr += self.performance()
            outStr += '===========================\n'
        outStr += 'Model:\t' + type(self).__name__ + \
            ' (OpenDrift version %s)\n' % opendrift.__version__
        outStr += '\t%s active %s particles (%s deactivated, %s scheduled)\n'\
            % (self.num_elements_active(), self.ElementType.__name__,
               self.num_elements_deactivated(), self.num_elements_scheduled())
        # 'missing' is also available via self.missing_variables() below
        variable_groups, reader_groups, missing = self.get_reader_groups()
        outStr += '-------------------\n'
        outStr += 'Environment variables:\n'
        for i, variableGroup in enumerate(variable_groups):
            outStr += ' -----\n'
            readerGroup = reader_groups[i]
            for variable in sorted(variableGroup):
                outStr += ' ' + variable + '\n'
            # NOTE(review): inner loop reuses index name 'i'; harmless
            # since the outer 'i' is re-bound each iteration, but fragile
            for i, reader in enumerate(readerGroup):
                outStr += ' ' + str(i + 1) + ') ' + reader + '\n'
        if len(self.missing_variables()) > 0:
            outStr += ' -----\n'
            outStr += 'Readers not added for the following variables:\n'
            for variable in sorted(self.missing_variables()):
                outStr += ' ' + variable + '\n'
        lazy_readers = [
            r for r in self.readers if self.readers[r].is_lazy is True
        ]
        if len(lazy_readers) > 0:
            outStr += '---\nLazy readers:\n'
            for lr in lazy_readers:
                outStr += ' ' + lr + '\n'
        if hasattr(self, 'discarded_readers'):
            outStr += '\nDiscarded readers:\n'
            for dr, reason in self.discarded_readers.items():
                outStr += ' %s (%s)\n' % (dr, reason)
        if hasattr(self, 'time'):
            outStr += '\nTime:\n'
            outStr += '\tStart: %s\n' % (self.start_time)
            outStr += '\tPresent: %s\n' % (self.time)
            if hasattr(self, 'time_step'):
                outStr += '\tCalculation steps: %i * %s - total time: %s\n' % (
                    self.steps_calculation, self.time_step,
                    self.time - self.start_time)
                outStr += '\tOutput steps: %i * %s\n' % (self.steps_output,
                                                         self.time_step_output)
        if hasattr(self, 'messages'):
            outStr += '-------------------\n'
            outStr += self.get_messages()
        outStr += '===========================\n'
        return outStr
def store_message(self, message):
"""Store important messages to be displayed to user at end."""
if not hasattr(self, 'messages'):
self.messages = []
self.messages.append(message)
def get_messages(self):
"""Report any messages stored during simulation."""
if hasattr(self, 'messages'):
return str(self.messages).strip('[]') + '\n'
else:
return ''
def add_halo_readers(self):
"""Adding some Thredds and file readers in prioritised order"""
self.add_readers_from_file(self.test_data_folder() +
'../../opendrift/scripts/data_sources.txt')
    def _sphinx_gallery_filename(self, stack_offset=3):
        """Return a unique .gif filename for a sphinx-gallery animation.

        The calling script's name is read from the interpreter stack, and a
        per-caller counter (kept as a class attribute) is appended so that
        several animations from the same script get distinct filenames.
        """
        # Assumes the calling script is `stack_offset` frames up the stack;
        # if called through a more deeply nested method, stack_offset has
        # to be increased accordingly.
        caller = inspect.stack()[stack_offset]
        caller = os.path.splitext(os.path.basename(caller.filename))[0]
        # Calling script is string input (e.g. from ..plot::)
        if caller == '<string>':
            caller = 'plot_directive'
            adir = os.path.realpath('../source/gallery/animations')
        else:
            adir = os.path.realpath('../docs/source/gallery/animations')
        # Class-level map of caller-name -> number of animations generated.
        if not hasattr(OpenDriftSimulation, '__anim_no__'):
            OpenDriftSimulation.__anim_no__ = {}
        if caller not in OpenDriftSimulation.__anim_no__:
            OpenDriftSimulation.__anim_no__[caller] = 0
        os.makedirs(adir, exist_ok=True)
        filename = '%s_%d.gif' % (caller,
                                  OpenDriftSimulation.__anim_no__[caller])
        OpenDriftSimulation.__anim_no__[caller] += 1
        filename = os.path.join(adir, filename)
        return filename
    def __save_animation__(self, fig, plot_timestep, filename, frames, fps,
                           blit, interval):
        """Render and save an animation of *frames* frames to *filename*.

        The writer is chosen from the file suffix: PillowWriter for .gif,
        FFMpegWriter for .mp4; for any other suffix the animation falls
        back to matplotlib's FuncAnimation with its default writer.
        """
        # When running under sphinx-gallery the filename is auto-generated.
        if filename is None or 'sphinx_gallery' in sys.modules:
            filename = self._sphinx_gallery_filename(stack_offset=4)
        logger.info('Saving animation to ' + str(filename) + '...')
        start_time = datetime.now()
        writer = None
        if str(filename)[-4:] == '.gif':
            writer = animation.PillowWriter(fps=fps)
            # writer=animation.ImageMagickWriter(fps=fps)
        elif str(filename)[-4:] == '.mp4':
            writer = animation.FFMpegWriter(
                fps=fps,
                codec='libx264',
                bitrate=1800,
                extra_args=[
                    '-profile:v',
                    'baseline',
                    '-vf',
                    'crop=trunc(iw/2)*2:trunc(ih/2)*2',  # cropping 1 pixel if not even
                    '-pix_fmt',
                    'yuv420p',
                    '-an'
                ])
        else:
            # fallback to using funcwriter
            anim = animation.FuncAnimation(fig,
                                           plot_timestep,
                                           blit=blit,
                                           frames=frames,
                                           interval=interval)
            anim.save(filename)
        if writer is not None:
            # Drive the frames manually through the selected writer.
            with writer.saving(fig, filename, None):
                for i in range(frames):
                    plot_timestep(i)
                    writer.grab_frame()
        logger.debug(f"MPLBACKEND = {matplotlib.get_backend()}")
        logger.debug(f"DISPLAY = {os.environ.get('DISPLAY', 'None')}")
        logger.debug('Time to save animation: %s' %
                     (datetime.now() - start_time))
        plt.close()
def calculate_ftle(self,
reader=None,
delta=None,
domain=None,
time=None,
time_step=None,
duration=None,
z=0,
RLCS=True,
ALCS=True):
if reader is None:
logger.info('No reader provided, using first available:')
reader = list(self.readers.items())[0][1]
logger.info(reader.name)
if isinstance(reader, pyproj.Proj):
proj = reader
elif isinstance(reader, str):
proj = pyproj.Proj(reader)
else:
proj = reader.proj
from opendrift.models.physics_methods import ftle
if not isinstance(duration, timedelta):
duration = timedelta(seconds=duration)
if domain == None:
xs = np.arange(reader.xmin, reader.xmax, delta)
ys = np.arange(reader.ymin, reader.ymax, delta)
else:
xmin, xmax, ymin, ymax = domain
xs = np.arange(xmin, xmax, delta)
ys = np.arange(ymin, ymax, delta)
X, Y = np.meshgrid(xs, ys)
lons, lats = proj(X, Y, inverse=True)
if time is None:
time = reader.start_time
if not isinstance(time, list):
time = [time]
# dictionary to hold LCS calculation
lcs = {'time': time, 'lon': lons, 'lat': lats}
lcs['RLCS'] = np.zeros((len(time), len(ys), len(xs)))
lcs['ALCS'] = np.zeros((len(time), len(ys), len(xs)))
T = np.abs(duration.total_seconds())
for i, t in enumerate(time):
logger.info('Calculating LCS for ' + str(t))
# Forwards
if RLCS is True:
self.reset()
self.seed_elements(lons.ravel(), lats.ravel(), time=t, z=z)
self.run(duration=duration, time_step=time_step)
f_x1, f_y1 = proj(self.history['lon'].T[-1].reshape(X.shape),
self.history['lat'].T[-1].reshape(X.shape))
lcs['RLCS'][i, :, :] = ftle(f_x1 - X, f_y1 - Y, delta, T)
# Backwards
if ALCS is True:
self.reset()
self.seed_elements(lons.ravel(),
lats.ravel(),
time=t + duration,
z=z)
self.run(duration=duration, time_step=-time_step)
b_x1, b_y1 = proj(self.history['lon'].T[-1].reshape(X.shape),
self.history['lat'].T[-1].reshape(X.shape))
lcs['ALCS'][i, :, :] = ftle(b_x1 - X, b_y1 - Y, delta, T)
lcs['RLCS'] = np.ma.masked_invalid(lcs['RLCS'])
lcs['ALCS'] = np.ma.masked_invalid(lcs['ALCS'])
# Flipping ALCS left-right. Not sure why this is needed
lcs['ALCS'] = lcs['ALCS'][:, ::-1, ::-1]
return lcs
def center_of_gravity(self, onlysurface=False):
"""
calculate center of mass and variance of all elements
returns (lon,lat), variance
where (lon,lat) are the coordinates of the center of mass as
function of time"""
#lon,lat = self.get_property('lon')[0], self.get_property('lat')[0]
lon, lat = self.history['lon'], self.history['lat']
x, y = self.proj_latlon(lon, lat)
if onlysurface == True:
z = self.history['z']
submerged = z < 0
x = np.ma.array(x, mask=submerged)
y = np.ma.array(y, mask=submerged)
# center of gravity:
x_m, y_m = np.ma.mean(x, axis=0), np.ma.mean(y, axis=0)
center = self.proj_latlon(x_m, y_m, inverse=True)
one = np.ones_like(x)
# variance:
variance = np.ma.mean((x - x_m * one)**2 + (y - y_m * one)**2, axis=0)
return center, variance
def reset(self):
"""Preparing OpenDrift object for new run"""
if not hasattr(self, 'start_time'):
logger.info('Nothing to reset')
return
for attr in ['start_time', 'elements']:
if hasattr(self, attr):
delattr(self, attr)
#del self.start_time
self.history = None
#del self.elements
self.elements_deactivated = self.ElementType() # Empty array
self.elements = self.ElementType() # Empty array
def gui_postproc(self):
'''To be overloaded by subclasses'''
pass
|
OpenDrift/opendrift
|
opendrift/models/basemodel.py
|
Python
|
gpl-2.0
| 238,459
|
[
"Gaussian",
"NetCDF"
] |
ac19b9de9ee942b6cf6bbe2c3fe8545657c3d6f14a2d4085542d17762469a3f9
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutron_lib import constants
from oslo_config import cfg
import testtools
from neutron.agent import firewall
from neutron.agent.linux import ip_conntrack
from neutron.agent.linux import ipset_manager
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_firewall
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.conf.agent import common as agent_config
from neutron.conf.agent import securitygroups_rpc as security_config
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
_uuid = test_base._uuid
# TODO(mangelajo): replace all 'IPv4', 'IPv6' to constants
# Fake address blocks and addresses used by the filter tests below.
FAKE_PREFIX = {'IPv4': '10.0.0.0/24',
               'IPv6': 'fe80::/48'}
FAKE_IP = {'IPv4': '10.0.0.1',
           'IPv6': 'fe80::1'}
# TODO(mangelajo): replace all '*_sgid' strings for the constants
# Security-group ids used when building fake rules.
FAKE_SGID = 'fake_sgid'
OTHER_SGID = 'other_sgid'
_IPv6 = constants.IPv6
_IPv4 = constants.IPv4
# Canned `iptables-save` output for the raw table; the `CT --zone` entries
# seed the conntrack-zone state consumed by the firewall driver under test.
RAW_TABLE_OUTPUT = """
# Generated by iptables-save v1.4.21 on Fri Jul 31 16:13:28 2015
*raw
:PREROUTING ACCEPT [11561:3470468]
:OUTPUT ACCEPT [11504:4064044]
:neutron-openvswi-OUTPUT - [0:0]
:neutron-openvswi-PREROUTING - [0:0]
-A PREROUTING -j neutron-openvswi-PREROUTING
-A OUTPUT -j neutron-openvswi-OUTPUT
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvbe804433b-61 -j CT --zone 4097
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tape804433b-61 -j CT --zone 4097
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb95c24827-02 -j CT --zone 4098
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap95c24827-02 -j CT --zone 4098
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb61634509-31 -j CT --zone 4098
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap61634509-31 -j CT --zone 4098
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb8f46cf18-12 -j CT --zone 4105
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap8f46cf18-12 -j CT --zone 4105
COMMIT
# Completed on Fri Jul 31 16:13:28 2015
""" # noqa
class BaseIptablesFirewallTestCase(base.BaseTestCase):
    """Common fixture: an IptablesFirewallDriver with a mocked manager."""

    def setUp(self):
        super(BaseIptablesFirewallTestCase, self).setUp()
        mock.patch('eventlet.spawn_n').start()
        security_config.register_securitygroups_opts()
        agent_config.register_root_helper(cfg.CONF)
        cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
        self.utils_exec_p = mock.patch(
            'neutron.agent.linux.utils.execute')
        self.utils_exec = self.utils_exec_p.start()
        self.iptables_cls_p = mock.patch(
            'neutron.agent.linux.iptables_manager.IptablesManager')
        iptables_cls = self.iptables_cls_p.start()
        self.iptables_inst = mock.Mock()
        self.v4filter_inst = mock.Mock()
        self.v6filter_inst = mock.Mock()
        # One mock per address family is shared by the 'filter' and 'raw'
        # tables so expected calls can be asserted on a single mock.
        self.iptables_inst.ipv4 = {'filter': self.v4filter_inst,
                                   'raw': self.v4filter_inst
                                   }
        self.iptables_inst.ipv6 = {'filter': self.v6filter_inst,
                                   'raw': self.v6filter_inst
                                   }
        iptables_cls.return_value = self.iptables_inst
        self.iptables_inst.get_rules_for_table.return_value = (
            RAW_TABLE_OUTPUT.splitlines())
        self.firewall = iptables_firewall.IptablesFirewallDriver()
        self.firewall.iptables = self.iptables_inst
        # don't mess with sysctl knobs in unit tests
        self.firewall._enabled_netfilter_for_bridges = True
        # initial data has 1, 2, and 9 in use, see RAW_TABLE_OUTPUT above.
        self._dev_zone_map = {'61634509-31': 4098, '8f46cf18-12': 4105,
                              '95c24827-02': 4098, 'e804433b-61': 4097}
        get_rules_for_table_func = lambda x: RAW_TABLE_OUTPUT.split('\n')
        filtered_ports = {port_id: self._fake_port()
                          for port_id in self._dev_zone_map}
        self.firewall.ipconntrack = ip_conntrack.IpConntrackManager(
            get_rules_for_table_func, filtered_ports=filtered_ports,
            unfiltered_ports=dict())

    def _fake_port(self):
        # Minimal port dict containing the fields the driver reads.
        return {'device': 'tapfake_dev',
                'mac_address': 'ff:ff:ff:ff:ff:ff',
                'network_id': 'fake_net',
                'fixed_ips': [FAKE_IP['IPv4'],
                              FAKE_IP['IPv6']]}
class IptablesFirewallTestCase(BaseIptablesFirewallTestCase):
    def test_prepare_port_filter_with_no_sg(self):
        # With no security group attached, preparing the port must program
        # only the default chains: the fallback DROP, per-device in/out
        # chains, anti-spoofing and DHCP rules -- asserted in exact order.
        port = self._fake_port()
        self.firewall.prepare_port_filter(port)
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback', '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged -j $sg-chain',
                                    top=True, comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged -j $ifake_dev',
                                    top=False, comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP',
                     top=False, comment=None),
                 mock.call.add_rule('ifake_dev',
                                    '-j $sg-fallback',
                                    top=False, comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $sg-chain',
                                    top=True, comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    top=False, comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule('INPUT',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    top=False, comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP',
                     top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state INVALID -j DROP',
                     top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-j $sg-fallback',
                     top=False, comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT')]
        self.v4filter_inst.assert_has_calls(calls)
    # IPv4 ingress translation tests: each builds one security-group rule
    # dict and asserts the exact iptables rule text it must produce.
    def test_filter_ipv4_ingress(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress'}
        ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_prefix(self):
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule(
            'ifake_dev', '-s %s -j RETURN' % prefix, top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_tcp(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': 'tcp'}
        ingress = mock.call.add_rule(
            'ifake_dev', '-p tcp -j RETURN', top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_tcp_prefix(self):
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': 'tcp',
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-s %s -p tcp -j RETURN' % prefix,
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_icmp(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': 'icmp'}
        ingress = mock.call.add_rule('ifake_dev', '-p icmp -j RETURN',
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_icmp_prefix(self):
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': 'icmp',
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule(
            'ifake_dev', '-s %s -p icmp -j RETURN' % prefix,
            top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_tcp_port(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 10}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-p tcp -m tcp --dport 10 -j RETURN',
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_tcp_port_by_num(self):
        # Numeric protocol '6' must be rendered by name (tcp).
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': '6',
                'port_range_min': 10,
                'port_range_max': 10}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-p tcp -m tcp --dport 10 -j RETURN',
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_tcp_mport(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 100}
        ingress = mock.call.add_rule(
            'ifake_dev',
            '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
            top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_tcp_mport_prefix(self):
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 100,
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule(
            'ifake_dev',
            '-s %s -p tcp -m tcp -m multiport --dports 10:100 '
            '-j RETURN' % prefix, top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_udp(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': 'udp'}
        ingress = mock.call.add_rule(
            'ifake_dev', '-p udp -j RETURN', top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_udp_prefix(self):
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': 'udp',
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-s %s -p udp -j RETURN' % prefix,
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_udp_port(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 10}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-p udp -m udp --dport 10 -j RETURN',
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_udp_mport(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 100}
        ingress = mock.call.add_rule(
            'ifake_dev',
            '-p udp -m udp -m multiport --dports 10:100 -j RETURN',
            top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_udp_mport_prefix(self):
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 100,
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule(
            'ifake_dev',
            '-s %s -p udp -m udp -m multiport --dports 10:100 '
            '-j RETURN' % prefix, top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_dccp_port(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': 'dccp',
                'port_range_min': 10,
                'port_range_max': 10}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-p dccp -m dccp --dport 10 -j RETURN',
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_sctp_port(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': 'sctp',
                'port_range_min': 10,
                'port_range_max': 10}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-p sctp -m sctp --dport 10 -j RETURN',
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_protocol_blank(self):
        # Blank protocol matches everything -> no -p flag.
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': ''}
        ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_protocol_zero(self):
        # Protocol '0' (HOPOPT/any) is treated the same as no protocol.
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': '0'}
        ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_protocol_encap(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': 'encap'}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-p encap -j RETURN',
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_protocol_encap_by_num(self):
        # Numeric protocol '98' must be rendered by name (encap).
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': '98'}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-p encap -j RETURN',
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_ingress_protocol_999_local(self):
        # There is no protocol 999, so let's return a mapping
        # that says there is and make sure the rule is created
        # using the name and not the number.
        rule = {'ethertype': 'IPv4',
                'direction': 'ingress',
                'protocol': '999'}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-p fooproto -j RETURN',
                                     top=False, comment=None)
        egress = None
        with mock.patch.object(self.firewall,
                               '_local_protocol_name_map') as lpnm:
            lpnm.return_value = {'999': 'fooproto'}
            self._test_prepare_port_filter(rule, ingress, egress)
    # IPv4 egress translation tests: mirror of the ingress set, using the
    # outbound chain ('ofake_dev') and -d/--dports matching.
    def test_filter_ipv4_egress(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'egress'}
        egress = mock.call.add_rule('ofake_dev', '-j RETURN',
                                    top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_dest_prefix(self):
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev', '-d %s -j RETURN' % prefix, top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_source_prefix(self):
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'source_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev', '-s %s -j RETURN' % prefix, top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_tcp(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'tcp'}
        egress = mock.call.add_rule(
            'ofake_dev', '-p tcp -j RETURN', top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_tcp_prefix(self):
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'tcp',
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule('ofake_dev',
                                    '-d %s -p tcp -j RETURN' % prefix,
                                    top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_icmp(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'icmp'}
        egress = mock.call.add_rule('ofake_dev', '-p icmp -j RETURN',
                                    top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_icmp_prefix(self):
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'icmp',
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev', '-d %s -p icmp -j RETURN' % prefix,
            top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_icmp_type(self):
        # port_range_min on an icmp rule selects the ICMP type.
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'icmp',
                'port_range_min': 8,
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p icmp -m icmp --icmp-type 8 -j RETURN' % prefix,
            top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_icmp_type_name(self):
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'icmp',
                'port_range_min': 'echo-request',
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p icmp -m icmp --icmp-type echo-request '
            '-j RETURN' % prefix,
            top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_icmp_type_code(self):
        # port_range_max on an icmp rule selects the ICMP code (type/code).
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'icmp',
                'port_range_min': 8,
                'port_range_max': 0,
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p icmp -m icmp --icmp-type 8/0 -j RETURN' % prefix,
            top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_icmp_type_code_protocol_num(self):
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': '1',
                'port_range_min': 8,
                'port_range_max': 0,
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p icmp -m icmp --icmp-type 8/0 -j RETURN' % prefix,
            top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_tcp_port(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 10}
        egress = mock.call.add_rule('ofake_dev',
                                    '-p tcp -m tcp --dport 10 -j RETURN',
                                    top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_tcp_mport(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 100}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
            top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_tcp_mport_prefix(self):
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 100,
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p tcp -m tcp -m multiport --dports 10:100 '
            '-j RETURN' % prefix, top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_udp(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'udp'}
        egress = mock.call.add_rule(
            'ofake_dev', '-p udp -j RETURN', top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_udp_prefix(self):
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'udp',
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule('ofake_dev',
                                    '-d %s -p udp -j RETURN' % prefix,
                                    top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_udp_port(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 10}
        egress = mock.call.add_rule('ofake_dev',
                                    '-p udp -m udp --dport 10 -j RETURN',
                                    top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_udp_mport(self):
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 100}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-p udp -m udp -m multiport --dports 10:100 -j RETURN',
            top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv4_egress_udp_mport_prefix(self):
        prefix = FAKE_PREFIX['IPv4']
        rule = {'ethertype': 'IPv4',
                'direction': 'egress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 100,
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p udp -m udp -m multiport --dports 10:100 '
            '-j RETURN' % prefix, top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    # IPv6 ingress translation tests; note icmp maps to 'ipv6-icmp'.
    def test_filter_ipv6_ingress(self):
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress'}
        ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_ingress_prefix(self):
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule(
            'ifake_dev', '-s %s -j RETURN' % prefix, top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_ingress_tcp(self):
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'tcp'}
        ingress = mock.call.add_rule(
            'ifake_dev', '-p tcp -j RETURN', top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_ingress_tcp_prefix(self):
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'tcp',
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-s %s -p tcp -j RETURN' % prefix,
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_ingress_tcp_port(self):
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 10}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-p tcp -m tcp --dport 10 -j RETURN',
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_ingress_icmp(self):
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'icmp'}
        ingress = mock.call.add_rule(
            'ifake_dev', '-p ipv6-icmp -j RETURN', top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_ingress_icmp_prefix(self):
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'icmp',
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule(
            'ifake_dev', '-s %s -p ipv6-icmp -j RETURN' % prefix,
            top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_ingress_tcp_mport(self):
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 100}
        ingress = mock.call.add_rule(
            'ifake_dev',
            '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
            top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def _test_filter_ingress_tcp_min_port_0(self, ethertype):
        # Helper shared by the IPv4/IPv6 variants below: port_range_min 0
        # must still be included in the multiport range.
        rule = {'ethertype': ethertype,
                'direction': 'ingress',
                'protocol': 'tcp',
                'port_range_min': 0,
                'port_range_max': 100}
        ingress = mock.call.add_rule(
            'ifake_dev',
            '-p tcp -m tcp -m multiport --dports 0:100 -j RETURN',
            top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ingress_tcp_min_port_0_for_ipv4(self):
        self._test_filter_ingress_tcp_min_port_0('IPv4')

    def test_filter_ingress_tcp_min_port_0_for_ipv6(self):
        self._test_filter_ingress_tcp_min_port_0('IPv6')

    def test_filter_ipv6_ingress_tcp_mport_prefix(self):
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'tcp',
                'port_range_min': 10,
                'port_range_max': 100,
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule(
            'ifake_dev',
            '-s %s -p tcp -m tcp -m multiport --dports 10:100 '
            '-j RETURN' % prefix, top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_ingress_udp(self):
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'udp'}
        ingress = mock.call.add_rule(
            'ifake_dev', '-p udp -j RETURN', top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_ingress_udp_prefix(self):
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'udp',
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-s %s -p udp -j RETURN' % prefix,
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_ingress_udp_port(self):
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 10}
        ingress = mock.call.add_rule('ifake_dev',
                                     '-p udp -m udp --dport 10 -j RETURN',
                                     top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_ingress_udp_mport(self):
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 100}
        ingress = mock.call.add_rule(
            'ifake_dev',
            '-p udp -m udp -m multiport --dports 10:100 -j RETURN',
            top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_ingress_udp_mport_prefix(self):
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'ingress',
                'protocol': 'udp',
                'port_range_min': 10,
                'port_range_max': 100,
                'source_ip_prefix': prefix}
        ingress = mock.call.add_rule(
            'ifake_dev',
            '-s %s -p udp -m udp -m multiport --dports 10:100 '
            '-j RETURN' % prefix, top=False, comment=None)
        egress = None
        self._test_prepare_port_filter(rule, ingress, egress)
    # IPv6 egress translation tests; icmp again maps to 'ipv6-icmp' and
    # icmp type matching uses the icmp6 module.
    def test_filter_ipv6_egress(self):
        rule = {'ethertype': 'IPv6',
                'direction': 'egress'}
        egress = mock.call.add_rule('ofake_dev', '-j RETURN',
                                    top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_egress_prefix(self):
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev', '-d %s -j RETURN' % prefix, top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_egress_tcp(self):
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'tcp'}
        egress = mock.call.add_rule(
            'ofake_dev', '-p tcp -j RETURN', top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_egress_tcp_prefix(self):
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'tcp',
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule('ofake_dev',
                                    '-d %s -p tcp -j RETURN' % prefix,
                                    top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_egress_icmp(self):
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'icmp'}
        egress = mock.call.add_rule(
            'ofake_dev', '-p ipv6-icmp -j RETURN', top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_egress_icmp_prefix(self):
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'icmp',
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev', '-d %s -p ipv6-icmp -j RETURN' % prefix,
            top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)

    def test_filter_ipv6_egress_icmp_type(self):
        prefix = FAKE_PREFIX['IPv6']
        rule = {'ethertype': 'IPv6',
                'direction': 'egress',
                'protocol': 'icmp',
                'port_range_min': 8,
                'dest_ip_prefix': prefix}
        egress = mock.call.add_rule(
            'ofake_dev',
            '-d %s -p ipv6-icmp -m icmp6 --icmpv6-type 8 -j RETURN' % prefix,
            top=False, comment=None)
        ingress = None
        self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_name(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'port_range_min': 'echo-request',
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p ipv6-icmp -m icmp6 --icmpv6-type echo-request '
'-j RETURN' % prefix,
top=False, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_code(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'port_range_min': 8,
'port_range_max': 0,
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p ipv6-icmp -m icmp6 --icmpv6-type 8/0 -j RETURN' % prefix,
top=False, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_code_protocol_num(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': '58',
'port_range_min': 8,
'port_range_max': 0,
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p ipv6-icmp -m icmp6 --icmpv6-type 8/0 -j RETURN' % prefix,
top=False, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_code_protocol_legacy_name(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmpv6',
'port_range_min': 8,
'port_range_max': 0,
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p ipv6-icmp -m icmp6 --icmpv6-type 8/0 -j RETURN' % prefix,
top=False, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
top=False, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
top=False, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, top=False, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp'}
egress = mock.call.add_rule(
'ofake_dev', '-p udp -j RETURN', top=False, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'dest_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-d %s -p udp -j RETURN' % prefix,
top=False, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p udp -m udp --dport 10 -j RETURN',
top=False, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
top=False, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, top=False, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
    def _test_process_trusted_ports(self, configured):
        """Verify process_trusted_ports installs the FORWARD accept rules.

        When *configured* is True the port is pre-registered in
        ``trusted_ports`` and no new FORWARD rules are expected; either
        way the port must end up listed as trusted afterwards.
        """
        port = self._fake_port()
        port['id'] = 'tapfake_dev'
        # The fallback chain is always (re)created first.
        calls = [
            mock.call.add_chain('sg-fallback'),
            mock.call.add_rule('sg-fallback',
                               '-j DROP', comment=ic.UNMATCH_DROP)]
        if configured:
            self.firewall.trusted_ports.append(port['id'])
        else:
            calls.append(
                mock.call.add_rule('FORWARD',
                                   '-m physdev --physdev-out tapfake_dev '
                                   '--physdev-is-bridged -j ACCEPT',
                                   top=False, comment=ic.TRUSTED_ACCEPT))
            calls.append(
                mock.call.add_rule('FORWARD',
                                   '-m physdev --physdev-in tapfake_dev '
                                   '--physdev-is-bridged -j ACCEPT',
                                   top=False, comment=ic.TRUSTED_ACCEPT))
        self.firewall.process_trusted_ports([port['id']])
        # The same call sequence is expected on both address families.
        for filter_inst in [self.v4filter_inst, self.v6filter_inst]:
            comb = zip(calls, filter_inst.mock_calls)
            # Pairwise comparison first: gives a readable diff on failure
            # before the stricter ordered assert below.
            for (l, r) in comb:
                self.assertEqual(l, r)
            filter_inst.assert_has_calls(calls)
        self.assertIn(port['id'], self.firewall.trusted_ports)
def test_process_trusted_ports(self):
self._test_process_trusted_ports(False)
def test_process_trusted_ports_already_configured(self):
self._test_process_trusted_ports(True)
    def _test_remove_trusted_ports(self, configured):
        """Verify remove_trusted_ports deletes the FORWARD accept rules.

        Removal rules are only expected when the port was previously
        registered (*configured* True); afterwards the port must no
        longer appear in ``trusted_ports``.
        """
        port = self._fake_port()
        port['id'] = 'tapfake_dev'
        # The fallback chain is always (re)created first.
        calls = [
            mock.call.add_chain('sg-fallback'),
            mock.call.add_rule('sg-fallback',
                               '-j DROP', comment=ic.UNMATCH_DROP)]
        if configured:
            self.firewall.trusted_ports.append(port['id'])
            calls.append(
                mock.call.remove_rule('FORWARD',
                                      '-m physdev --physdev-out tapfake_dev '
                                      '--physdev-is-bridged -j ACCEPT'))
            calls.append(
                mock.call.remove_rule('FORWARD',
                                      '-m physdev --physdev-in tapfake_dev '
                                      '--physdev-is-bridged -j ACCEPT'))
        self.firewall.remove_trusted_ports([port['id']])
        # The same call sequence is expected on both address families.
        for filter_inst in [self.v4filter_inst, self.v6filter_inst]:
            comb = zip(calls, filter_inst.mock_calls)
            # Pairwise comparison first for a readable failure message.
            for (l, r) in comb:
                self.assertEqual(l, r)
            filter_inst.assert_has_calls(calls)
        self.assertNotIn(port['id'], self.firewall.trusted_ports)
def test_remove_trusted_ports(self):
self._test_remove_trusted_ports(True)
def test_process_remove_ports_not_configured(self):
self._test_remove_trusted_ports(False)
    def _test_prepare_port_filter(self,
                                  rule,
                                  ingress_expected_call=None,
                                  egress_expected_call=None):
        """Drive prepare_port_filter with one SG rule and verify the calls.

        Builds the full, ordered list of iptables calls expected for a
        port whose only security-group rule is *rule*, splicing
        *ingress_expected_call* / *egress_expected_call* in at the point
        where the user-defined rule must appear, then compares against
        the recorded calls of the matching (v4/v6) filter mock.
        """
        port = self._fake_port()
        ethertype = rule['ethertype']
        prefix = utils.ip_to_cidr(FAKE_IP[ethertype])
        filter_inst = self.v4filter_inst
        # Default (IPv4) anti-spoof exception for the DHCP discover packet.
        dhcp_rule = [mock.call.add_rule(
            'ofake_dev',
            '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
            '--sport 68 --dport 67 -j RETURN',
            top=False, comment=None)]
        if ethertype == 'IPv6':
            filter_inst = self.v6filter_inst
            # IPv6 instead allows the ND/RS/NS messages sent from the
            # unspecified address during address resolution / DAD.
            dhcp_rule = [mock.call.add_rule('ofake_dev',
                                            '-s ::/128 -d ff02::/16 '
                                            '-p ipv6-icmp -m icmp6 '
                                            '--icmpv6-type %s -j RETURN' %
                                            icmp6_type, top=False,
                                            comment=None) for icmp6_type
                         in constants.ICMPV6_ALLOWED_UNSPEC_ADDR_TYPES]
        sg = [rule]
        port['security_group_rules'] = sg
        self.firewall.prepare_port_filter(port)
        # Base chain setup and the ingress (ifake_dev) chain wiring.
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback',
                     '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged -j $sg-chain',
                                    top=True, comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged -j $ifake_dev',
                                    top=False, comment=ic.SG_TO_VM_SG)
                 ]
        if ethertype == 'IPv6':
            # ICMPv6 types that are always allowed in (RA, NA, etc.).
            for icmp6_type in firewall.ICMPV6_ALLOWED_INGRESS_TYPES:
                calls.append(
                    mock.call.add_rule('ifake_dev',
                                       '-p ipv6-icmp -m icmp6 --icmpv6-type '
                                       '%s -j RETURN' %
                                       icmp6_type, top=False, comment=None))
        calls += [
            mock.call.add_rule(
                'ifake_dev',
                '-m state --state RELATED,ESTABLISHED -j RETURN',
                top=False, comment=None
            )
        ]
        # The user-defined ingress rule appears here, between the
        # conntrack RETURN and the INVALID drop.
        if ingress_expected_call:
            calls.append(ingress_expected_call)
        calls += [mock.call.add_rule(
            'ifake_dev',
            '-m state --state INVALID -j DROP',
            top=False, comment=None),
            mock.call.add_rule('ifake_dev',
                               '-j $sg-fallback',
                               top=False, comment=None),
            mock.call.add_chain('ofake_dev'),
            mock.call.add_rule('FORWARD',
                               '-m physdev --physdev-in tapfake_dev '
                               '--physdev-is-bridged -j $sg-chain',
                               top=True, comment=ic.VM_INT_SG),
            mock.call.add_rule('sg-chain',
                               '-m physdev --physdev-in tapfake_dev '
                               '--physdev-is-bridged -j $ofake_dev',
                               top=False, comment=ic.SG_TO_VM_SG),
            mock.call.add_rule('INPUT',
                               '-m physdev --physdev-in tapfake_dev '
                               '--physdev-is-bridged -j $ofake_dev',
                               top=False, comment=ic.INPUT_TO_SG),
            mock.call.add_chain('sfake_dev'),
            mock.call.add_rule(
                'sfake_dev',
                '-s %s -m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN'
                % prefix,
                comment=ic.PAIR_ALLOW)]
        if ethertype == 'IPv6':
            # The link-local address derived from the MAC is also allowed.
            calls.append(mock.call.add_rule('sfake_dev',
                '-s fe80::fdff:ffff:feff:ffff/128 -m mac '
                '--mac-source FF:FF:FF:FF:FF:FF -j RETURN',
                comment=ic.PAIR_ALLOW))
        calls.append(mock.call.add_rule('sfake_dev', '-j DROP',
                                        comment=ic.PAIR_DROP))
        calls += dhcp_rule
        calls.append(mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                        top=False, comment=None))
        if ethertype == 'IPv4':
            # Allow DHCP client traffic out, block rogue DHCP servers.
            calls.append(mock.call.add_rule(
                'ofake_dev',
                '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                top=False, comment=None))
            calls.append(mock.call.add_rule(
                'ofake_dev',
                '-p udp -m udp --sport 67 --dport 68 -j DROP',
                top=False, comment=None))
        if ethertype == 'IPv6':
            # Block outgoing RAs from the VM, allow other ICMPv6 and
            # DHCPv6 client traffic, block rogue DHCPv6 servers.
            calls.append(mock.call.add_rule('ofake_dev',
                                            '-p ipv6-icmp -m icmp6 '
                                            '--icmpv6-type %s -j DROP' %
                                            constants.ICMPV6_TYPE_RA,
                                            top=False, comment=None))
            calls.append(mock.call.add_rule('ofake_dev',
                                            '-p ipv6-icmp -j RETURN',
                                            top=False, comment=None))
            calls.append(mock.call.add_rule('ofake_dev', '-p udp -m udp '
                                            '--sport 546 --dport 547 '
                                            '-j RETURN',
                                            top=False, comment=None))
            calls.append(mock.call.add_rule(
                'ofake_dev',
                '-p udp -m udp --sport 547 --dport 546 -j DROP',
                top=False, comment=None))
        calls += [
            mock.call.add_rule(
                'ofake_dev',
                '-m state --state RELATED,ESTABLISHED -j RETURN',
                top=False, comment=None),
        ]
        # The user-defined egress rule appears here.
        if egress_expected_call:
            calls.append(egress_expected_call)
        calls += [mock.call.add_rule(
            'ofake_dev',
            '-m state --state INVALID -j DROP',
            top=False, comment=None),
            mock.call.add_rule('ofake_dev',
                               '-j $sg-fallback',
                               top=False, comment=None),
            mock.call.add_rule('sg-chain', '-j ACCEPT')]
        # Pairwise comparison first for a readable failure message, then
        # the stricter ordered assert.
        comb = zip(calls, filter_inst.mock_calls)
        for (l, r) in comb:
            self.assertEqual(l, r)
        filter_inst.assert_has_calls(calls)
    def _test_remove_conntrack_entries(self, ethertype, protocol, direction,
                                       ct_zone):
        """Verify the conntrack flush issued after an SG rule is deleted.

        Removes the only rule of 'fake_sg_id' between deferred applies
        and asserts the resulting ``conntrack -D`` invocation — or its
        absence when no conntrack zone is mapped for the network.
        """
        port = self._fake_port()
        port['security_groups'] = 'fake_sg_id'
        self.firewall.filtered_ports[port['device']] = port
        self.firewall.updated_rule_sg_ids = set(['fake_sg_id'])
        self.firewall.sg_rules['fake_sg_id'] = [
            {'direction': direction, 'ethertype': ethertype,
             'protocol': protocol}]
        with mock.patch.dict(self.firewall.ipconntrack._device_zone_map,
                             {port['network_id']: ct_zone}):
            # Delete the rule within a defer window so the firewall
            # detects the change on apply-off.
            self.firewall.filter_defer_apply_on()
            self.firewall.sg_rules['fake_sg_id'] = []
            self.firewall.filter_defer_apply_off()
            if not ct_zone:
                # Without a zone no conntrack command may be executed.
                self.assertFalse(self.utils_exec.called)
                return
            # process conntrack updates in the queue
            while not self.firewall.ipconntrack._queue.empty():
                self.firewall.ipconntrack._process_queue()
            # Build the expected conntrack deletion command piecewise.
            cmd = ['conntrack', '-D']
            if protocol:
                cmd.extend(['-p', protocol])
            if ethertype == 'IPv4':
                cmd.extend(['-f', 'ipv4'])
                if direction == 'ingress':
                    cmd.extend(['-d', '10.0.0.1'])
                else:
                    cmd.extend(['-s', '10.0.0.1'])
            else:
                cmd.extend(['-f', 'ipv6'])
                if direction == 'ingress':
                    cmd.extend(['-d', 'fe80::1'])
                else:
                    cmd.extend(['-s', 'fe80::1'])
            cmd.extend(['-w', ct_zone])
            calls = [
                mock.call(cmd, run_as_root=True, check_exit_code=True,
                          extra_ok_codes=[1])]
            self.utils_exec.assert_has_calls(calls)
def test_remove_conntrack_entries_for_delete_rule_ipv4(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv4', pro, direction, ct_zone=10)
def test_remove_conntrack_entries_for_delete_rule_ipv4_no_ct_zone(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv4', pro, direction, ct_zone=None)
def test_remove_conntrack_entries_for_delete_rule_ipv6(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv6', pro, direction, ct_zone=10)
def test_remove_conntrack_entries_for_delete_rule_ipv6_no_ct_zone(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv6', pro, direction, ct_zone=None)
def test_remove_conntrack_entries_for_port_sec_group_change(self):
self._test_remove_conntrack_entries_for_port_sec_group_change(
ct_zone=10)
def test_remove_conntrack_entries_for_port_sec_group_change_no_ct_zone(
self):
self._test_remove_conntrack_entries_for_port_sec_group_change(
ct_zone=None)
def _get_expected_conntrack_calls(self, ips, ct_zone):
expected_calls = []
for ip_item in ips:
proto = ip_item[0]
ip = ip_item[1]
for direction in ['-d', '-s']:
cmd = ['conntrack', '-D', '-f', proto, direction, ip]
if ct_zone:
cmd.extend(['-w', ct_zone])
expected_calls.append(
mock.call(cmd, run_as_root=True, check_exit_code=True,
extra_ok_codes=[1]))
return expected_calls
    def _test_remove_conntrack_entries_for_port_sec_group_change(self,
                                                                 ct_zone):
        """Verify the conntrack flush when a port's security groups change.

        Swaps the port's SG within a defer window and asserts conntrack
        deletions for both of the port's addresses (or none when no
        conntrack zone is mapped).
        """
        port = self._fake_port()
        port['security_groups'] = ['fake_sg_id']
        self.firewall.filtered_ports[port['device']] = port
        self.firewall.updated_sg_members = set(['tapfake_dev'])
        with mock.patch.dict(self.firewall.ipconntrack._device_zone_map,
                             {port['network_id']: ct_zone}):
            self.firewall.filter_defer_apply_on()
            # Replace the port with a copy referencing a different SG.
            new_port = copy.deepcopy(port)
            new_port['security_groups'] = ['fake_sg_id2']
            self.firewall.filtered_ports[port['device']] = new_port
            self.firewall.filter_defer_apply_off()
            if not ct_zone:
                # Without a zone no conntrack command may be executed.
                self.assertFalse(self.utils_exec.called)
                return
            # process conntrack updates in the queue
            while not self.firewall.ipconntrack._queue.empty():
                self.firewall.ipconntrack._process_queue()
            calls = self._get_expected_conntrack_calls(
                [('ipv4', '10.0.0.1'), ('ipv6', 'fe80::1')], ct_zone)
            self.utils_exec.assert_has_calls(calls)
def test_remove_conntrack_entries_for_sg_member_changed_ipv4(self):
for direction in ['ingress', 'egress']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv4', direction, ct_zone=10)
def test_remove_conntrack_entries_for_sg_member_changed_ipv4_no_ct_zone(
self):
for direction in ['ingress', 'egress']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv4', direction, ct_zone=None)
def test_remove_conntrack_entries_for_sg_member_changed_ipv6(self):
for direction in ['ingress', 'egress']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv6', direction, ct_zone=10)
def test_remove_conntrack_entries_for_sg_member_changed_ipv6_no_ct_zone(
self):
for direction in ['ingress', 'egress']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv6', direction, ct_zone=None)
def _test_remove_conntrack_entries_sg_member_changed(self, ethertype,
direction, ct_zone):
port = self._fake_port()
port['security_groups'] = ['fake_sg_id']
port['security_group_source_groups'] = ['fake_sg_id2']
port['security_group_rules'] = [{'security_group_id': 'fake_sg_id',
'direction': direction,
'remote_group_id': 'fake_sg_id2',
'ethertype': ethertype}]
self.firewall.filtered_ports = {port['device']: port}
if ethertype == "IPv4":
ethertype = "ipv4"
members_add = {'IPv4': ['10.0.0.2', '10.0.0.3']}
members_after_delete = {'IPv4': ['10.0.0.3']}
else:
ethertype = "ipv6"
members_add = {'IPv6': ['fe80::2', 'fe80::3']}
members_after_delete = {'IPv6': ['fe80::3']}
with mock.patch.dict(self.firewall.ipconntrack._device_zone_map,
{port['network_id']: ct_zone}):
# add ['10.0.0.2', '10.0.0.3'] or ['fe80::2', 'fe80::3']
self.firewall.security_group_updated('sg_member', ['fake_sg_id2'])
self.firewall.update_security_group_members(
'fake_sg_id2', members_add)
# delete '10.0.0.2' or 'fe80::2'
self.firewall.security_group_updated('sg_member', ['fake_sg_id2'])
self.firewall.update_security_group_members(
'fake_sg_id2', members_after_delete)
# check conntrack deletion from '10.0.0.1' to '10.0.0.2' or
# from 'fe80::1' to 'fe80::2'
ips = {"ipv4": ['10.0.0.1', '10.0.0.2'],
"ipv6": ['fe80::1', 'fe80::2']}
calls = []
# process conntrack updates in the queue
while not self.firewall.ipconntrack._queue.empty():
self.firewall.ipconntrack._process_queue()
for direction in ['ingress', 'egress']:
direction = '-d' if direction == 'ingress' else '-s'
remote_ip_direction = '-s' if direction == '-d' else '-d'
conntrack_cmd = ['conntrack', '-D', '-f', ethertype,
direction, ips[ethertype][0]]
if not ct_zone:
continue
conntrack_cmd.extend(['-w', 10])
conntrack_cmd.extend([remote_ip_direction, ips[ethertype][1]])
calls.append(mock.call(conntrack_cmd,
run_as_root=True, check_exit_code=True,
extra_ok_codes=[1]))
self.utils_exec.assert_has_calls(calls)
def test_user_sg_rules_deduped_before_call_to_iptables_manager(self):
port = self._fake_port()
port['security_group_rules'] = [{'ethertype': 'IPv4',
'direction': 'ingress'}] * 2
self.firewall.prepare_port_filter(port)
rules = [''.join(c[1]) for c in self.v4filter_inst.add_rule.mock_calls]
self.assertEqual(len(set(rules)), len(rules))
    def test_update_delete_port_filter(self):
        """Full prepare → update → remove lifecycle of a filtered port.

        Prepares the port with an ingress rule, updates it to an egress
        rule, then removes it — also exercising update/remove of an
        unknown device — and checks the complete ordered v4 call trace.
        """
        port = self._fake_port()
        port['security_group_rules'] = [{'ethertype': 'IPv4',
                                         'direction': 'ingress'}]
        self.firewall.prepare_port_filter(port)
        port['security_group_rules'] = [{'ethertype': 'IPv4',
                                         'direction': 'egress'}]
        self.firewall.update_port_filter(port)
        # Updating/removing an unknown device must be a silent no-op.
        self.firewall.update_port_filter({'device': 'no-exist-device'})
        self.firewall.remove_port_filter(port)
        self.firewall.remove_port_filter({'device': 'no-exist-device'})
        # --- phase 1: prepare_port_filter with the ingress rule ---
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback',
                     '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     top=True, comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $ifake_dev',
                     top=False, comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule('ifake_dev', '-j RETURN',
                                    top=False, comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP',
                     top=False, comment=None),
                 mock.call.add_rule('ifake_dev',
                                    '-j $sg-fallback',
                                    top=False, comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     top=True, comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     top=False, comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'INPUT',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     top=False, comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP',
                     top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev', '-m state --state INVALID -j DROP',
                     top=False, comment=None),
                 mock.call.add_rule('ofake_dev',
                                    '-j $sg-fallback',
                                    top=False, comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT'),
                 # --- phase 2: update_port_filter rebuilds everything
                 # with the egress rule (old chains removed first) ---
                 mock.call.remove_chain('ifake_dev'),
                 mock.call.remove_chain('ofake_dev'),
                 mock.call.remove_chain('sfake_dev'),
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     top=True, comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $ifake_dev',
                     top=False, comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP',
                     top=False, comment=None),
                 mock.call.add_rule('ifake_dev',
                                    '-j $sg-fallback',
                                    top=False, comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     top=True, comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     top=False, comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'INPUT',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     top=False, comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP',
                     top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule('ofake_dev', '-j RETURN',
                                    top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state INVALID -j DROP',
                     top=False, comment=None),
                 mock.call.add_rule('ofake_dev',
                                    '-j $sg-fallback',
                                    top=False, comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT'),
                 # --- phase 3: remove_port_filter tears it all down ---
                 mock.call.remove_chain('ifake_dev'),
                 mock.call.remove_chain('ofake_dev'),
                 mock.call.remove_chain('sfake_dev'),
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain')]
        self.v4filter_inst.assert_has_calls(calls)
def test_delete_conntrack_from_delete_port(self):
self._test_delete_conntrack_from_delete_port(ct_zone=10)
def test_delete_conntrack_from_delete_port_no_ct_zone(self):
self._test_delete_conntrack_from_delete_port(ct_zone=None)
    def _test_delete_conntrack_from_delete_port(self, ct_zone):
        """Verify the conntrack flush for a port removed from filtering."""
        port = self._fake_port()
        port['security_groups'] = ['fake_sg_id']
        self.firewall.filtered_ports = {'tapfake_dev': port}
        self.firewall.devices_with_updated_sg_members['fake_sg_id2'
                                                      ] = ['tapfake_dev']
        # A second port in another SG, so member bookkeeping is non-empty.
        new_port = copy.deepcopy(port)
        new_port['security_groups'] = ['fake_sg_id2']
        new_port['device'] = ['tapfake_dev2']
        new_port['fixed_ips'] = ['10.0.0.2', 'fe80::2']
        self.firewall.sg_members['fake_sg_id2'] = {'IPv4': ['10.0.0.2'],
                                                   'IPv6': ['fe80::2']}
        # Force the zone lookup to return ct_zone (patch is intentionally
        # left active; mock.patch cleanup is handled by the test runner).
        mock.patch.object(self.firewall.ipconntrack, 'get_device_zone',
                          return_value=ct_zone).start()
        self.firewall.remove_port_filter(port)
        if not ct_zone:
            # Without a zone no conntrack command may be executed.
            self.assertFalse(self.utils_exec.called)
            return
        # process conntrack updates in the queue
        while not self.firewall.ipconntrack._queue.empty():
            self.firewall.ipconntrack._process_queue()
        calls = self._get_expected_conntrack_calls(
            [('ipv4', '10.0.0.1'), ('ipv6', 'fe80::1')], ct_zone)
        self.utils_exec.assert_has_calls(calls)
def test_remove_unknown_port(self):
port = self._fake_port()
self.firewall.remove_port_filter(port)
# checking no exception occurs
self.assertFalse(self.v4filter_inst.called)
def test_defer_apply(self):
with self.firewall.defer_apply():
pass
self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
mock.call.defer_apply_off()])
def test_filter_defer_with_exception(self):
try:
with self.firewall.defer_apply():
raise Exception("same exception")
except Exception:
pass
self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
mock.call.defer_apply_off()])
    def _mock_chain_applies(self):
        """Swap the chain-apply hooks for mocks; return the call recorder."""
        class CopyingMock(mock.MagicMock):
            """Copies arguments so mutable arguments can be asserted on.

            Copied verbatim from unittest.mock documentation.
            """
            def __call__(self, *args, **kwargs):
                args = copy.deepcopy(args)
                kwargs = copy.deepcopy(kwargs)
                return super(CopyingMock, self).__call__(*args, **kwargs)
        # Need to use CopyingMock because _{setup,remove}_chains_apply are
        # usually called with arguments that are modified between calls
        # (i.e., self.firewall.filtered_ports).
        chain_applies = CopyingMock()
        self.firewall._setup_chains_apply = chain_applies.setup
        self.firewall._remove_chains_apply = chain_applies.remove
        return chain_applies
def test_mock_chain_applies(self):
chain_applies = self._mock_chain_applies()
port_prepare = {'device': 'd1', 'mac_address': 'prepare',
'network_id': 'fake_net'}
port_update = {'device': 'd1', 'mac_address': 'update',
'network_id': 'fake_net'}
self.firewall.prepare_port_filter(port_prepare)
self.firewall.update_port_filter(port_update)
self.firewall.remove_port_filter(port_update)
chain_applies.assert_has_calls([
mock.call.setup({'d1': port_prepare}, {}),
mock.call.remove({'d1': port_prepare}, {}),
mock.call.setup({'d1': port_update}, {}),
mock.call.remove({'d1': port_update}, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_need_pre_defer_copy(self):
chain_applies = self._mock_chain_applies()
port = self._fake_port()
device2port = {port['device']: port}
self.firewall.prepare_port_filter(port)
with self.firewall.defer_apply():
self.firewall.remove_port_filter(port)
chain_applies.assert_has_calls([mock.call.setup(device2port, {}),
mock.call.remove(device2port, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_simple(self):
chain_applies = self._mock_chain_applies()
port = self._fake_port()
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port)
self.firewall.update_port_filter(port)
self.firewall.remove_port_filter(port)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_multiple_ports(self):
chain_applies = self._mock_chain_applies()
port1 = {'device': 'd1', 'mac_address': 'mac1', 'network_id': 'net1'}
port2 = {'device': 'd2', 'mac_address': 'mac2', 'network_id': 'net1'}
device2port = {'d1': port1, 'd2': port2}
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port1)
self.firewall.prepare_port_filter(port2)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup(device2port, {})])
    def test_ip_spoofing_filter_with_multiple_ips(self):
        """Each IPv4 fixed IP gets its own anti-spoof allow rule."""
        port = {'device': 'tapfake_dev',
                'mac_address': 'ff:ff:ff:ff:ff:ff',
                'network_id': 'fake_net',
                'fixed_ips': ['10.0.0.1', 'fe80::1', '10.0.0.2']}
        self.firewall.prepare_port_filter(port)
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback', '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged -j $sg-chain',
                                    top=True, comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged -j $ifake_dev',
                                    top=False, comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP',
                     top=False, comment=None),
                 mock.call.add_rule('ifake_dev',
                                    '-j $sg-fallback',
                                    top=False, comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $sg-chain',
                                    top=True, comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    top=False, comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule('INPUT',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    top=False, comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 # One allow rule per IPv4 fixed IP (the IPv6 address is
                 # handled by the v6 filter, not asserted here).
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.2/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP',
                     top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     top=False, comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state INVALID -j DROP',
                     top=False, comment=None),
                 mock.call.add_rule('ofake_dev',
                                    '-j $sg-fallback',
                                    top=False, comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT')]
        self.v4filter_inst.assert_has_calls(calls)
def test_ip_spoofing_no_fixed_ips(self):
    """A port with an empty 'fixed_ips' list still gets an anti-spoofing
    ('sfake_dev') chain, but it only pins the source MAC — no per-IP
    pairing rules are programmed.
    """
    port = {'device': 'tapfake_dev',
            'mac_address': 'ff:ff:ff:ff:ff:ff',
            'network_id': 'fake_net',
            'fixed_ips': []}
    self.firewall.prepare_port_filter(port)
    # Expected IPv4 iptables programming, in call order: fallback and
    # sg-chain setup, conntrack zone rules, the ingress ('i') chain,
    # the egress ('o') chain and its spoof-check ('s') chain.
    calls = [mock.call.add_chain('sg-fallback'),
             mock.call.add_rule(
                 'sg-fallback', '-j DROP',
                 comment=ic.UNMATCH_DROP),
             mock.call.add_chain('sg-chain'),
             mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                comment=None),
             mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                comment=None),
             mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                comment=None),
             mock.call.add_chain('ifake_dev'),
             mock.call.add_rule('FORWARD',
                                '-m physdev --physdev-out tapfake_dev '
                                '--physdev-is-bridged -j $sg-chain',
                                top=True, comment=ic.VM_INT_SG),
             mock.call.add_rule('sg-chain',
                                '-m physdev --physdev-out tapfake_dev '
                                '--physdev-is-bridged -j $ifake_dev',
                                top=False, comment=ic.SG_TO_VM_SG),
             mock.call.add_rule(
                 'ifake_dev',
                 '-m state --state RELATED,ESTABLISHED -j RETURN',
                 top=False, comment=None),
             mock.call.add_rule(
                 'ifake_dev',
                 '-m state --state INVALID -j DROP',
                 top=False, comment=None),
             mock.call.add_rule('ifake_dev', '-j $sg-fallback',
                                top=False, comment=None),
             mock.call.add_chain('ofake_dev'),
             mock.call.add_rule('FORWARD',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged -j $sg-chain',
                                top=True, comment=ic.VM_INT_SG),
             mock.call.add_rule('sg-chain',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged -j $ofake_dev',
                                top=False, comment=ic.SG_TO_VM_SG),
             mock.call.add_rule('INPUT',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged -j $ofake_dev',
                                top=False, comment=ic.INPUT_TO_SG),
             mock.call.add_chain('sfake_dev'),
             # No fixed IPs: only the MAC-based allow rule before DROP.
             mock.call.add_rule(
                 'sfake_dev',
                 '-m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN',
                 comment=ic.PAIR_ALLOW),
             mock.call.add_rule(
                 'sfake_dev', '-j DROP',
                 comment=ic.PAIR_DROP),
             # DHCP discovery is allowed out before the spoof check.
             mock.call.add_rule(
                 'ofake_dev',
                 '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                 '--sport 68 --dport 67 -j RETURN',
                 top=False, comment=None),
             mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                top=False, comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                 top=False, comment=None),
             # Block the VM from acting as a rogue DHCP server.
             mock.call.add_rule(
                 'ofake_dev',
                 '-p udp -m udp --sport 67 --dport 68 -j DROP',
                 top=False, comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-m state --state RELATED,ESTABLISHED -j RETURN',
                 top=False, comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-m state --state INVALID -j DROP',
                 top=False, comment=None),
             mock.call.add_rule('ofake_dev', '-j $sg-fallback',
                                top=False, comment=None),
             mock.call.add_rule('sg-chain', '-j ACCEPT')]
    self.v4filter_inst.assert_has_calls(calls)
class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase):
    """Tests for the iptables firewall driver with ipset support enabled.

    The driver's ``ipset`` manager is replaced by a Mock so the tests can
    assert on the exact ipset calls without touching the kernel.
    """

    def setUp(self):
        super(IptablesFirewallEnhancedIpsetTestCase, self).setUp()
        self.firewall.ipset = mock.Mock()
        # Keep real set-name generation so rule text matches production.
        self.firewall.ipset.get_name.side_effect = (
            ipset_manager.IpsetManager.get_name)
        self.firewall.ipset.set_name_exists.return_value = True
        # set_members returns (added, removed) member lists.
        self.firewall.ipset.set_members = mock.Mock(return_value=([], []))

    def _fake_port(self, sg_id=FAKE_SGID):
        # Minimal port dict with one IPv4 and one IPv6 fixed IP.
        return {'device': 'tapfake_dev',
                'mac_address': 'ff:ff:ff:ff:ff:ff',
                'network_id': 'fake_net',
                'fixed_ips': [FAKE_IP['IPv4'],
                              FAKE_IP['IPv6']],
                'security_groups': [sg_id],
                'security_group_source_groups': [sg_id]}

    def _fake_sg_rule_for_ethertype(self, ethertype, remote_group):
        # Single ingress rule referencing a remote security group.
        return {'direction': 'ingress', 'remote_group_id': remote_group,
                'ethertype': ethertype}

    def _fake_sg_rules(self, sg_id=FAKE_SGID, remote_groups=None):
        # Build {sg_id: [rules]} with one rule per (ethertype, remote sg).
        remote_groups = remote_groups or {_IPv4: [FAKE_SGID],
                                          _IPv6: [FAKE_SGID]}
        rules = []
        for ip_version, remote_group_list in remote_groups.items():
            for remote_group in remote_group_list:
                rules.append(self._fake_sg_rule_for_ethertype(ip_version,
                                                              remote_group))
        return {sg_id: rules}

    def _fake_sg_members(self, sg_ids=None):
        # copy.copy so tests mutating one sg's member dict don't leak.
        return {sg_id: copy.copy(FAKE_IP) for sg_id in (sg_ids or [FAKE_SGID])}

    def test_update_security_group_members(self):
        sg_members = {'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']}
        self.firewall.update_security_group_members('fake_sgid', sg_members)
        calls = [
            mock.call.set_members('fake_sgid', 'IPv4',
                                  ['10.0.0.1', '10.0.0.2']),
            mock.call.set_members('fake_sgid', 'IPv6',
                                  ['fe80::1'])
        ]
        self.firewall.ipset.assert_has_calls(calls, any_order=True)

    def _setup_fake_firewall_members_and_rules(self, firewall):
        firewall.sg_rules = self._fake_sg_rules()
        firewall.pre_sg_rules = self._fake_sg_rules()
        firewall.sg_members = self._fake_sg_members()
        firewall.pre_sg_members = firewall.sg_members

    def _prepare_rules_and_members_for_removal(self):
        self._setup_fake_firewall_members_and_rules(self.firewall)
        # OTHER_SGID exists in pre_sg_members but no rule references it,
        # so it is a candidate for removal.
        self.firewall.pre_sg_members[OTHER_SGID] = (
            self.firewall.pre_sg_members[FAKE_SGID])

    def test_determine_remote_sgs_to_remove(self):
        self._prepare_rules_and_members_for_removal()
        ports = [self._fake_port()]
        self.assertEqual(
            {_IPv4: set([OTHER_SGID]), _IPv6: set([OTHER_SGID])},
            self.firewall._determine_remote_sgs_to_remove(ports))

    def test_determine_remote_sgs_to_remove_ipv6_unreferenced(self):
        self._prepare_rules_and_members_for_removal()
        ports = [self._fake_port()]
        # OTHER_SGID is still referenced by an IPv4 rule, so only its
        # IPv6 side is removable.
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [OTHER_SGID, FAKE_SGID],
                           _IPv6: [FAKE_SGID]})
        self.assertEqual(
            {_IPv4: set(), _IPv6: set([OTHER_SGID])},
            self.firewall._determine_remote_sgs_to_remove(ports))

    def test_get_remote_sg_ids_by_ipversion(self):
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [FAKE_SGID], _IPv6: [OTHER_SGID]})
        ports = [self._fake_port()]
        self.assertEqual(
            {_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
            self.firewall._get_remote_sg_ids_sets_by_ipversion(ports))

    def test_get_remote_sg_ids(self):
        # Duplicate remote group ids must be collapsed into sets.
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [FAKE_SGID, FAKE_SGID, FAKE_SGID],
                           _IPv6: [OTHER_SGID, OTHER_SGID, OTHER_SGID]})
        port = self._fake_port()
        self.assertEqual(
            {_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
            self.firewall._get_remote_sg_ids(port))

    def test_determine_sg_rules_to_remove(self):
        self.firewall.pre_sg_rules = self._fake_sg_rules(sg_id=OTHER_SGID)
        ports = [self._fake_port()]
        self.assertEqual(set([OTHER_SGID]),
                         self.firewall._determine_sg_rules_to_remove(ports))

    def test_get_sg_ids_set_for_ports(self):
        sg_ids = set([FAKE_SGID, OTHER_SGID])
        ports = [self._fake_port(sg_id) for sg_id in sg_ids]
        self.assertEqual(sg_ids,
                         self.firewall._get_sg_ids_set_for_ports(ports))

    def test_remove_sg_members(self):
        self.firewall.sg_members = self._fake_sg_members([FAKE_SGID,
                                                          OTHER_SGID])
        # FAKE_SGID loses both address families -> dropped entirely;
        # OTHER_SGID loses only IPv6 -> kept.
        remote_sgs_to_remove = {_IPv4: set([FAKE_SGID]),
                                _IPv6: set([FAKE_SGID, OTHER_SGID])}
        self.firewall._remove_sg_members(remote_sgs_to_remove)
        self.assertIn(OTHER_SGID, self.firewall.sg_members)
        self.assertNotIn(FAKE_SGID, self.firewall.sg_members)

    def test_remove_unused_security_group_info_clears_unused_rules(self):
        self._setup_fake_firewall_members_and_rules(self.firewall)
        self.firewall.prepare_port_filter(self._fake_port())
        # create another SG which won't be referenced by any filtered port
        fake_sg_rules = self.firewall.sg_rules['fake_sgid']
        self.firewall.pre_sg_rules[OTHER_SGID] = fake_sg_rules
        self.firewall.sg_rules[OTHER_SGID] = fake_sg_rules
        # call the cleanup function, and check the unused sg_rules are out
        self.firewall._remove_unused_security_group_info()
        self.assertNotIn(OTHER_SGID, self.firewall.sg_rules)

    def test_remove_unused_security_group_info(self):
        self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
        self.firewall.pre_sg_members = self.firewall.sg_members
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [FAKE_SGID], _IPv6: [FAKE_SGID]})
        self.firewall.pre_sg_rules = self.firewall.sg_rules
        port = self._fake_port()
        self.firewall.filtered_ports['tapfake_dev'] = port
        self.firewall._remove_unused_security_group_info()
        self.assertNotIn(OTHER_SGID, self.firewall.sg_members)

    def test_not_remove_used_security_group_info(self):
        self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
        self.firewall.pre_sg_members = self.firewall.sg_members
        # Rules still reference OTHER_SGID, so its members must survive.
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [OTHER_SGID], _IPv6: [OTHER_SGID]})
        self.firewall.pre_sg_rules = self.firewall.sg_rules
        port = self._fake_port()
        self.firewall.filtered_ports['tapfake_dev'] = port
        self.firewall._remove_unused_security_group_info()
        self.assertIn(OTHER_SGID, self.firewall.sg_members)

    def test_remove_all_unused_info(self):
        self._setup_fake_firewall_members_and_rules(self.firewall)
        # With no filtered ports, every member and rule is unused.
        self.firewall.filtered_ports = {}
        self.firewall._remove_unused_security_group_info()
        self.assertFalse(self.firewall.sg_members)
        self.assertFalse(self.firewall.sg_rules)

    def test_single_fallback_accept_rule(self):
        # Even with multiple ports, 'sg-chain -j ACCEPT' is added once.
        p1, p2 = self._fake_port(), self._fake_port()
        self.firewall._setup_chains_apply(dict(p1=p1, p2=p2), {})
        v4_adds = self.firewall.iptables.ipv4['filter'].add_rule.mock_calls
        v6_adds = self.firewall.iptables.ipv6['filter'].add_rule.mock_calls
        sg_chain_v4_accept = [call for call in v4_adds
                              if call == mock.call('sg-chain', '-j ACCEPT')]
        sg_chain_v6_accept = [call for call in v6_adds
                              if call == mock.call('sg-chain', '-j ACCEPT')]
        self.assertEqual(1, len(sg_chain_v4_accept))
        self.assertEqual(1, len(sg_chain_v6_accept))

    def test_remove_port_filter_with_destroy_ipset_chain(self):
        """Removing the last port of an sg destroys its (now empty) ipsets."""
        self.firewall.sg_rules = self._fake_sg_rules()
        port = self._fake_port()
        self.firewall.pre_sg_members = {'fake_sgid': {
            'IPv4': [],
            'IPv6': []}}
        sg_members = {'IPv4': ['10.0.0.1'], 'IPv6': ['fe80::1']}
        self.firewall.update_security_group_members('fake_sgid', sg_members)
        self.firewall.prepare_port_filter(port)
        self.firewall.filter_defer_apply_on()
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': [],
            'IPv6': []}}
        self.firewall.pre_sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.1'],
            'IPv6': ['fe80::1']}}
        self.firewall.remove_port_filter(port)
        self.firewall.filter_defer_apply_off()
        calls = [
            mock.call.set_members('fake_sgid', 'IPv4', ['10.0.0.1']),
            mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1']),
            mock.call.get_name('fake_sgid', 'IPv4'),
            mock.call.set_name_exists('NIPv4fake_sgid'),
            mock.call.get_name('fake_sgid', 'IPv6'),
            mock.call.set_name_exists('NIPv6fake_sgid'),
            mock.call.destroy('fake_sgid', 'IPv4'),
            mock.call.destroy('fake_sgid', 'IPv6')]
        self.firewall.ipset.assert_has_calls(calls, any_order=True)

    def test_filter_defer_apply_off_with_sg_only_ipv6_rule(self):
        """Dropping the last IPv4 rule destroys only the IPv4 ipset."""
        self.firewall.sg_rules = self._fake_sg_rules()
        self.firewall.pre_sg_rules = self._fake_sg_rules()
        self.firewall.ipset_chains = {'IPv4fake_sgid': ['10.0.0.2'],
                                      'IPv6fake_sgid': ['fe80::1']}
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.2'],
            'IPv6': ['fe80::1']}}
        self.firewall.pre_sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.2'],
            'IPv6': ['fe80::1']}}
        self.firewall.sg_rules['fake_sgid'].remove(
            {'direction': 'ingress', 'remote_group_id': 'fake_sgid',
             'ethertype': 'IPv4'})
        # NOTE(review): dict.update() with no arguments is a no-op —
        # possibly vestigial; confirm against neutron history.
        self.firewall.sg_rules.update()
        self.firewall._defer_apply = True
        port = self._fake_port()
        self.firewall.filtered_ports['tapfake_dev'] = port
        self.firewall._pre_defer_filtered_ports = {}
        self.firewall._pre_defer_unfiltered_ports = {}
        self.firewall.filter_defer_apply_off()
        calls = [mock.call.destroy('fake_sgid', 'IPv4')]
        self.firewall.ipset.assert_has_calls(calls, True)

    def test_sg_rule_expansion_with_remote_ips(self):
        other_ips = ['10.0.0.2', '10.0.0.3', '10.0.0.4']
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': [FAKE_IP['IPv4']] + other_ips,
            'IPv6': [FAKE_IP['IPv6']]}}
        port = self._fake_port()
        rule = self._fake_sg_rule_for_ethertype(_IPv4, FAKE_SGID)
        rules = self.firewall._expand_sg_rule_with_remote_ips(
            rule, port, 'ingress')
        # The port's own IP must be excluded from the expansion.
        self.assertEqual(list(rules),
                         [dict(list(rule.items()) +
                               [('source_ip_prefix', '%s/32' % ip)])
                          for ip in other_ips])

    def test_build_ipv4v6_mac_ip_list(self):
        mac_oth = 'ffff-ff0f-ffff'
        mac_unix = 'FF:FF:FF:0F:FF:FF'
        ipv4 = FAKE_IP['IPv4']
        ipv6 = FAKE_IP['IPv6']
        fake_ipv4_pair = []
        fake_ipv4_pair.append((mac_unix, ipv4))
        fake_ipv6_pair = []
        fake_ipv6_pair.append((mac_unix, ipv6))
        # The EUI-64 link-local derived from the MAC is added alongside
        # the first IPv6 address.
        fake_ipv6_pair.append((mac_unix, 'fe80::fdff:ffff:fe0f:ffff'))
        mac_ipv4_pairs = []
        mac_ipv6_pairs = []
        self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv4,
                                                mac_ipv4_pairs, mac_ipv6_pairs)
        self.assertEqual(fake_ipv4_pair, mac_ipv4_pairs)
        self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv6,
                                                mac_ipv4_pairs, mac_ipv6_pairs)
        self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs)
        # ensure that LLA is not added again for another v6 addr
        ipv62 = 'fe81::1'
        self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv62,
                                                mac_ipv4_pairs, mac_ipv6_pairs)
        fake_ipv6_pair.append((mac_unix, ipv62))
        self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs)
class OVSHybridIptablesFirewallTestCase(BaseIptablesFirewallTestCase):
    """Tests for conntrack-zone allocation in the OVS hybrid driver."""

    def test__populate_initial_zone_map(self):
        # The driver must have loaded the fixture's device->zone map.
        self.assertEqual(self._dev_zone_map,
                         self.firewall.ipconntrack._device_zone_map)

    def test__generate_device_zone(self):
        # initial data has 4097, 4098, and 4105 in use.
        # we fill from top up first.
        self.assertEqual(4106,
                         self.firewall.ipconntrack._generate_device_zone('test'))
        # once it's maxed out, it scans for gaps
        self.firewall.ipconntrack._device_zone_map['someport'] = (
            ip_conntrack.MAX_CONNTRACK_ZONES)
        for i in range(4099, 4105):
            self.assertEqual(i,
                             self.firewall.ipconntrack._generate_device_zone(i))
        # 4105 and 4106 are taken so next should be 4107
        self.assertEqual(4107,
                         self.firewall.ipconntrack._generate_device_zone('p11'))
        # take out zone 4097 and make sure it's selected
        self.firewall.ipconntrack._device_zone_map.pop('e804433b-61')
        self.assertEqual(4097,
                         self.firewall.ipconntrack._generate_device_zone('p1'))
        # fill it up and then make sure an extra throws an error
        for i in range(ip_conntrack.ZONE_START,
                       ip_conntrack.MAX_CONNTRACK_ZONES):
            self.firewall.ipconntrack._device_zone_map['dev-%s' % i] = i
        with testtools.ExpectedException(n_exc.CTZoneExhaustedError):
            self.firewall.ipconntrack._find_open_zone()
        # with it full, try again, this should trigger a cleanup
        # and return 4097
        self.assertEqual(ip_conntrack.ZONE_START,
                         self.firewall.ipconntrack._generate_device_zone('p12'))
        self.assertEqual({'p12': ip_conntrack.ZONE_START},
                         self.firewall.ipconntrack._device_zone_map)

    def test_get_device_zone(self):
        dev = {'device': 'tap1234', 'network_id': '12345678901234567'}
        # initial data has 4097, 4098, and 4105 in use.
        self.assertEqual(4106, self.firewall.ipconntrack.get_device_zone(dev))
        # should have been truncated to 11 chars
        self._dev_zone_map.update({'12345678901': 4106})
        self.assertEqual(self._dev_zone_map,
                         self.firewall.ipconntrack._device_zone_map)

    def test_multiple_firewall_with_common_conntrack(self):
        # Separate driver instances must share a single conntrack manager.
        self.firewall1 = iptables_firewall.OVSHybridIptablesFirewallDriver()
        self.firewall2 = iptables_firewall.OVSHybridIptablesFirewallDriver()
        self.assertEqual(id(self.firewall1.ipconntrack),
                         id(self.firewall2.ipconntrack))
|
noironetworks/neutron
|
neutron/tests/unit/agent/linux/test_iptables_firewall.py
|
Python
|
apache-2.0
| 105,760
|
[
"FEFF"
] |
943a64f8ad62abe7664102cb46d7c8c6cd3b06b08e602e14c92c7b51fde49370
|
import glob
import logging
import os
import subprocess
from plugins import BaseAligner
from yapsy.IPlugin import IPlugin
from assembly import get_qual_encoding
logger = logging.getLogger(__name__)
class Bowtie2Aligner(BaseAligner, IPlugin):
    """Assembly-service plugin wrapping the bowtie2 read aligner."""

    def run(self):
        """
        Map READS to CONTIGS and return alignment.
        Set MERGED_PAIR to True if reads[1] is a merged
        paired end file

        Returns a dict with 'alignment' (SAM path) and
        'alignment_bam' (BAM path).
        """
        # Only the first contig file is used as the alignment reference.
        contig_file = self.data.contigfiles[0]
        reads = self.data.readfiles
        ## Index contigs
        prefix = os.path.join(self.outpath, 'bt2')
        # bowtie2-build: -f marks the reference as FASTA; index files are
        # written with the given prefix.
        cmd_args = [self.build_bin, '-f', contig_file, prefix]
        self.arast_popen(cmd_args, overrides=False)
        ### Align reads
        bamfiles = []
        for i, readset in enumerate(self.data.readsets):
            # NOTE(review): the same 'align.sam' path is reused for every
            # readset, so each iteration overwrites the previous SAM and
            # 'bamfiles' accumulates duplicate paths — confirm intended
            # behavior for multi-readset jobs.
            samfile = os.path.join(self.outpath, 'align.sam')
            reads = readset.files
            cmd_args = [self.executable, '-x', prefix, '-S', samfile,
                        '-p', self.process_threads_allowed]
            if len(reads) == 2:
                # Paired-end: forward/reverse read files.
                cmd_args += ['-1', reads[0], '-2', reads[1]]
            elif len(reads) == 1:
                # Single-end (or merged-pair) read file.
                cmd_args += ['-U', reads[0]]
            else:
                raise Exception('Bowtie plugin error')
            self.arast_popen(cmd_args, overrides=False)
            if not os.path.exists(samfile):
                raise Exception('Unable to complete alignment')
            ## Convert to BAM
            bamfile = samfile.replace('.sam', '.bam')
            cmd_args = ['samtools', 'view',
                        '-bSho', bamfile, samfile]
            self.arast_popen(cmd_args)
            bamfiles.append(bamfile)
        ### Merge samfiles if multiple
        if len(bamfiles) > 1:
            # NOTE(review): relies on the loop variable 'i' surviving past
            # the for-loop (last readset index) — fragile but valid Python.
            bamfile = os.path.join(self.outpath, '{}_{}.bam'.format(os.path.basename(contig_file), i))
            self.arast_popen(['samtools', 'merge', bamfile] + bamfiles)
            if not os.path.exists(bamfile):
                raise Exception('Unable to complete alignment')
        else:
            bamfile = bamfiles[0]
            if not os.path.exists(bamfile):
                raise Exception('Unable to complete alignment')
        ## Convert back to sam
        samfile = bamfile.replace('.bam', '.sam')
        self.arast_popen(['samtools', 'view', '-h', '-o', samfile, bamfile])
        return {'alignment': samfile,
                'alignment_bam': bamfile}
|
kbase/assembly
|
lib/assembly/plugins/bowtie2.py
|
Python
|
mit
| 2,436
|
[
"Bowtie"
] |
f7f9f633e2535bd7588b4e35184a1875c53f7af9e86658cf1b78c9d03f5a1657
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# CREATED:2015-02-15 10:06:03 by Brian McFee <brian.mcfee@nyu.edu>
"""Helpful tools for deprecation"""
import warnings
from inspect import signature, isclass, Parameter
from functools import wraps
from decorator import decorator
__all__ = ["moved", "deprecated", "deprecate_positional_args"]
def moved(*, moved_from, version, version_removed):
    """Decorator factory marking a function as moved/renamed.

    Each call of the decorated function emits a ``DeprecationWarning``
    naming the old location, the new location, and the removal version,
    then forwards to the wrapped function unchanged.
    """

    def _warn_and_forward(func, *args, **kwargs):
        # Build the message at call time so the wrapped function's
        # metadata is current.
        message = (
            "{:s}\n\tThis function was moved to '{:s}.{:s}' in "
            "librosa version {:s}."
            "\n\tThis alias will be removed in librosa version "
            "{:s}.".format(
                moved_from, func.__module__, func.__name__, version, version_removed
            )
        )
        warnings.warn(
            message,
            category=DeprecationWarning,
            stacklevel=3,  # Would be 2, but the decorator adds a level
        )
        return func(*args, **kwargs)

    return decorator(_warn_and_forward)
def deprecated(*, version, version_removed):
    """Decorator factory marking a function as deprecated.

    Each call of the decorated function emits a ``DeprecationWarning``
    before delegating to the original implementation."""

    def _warn_and_forward(func, *args, **kwargs):
        message = (
            "{:s}.{:s}\n\tDeprecated as of librosa version {:s}."
            "\n\tIt will be removed in librosa version {:s}.".format(
                func.__module__, func.__name__, version, version_removed
            )
        )
        warnings.warn(
            message,
            category=DeprecationWarning,
            stacklevel=3,  # Would be 2, but the decorator adds a level
        )
        return func(*args, **kwargs)

    return decorator(_warn_and_forward)
# Borrowed from sklearn
def deprecate_positional_args(func=None, *, version="0.10"):
    """Decorator for methods that issues warnings for positional arguments.

    Using the keyword-only argument syntax in pep 3102, arguments after the
    * will issue a warning when passed as a positional argument.

    Parameters
    ----------
    func : callable, default=None
        Function to check arguments on.
    version : callable, default="0.10"
        The version when positional arguments will result in error.
    """

    def _inner(f):
        sig = signature(f)
        # Partition the signature once, at decoration time.
        positional = [
            name
            for name, param in sig.parameters.items()
            if param.kind == Parameter.POSITIONAL_OR_KEYWORD
        ]
        keyword_only = [
            name
            for name, param in sig.parameters.items()
            if param.kind == Parameter.KEYWORD_ONLY
        ]

        @wraps(f)
        def _checked(*args, **kwargs):
            n_extra = len(args) - len(positional)
            if n_extra <= 0:
                # All positional arguments are legitimate.
                return f(*args, **kwargs)
            # Some keyword-only parameters were supplied positionally:
            # warn, then rebind every positional argument by name.
            args_msg = ", ".join(
                "{}={}".format(name, arg)
                for name, arg in zip(keyword_only[:n_extra], args[-n_extra:])
            )
            warnings.warn(
                f"Pass {args_msg} as keyword args. From version "
                f"{version} passing these as positional arguments "
                "will result in an error",
                FutureWarning,
                stacklevel=2,
            )
            kwargs.update(zip(sig.parameters, args))
            return f(**kwargs)

        return _checked

    # Support both @deprecate_positional_args and
    # @deprecate_positional_args(version=...).
    if func is not None:
        return _inner(func)
    return _inner
|
librosa/librosa
|
librosa/util/decorators.py
|
Python
|
isc
| 3,722
|
[
"Brian"
] |
49919ee294d5655309cadd1dcc7ca463aed8daf79abacdff92e33c0d96ace536
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# @author: Li Li (lili@bnl.gov) #
# created on 03/27/2015 #
# #
# Original code from Xiaojing Huang (xjhuang@bnl.gov) and Li Li #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import numpy as np
import time
from scipy.ndimage.filters import gaussian_filter
import logging
logger = logging.getLogger(__name__)
def _dist(dims):
"""
Create array with pixel value equals to the distance from array center.
Parameters
----------
dims : list or tuple
shape of array to create
Returns
-------
arr : np.ndarray
ND array whose pixels are equal to the distance from the center
of the array of shape `dims`
"""
dist_sum = []
shape = np.ones(len(dims), dtype=np.int)
for idx, d in enumerate(dims):
vec = (np.arange(d) - d // 2) ** 2
shape[idx] = -1
vec = vec.reshape(*shape)
shape[idx] = 1
dist_sum.append(vec)
return np.sqrt(np.sum(dist_sum, axis=0))
def gauss(dims, sigma):
    """
    Generate Gaussian function in 2D or 3D.

    Parameters
    ----------
    dims : list or tuple
        shape of the data
    sigma : float
        standard deviation of gaussian function

    Returns
    -------
    arr : array
        ND gaussian
    """
    radius = _dist(dims)
    # Unnormalized Gaussian profile over the distance-from-center grid.
    profile = np.exp(-0.5 * (radius / sigma) ** 2)
    # Normalize so the kernel sums to one.
    return profile / np.sum(profile)
def pi_modulus(recon_pattern,
               diffracted_pattern,
               offset_v=1e-12):
    """
    Transfer sample from real space to q space.
    Use constraint based on diffraction pattern from experiments.

    Parameters
    ----------
    recon_pattern : array
        reconstructed pattern in real space
    diffracted_pattern : array
        diffraction pattern from experiments
    offset_v : float, optional
        add small value to avoid the case of dividing something by zero

    Returns
    -------
    array :
        updated pattern in real space
    """
    norm = np.sqrt(np.size(recon_pattern))
    fourier = np.fft.fftn(recon_pattern) / norm
    measured = diffracted_pattern > 0
    # Keep the phase of the current estimate, but impose the measured
    # magnitude wherever experimental data exists; ``offset_v`` guards
    # against division by zero.
    fourier[measured] = (diffracted_pattern[measured] * fourier[measured]
                         / (np.abs(fourier[measured]) + offset_v))
    return np.fft.ifftn(fourier) * np.sqrt(np.size(diffracted_pattern))
def find_support(sample_obj,
                 sw_sigma, sw_threshold):
    """
    Update sample area based on thresholds.

    Parameters
    ----------
    sample_obj : array
        sample for reconstruction
    sw_sigma : float
        sigma for gaussian in shrinkwrap method
    sw_threshold : float
        threshold used in shrinkwrap method

    Returns
    -------
    array :
        boolean index of sample support
    """
    magnitude = np.abs(sample_obj)
    smoothed = gaussian_filter(magnitude, sw_sigma)
    # Keep every pixel whose smoothed magnitude reaches the threshold
    # fraction of the global maximum.
    return smoothed >= (sw_threshold * np.max(smoothed))
def cal_diff_error(sample_obj, diffracted_pattern):
    """
    Calculate the error in q space.

    Parameters
    ----------
    sample_obj : array
        sample data
    diffracted_pattern : array
        diffraction pattern from experiments

    Returns
    -------
    float :
        relative error in q space
    """
    # Magnitude of the normalized Fourier transform of the sample.
    scaled = np.abs(np.fft.fftn(sample_obj)) / np.sqrt(np.size(sample_obj))
    residual = np.linalg.norm(scaled - diffracted_pattern)
    return residual / np.linalg.norm(diffracted_pattern)
def generate_random_phase_field(diffracted_pattern):
    """
    Initiate random phase.

    Parameters
    ----------
    diffracted_pattern : array
        diffraction pattern from experiments

    Returns
    -------
    sample_obj : array
        sample information with phase
    """
    # Uniform random phase in [0, 2*pi) for every q-space pixel.
    random_phase = np.random.uniform(0, 2 * np.pi, diffracted_pattern.shape)
    field = diffracted_pattern * np.exp(1j * random_phase)
    # Back-transform to real space with the symmetric normalization used
    # throughout this module.
    return np.fft.ifftn(field) * np.sqrt(np.size(diffracted_pattern))
def generate_box_support(sup_radius, shape_v):
    """
    Generate support area as a box for either 2D or 3D cases.

    Parameters
    ----------
    sup_radius : float
        radius of support
    shape_v : list
        shape of diffraction pattern, which can be either 2D or 3D case.

    Returns
    -------
    sup : array
        support with a box area
    """
    sup = np.zeros(shape_v)
    # Centered window of width 2*sup_radius along every axis.
    window = tuple(slice(s // 2 - sup_radius, s // 2 + sup_radius)
                   for s in shape_v)
    sup[window] = 1
    return sup
def generate_disk_support(sup_radius, shape_v):
    """
    Generate support area as a disk for either 2D or 3D cases.

    Parameters
    ----------
    sup_radius : float
        radius of support
    shape_v : list
        shape of diffraction pattern, which can be either 2D or 3D case.

    Returns
    -------
    sup : array
        support with a disk area
    """
    # Distance of every pixel from the array center.
    radii = _dist(shape_v)
    sup = np.zeros(shape_v)
    # Mark everything strictly inside the radius as support.
    sup[radii < sup_radius] = 1
    return sup
def cdi_recon(diffracted_pattern, sample_obj, sup,
              beta=1.15, start_avg=0.8, pi_modulus_flag='Complex',
              sw_flag=True, sw_sigma=0.5, sw_threshold=0.1, sw_start=0.2,
              sw_end=0.8, sw_step=10, n_iterations=1000,
              cb_function=None, cb_step=10):
    """
    Run reconstruction with difference map algorithm.

    Parameters
    ----------
    diffracted_pattern : array
        diffraction pattern from experiments
    sample_obj : array
        initial sample with phase, complex number
    sup : array
        initial support
    beta : float, optional
        feedback parameter for difference map algorithm.
        default is 1.15.
    start_avg : float, optional
        define the point to start doing average.
        default is 0.8.
    pi_modulus_flag : {'complex', 'real'}, optional
        'complex' or 'real', defining the way to perform pi_modulus
        calculation.
        default is 'Complex'.
    sw_flag : Bool, optional
        flag to use shrinkwrap algorithm or not.
        default is True.
    sw_sigma : float, optional
        gaussian width used in sw algorithm.
        default is 0.5.
    sw_threshold : float, optional
        threshold cut in sw algorithm.
        default is 0.1.
    sw_start : float, optional
        at which point to start to do shrinkwrap.
        default is 0.2
    sw_end : float, optional
        at which point to stop shrinkwrap.
        default is 0.8
    sw_step : float, optional
        the frequency to perform sw algorithm.
        default is 10
    n_iterations : int, optional
        number of iterations to run.
        default is 1000.
    cb_function : function, optional
        This is a callback function that expects to receive these
        four objects: sample_obj, obj_error, diff_error, sup_error.
        Sample_obj is a 2D array. And obj_error, diff_error, and sup_error
        are 1D array.
    cb_step : int, optional
        define plotting frequency, i.e., if plot_step = 10, plot results
        after every 10 iterations.

    Returns
    -------
    obj_avg : array
        reconstructed sample object
    error_dict : dict
        Error information for all iterations. The dict keys include
        obj_error, diff_error and sup_error. Obj_error is a list of
        the relative error of sample object. Diff_error is calculated as
        the difference between new diffraction pattern and the original
        diffraction pattern. And sup_error stores the size of the
        sample support.

    References
    ----------
    .. [1] V. Elser, "Phase retrieval by iterated projections",
           J. Opt. Soc. Am. A, vol. 20, No. 1, 2003
    """
    diffracted_pattern = np.array(diffracted_pattern)  # diffraction data
    diffracted_pattern = np.fft.fftshift(diffracted_pattern)

    pi_modulus_flag = pi_modulus_flag.lower()
    real_operation = False
    if pi_modulus_flag == 'real':
        real_operation = True
    elif pi_modulus_flag == 'complex':
        real_operation = False
    else:
        # BUG FIX: the original message was garbled ('py_modulus_flag',
        # missing quote and spacing); report the actual parameter name.
        raise ValueError('pi_modulus_flag must be one of {"complex", '
                         '"real"}, not {!r}'.format(pi_modulus_flag))

    # Difference-map feedback coefficients (Elser 2003).
    gamma_1 = -1 / beta
    gamma_2 = 1 / beta

    # get support index
    outside_sup_index = sup != 1

    error_dict = {}
    obj_error = np.zeros(n_iterations)
    diff_error = np.zeros(n_iterations)
    sup_error = np.zeros(n_iterations)

    sup_old = np.zeros_like(diffracted_pattern)
    obj_avg = np.zeros_like(diffracted_pattern).astype(complex)
    avg_i = 0

    time_start = time.time()
    for n in range(n_iterations):
        obj_old = np.array(sample_obj)

        # First estimate: enforce the modulus constraint, then project
        # onto the support.
        obj_a = pi_modulus(sample_obj, diffracted_pattern)
        if real_operation:
            obj_a = np.abs(obj_a)
        obj_a = (1 + gamma_2) * obj_a - gamma_2 * sample_obj
        obj_a[outside_sup_index] = 0  # define support

        # Second estimate: project onto the support first, then enforce
        # the modulus constraint.
        obj_b = np.array(sample_obj)
        obj_b[outside_sup_index] = 0  # define support
        obj_b = (1 + gamma_1) * obj_b - gamma_1 * sample_obj
        obj_b = pi_modulus(obj_b, diffracted_pattern)
        if real_operation:
            obj_b = np.abs(obj_b)

        # Difference-map update.
        sample_obj += beta * (obj_a - obj_b)

        # calculate errors
        obj_error[n] = (np.linalg.norm(sample_obj - obj_old) /
                        np.linalg.norm(obj_old))
        diff_error[n] = cal_diff_error(sample_obj, diffracted_pattern)

        if sw_flag:
            if ((n >= (sw_start * n_iterations)) and
                    (n <= (sw_end * n_iterations))):
                if np.mod(n, sw_step) == 0:
                    logger.info('Refine support with shrinkwrap')
                    sup_index = find_support(sample_obj, sw_sigma,
                                             sw_threshold)
                    sup = np.zeros_like(diffracted_pattern)
                    sup[sup_index] = 1
                    outside_sup_index = sup != 1

        sup_error[n] = np.sum(sup_old)
        sup_old = np.array(sup)

        # BUG FIX: the original gate was ``n_iterations % cb_step == 0``,
        # which is constant over the loop (fires every iteration or never).
        # Gate on the current iteration ``n`` so the callback runs every
        # ``cb_step`` iterations, matching the documented behavior.
        if cb_function and n % cb_step == 0:
            cb_function(sample_obj, obj_error, diff_error, sup_error)

        if n > start_avg * n_iterations:
            obj_avg += sample_obj
            avg_i += 1

        # Lazy %-style args: formatting is skipped when INFO is disabled.
        logger.info('%d object_chi= %f, diff_chi=%f', n, obj_error[n],
                    diff_error[n])

    # Guard against ``start_avg >= 1`` leaving no averaged iterations,
    # which previously divided by zero.
    obj_avg = obj_avg / max(avg_i, 1)
    time_end = time.time()

    logger.info('%d iterations takes %f sec', n_iterations,
                time_end - time_start)

    error_dict['obj_error'] = obj_error
    error_dict['diff_error'] = diff_error
    error_dict['sup_error'] = sup_error

    return obj_avg, error_dict
|
tacaswell/scikit-xray
|
skbeam/core/cdi.py
|
Python
|
bsd-3-clause
| 13,332
|
[
"Gaussian"
] |
e4ca460e53b600bc006c1b46c123ca59a84204331cc8ee41f0725d656437f4f1
|
"""
CBMPy: fluxmodules main module
This module contains convenience methods for module related computations.
For most users this is the only relevant module.
=====================
PySCeS Constraint Based Modelling (http://cbmpy.sourceforge.net)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
Author: Arne C. Reimers
Contact email: arne.c.reimers@gmail.com
"""
# preparing for Python 3 port
from __future__ import division, print_function
from __future__ import absolute_import
#from __future__ import unicode_literals
from math import isnan
import fractions
import scipy
from scipy.sparse.linalg import spsolve
from . import matroid
from . import sparserationals
def computeModules(cmod, variable=None, tol=1e-5):
    """Computes flux modules for a given model and set of variable reactions.

    This method only computes the minimal 0-modules with constant interface
    flux.  It is a convenience wrapper around computeModulesMatroid().

    This method requires that a flux variability analysis (FVA) has been
    performed beforehand.  The set of variable reactions does not have to be
    passed explicitly; it is sufficient that the variability data is encoded
    in the model.

    Parameters:
    - cmod      a cbmpy metabolic network model
    - variable  list of reaction ids of reactions with variable flux rate.
                If this parameter is not given, the flux variability data is
                fetched directly from the model.
    - tol       gives the tolerance for checking variability.  If
                span <= tol, the reaction is assumed to have fixed flux.
                Only used if no list of variable reactions is given.

    Returns:
    A list of lists, where each inner list contains the ids of the
    reactions in one module.
    """
    # identify variable reactions if not explicitly given
    if variable is None:
        variable = []
        for r in cmod.reactions:
            assert r.getFVAdata() is not None, 'You must run FVA first'
            span = r.getFVAdata()[3]
            # NaN spans (e.g. unbounded reactions) count as variable
            if (span > tol) or isnan(span):
                variable.append(r.getId())
    # compute modules as matroids and extract the reaction-id lists
    modmatroids = computeModulesMatroid(cmod, variable)
    return [m.elems for m in modmatroids]
def computeModulesMatroid(cmod, variable):
    """Computes flux modules for a given model and set of variable reactions.

    This method only computes the minimal 0-modules with constant interface
    flux.  Only the stoichiometry data of the model is used.

    Parameters:
    - cmod      a cbmpy metabolic network model
    - variable  list of reaction ids of reactions with variable flux rate

    Returns:
    A list of matroids, where each matroid represents a module.
    """
    # Build the stoichiometric matrix in exact rational arithmetic.
    matrix = sparserationals.Matrix()
    matrix.addMetabolicNetwork(cmod)
    # Restrict the matrix to the columns of the variable reactions,
    # remembering their ids as matroid element labels.
    var = []
    varNames = []
    for ri, r in enumerate(cmod.reactions):
        if r.getId() in variable:
            var.append(ri)
            varNames.append(r.getId())
    matrix = matrix[:, var]
    # The modules are found as separators of the linear matroid of the
    # restricted stoichiometric matrix.
    # (Removed leftover debug printing of each module's elements.)
    m = matroid.fromMatrix(matrix, varNames)
    return m.findModules()
def getInterface(cmod, matroid, separation):
    """Compute the interface of the separation in the given metabolic network.

    We assume that the matroid is describing the variable reactions of cmod.

    Returns a list of dictionaries, one per interface circuit, mapping
    metabolite ids to (smallest possible integral) stoichiometric
    coefficients.

    TODO: Allow separation to also contain reactions with fixed flux rate
    and do not ignore the affine part of the interface.
    """
    circuitInterface = matroid.getInterface(separation)
    matrix = sparserationals.Matrix()
    reactions, metabolites = matrix.addMetabolicNetwork(cmod)
    interface = []
    for c in circuitInterface:
        interfaceVector = sparserationals.Matrix()
        test = sparserationals.Matrix()
        # items() instead of the Python-2-only viewitems() (py3 port prep)
        for r, v in c.items():
            rxnIdx = reactions.index(r)
            if r in separation:
                interfaceVector += matrix[:, rxnIdx] * v
            test += matrix[:, rxnIdx] * v
        # verify that it is a circuit: the full flux combination must have
        # zero net production of every metabolite
        for i in range(len(metabolites)):
            assert fractions.Fraction(test[i, 0]) == 0
        # Scale by the least common multiple of all denominators so every
        # entry becomes integral.  Use floor division: plain '/' is true
        # division here because of the __future__ import and would turn the
        # exact integer scm into a float.
        scm = 1
        for i in range(len(metabolites)):
            v = fractions.Fraction(interfaceVector[i, 0])
            if v != 0:
                scm *= v.denominator // fractions.gcd(scm, v.denominator)
        interfaceVector *= scm
        # Divide out the gcd of the numerators to get the smallest integers.
        gcd = 0
        for i in range(len(metabolites)):
            v = fractions.Fraction(interfaceVector[i, 0])
            if v != 0:
                gcd = fractions.gcd(gcd, v.numerator)
        interfaceVector /= gcd
        # Collect the non-zero entries as metabolite -> coefficient.
        interfaceMap = {}
        for i in range(len(metabolites)):
            if interfaceVector[i, 0] != 0:
                interfaceMap[metabolites[i]] = interfaceVector[i, 0]
        interface.append(interfaceMap)
    return interface
    # NOTE: an earlier scipy.sparse.linalg.spsolve-based implementation was
    # removed here; the exact rational arithmetic above supersedes it.
|
SystemsBioinformatics/cbmpy
|
cbmpy/fluxmodules/fluxmodules.py
|
Python
|
gpl-3.0
| 6,629
|
[
"PySCeS"
] |
b864e7d393c482e1a2629f3825bb3e1bad1c14d776312e72058cb36fd0f5aa4a
|
#!/usr/bin/env python
"""
Stop a given production
Usage:
dirac-prod-stop prodID
Arguments:
prodID: Production ID (mandatory)
Example:
$ dirac-prod-stop 381
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
    """Stop the production whose ID is given on the command line.

    Exits with code 0 on success and -1 if the status update fails.
    """
    Script.parseCommandLine()
    from DIRAC.ProductionSystem.Client.ProductionClient import ProductionClient

    args = Script.getPositionalArgs()
    if len(args) < 1:
        Script.showHelp(exitCode=1)

    # get arguments
    prodID = args[0]

    prodClient = ProductionClient()
    res = prodClient.setProductionStatus(prodID, 'Stopped')
    if res['OK']:
        # fixed typo in the user-facing message ("successully")
        DIRAC.gLogger.notice('Production %s successfully stopped' % prodID)
    else:
        DIRAC.gLogger.error(res['Message'])
        DIRAC.exit(-1)
    DIRAC.exit(0)


if __name__ == "__main__":
    main()
|
yujikato/DIRAC
|
src/DIRAC/ProductionSystem/scripts/dirac_prod_stop.py
|
Python
|
gpl-3.0
| 970
|
[
"DIRAC"
] |
6620d41fd686f9d672cfd1fc8ed5927553fd9b899ff8ad50cfb3fc6fb0425404
|
#***********************************************************************
# This code is part of CmplServer
#
# Copyright (C) 2013, 2014
# Mike Steglich - Technical University of Applied Sciences
# Wildau, Germany
#
# CmplServer is a project of the Technical University of
# Applied Sciences Wildau and the Institute for Operations Research
# and Business Management at the Martin Luther University
# Halle-Wittenberg.
# Please visit the project homepage <www.coliop.org>
#
# CmplServer is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# CmplServer is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
#**********************************************************************
#!/usr/bin/python
import sys
import os
#************* pyCmpl and cmplserver includes ****************
# When running inside SolverStudio (env var SOLVERSTUDIO == '1'), derive the
# CMPL binary and server paths from CMPLPATH and make them importable.
isSolverStudio = False
try:
    isSolverStudio = os.environ['SOLVERSTUDIO'] == '1'
    if isSolverStudio:
        try:
            cmplPath = os.environ['CMPLPATH']
        except:
            raise Exception("Internal error - cannot find CMPL path")
        cmplbin = cmplPath + os.sep + "bin" + os.sep + "cmpl.exe"
        os.environ.update({'CMPLBINARY': cmplbin})
        os.chdir(cmplPath)
        cmplServerPath = cmplPath + os.sep + "cmplServer"
        os.environ.update({'CMPLSERVERPATH': cmplServerPath})
        sys.path.append(cmplPath)
except:
    # Not running under SolverStudio (KeyError on SOLVERSTUDIO): use defaults.
    pass
from cmplServer import *
from pyCmpl.CmplDefs import *
#************* end pyCmpl and cmplserver includes ****************
import xmlrpclib
import socket
#*********** startServer *************
def startCmplServer(port, mode):
    # Start a CmplServer on the given port in the given mode
    # (STANDALONE_SERVER or CMPL_GRID).  port is a string or None; None is
    # passed through so CmplServer picks its own default.
    try:
        if port != None:
            mPort = int(port)
        else:
            mPort = port
        m = CmplServer(mPort, mode)
        m.startServer()
    except socket.error, e:
        print "CmplServer error: ", e
    except CmplServerException, e:
        print e.msg
    except ValueError:
        # raised by int(port) for a non-numeric port string
        print "Wrong port: " + port
    except KeyboardInterrupt:
        print "CmplServer has been stopped"
#*********** end startServer *********
#*********** startScheduler *************
def startScheduler(port):
    # Start the CmplGrid scheduler on the given port (string).
    # NOTE(review): unlike startCmplServer, a missing port (None) is not
    # handled here -- int(None) raises TypeError, which is not caught;
    # confirm callers always pass a port.
    try:
        mPort = int(port)
        m = CmplGridScheduler(mPort)
        m.startCmplGridScheduler()
    except socket.error, e:
        print "CmplGrid scheduler error: ", e
    except CmplServerException, e:
        print e.msg
    except ValueError:
        print "Wrong port: " + port
    except KeyboardInterrupt:
        print "CmplGrid scheduler has been stopped"
#*********** end startScheduler *********
#*********** isRunning ****************
def status():
    # Query a locally running CmplServer via XML-RPC and print its status.
    # NOTE(review): reads the module-level global `port` that is only
    # assigned in the command-line dispatch code at the bottom of this file;
    # calling status() before that assignment raises NameError.
    url = "http://localhost"
    if port != None:
        url += ":" + port
    else:
        url += ":8008"  # default CmplServer port
    try:
        cmplServer = xmlrpclib.ServerProxy(url)
        ret = cmplServer.status()
        print "CmplServer is running: " + str(ret)
    except:
        print "CmplServer is not running ..." + str(sys.exc_info()[1])
#*********** end isRunning ************
#*********** stopServer ***************
def stopServer():
    # Ask a locally running CmplServer (via XML-RPC) to shut itself down.
    # NOTE(review): like status(), relies on the module-level global `port`
    # set in the dispatch code below.
    url = "http://localhost"
    if port != None:
        url += ":" + port
    else:
        url += ":8008"  # default CmplServer port
    try:
        cmplServer = xmlrpclib.ServerProxy(url)
        if cmplServer.stopServer():
            print "CmplServer has been stopped"
    except:
        print "Error: " + str(sys.exc_info()[1])
#*********** end stopServer ***********
# ---- command-line dispatch: argv[1] = action, optional argv[2] = port ----
if len(sys.argv) == 3:
    port = sys.argv[2]
else:
    port = None
# NOTE(review): prints "None" when no port is given -- looks like leftover
# debug output; confirm whether it should stay.
print port

if sys.argv[1] == "-start":
    startCmplServer(port, STANDALONE_SERVER)
elif sys.argv[1] == "-startInGrid":
    startCmplServer(port, CMPL_GRID)
elif sys.argv[1] == "-startScheduler":
    startScheduler(port)
elif sys.argv[1] == "-stop":
    stopServer()
elif sys.argv[1] == "-status":
    status()
else:
    # unknown action: exit silently
    quit()
|
Mangara/ArboralExplorer
|
lib/Cmpl/cmplServer/cmplServer/cmplServerHandler.py
|
Python
|
apache-2.0
| 4,038
|
[
"VisIt"
] |
77f89f64a1d484cec59958e59031d79ac52e16938537a7e5e6b50399e86b3656
|
'''
Created on Feb 1, 2017

@author: Alexandre Day

Purpose:
    Perform density clustering on gaussian mixture
'''

from fdc import FDC
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import normalized_mutual_info_score as nmi
from fdc import plotting
import pickle
import numpy as np

n_true_center = 15

np.random.seed(0)  # reproducible synthetic data

print("------> Example with %i true cluster centers <-------" % n_true_center)

# NOTE(review): 500007 samples is unusually large for an example script --
# confirm this is not a typo for e.g. 5000.
X, y = make_blobs(500007, 2, n_true_center)  # Generating random gaussian mixture
X = StandardScaler().fit_transform(X)  # always normalize your data :)

# set eta=0.0 if you have excellent density profile fit (lots of data say)
model = FDC(eta=0.01)
model.fit(X)  # performing the clustering

# Removed a stray debug exit() here that made everything below unreachable.
print("Normalized mutual information = %.4f" % nmi(y, model.cluster_label))

plotting.set_nice_font()  # nicer plotting font !
plotting.summary_model(model, ytrue=y, show=True, savefile="result.png")
|
alexandreday/fast_density_clustering
|
example/example3.py
|
Python
|
bsd-3-clause
| 1,001
|
[
"Gaussian"
] |
b91e5024171fb7bd665b8d65f380d4c7ab6b76744e1bb96368fdcddc8d8bae98
|
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
"""K-point/spin combination-descriptors
This module contains classes for defining combinations of two indices:
* Index k for irreducible kpoints in the 1st Brillouin zone.
* Index s for spin up/down if spin-polarized (otherwise ignored).
"""
import numpy as np
from ase.units import Bohr
from ase.dft.kpoints import monkhorst_pack, get_monkhorst_pack_size_and_offset
from gpaw.symmetry import Symmetry
from gpaw.kpoint import KPoint
import gpaw.mpi as mpi
import _gpaw
def to1bz(bzk_kc, cell_cv):
    """Wrap k-points to the first Brillouin zone.

    bzk_kc: (n, 3) ndarray
        k-points in units of the reciprocal lattice vectors.
    cell_cv: (3, 3) ndarray
        Unit cell.

    Returns a new array with each k-point translated by a reciprocal
    lattice vector so that it lies in the 1. BZ; the input is untouched.
    """
    # Reciprocal cell and the k-points in Cartesian coordinates.
    B_cv = 2.0 * np.pi * np.linalg.inv(cell_cv).T
    K_kv = np.dot(bzk_kc, B_cv)

    # All 27 candidate lattice translations with components in {-1, 0, 1}.
    N_xc = np.indices((3, 3, 3)).reshape((3, 27)).T - 1
    G_xv = np.dot(N_xc, B_cv)

    wrapped_kc = bzk_kc.copy()
    for k, K_v in enumerate(K_kv):
        # Squared distances to every candidate reciprocal lattice vector.
        # When several candidates are (numerically) equally close, round the
        # distance differences to 6 decimals so the tie is broken by the
        # lowest candidate index rather than by floating-point noise.
        d_x = ((G_xv - K_v)**2).sum(1)
        best = (d_x - d_x.min()).round(6).argmin()
        wrapped_kc[k] -= N_xc[best]
    return wrapped_kc
class KPointDescriptor:
    """Descriptor-class for k-points."""
    # NOTE(review): legacy Python 2 code -- uses xrange and assumes
    # list-returning zip; confirm target interpreter before porting.

    def __init__(self, kpts, nspins=1, collinear=True, usefractrans=False):
        """Construct descriptor object for kpoint/spin combinations (ks-pair).

        Parameters
        ----------
        kpts: None, sequence of 3 ints, or (n,3)-shaped array
            Specification of the k-point grid. None=Gamma, list of
            ints=Monkhorst-Pack, ndarray=user specified.
        nspins: int
            Number of spins.
        usefractrans: bool
            Switch for the use of non-symmorphic symmetries aka: symmetries
            with fractional translations. False by default (experimental!!!)

        Attributes
        =================== =================================================
        ``N_c``             Number of k-points in the different directions.
        ``nspins``          Number of spins in total.
        ``mynspins``        Number of spins on this CPU.
        ``nibzkpts``        Number of irreducible kpoints in 1st BZ.
        ``nks``             Number of k-point/spin combinations in total.
        ``mynks``           Number of k-point/spin combinations on this CPU.
        ``gamma``           Boolean indicator for gamma point calculation.
        ``comm``            MPI-communicator for kpoint distribution.
        ``weight_k``        Weights of each k-point
        ``ibzk_kc``         Unknown
        ``sym_k``           Unknown
        ``time_reversal_k`` Unknown
        ``bz2ibz_k``        Unknown
        ``ibz2bz_k``        Unknown
        ``bz2bz_ks``        Unknown
        ``symmetry``        Object representing symmetries
        =================== =================================================
        """
        if kpts is None:
            # Gamma point only.
            self.bzk_kc = np.zeros((1, 3))
            self.N_c = np.array((1, 1, 1), dtype=int)
            self.offset_c = np.zeros(3)
        elif isinstance(kpts[0], int):
            # Three integers: Monkhorst-Pack grid.
            self.bzk_kc = monkhorst_pack(kpts)
            self.N_c = np.array(kpts, dtype=int)
            self.offset_c = np.zeros(3)
        else:
            # Explicit list of k-points; try to recognize an MP grid.
            self.bzk_kc = np.array(kpts, float)
            try:
                self.N_c, self.offset_c = \
                    get_monkhorst_pack_size_and_offset(self.bzk_kc)
            except ValueError:
                # Not a regular Monkhorst-Pack grid.
                self.N_c = None
                self.offset_c = None

        self.collinear = collinear
        self.nspins = nspins
        self.nbzkpts = len(self.bzk_kc)

        # Gamma-point calculation?
        self.usefractrans = usefractrans
        self.gamma = (self.nbzkpts == 1 and np.allclose(self.bzk_kc[0], 0.0))
        # Start with trivial symmetry and a serial communicator; the real
        # ones are set later via set_symmetry()/set_communicator().
        self.set_symmetry(None, None, usesymm=None)
        self.set_communicator(mpi.serial_comm)

        # Human-readable description of the k-point sampling.
        if self.gamma:
            self.description = '1 k-point (Gamma)'
        else:
            self.description = '%d k-points' % self.nbzkpts

        if self.N_c is not None:
            self.description += (': %d x %d x %d Monkhorst-Pack grid' %
                                 tuple(self.N_c))
            if self.offset_c.any():
                self.description += ' + ['
                for x in self.offset_c:
                    # Print nice fractions like 1/2 when possible.
                    if x != 0 and abs(round(1 / x) - 1 / x) < 1e-12:
                        self.description += '1/%d,' % round(1 / x)
                    else:
                        self.description += '%f,' % x
                self.description = self.description[:-1] + ']'

    def __len__(self):
        """Return number of k-point/spin combinations of local CPU."""
        return self.mynks

    def set_symmetry(self, atoms, setups, magmom_av=None,
                     usesymm=False, N_c=None, comm=None):
        """Create symmetry object and construct irreducible Brillouin zone.

        atoms: Atoms object
            Defines atom positions and types and also unit cell and
            boundary conditions.
        setups: instance of class Setups
            PAW setups for the atoms.
        magmom_av: ndarray
            Initial magnetic moments.
        usesymm: bool
            Symmetry flag.
        N_c: three int's or None
            If not None: Check also symmetry of grid.
        """
        if atoms is not None:
            # Non-periodic directions may only carry k = 0.
            for c, periodic in enumerate(atoms.pbc):
                if not periodic and not np.allclose(self.bzk_kc[:, c], 0.0):
                    raise ValueError('K-points can only be used with PBCs!')

            self.cell_cv = atoms.cell / Bohr

            if magmom_av is None:
                magmom_av = np.zeros((len(atoms), 3))
                magmom_av[:, 2] = atoms.get_initial_magnetic_moments()
            magmom_av = magmom_av.round(decimals=3)  # round off

            # Atom ids combined with magnetic moments distinguish atoms
            # for the symmetry analysis.
            id_a = zip(setups.id_a, *magmom_av.T)

            # Construct a Symmetry instance containing the identity operation
            # only
            self.symmetry = Symmetry(id_a, atoms.cell / Bohr, atoms.pbc, fractrans=self.usefractrans)
            self.usefractrans = self.symmetry.usefractrans
        else:
            self.symmetry = None

        if self.gamma or usesymm is None:
            # Point group and time-reversal symmetry neglected
            self.weight_k = np.ones(self.nbzkpts) / self.nbzkpts
            self.ibzk_kc = self.bzk_kc.copy()
            self.sym_k = np.zeros(self.nbzkpts, int)
            self.time_reversal_k = np.zeros(self.nbzkpts, bool)
            self.bz2ibz_k = np.arange(self.nbzkpts)
            self.ibz2bz_k = np.arange(self.nbzkpts)
            self.bz2bz_ks = np.arange(self.nbzkpts)[:, np.newaxis]
        else:
            if usesymm:
                # Find symmetry operations of atoms
                self.symmetry.analyze(atoms.get_scaled_positions())

                if N_c is not None:
                    if self.usefractrans:
                        ## adjust N_c to symmetries
                        # the factor (denominator) the grid must follow
                        factor = np.ones(3, float)
                        indexes = np.where(np.abs(self.symmetry.ft_sc) > 1e-3)
                        for i in range(len(indexes[0])):
                            # find smallest common denominator
                            a = factor[indexes[1][i]]
                            b = np.rint(1. / self.symmetry.ft_sc[indexes[0][i]][indexes[1][i]])
                            factor[indexes[1][i]] = a * b
                            # Euclid's algorithm for gcd(a, b).
                            while b != 0:
                                rem = a % b
                                a = b
                                b = rem
                            factor[indexes[1][i]] /= a
                        Nnew_c = np.array(np.rint(N_c / factor) * factor, int)
                        # make sure new grid is not less dense
                        Nnew_c = np.array(np.where(Nnew_c >= N_c, Nnew_c, Nnew_c + factor), int)
                        N_c = Nnew_c
                    else:
                        ## adjust symmetries to grid
                        self.symmetry.prune_symmetries_grid(N_c)

            # Reduce the BZ to its irreducible part.
            (self.ibzk_kc, self.weight_k,
             self.sym_k,
             self.time_reversal_k,
             self.bz2ibz_k,
             self.ibz2bz_k,
             self.bz2bz_ks) = self.symmetry.reduce(self.bzk_kc, comm)

        if setups is not None:
            setups.set_symmetry(self.symmetry)

        # Number of irreducible k-points and k-point/spin combinations.
        self.nibzkpts = len(self.ibzk_kc)
        if self.collinear:
            self.nks = self.nibzkpts * self.nspins
        else:
            self.nks = self.nibzkpts

        # Possibly symmetry-adjusted grid size (only changed when
        # usefractrans is active).
        return N_c

    def set_communicator(self, comm):
        """Set k-point communicator."""
        # Ranks < self.rank0 have mynks0 k-point/spin combinations and
        # ranks >= self.rank0 have mynks0+1 k-point/spin combinations.
        mynks0, x = divmod(self.nks, comm.size)
        self.rank0 = comm.size - x
        self.comm = comm

        # My number and offset of k-point/spin combinations
        self.mynks = self.get_count()
        self.ks0 = self.get_offset()

        if self.nspins == 2 and comm.size == 1:  # NCXXXXXXXX
            # Avoid duplicating k-points in local list of k-points.
            self.ibzk_qc = self.ibzk_kc.copy()
            self.weight_q = self.weight_k
        else:
            # Stack spin-up and spin-down copies, then take this rank's slice.
            self.ibzk_qc = np.vstack((self.ibzk_kc,
                                      self.ibzk_kc))[self.get_slice()]
            self.weight_q = np.hstack((self.weight_k,
                                       self.weight_k))[self.get_slice()]

    def copy(self, comm=mpi.serial_comm):
        """Create a copy with shared symmetry object."""
        kd = KPointDescriptor(self.bzk_kc, self.nspins)
        kd.weight_k = self.weight_k
        kd.ibzk_kc = self.ibzk_kc
        kd.sym_k = self.sym_k
        kd.time_reversal_k = self.time_reversal_k
        kd.bz2ibz_k = self.bz2ibz_k
        kd.ibz2bz_k = self.ibz2bz_k
        kd.bz2bz_ks = self.bz2bz_ks
        kd.symmetry = self.symmetry
        kd.nibzkpts = self.nibzkpts
        kd.nks = self.nks
        kd.set_communicator(comm)
        return kd

    def create_k_points(self, gd):
        """Return a list of KPoints."""
        sdisp_cd = gd.sdisp_cd

        kpt_u = []

        # Local ks-pair indices run from ks0 to ks0 + mynks.
        for ks in range(self.ks0, self.ks0 + self.mynks):
            s, k = divmod(ks, self.nibzkpts)
            q = (ks - self.ks0) % self.nibzkpts
            if self.collinear:
                # Factor 2 / nspins: each k-point carries both spins
                # when nspins == 1.
                weight = self.weight_k[k] * 2 / self.nspins
            else:
                weight = self.weight_k[k]
            if self.gamma:
                phase_cd = np.ones((3, 2), complex)
            else:
                # Bloch phases for the grid displacements.
                phase_cd = np.exp(2j * np.pi *
                                  sdisp_cd * self.ibzk_kc[k, :, np.newaxis])
            kpt_u.append(KPoint(weight, s, k, q, phase_cd))

        return kpt_u

    def collect(self, a_ux, broadcast=True):
        """Collect distributed data to all."""
        if self.comm.rank == 0 or broadcast:
            xshape = a_ux.shape[1:]
            a_skx = np.empty((self.nspins, self.nibzkpts) + xshape, a_ux.dtype)
            a_Ux = a_skx.reshape((-1,) + xshape)
        else:
            a_skx = None

        if self.comm.rank > 0:
            self.comm.send(a_ux, 0)
        else:
            # Rank 0 gathers every rank's contiguous chunk of ks-pairs.
            u1 = self.get_count(0)
            a_Ux[0:u1] = a_ux
            requests = []
            for rank in range(1, self.comm.size):
                u2 = u1 + self.get_count(rank)
                requests.append(self.comm.receive(a_Ux[u1:u2], rank,
                                                  block=False))
                u1 = u2
            assert u1 == len(a_Ux)
            self.comm.waitall(requests)

        if broadcast:
            self.comm.broadcast(a_Ux, 0)

        # None on ranks > 0 when broadcast is False.
        return a_skx

    def transform_wave_function(self, psit_G, k, index_G=None, phase_G=None):
        """Transform wave function from IBZ to BZ.

        k is the index of the desired k-point in the full BZ.
        """
        s = self.sym_k[k]
        time_reversal = self.time_reversal_k[k]
        op_cc = np.linalg.inv(self.symmetry.op_scc[s]).round().astype(int)

        # Identity
        if (np.abs(op_cc - np.eye(3, dtype=int)) < 1e-10).all():
            if time_reversal:
                return psit_G.conj()
            else:
                return psit_G
        # General point group symmetry
        else:
            ik = self.bz2ibz_k[k]
            kibz_c = self.ibzk_kc[ik]
            b_g = np.zeros_like(psit_G)
            kbz_c = np.dot(self.symmetry.op_scc[s], kibz_c)
            if index_G is not None:
                # Fast path: use a precomputed permutation/phase
                # (see get_transform_wavefunction_index).
                assert index_G.shape == psit_G.shape == phase_G.shape,\
                    'Shape mismatch %s vs %s vs %s' % (index_G.shape,
                                                       psit_G.shape,
                                                       phase_G.shape)
                _gpaw.symmetrize_with_index(psit_G, b_g, index_G, phase_G)
            else:
                _gpaw.symmetrize_wavefunction(psit_G, b_g, op_cc.copy(),
                                              np.ascontiguousarray(kibz_c),
                                              kbz_c)
            if time_reversal:
                return b_g.conj()
            else:
                return b_g

    def get_transform_wavefunction_index(self, nG, k):
        """Get the "wavefunction transform index".

        This is a permutation of the numbers 1, 2, .. N which
        associates k + q to some k, and where N is the total
        number of grid points as specified by nG which is a
        3D tuple.

        Returns index_G and phase_G which are one-dimensional
        arrays on the grid."""

        s = self.sym_k[k]
        op_cc = np.linalg.inv(self.symmetry.op_scc[s]).round().astype(int)

        # Identity operation: trivial index and unit phases.
        if (np.abs(op_cc - np.eye(3, dtype=int)) < 1e-10).all():
            nG0 = np.prod(nG)
            index_G = np.arange(nG0).reshape(nG)
            phase_G = np.ones(nG)
        else:
            # General point group symmetry
            ik = self.bz2ibz_k[k]
            kibz_c = self.ibzk_kc[ik]
            index_G = np.zeros(nG, dtype=int)
            phase_G = np.zeros(nG, dtype=complex)

            kbz_c = np.dot(self.symmetry.op_scc[s], kibz_c)
            _gpaw.symmetrize_return_index(index_G, phase_G, op_cc.copy(),
                                          np.ascontiguousarray(kibz_c),
                                          kbz_c)
        return index_G, phase_G

    #def find_k_plus_q(self, q_c, k_x=None):
    def find_k_plus_q(self, q_c, kpts_k=None):
        """Find the indices of k+q for all kpoints in the Brillouin zone.

        In case that k+q is outside the BZ, the k-point inside the BZ
        corresponding to k+q is given.

        Parameters
        ----------
        q_c: ndarray
            Coordinates for the q-vector in units of the reciprocal
            lattice vectors.
        kpts_k: list of ints
            Restrict search to specified k-points.
        """
        k_x = kpts_k
        if k_x is None:
            return self.find_k_plus_q(q_c, range(self.nbzkpts))

        i_x = []
        for k in k_x:
            kpt_c = self.bzk_kc[k] + q_c
            # Distance modulo a reciprocal lattice vector.
            d_kc = kpt_c - self.bzk_kc
            d_k = abs(d_kc - d_kc.round()).sum(1)
            i = d_k.argmin()
            if d_k[i] > 1e-8:
                raise RuntimeError('Could not find k+q!')
            i_x.append(i)
        return i_x

    def get_bz_q_points(self, first=False):
        """Return the q=k1-k2. q-mesh is always Gamma-centered."""
        # Shift by half a grid spacing along even-sized directions so the
        # mesh contains Gamma.
        shift_c = 0.5 * ((self.N_c + 1) % 2) / self.N_c
        bzq_qc = monkhorst_pack(self.N_c) + shift_c
        if first:
            return to1bz(bzq_qc, self.cell_cv)
        else:
            return bzq_qc

    def get_ibz_q_points(self, bzq_qc, op_scc):
        """Return ibz q points and the corresponding symmetry operations that
        work for k-mesh as well."""

        ibzq_qc_tmp = []
        ibzq_qc_tmp.append(bzq_qc[-1])
        weight_tmp = [0]

        # Locate the identity among the symmetry operations.
        for i, op_cc in enumerate(op_scc):
            if np.abs(op_cc - np.eye(3)).sum() < 1e-8:
                identity_iop = i
                break

        ibzq_q_tmp = {}
        iop_q = {}
        timerev_q = {}
        diff_qc = {}

        for i in range(len(bzq_qc) - 1, -1, -1):  # loop opposite to kpoint
            try:
                ibzk, iop, timerev, diff_c = self.find_ibzkpt(
                    op_scc, ibzq_qc_tmp, bzq_qc[i])
                # Accept the mapping only if the same operation (and
                # time-reversal flag) also maps some k-point of the k-mesh.
                find = False
                for ii, iop1 in enumerate(self.sym_k):
                    if iop1 == iop and self.time_reversal_k[ii] == timerev:
                        find = True
                        break
                if find is False:
                    raise ValueError('cant find k!')

                ibzq_q_tmp[i] = ibzk
                weight_tmp[ibzk] += 1.
                iop_q[i] = iop
                timerev_q[i] = timerev
                diff_qc[i] = diff_c
            except ValueError:
                # New irreducible q-point.
                ibzq_qc_tmp.append(bzq_qc[i])
                weight_tmp.append(1.)
                ibzq_q_tmp[i] = len(ibzq_qc_tmp) - 1
                iop_q[i] = identity_iop
                timerev_q[i] = False
                diff_qc[i] = np.zeros(3)

        # reverse the order.
        nq = len(ibzq_qc_tmp)
        ibzq_qc = np.zeros((nq, 3))
        ibzq_q = np.zeros(len(bzq_qc), dtype=int)
        for i in range(nq):
            ibzq_qc[i] = ibzq_qc_tmp[nq - i - 1]
        for i in range(len(bzq_qc)):
            ibzq_q[i] = nq - ibzq_q_tmp[i] - 1

        self.q_weights = np.array(weight_tmp[::-1]) / len(bzq_qc)
        return ibzq_qc, ibzq_q, iop_q, timerev_q, diff_qc

    def find_ibzkpt(self, symrel, ibzk_kc, bzk_c):
        """Find index in IBZ and related symmetry operations."""
        find = False
        ibzkpt = 0
        iop = 0
        timerev = False

        # sign == -1 corresponds to applying time reversal on top of the
        # point-group operation.
        for sign in (1, -1):
            for ioptmp, op in enumerate(symrel):
                for i, ibzk in enumerate(ibzk_kc):
                    diff_c = bzk_c - sign * np.dot(op, ibzk)
                    if (np.abs(diff_c - diff_c.round()) < 1e-8).all():
                        ibzkpt = i
                        iop = ioptmp
                        find = True
                        if sign == -1:
                            timerev = True
                        break
                if find == True:
                    break
            if find == True:
                break

        if find == False:
            raise ValueError('Cant find corresponding IBZ kpoint!')
        return ibzkpt, iop, timerev, diff_c.round()

    def where_is_q(self, q_c, bzq_qc):
        """Find the index of q points in BZ."""
        # Compare modulo a reciprocal lattice vector.
        d_qc = q_c - bzq_qc
        d_q = abs(d_qc - d_qc.round()).sum(1)
        q = d_q.argmin()
        if d_q[q] > 1e-8:
            raise RuntimeError('Could not find q!')
        return q

    def get_count(self, rank=None):
        """Return the number of ks-pairs which belong to a given rank."""
        if rank is None:
            rank = self.comm.rank
        assert rank in xrange(self.comm.size)
        mynks0 = self.nks // self.comm.size
        mynks = mynks0
        # Ranks >= rank0 hold one extra ks-pair (see set_communicator).
        if rank >= self.rank0:
            mynks += 1
        return mynks

    def get_offset(self, rank=None):
        """Return the offset of the first ks-pair on a given rank."""
        if rank is None:
            rank = self.comm.rank
        assert rank in xrange(self.comm.size)
        mynks0 = self.nks // self.comm.size
        ks0 = rank * mynks0
        # Account for the extra ks-pair on each preceding rank >= rank0.
        if rank >= self.rank0:
            ks0 += rank - self.rank0
        return ks0

    def get_rank_and_index(self, s, k):
        """Find rank and local index of k-point/spin combination."""
        u = self.where_is(s, k)
        rank, myu = self.who_has(u)
        return rank, myu

    def get_slice(self, rank=None):
        """Return the slice of global ks-pairs which belong to a given rank."""
        if rank is None:
            rank = self.comm.rank
        assert rank in xrange(self.comm.size)
        mynks, ks0 = self.get_count(rank), self.get_offset(rank)
        uslice = slice(ks0, ks0 + mynks)
        return uslice

    def get_indices(self, rank=None):
        """Return the global ks-pair indices which belong to a given rank."""
        uslice = self.get_slice(rank)
        return np.arange(*uslice.indices(self.nks))

    def get_ranks(self):
        """Return array of ranks as a function of global ks-pair indices."""
        ranks = np.empty(self.nks, dtype=int)
        for rank in range(self.comm.size):
            uslice = self.get_slice(rank)
            ranks[uslice] = rank
        assert (ranks >= 0).all() and (ranks < self.comm.size).all()
        return ranks

    def who_has(self, u):
        """Convert global index to rank information and local index."""
        mynks0 = self.nks // self.comm.size
        if u < mynks0 * self.rank0:
            # Within the ranks that hold mynks0 pairs each.
            rank, myu = divmod(u, mynks0)
        else:
            # Within the ranks that hold mynks0 + 1 pairs each.
            rank, myu = divmod(u - mynks0 * self.rank0, mynks0 + 1)
            rank += self.rank0
        return rank, myu

    def global_index(self, myu, rank=None):
        """Convert rank information and local index to global index."""
        if rank is None:
            rank = self.comm.rank
        assert rank in xrange(self.comm.size)
        ks0 = self.get_offset(rank)
        u = ks0 + myu
        return u

    def what_is(self, u):
        """Convert global index to corresponding kpoint/spin combination."""
        s, k = divmod(u, self.nibzkpts)
        return s, k

    def where_is(self, s, k):
        """Convert kpoint/spin combination to the global index thereof."""
        u = k + self.nibzkpts * s
        return u

    #def get_size_of_global_array(self):
    #    return (self.nspins*self.nibzkpts,)
    #
    #def ...
class KPointDescriptorOld:
    """Descriptor-class for ordered lists of kpoint/spin combinations

    TODO
    """
    # NOTE(review): legacy/deprecated predecessor of KPointDescriptor; it
    # only supports an even distribution (nks divisible by comm.size) and
    # uses the Python-2-only xrange.

    def __init__(self, nspins, nibzkpts, comm=None, gamma=True, dtype=float):
        """Construct descriptor object for kpoint/spin combinations (ks-pair).

        Parameters:

        nspins: int
            Number of spins.
        nibzkpts: int
            Number of irreducible kpoints in 1st Brillouin zone.
        comm: MPI-communicator
            Communicator for kpoint-groups.
        gamma: bool
            More to follow.
        dtype: NumPy dtype
            More to follow.

        Note that if comm.size is greater than the number of spins, then
        the kpoints cannot all be located at the gamma point and therefor
        the gamma boolean loses its significance.

        Attributes:

        ============ ======================================================
        ``nspins``   Number of spins.
        ``nibzkpts`` Number of irreducible kpoints in 1st Brillouin zone.
        ``nks``      Number of k-point/spin combinations in total.
        ``mynks``    Number of k-point/spin combinations on this CPU.
        ``gamma``    Boolean indicator for gamma point calculation.
        ``dtype``    Data type appropriate for wave functions.
        ``beg``      Beginning of ks-pair indices in group (inclusive).
        ``end``      End of ks-pair indices in group (exclusive).
        ``step``     Stride for ks-pair indices between ``beg`` and ``end``.
        ``comm``     MPI-communicator for kpoint distribution.
        ============ ======================================================
        """

        if comm is None:
            comm = mpi.serial_comm
        self.comm = comm
        self.rank = self.comm.rank

        self.nspins = nspins
        self.nibzkpts = nibzkpts
        self.nks = self.nibzkpts * self.nspins

        # XXX Check from distribute_cpus in mpi/__init__.py line 239 rev. 4187
        if self.nks % self.comm.size != 0:
            raise RuntimeError('Cannot distribute %d k-point/spin ' \
                               'combinations to %d processors' % \
                               (self.nks, self.comm.size))

        self.mynks = self.nks // self.comm.size

        # TODO Move code from PAW.initialize in paw.py lines 319-328 rev. 4187
        self.gamma = gamma
        self.dtype = dtype

        uslice = self.get_slice()
        self.beg, self.end, self.step = uslice.indices(self.nks)

    #XXX u is global kpoint index

    def __len__(self):
        # Number of ks-pairs on the local CPU.
        return self.mynks

    def get_rank_and_index(self, s, k):
        """Find rank and local index of k-point/spin combination."""
        u = self.where_is(s, k)
        rank, myu = self.who_has(u)
        return rank, myu

    def get_slice(self, rank=None):
        """Return the slice of global ks-pairs which belong to a given rank."""
        if rank is None:
            rank = self.comm.rank
        assert rank in xrange(self.comm.size)
        # Even distribution: every rank owns exactly mynks pairs.
        ks0 = rank * self.mynks
        uslice = slice(ks0, ks0 + self.mynks)
        return uslice

    def get_indices(self, rank=None):
        """Return the global ks-pair indices which belong to a given rank."""
        uslice = self.get_slice(rank)
        return np.arange(*uslice.indices(self.nks))

    def get_ranks(self):
        """Return array of ranks as a function of global ks-pair indices."""
        ranks = np.empty(self.nks, dtype=int)
        for rank in range(self.comm.size):
            uslice = self.get_slice(rank)
            ranks[uslice] = rank
        assert (ranks >= 0).all() and (ranks < self.comm.size).all()
        return ranks

    def who_has(self, u):
        """Convert global index to rank information and local index."""
        rank, myu = divmod(u, self.mynks)
        return rank, myu

    def global_index(self, myu, rank=None):
        """Convert rank information and local index to global index."""
        if rank is None:
            rank = self.comm.rank
        u = rank * self.mynks + myu
        return u

    def what_is(self, u):
        """Convert global index to corresponding kpoint/spin combination."""
        s, k = divmod(u, self.nibzkpts)
        return s, k

    def where_is(self, s, k):
        """Convert kpoint/spin combination to the global index thereof."""
        u = k + self.nibzkpts * s
        return u

    #def get_size_of_global_array(self):
    #    return (self.nspins*self.nibzkpts,)
    #
    #def ...
|
robwarm/gpaw-symm
|
gpaw/kpt_descriptor.py
|
Python
|
gpl-3.0
| 27,162
|
[
"ASE",
"GPAW"
] |
5e961c4396f4f4370b6b1d3f0bbe2749d4149c7904d8b6af8f86c01f7634d254
|
"""
The dao for pymatgen.
"""
print(
"""
The 道 of Pymatgen
1. Great code enables great materials science.
2. Comprehensive tests ensure robustness.
3. Clear documentation leads to more usage.
4. More usage improves code quality (and increases citations).
5. Even complex scientific ideas can be broken down into simple interfaces.
6. Though deep (Hulk-level) understanding is often necessary to develop the right interface design.
7. Slow and accurate is better than fast and wrong.
8. But efficiency matters for core classes.
9. The law of thermodynamics apply: code entropy always increases in a closed system.
10. Constant refactoring is the hallmark of an open platform.
---
First Coder
"""
)
|
vorwerkc/pymatgen
|
pymatgen/dao.py
|
Python
|
mit
| 715
|
[
"pymatgen"
] |
0d9829ef98900aa1a0a402ff50135c578fed68f7902a9d33544a1aed2bd0412c
|
# c: 14.12.2007, r: 03.11.2008
import os
import numpy as nm
from sfepy import data_dir
from sfepy.base.base import output, pause, debug, Struct
from sfepy.fem import MeshIO
from sfepy.homogenization.utils import get_volume
from gen_mesh import gen_concentric
# Geometry / generation switches for the band-gap example.
is_3D = False          # use the 3D cube-with-sphere mesh instead of the 2D one
generate_2D = False    # regenerate the 2D concentric mesh via gen_concentric()
fig_suffix = '.pdf'    # extension of the saved band-gap figures

if is_3D:
    filename_mesh = data_dir + '/meshes/3d/special/cube_sphere.mesh'
    out_groups = [1]
    in_groups = [2]
    diameters_g = None
    tepss_g = nm.logspace( -3, -0.5, 11 )
    default_y3_diameter = 0.1
    # NOTE(review): this immediately overrides `diameters_g = None` above.
    diameters_g = nm.linspace( 0.075, 0.26, 11 )
else:
    # This mesh is generated, see below.
    filename_mesh = data_dir + '/meshes/2d/special//circle_in_square.vtk'
    out_groups = [1]
    if generate_2D:
        in_groups, diameters_g = gen_concentric( 'tmp/mesh.geo',
                                                 1., 0.2, 0.08, 0.1, 0.6, 7 )
        diameters_g = nm.array( diameters_g[:-2] ) + 0.001
    else:
        # Reuse the geometry file that lives next to the mesh.
        os.system("cp %s tmp/mesh.geo" % filename_mesh.replace('.vtk', '.geo'))
        in_groups = [2]
        diameters_g = None
    tepss_g = nm.logspace( -3, -1, 11 )
    default_y3_diameter = 0.25
    # Generate the mesh with gmsh and convert it to the VTK file sfepy reads.
    os.system("gmsh -2 tmp/mesh.geo -format mesh")
    os.system("script/mesh_to_vtk.py tmp/mesh.mesh %s" % filename_mesh)
#pause()
cwd = os.path.split( os.path.join( os.getcwd(), __file__ ) )[0]
options = {
'save_eig_vectors' : (10, 0),
# Either:
'eig_range' : (0, 10), # -> freq_range = eigs[slice(*eig_range)][[0, -1]]
# Or (this has precedence if not None):
'fixed_eig_range' : (0., 50.),
'freq_margins' : (10, 10), # % of freq_range
'feps' : 1e-4, # frequency
'zeps' : 1e-10, # zero finding
'teps' : 1e-1, # eigenmomentum threshold
'teps_rel' : True, # eigenmomentum threshold is relative w.r.t. largest one
'freq_step' : 0.02, # in percent of freq_range
# 'eig_vector_transform' : ('select_in_plane', 'z', 1e-1),
# 'plot_transform' : ('clip', (-20, 20)),
'plot_transform' : ('normalize', (-2, 2)),
# 'plot_transform' : None,
#############################################
# 'parametric_hook' : 'vary_y3_size',
# 'parametric_hook' : 'vary_teps',
'post_process_hook' : 'post_process',
'output_dir' : os.path.join( cwd, 'output/' ),
#############################################
'eigenmomentum' : {'var' : 'u',
'regions' : ['Y2', 'Y3'],
'term' : '%.12e * di_volume_integrate.2.%s( u )'},
# Used to compute average density.
'region_to_material' : {'Y1' : 'matrix',
'Y2' : 'inclusion',
'Y3' : 'rigid',},
'tensor_names' : {'elastic' : 'lame',},
'volume' : lambda problem, region_name: get_volume(problem,
'displacement_Y',
region_name,
quad_order=1),
'eig_problem' : 'simple',
'fig_name' : os.path.join(cwd, 'output', 'band_gaps_sym_025' + fig_suffix),
'plot_options' : {
'show' : True, # Show figure.
'legend' : False, # Show legend.
},
'plot_rsc' : { # Resources for all plots.
'resonance' : {'linewidth' : 0.5, 'color' : 'k', 'linestyle' : '-' },
'masked' : {'linewidth' : 0.2, 'color' : 'k', 'linestyle' : ':' },
'x_axis' : {'linewidth' : 1, 'color' : 'k', 'linestyle' : '-' },
'eig_min' : {'linewidth' : 1, 'color' : 'k', 'linestyle' : '--' },
'eig_max' : {'linewidth' : 1, 'color' : 'k', 'linestyle' : '-' },
'strong_gap' : {'linewidth' : 0, 'facecolor' : (1, 1, 0.5) },
'weak_gap' : {'linewidth' : 0, 'facecolor' : (1, 1, 1) },
'propagation' : {'linewidth' : 0, 'facecolor' : (0.5, 1, 0.5) },
## 'strong_gap' : {'linewidth' : 0, 'facecolor' : (0.6, 0.6, 0.6) },
## 'weak_gap' : {'linewidth' : 0, 'facecolor' : (0.8, 0.8, 0.8) },
## 'propagation' : {'linewidth' : 0, 'facecolor' : (1, 1, 1) },
'params' : {'axes.labelsize': 'x-large',
'text.fontsize': 'large',
'legend.fontsize': 'large',
'xtick.labelsize': 'large',
'ytick.labelsize': 'large',
'text.usetex': False},
},
}
functions = {
'select_y3_circ' : (lambda coors, domain=None, diameter=None:
select_y3_circ(coors, diameter=default_y3_diameter),),
}
regions = {
'Y' : ('all', {}),
'Y1' : (' +e '.join( ('elements of group %d' % ig)
for ig in out_groups ), {}),
'Y23' : (' +e '.join( ('elements of group %d' % ig)
for ig in in_groups ), {}),
'Y3' : ('nodes by select_y3_circ', {}),
'Y2' : ('r.Y23 -e r.Y3', {}),
'Y23_Surface': ('r.Y1 *n r.Y23', {'can_cells' : False}),
}
material_1 = {
'name' : 'matrix',
# aluminium
'values' : {
'lam' : 5.898,
'mu' : 2.681,
'density' : 0.2799, # in 1e4 kg/m3
},
'flags' : {'special_constant' : True},
}
material_2 = {
'name' : 'inclusion',
# epoxy, in 1e+10 Pa
'values' : {
'lam' : 0.1798,
'mu' : 0.148,
'density' : 0.1142, # in 1e4 kg/m3
},
'flags' : {'special_constant' : True},
}
material_3 = {
'name' : 'rigid',
# lead, in 1e+10 Pa, does not matter
'values' : {
'lam' : 4.074 ,
'mu' : 0.5556,
'density' : 1.1340, # in 1e4 kg/m3
},
'flags' : {'special_constant' : True},
}
conf_dir = os.path.dirname(__file__)
dim = MeshIO.any_from_filename(filename_mesh,
prefix_dir=conf_dir).read_dimension()
field_0 = {
'name' : 'displacement_Y',
'dtype' : nm.float64,
'shape' : dim,
'region' : 'Y',
'approx_order' : 1,
}
field_1 = {
'name' : 'displacement_Y23',
'dtype' : nm.float64,
'shape' : dim,
'region' : 'Y23',
'approx_order' : 1,
}
variables = {
'u' : ('unknown field', 'displacement_Y23', 0),
'v' : ('test field', 'displacement_Y23', 'u'),
}
ebc_1 = {
'name' : 'ZeroSurface',
'region' : 'Y23_Surface',
'dofs' : {'u.all' : 0.0},
}
lcbc_1 = {
'name' : 'RigidBody',
'region' : 'Y3',
'dofs' : {'u.all' : 'rigid'},
}
##
# Eigenproblem equations.
# dw_lin_elastic_iso.i1.Y3( rigid.lame, v, u ) should have no effect!
equations = {
'lhs' : """dw_lin_elastic_iso.2.Y2( inclusion.lam, inclusion.mu, v, u )
+ dw_lin_elastic_iso.2.Y3( rigid.lam, rigid.mu, v, u )""",
'rhs' : """dw_mass_vector.2.Y2( inclusion.density, v, u )
+ dw_mass_vector.2.Y3( rigid.density, v, u )""",
}
def clip( data, plot_range ):
    """Clamp `data` elementwise to the interval given by `plot_range`."""
    lo, hi = plot_range
    return nm.clip( data, lo, hi )
def normalize( data, plot_range ):
    """Squash `data` through arctan, then clamp the result to `plot_range`."""
    squashed = nm.arctan( data )
    return clip( squashed, plot_range )
##
# 02.10.2007, c
def select_in_plane( vec, shape, normal_direction, eps ):
    """
    Zero out `vec` (flattened (n_nod, dim) DOF vector) if any nodal component
    along `normal_direction` exceeds `eps`; otherwise return it unchanged.

    Returns (vector, masked_flag).
    """
    n_nod, dim = shape
    # Axis index for the normal direction; in 2D, 'z' falls back to axis 1.
    axis_of = {2: {'x': 0, 'y': 1, 'z': 1},
               3: {'x': 0, 'y': 1, 'z': 2}}
    axis = axis_of[dim][normal_direction]
    normal = nm.eye(dim, dtype=nm.float64)[:, axis]
    components = nm.reshape(vec, (n_nod, dim)).dot(normal)
    if nm.any(nm.abs(components) > eps):
        return nm.zeros_like(vec), True
    return vec, False
def select_y3_circ(coors, diameter=None):
    """
    Return the indices of nodes lying within `diameter` of the origin.

    :param coors: (n_nod, dim) array of node coordinates (dim 2 or 3)
    :param diameter: selection radius
    :raises ValueError: if 3 or fewer nodes are selected
    """
    # Infer the space dimension from the coordinate array itself instead of
    # relying on the module-level `dim` global (identical result for valid
    # input, and the function becomes self-contained).
    r = coors[:, 0]**2 + coors[:, 1]**2
    if coors.shape[1] == 3:
        r += coors[:, 2]**2
    r = nm.sqrt(r)
    out = nm.where(r < diameter)[0]
    if out.shape[0] <= 3:
        # Bug fix: the original interpolated an undefined name `n` here,
        # raising NameError instead of the intended ValueError.
        raise ValueError('too few nodes selected! (%d)' % out.shape[0])
    return out
def extend_cell_data( data, pb, rname, val = None ):
    """
    Extend per-cell data defined on region `rname` to all cells of the domain.

    Cells outside the region are filled with `val`; when `val` is None, the
    minimum of `data` is used (minimum of absolute values for vector data).
    """
    n_el = pb.domain.shape.n_el
    # Data already covers the whole domain - nothing to do.
    if data.shape[0] == n_el: return data

    if val is None:
        if data.shape[2] > 1: # Vector.
            val = nm.amin( nm.abs( data ) )
        else: # Scalar.
            val = nm.amin( data )

    edata = nm.empty( (n_el,) + data.shape[1:], dtype = nm.float64 )
    edata.fill( val )

    region = pb.domain.regions[rname]
    offs = region.get_cell_offsets()
    eoffs = pb.domain.get_cell_offsets()
    # Scatter the region data into the full-domain array, element group by
    # element group: `ii` is the group's offset in the domain numbering,
    # `ir` the corresponding offset in the region numbering.
    for group in pb.domain.iter_groups():
        ig = group.ig
        ii = eoffs[ig]
        if ig in region.igs:
            n_cell = region.shape[ig].n_cell
            ir = offs[ig]
            edata[ii+region.cells[ig]] = data[ir:ir+n_cell]
    return edata
def post_process( out, problem, mtx_phi ):
    """
    Add Cauchy strain fields of the eigenvectors to the output dictionary.

    For each eigenvector entry already present in `out`, evaluate the strain
    on region Y23, extend it to the whole domain and store it under a
    'strain%03d' key.
    """
    var = problem.get_variables()['u']
    # NOTE(review): inserting into `out` while iterating over out.keys() is
    # only safe on Python 2 (keys() returns a list there); on Python 3 this
    # would raise "dictionary changed size during iteration".
    for key in out.keys():
        # Keys are assumed to be like 'u012' -> eigenvector index 12.
        ii = int( key[1:] )
        vec = mtx_phi[:,ii].copy()
        var.data_from_any(vec)

        strain = problem.evaluate('de_cauchy_strain.2.Y23( u )', u=var,
                                  verbose=False)
        strain = extend_cell_data( strain, problem, 'Y23' )
        out['strain%03d' % ii] = Struct(name='output_data',
                                        mode='cell', data=strain,
                                        dofs=None)
    return out
def save_log( filename, bg, log_item ):
    """Saves band gaps, valid flags, eigenfrequencies."""
    row_format = "%f %f %d %f %f %d %f %f %s\n%s\n"
    with open( filename, 'w' ) as fd:
        fd.write( log_item )
        fd.write( 'squared: %s\n' % False )
        fd.write( 'n_zeroed: %d\n' % bg.n_zeroed )
        fd.write( 'n_eigs: %d\n' % bg.n_eigs )

        # One row per frequency interval: interval bounds, the gap extrema
        # (flag, frequency, value) and the gap kind/description.
        fd.write( 'f0 f1 flag_min f_min v_min flag_max f_max v_max'
                  ' kind\ndesc\n' )
        margins = bg.freq_range_margins
        n_row = len( margins ) - 1
        fd.write( '%d\n' % n_row )
        for ir in xrange( n_row ):
            f0, f1 = margins[[ir, ir+1]]
            gmin, gmax = bg.gaps[ir]
            fd.write( row_format % ((f0, f1) + tuple( gmin ) + tuple( gmax )
                                    + bg.kinds[ir]) )

        # One row per resonance: validity flag and eigenfrequency.
        fd.write( 'valid resonance\n' )
        initial = bg.freq_range_initial
        n_row = len( initial )
        fd.write( '%d\n' % n_row )
        valid_in_range = bg.valid[bg.eig_range]
        for ir in xrange( n_row ):
            fd.write( '%d %f\n' % (valid_in_range[ir], initial[ir]) )
def vary_teps( problem ):
    """
    Vary the eigenmomentum threshold (parametric hook).

    Generator protocol: yields (problem, out); the caller solves the problem
    and appends its results to `out`, after which the band gaps are logged.
    """
    from sfepy.solvers.ts import get_print_info

    output.prefix = 'vary_teps:'
    # Fall back to a default threshold range if none was configured.
    if tepss_g is None:
        tepss = nm.logspace( -3, -1, 11 )
    else:
        tepss = tepss_g
    ofn_trunk, output_dir = problem.ofn_trunk, problem.output_dir
    join = os.path.join
    n_digit, aux, d_format = get_print_info( len( tepss ) + 1 )
    for ii, teps in enumerate( tepss ):
        output( 'iteration %d: teps %.2e' % (ii, teps) )
        opts = problem.conf.options
        opts.teps = teps
        # Do not pop up figures during the sweep; save them to files instead.
        opts.plot_options['show'] = False
        opts.fig_name = join( output_dir,
                              (('band_gaps_%s' % d_format)
                               + '_teps_%3.2e' + fig_suffix) % (ii, teps) )
        problem.ofn_trunk = ofn_trunk + '_' + (d_format % ii)
        out = []
        yield problem, out
        # The caller has filled `out`; its last entry holds the band gaps.
        evp, bg = out[-1]
        filename = join( output_dir,
                         ('band_gaps_%s.txt' % d_format) % ii )
        log_item = '$10^q$: %f\n' % teps
        save_log( filename, bg, log_item )
    yield None
def vary_y3_size( problem ):
    """
    Vary the size of the Y3 (rigid) inclusion (parametric hook).

    Same generator protocol as vary_teps(): yields (problem, out) once per
    diameter, then logs the resulting band gaps.
    """
    from sfepy.fem import ProblemDefinition
    from sfepy.solvers.ts import get_print_info

    output.prefix = 'vary_y3_size:'
    # NOTE(review): this initial list is always overwritten just below.
    y3_diameters = [0.2, 0.25, 0.3, 0.35, 0.4]
    if diameters_g is None:
        y3_diameters = nm.linspace( 0.15, 0.45, 16 )
    else:
        y3_diameters = diameters_g
    # y3_diameters = [0.45]
    ofn_trunk, output_dir = problem.ofn_trunk, problem.output_dir
    join = os.path.join
    conf = problem.conf
    cr = conf.get_raw( 'regions' )
    n_digit, aux, d_format = get_print_info( len( y3_diameters ) + 1 )
    for ii, diameter in enumerate( y3_diameters ):
        output( 'iteration %d: diameter %3.2f' % (ii, diameter) )
        opts = problem.conf.options
        # Redefine region Y3 with the new diameter and rebuild the problem.
        cr['Y3'] = ('nodes by select_y3_circ( x, y, z, %.5f )' % diameter, {})
        conf.edit( 'regions', cr )
        problem = ProblemDefinition.from_conf( conf )
        problem.save_regions( join( output_dir, ('regions_' + d_format) % ii ),
                              ['Y2', 'Y3'] )
        # Sanity check: an empty region would break the eigenvalue problem.
        for region in problem.domain.regions:
            if not region.has_cells_if_can():
                raise ValueError( 'region %s has no cells!' % region.name )
        opts.plot_options['show'] = False
        opts.fig_name = join( output_dir,
                              (('band_gaps_%s' % d_format)
                               + '_y3_%03.2f' + fig_suffix) % (ii, diameter) )
        problem.ofn_trunk = ofn_trunk + '_' + (d_format % ii)
        out = []
        yield problem, out
        evp, bg = out[-1]
        filename = join( output_dir,
                         ('band_gaps_%s.txt' % d_format) % ii )
        log_item = '$r(Y_3)$: %f\n' % diameter
        save_log( filename, bg, log_item )
    yield None
|
olivierverdier/sfepy
|
examples/phononic/band_gaps_rigid.py
|
Python
|
bsd-3-clause
| 13,078
|
[
"VTK"
] |
7dc7a03dfd066cff4029eefc317989a7cef0ac1058e780cbc65fa80bef5bd392
|
# CamJam EduKit 1 - Basics
# Worksheet 7 - Traffic Lights Template

# Import Libraries
import os
import time
from gpiozero import LED, Button, Buzzer

# Set up variables for the LED, Buzzer and switch pins
# NOTE: `button` (a gpiozero Button) must be defined here before the main
# loop below will work, e.g. button = Button(25)


# Define a function for the initial state (Green LED on, rest off)
# (If you have the second 'pedestrian' LEDs, turn the red on & green off)
def startgreen():
    # Remember all code in the function is indented
    pass  # TODO: replace with your code


# Turn the green off and the amber on for 3 seconds
# ('Pedestrian' red LED stays lit)
def steadyamber():
    # Remember all code in the function is indented
    pass  # TODO: replace with your code


# Turn the amber off, and then the red on for 1 second
def steadyred():
    # Remember all code in the function is indented
    pass  # TODO: replace with your code


# Sound the buzzer for 4 seconds
# (If you have the 'pedestrian' LEDs, turn the red off and green on)
def startwalking():
    # Make the buzzer buzz on and off, half a second of
    # sound followed by half a second of silence
    pass  # TODO: replace with your code


# Turn the buzzer off and wait for 2 seconds
# (If you have a second green 'pedestrian' LED, make it flash on and
# off for the two seconds)
def dontwalk():
    # Remember all code in the function is indented
    pass  # TODO: replace with your code


# Flash the amber on and off for 6 seconds
# (And the green 'pedestrian' LED too)
def flashingambergreen():
    # Remember all code in the function is indented
    pass  # TODO: replace with your code


# Flash the amber for one more second
# (Turn the green 'pedestrian' LED off and the red on)
def flashingamber():
    # Remember all code in the function is indented
    pass  # TODO: replace with your code


# Go through the traffic light sequence by calling each function
# one after the other.
def trafficlightsequence():  # renamed: was `trafficlightqequence` (typo)
    # Remember all code in the function is indented
    pass  # TODO: replace with your code


os.system('clear')  # Clears the terminal
print("Traffic Lights")

# Initialise the traffic lights
startgreen()

# Here is the loop that waits at least 20 seconds before
# stopping the cars if the button has been pressed
while True:  # Loop around forever
    buttonnotpressed = True  # Button has not been pressed
    start = time.time()  # Records the current time
    while buttonnotpressed:  # While the button has not been pressed
        time.sleep(0.1)  # Wait for 0.1s
        if button.is_pressed:  # fixed: gpiozero's property is `is_pressed`
            now = time.time()
            buttonnotpressed = False  # Button has been pressed
    if (now - start) <= 20:  # If under 20 seconds
        time.sleep(20 - (now - start))  # Wait until 20s is up
    trafficlightsequence()  # Run the traffic light sequence
|
CamJam-EduKit/EduKit1
|
CamJam EduKit 1 - GPIO Zero/Code (GPIO Zero)/7-TrafficLights.py
|
Python
|
mit
| 2,470
|
[
"Amber"
] |
f21d90e4b11c3848e1635642e36a1f2fd6212f8f7da65083fb3c797c955dfdb4
|
#!/usr/bin/env python3
"""
Expansion module integrating with VMware NSX Defender.
"""
import argparse
import base64
import configparser
import datetime
import hashlib
import io
import ipaddress
import json
import logging
import pymisp
import sys
import vt
import zipfile
from urllib import parse
from typing import Any, Dict, List, Optional, Tuple, Union
import tau_clients
from tau_clients import exceptions
from tau_clients import nsx_defender
logger = logging.getLogger("vmware_nsx")
logger.setLevel(logging.DEBUG)
misperrors = {
"error": "Error",
}
mispattributes = {
"input": [
"attachment",
"malware-sample",
"url",
"md5",
"sha1",
"sha256",
],
"format": "misp_standard",
}
moduleinfo = {
"version": "0.2",
"author": "Jason Zhang, Stefano Ortolani",
"description": "Enrich a file or URL with VMware NSX Defender",
"module-type": ["expansion", "hover"],
}
moduleconfig = [
"analysis_url", # optional, defaults to hard-coded values
"analysis_verify_ssl", # optional, defaults to True
"analysis_key", # required
"analysis_api_token", # required
"vt_key", # optional
"misp_url", # optional
"misp_verify_ssl", # optional, defaults to True
"misp_key", # optional
]
DEFAULT_ZIP_PASSWORD = b"infected"
DEFAULT_ENDPOINT = tau_clients.NSX_DEFENDER_DC_WESTUS
WORKFLOW_COMPLETE_TAG = "workflow:state='complete'"
WORKFLOW_INCOMPLETE_TAG = "workflow:state='incomplete'"
VT_DOWNLOAD_TAG = "vt:download"
GALAXY_ATTACK_PATTERNS_UUID = "c4e851fa-775f-11e7-8163-b774922098cd"
class ResultParser:
    """This is a parser to extract *basic* information from a result dictionary."""

    def __init__(self, techniques_galaxy: Optional[Dict[str, str]] = None):
        """Constructor."""
        # Map from MISP galaxy technique id to the galaxy tag name.
        self.techniques_galaxy = techniques_galaxy or {}

    def parse(self, analysis_link: str, result: Dict[str, Any]) -> pymisp.MISPEvent:
        """
        Parse the analysis result into a MISP event.

        :param str analysis_link: the analysis link
        :param dict[str, any] result: the JSON returned by the analysis client.
        :rtype: pymisp.MISPEvent
        :return: a MISP event
        """
        misp_event = pymisp.MISPEvent()
        # Add analysis subject info: a "url" object for URL analyses,
        # otherwise a "file" object with its hashes and mime type.
        if "url" in result["analysis_subject"]:
            o = pymisp.MISPObject("url")
            o.add_attribute("url", result["analysis_subject"]["url"])
        else:
            o = pymisp.MISPObject("file")
            o.add_attribute("md5", type="md5", value=result["analysis_subject"]["md5"])
            o.add_attribute("sha1", type="sha1", value=result["analysis_subject"]["sha1"])
            o.add_attribute("sha256", type="sha256", value=result["analysis_subject"]["sha256"])
            o.add_attribute(
                "mimetype",
                category="Payload delivery",
                type="mime-type",
                value=result["analysis_subject"]["mime_type"]
            )
        misp_event.add_object(o)

        # Add HTTP requests from url analyses
        network_dict = result.get("report", {}).get("analysis", {}).get("network", {})
        for request in network_dict.get("requests", []):
            # Skip entries carrying neither a URL nor an IP.
            if not request["url"] and not request["ip"]:
                continue
            o = pymisp.MISPObject(name="http-request")
            o.add_attribute("method", "GET")
            if request["url"]:
                parsed_uri = parse.urlparse(request["url"])
                o.add_attribute("host", parsed_uri.netloc)
                o.add_attribute("uri", request["url"])
            if request["ip"]:
                o.add_attribute("ip-dst", request["ip"])
            misp_event.add_object(o)

        # Add network behaviors from files
        for subject in result.get("report", {}).get("analysis_subjects", []):
            # Add DNS requests
            for dns_query in subject.get("dns_queries", []):
                hostname = dns_query.get("hostname")
                # Skip if it is an IP address
                try:
                    if hostname == "wpad" or hostname == "localhost":
                        continue
                    # Invalid hostname, e.g., hostname: ZLKKJRPY or 2.2.0.10.in-addr.arpa.
                    if "." not in hostname or hostname[-1] == ".":
                        continue
                    # If ip_address() succeeds, `hostname` is actually an IP: skip it.
                    _ = ipaddress.ip_address(hostname)
                    continue
                except ValueError:
                    # Not parseable as an IP address -> a genuine hostname.
                    pass
                o = pymisp.MISPObject(name="domain-ip")
                o.add_attribute("hostname", type="hostname", value=hostname)
                for ip in dns_query.get("results", []):
                    o.add_attribute("ip", type="ip-dst", value=ip)
                misp_event.add_object(o)

            # Add HTTP conversations (as network connection and as http request)
            for http_conversation in subject.get("http_conversations", []):
                o = pymisp.MISPObject(name="network-connection")
                o.add_attribute("ip-src", http_conversation["src_ip"])
                o.add_attribute("ip-dst", http_conversation["dst_ip"])
                o.add_attribute("src-port", http_conversation["src_port"])
                o.add_attribute("dst-port", http_conversation["dst_port"])
                o.add_attribute("hostname-dst", http_conversation["dst_host"])
                o.add_attribute("layer3-protocol", "IP")
                o.add_attribute("layer4-protocol", "TCP")
                o.add_attribute("layer7-protocol", "HTTP")
                misp_event.add_object(o)

                # The "url" field has the form "<method> <path> <http-version>".
                method, path, http_version = http_conversation["url"].split(" ")
                # Only include the port in the URI when it is non-standard.
                if http_conversation["dst_port"] == 80:
                    uri = "http://{}{}".format(http_conversation["dst_host"], path)
                else:
                    uri = "http://{}:{}{}".format(
                        http_conversation["dst_host"],
                        http_conversation["dst_port"],
                        path
                    )
                o = pymisp.MISPObject(name="http-request")
                o.add_attribute("host", http_conversation["dst_host"])
                o.add_attribute("method", method)
                o.add_attribute("uri", uri)
                o.add_attribute("ip-dst", http_conversation["dst_ip"])
                misp_event.add_object(o)

        # Add sandbox info like score and sandbox type
        o = pymisp.MISPObject(name="sandbox-report")
        sandbox_type = "saas" if tau_clients.is_task_hosted(analysis_link) else "on-premise"
        o.add_attribute("score", result["score"])
        o.add_attribute("sandbox-type", sandbox_type)
        o.add_attribute("{}-sandbox".format(sandbox_type), "vmware-nsx-defender")
        o.add_attribute("permalink", analysis_link)
        misp_event.add_object(o)

        # Add behaviors
        # Check if its not empty first, as at least one attribute has to be set for sb-signature object
        if result.get("malicious_activity", []):
            o = pymisp.MISPObject(name="sb-signature")
            o.add_attribute("software", "VMware NSX Defender")
            for activity in result.get("malicious_activity", []):
                a = pymisp.MISPAttribute()
                a.from_dict(type="text", value=activity)
                o.add_attribute("signature", **a)
            misp_event.add_object(o)

        # Add mitre techniques
        for techniques in result.get("activity_to_mitre_techniques", {}).values():
            for technique in techniques:
                for misp_technique_id, misp_technique_name in self.techniques_galaxy.items():
                    if technique["id"].casefold() in misp_technique_id.casefold():
                        # If report details a sub-technique, trust the match
                        # Otherwise trust it only if the MISP technique is not a sub-technique
                        if "." in technique["id"] or "." not in misp_technique_id:
                            misp_event.add_tag(misp_technique_name)
                            break
        return misp_event
def _parse_submission_response(response: Dict[str, Any]) -> Tuple[str, List[str]]:
    """
    Parse the response from "submit_*" methods.

    :param dict[str, any] response: the client response
    :rtype: tuple(str, list[str])
    :return: the task_uuid and the workflow tags to attach
    :raises ValueError: in case of any error
    """
    task_uuid = response.get("task_uuid")
    if not task_uuid:
        raise ValueError("Submission failed, unable to process the data")
    # A score is only present once the analysis has completed.
    workflow_tag = (
        WORKFLOW_COMPLETE_TAG
        if response.get("score") is not None
        else WORKFLOW_INCOMPLETE_TAG
    )
    return task_uuid, [workflow_tag]
def _unzip(zipped_data: bytes, password: bytes = DEFAULT_ZIP_PASSWORD) -> bytes:
    """
    Unzip the data.

    :param bytes zipped_data: the zipped data
    :param bytes password: the password
    :rtype: bytes
    :return: the unzipped data
    :raises ValueError: in case of any error
    """
    try:
        with zipfile.ZipFile(io.BytesIO(zipped_data)) as archive:
            # Read the first (expected only) member of the archive.
            first_member = archive.namelist()[0]
            return archive.read(first_member, password)
    except (IOError, ValueError) as exc:
        raise ValueError(str(exc))
def _download_from_vt(client: vt.Client, file_hash: str) -> bytes:
    """
    Download file from VT.

    :param vt.Client client: the VT client
    :param str file_hash: the file hash
    :rtype: bytes
    :return: the downloaded data
    :raises ValueError: in case of any error
    """
    try:
        data_buffer = io.BytesIO()
        client.download_file(file_hash, data_buffer)
        data_buffer.seek(0, 0)
        return data_buffer.read()
    except (IOError, vt.APIError) as exc:
        raise ValueError(str(exc))
    finally:
        # vt.Client likes to free resources at shutdown, and it can be used as context to ease that
        # Since the structure of the module does not play well with how MISP modules are organized
        # let's play nice and close connections pro-actively (opened by "download_file")
        if client:
            client.close()
def _get_analysis_tags(
    clients: Dict[str, nsx_defender.AnalysisClient],
    task_uuid: str,
) -> List[str]:
    """
    Get the analysis tags of a task.

    :param dict[str, nsx_defender.AnalysisClient] clients: the analysis clients
    :param str task_uuid: the task uuid
    :rtype: list[str]
    :return: the analysis tags, sorted
    :raises exceptions.ApiError: in case of client errors
    :raises exceptions.CommunicationError: in case of client communication errors
    """
    # Map NSX tag types to the short prefixes used in the MISP tags;
    # unknown tag types are skipped.
    prefix_by_type = {
        "av_family": "av-fam",
        "av_class": "av-cls",
        "lastline_malware": "nsx",
    }
    response = clients[DEFAULT_ENDPOINT].get_analysis_tags(task_uuid)
    tags = set()
    for tag in response.get("analysis_tags", []):
        prefix = prefix_by_type.get(tag["data"]["type"])
        if prefix:
            tags.add("{}:{}".format(prefix, tag["data"]["value"]))
    return sorted(tags)
def _get_latest_analysis(
clients: Dict[str, nsx_defender.AnalysisClient],
file_hash: str,
) -> Optional[str]:
"""
Get the latest analysis.
:param dict[str, nsx_defender.AnalysisClient] clients: the analysis clients
:param str file_hash: the hash of the file
:rtype: str|None
:return: the task uuid if present, None otherwise
:raises exceptions.ApiError: in case of client errors
:raises exceptions.CommunicationError: in case of client communication errors
"""
def _parse_expiration(task_info: Dict[str, str]) -> datetime.datetime:
"""
Parse expiration time of a task
:param dict[str, str] task_info: the task
:rtype: datetime.datetime
:return: the parsed datetime object
"""
return datetime.datetime.strptime(task_info["expires"], "%Y-%m-%d %H:%M:%S")
results = []
for data_center, client in clients.items():
response = client.query_file_hash(file_hash=file_hash)
for task in response.get("tasks", []):
results.append(task)
if results:
return sorted(results, key=_parse_expiration)[-1]["task_uuid"]
else:
return None
def _get_mitre_techniques_galaxy(misp_client: pymisp.PyMISP) -> Dict[str, str]:
    """
    Get all the MITRE techniques from the MISP galaxy.

    :param pymisp.PyMISP misp_client: the MISP client
    :rtype: dict[str, str]
    :return: all techniques indexed by their id
    """
    galaxy_attack_patterns = misp_client.get_galaxy(
        galaxy=GALAXY_ATTACK_PATTERNS_UUID,
        withCluster=True,
        pythonify=True,
    )
    # Index the clusters by technique id (cluster value) -> galaxy tag name.
    return {cluster.value: cluster.tag_name for cluster in galaxy_attack_patterns.clusters}
def introspection() -> Dict[str, Union[str, List[str]]]:
    """
    Implement interface.

    :return: the supported MISP attributes
    :rtype: dict[str, list[str]]
    """
    # Module-level constant describing accepted input types and output format.
    return mispattributes
def version() -> Dict[str, Union[str, List[str]]]:
    """
    Implement interface.

    :return: the module config inside another dictionary
    :rtype: dict[str, list[str]]
    """
    # Attach the configuration option names to the module metadata (mutates
    # the module-level `moduleinfo` dictionary, as the MISP module API expects).
    moduleinfo["config"] = moduleconfig
    return moduleinfo
def handler(q: Union[bool, str] = False) -> Union[bool, Dict[str, Any]]:
    """
    Implement interface.

    Submits the given attribute (URL, sample, attachment, or hash) to VMware
    NSX Defender and returns either the full enrichment (MISP attributes,
    objects and tags) or, when the analysis has not finished, a partial
    result consisting of the analysis link plus tags.

    :param bool|str q: the input received
    :rtype: bool|dict[str, any]
    """
    if q is False:
        return False
    request = json.loads(q)
    config = request.get("config", {})

    # Load the client to connect to VMware NSX ATA (hard-fail)
    try:
        analysis_url = config.get("analysis_url")
        login_params = {
            "key": config["analysis_key"],
            "api_token": config["analysis_api_token"],
        }
        # If 'analysis_url' is specified we are connecting on-premise
        if analysis_url:
            analysis_clients = {
                DEFAULT_ENDPOINT: nsx_defender.AnalysisClient(
                    api_url=analysis_url,
                    login_params=login_params,
                    verify_ssl=bool(config.get("analysis_verify_ssl", True)),
                )
            }
            logger.info("Connected NSX AnalysisClient to on-premise infrastructure")
        else:
            # Hosted infrastructure: one client per known data center.
            analysis_clients = {
                data_center: nsx_defender.AnalysisClient(
                    api_url=tau_clients.NSX_DEFENDER_ANALYSIS_URLS[data_center],
                    login_params=login_params,
                    verify_ssl=bool(config.get("analysis_verify_ssl", True)),
                ) for data_center in [
                    tau_clients.NSX_DEFENDER_DC_WESTUS,
                    tau_clients.NSX_DEFENDER_DC_NLEMEA,
                ]
            }
            logger.info("Connected NSX AnalysisClient to hosted infrastructure")
    except KeyError as ke:
        logger.error("Integration with VMware NSX ATA failed to connect: %s", str(ke))
        return {"error": "Error connecting to VMware NSX ATA: {}".format(ke)}

    # Load the client to connect to MISP (soft-fail)
    try:
        misp_client = pymisp.PyMISP(
            url=config["misp_url"],
            key=config["misp_key"],
            ssl=bool(config.get("misp_verify_ssl", True)),
        )
    except (KeyError, pymisp.PyMISPError):
        logger.error("Integration with pyMISP disabled: no MITRE techniques tags")
        misp_client = None

    # Load the client to connect to VT (soft-fail)
    try:
        vt_client = vt.Client(apikey=config["vt_key"])
    except (KeyError, ValueError):
        logger.error("Integration with VT disabled: no automatic download of samples")
        vt_client = None

    # Decode and issue the request
    try:
        if request["attribute"]["type"] == "url":
            sample_url = request["attribute"]["value"]
            response = analysis_clients[DEFAULT_ENDPOINT].submit_url(sample_url)
            task_uuid, tags = _parse_submission_response(response)
        else:
            if request["attribute"]["type"] == "malware-sample":
                # Raise TypeError
                # NOTE(review): on Python 3, b64decode raises binascii.Error
                # (a ValueError subclass), so the TypeError branch below may
                # be a Python 2 leftover - confirm.
                file_data = _unzip(base64.b64decode(request["attribute"]["data"]))
                file_name = request["attribute"]["value"].split("|", 1)[0]
                hash_value = hashlib.sha1(file_data).hexdigest()
            elif request["attribute"]["type"] == "attachment":
                # Raise TypeError
                file_data = base64.b64decode(request["attribute"]["data"])
                file_name = request["attribute"].get("value")
                hash_value = hashlib.sha1(file_data).hexdigest()
            else:
                # Plain hash attribute: no local data available.
                hash_value = request["attribute"]["value"]
                file_data = None
                file_name = "{}.bin".format(hash_value)
            # Check whether we have a task for that file
            tags = []
            task_uuid = _get_latest_analysis(analysis_clients, hash_value)
            if not task_uuid:
                # If we have no analysis, download the sample from VT
                if not file_data:
                    if not vt_client:
                        raise ValueError("No file available locally and VT is disabled")
                    file_data = _download_from_vt(vt_client, hash_value)
                    tags.append(VT_DOWNLOAD_TAG)
                # ... and submit it (_download_from_vt fails if no sample availabe)
                response = analysis_clients[DEFAULT_ENDPOINT].submit_file(file_data, file_name)
                task_uuid, _tags = _parse_submission_response(response)
                tags.extend(_tags)
    except KeyError as e:
        logger.error("Error parsing input: %s", request["attribute"])
        return {"error": "Error parsing input: {}".format(e)}
    except TypeError as e:
        logger.error("Error decoding input: %s", request["attribute"])
        return {"error": "Error decoding input: {}".format(e)}
    except ValueError as e:
        logger.error("Error processing input: %s", request["attribute"])
        return {"error": "Error processing input: {}".format(e)}
    except (exceptions.CommunicationError, exceptions.ApiError) as e:
        logger.error("Error issuing API call: %s", str(e))
        return {"error": "Error issuing API call: {}".format(e)}
    else:
        analysis_link = tau_clients.get_task_link(
            uuid=task_uuid,
            analysis_url=analysis_clients[DEFAULT_ENDPOINT].base,
            prefer_load_balancer=True,
        )

    # Return partial results if the analysis has yet to terminate
    try:
        tags.extend(_get_analysis_tags(analysis_clients, task_uuid))
        report = analysis_clients[DEFAULT_ENDPOINT].get_result(task_uuid)
    except (exceptions.CommunicationError, exceptions.ApiError) as e:
        logger.error("Error retrieving the report: %s", str(e))
        return {
            "results": {
                "types": "link",
                "categories": ["External analysis"],
                "values": analysis_link,
                "tags": tags,
            }
        }

    # Return the enrichment
    try:
        techniques_galaxy = None
        if misp_client:
            techniques_galaxy = _get_mitre_techniques_galaxy(misp_client)
        result_parser = ResultParser(techniques_galaxy=techniques_galaxy)
        misp_event = result_parser.parse(analysis_link, report)
        # Attach remaining tags to the event, skipping the 'complete' marker.
        for tag in tags:
            if tag not in frozenset([WORKFLOW_COMPLETE_TAG]):
                misp_event.add_tag(tag)
        return {
            "results": {
                key: json.loads(misp_event.to_json())[key]
                for key in ("Attribute", "Object", "Tag")
                if (key in misp_event and misp_event[key])
            }
        }
    except pymisp.PyMISPError as e:
        logger.error("Error parsing the report: %s", str(e))
        return {"error": "Error parsing the report: {}".format(e)}
def main():
    """Main function used to test basic functionalities of the module."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config-file",
        dest="config_file",
        required=True,
        help="the configuration file used for testing",
    )
    parser.add_argument(
        "-t",
        "--test-attachment",
        dest="test_attachment",
        default=None,
        help="the path to a test attachment",
    )
    args = parser.parse_args()
    # Build the handler config from an INI file with [analysis], [vt], [misp].
    conf = configparser.ConfigParser()
    conf.read(args.config_file)
    config = {
        "analysis_verify_ssl": conf.getboolean("analysis", "analysis_verify_ssl"),
        "analysis_key": conf.get("analysis", "analysis_key"),
        "analysis_api_token": conf.get("analysis", "analysis_api_token"),
        "vt_key": conf.get("vt", "vt_key"),
        "misp_url": conf.get("misp", "misp_url"),
        "misp_verify_ssl": conf.getboolean("misp", "misp_verify_ssl"),
        "misp_key": conf.get("misp", "misp_key"),
    }
    # TEST 1: submit a URL
    j = json.dumps(
        {
            "config": config,
            "attribute": {
                "type": "url",
                "value": "https://www.google.com",
            }
        }
    )
    print(json.dumps(handler(j), indent=4, sort_keys=True))
    # TEST 2: submit a file attachment (only when a test file was provided)
    if args.test_attachment:
        with open(args.test_attachment, "rb") as f:
            data = f.read()
        j = json.dumps(
            {
                "config": config,
                "attribute": {
                    "type": "attachment",
                    "value": "test.docx",
                    "data": base64.b64encode(data).decode("utf-8"),
                }
            }
        )
        print(json.dumps(handler(j), indent=4, sort_keys=True))
    # TEST 3: submit a file hash that is known by NSX ATA
    j = json.dumps(
        {
            "config": config,
            "attribute": {
                "type": "md5",
                "value": "002c56165a0e78369d0e1023ce044bf0",
            }
        }
    )
    print(json.dumps(handler(j), indent=4, sort_keys=True))
    # TEST 4: submit a file hash that is NOT known by NSX ATA
    j = json.dumps(
        {
            "config": config,
            "attribute": {
                "type": "sha1",
                "value": "2aac25ecdccf87abf6f1651ef2ffb30fcf732250",
            }
        }
    )
    print(json.dumps(handler(j), indent=4, sort_keys=True))
    return 0


if __name__ == "__main__":
    sys.exit(main())
|
VirusTotal/misp-modules
|
misp_modules/modules/expansion/vmware_nsx.py
|
Python
|
agpl-3.0
| 22,825
|
[
"Galaxy"
] |
c6e1df5412714092c23d4300a157f12ed3d7dc14717c944d8ae8dc01b810fa9b
|
#
# Copyright 2010, 2011 Brian R. D'Urso
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
import visa
class LockInAmplifier(visa.GpibInstrument):
    """GPIB driver for an SRS SR830-style lock-in amplifier."""

    #initialization
    def __init__(self,*args,**kwargs):
        visa.GpibInstrument.__init__(self,*args, **kwargs)

    def read_x(self):
        # Query output channel 1 and return it as a float.
        p = self.ask("OUTP? 1")
        return float(p)

    def read_y(self):
        # Query output channel 2 and return it as a float.
        p = self.ask("OUTP? 2")
        return float(p)

    def read_phase(self):
        # NOTE(review): unlike read_x/read_y this returns the raw string
        # from "PHAS?" rather than a float - confirm whether callers rely
        # on the string, otherwise it should be float(p) for consistency.
        p = self.ask("PHAS?")
        return p

    def set_sample_rate(self, rate):
        # rate is a value from 0 (62.5mHz) to 13 (512Hz) or 14 for Trigger
        self.write('SRAT '+str(rate))

    def trigger(self):
        # Issue a software trigger.
        self.write('TRIG')

    def reset_buffer(self):
        # Reset the instrument's data buffer.
        self.write('REST')

    def read_xy(self):
        # Snapshot parameters 1 and 2 simultaneously and return them as floats.
        xy = self.ask('SNAP? 1,2')
        a = xy.split(',')
        return float(a[0]), float(a[1])

    def read_12(self):
        # NOTE(review): snapshots parameters 10 and 11 (per the method name,
        # presumably the CH1/CH2 displays) - verify against the SR830 manual.
        xy = self.ask('SNAP? 10,11')
        a = xy.split(',')
        return float(a[0]), float(a[1])

    # Index into this list is the numeric argument of the 'SENS' command.
    sensitivity_list = ['2 nV/fA', '5 nV/fA', '10 nV/fA', '20 nV/fA', '50 nV/fA', '100 nV/fA', '200 nV/fA', '500 nV/fA',
                        '1 uV/pA', '2 uV/pA', '5 uV/pA', '10 uV/pA', '20 uV/pA', '50 uV/pA', '100 uV/pA', '200 uV/pA', '500 uV/pA',
                        '1 mV/nA', '2 mV/nA', '5 mV/nA', '10 mV/nA', '20 mV/nA', '50 mV/nA', '100 mV/nA', '200 mV/nA', '500 mV/nA',
                        '1 V/uA']

    def set_sensitivity(self, sensitivity):
        # Translate the human-readable setting into its numeric index.
        s = self.sensitivity_list.index(sensitivity)
        self.write('SENS ' + str(s))

    # Index into this list is the numeric argument of the 'OFLT' command.
    time_constant_list = ['10 us', '30 us', '100 us', '300 us',
                          '1 ms', '3 ms', '10 ms', '30 ms', '100 ms', '300 ms',
                          '1 s', '3 s', '10 s', '30 s', '100 s', '300 s',
                          '1 ks', '3 ks', '10 ks', '30 ks']

    def set_time_constant(self, time_constant):
        tc = self.time_constant_list.index(time_constant)
        self.write('OFLT ' + str(tc))
|
dursobr/Pythics
|
pythics/instruments/SRS_SR830.py
|
Python
|
gpl-3.0
| 2,596
|
[
"Brian"
] |
0fc5d6f1693bdfb489f8c826bdb0200272a60be4d8283b6c3a0ecc5c66989d0e
|
"""
Module to set up run time parameters for Clawpack -- classic code.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
#------------------------------
def setrun(claw_pkg='classic'):
#------------------------------
    """
    Define the parameters used for running Clawpack.

    INPUT:
        claw_pkg expected to be "classic" for this setrun.

    OUTPUT:
        rundata - object of class ClawRunData
    """
    from clawpack.clawutil import data

    assert claw_pkg.lower() == 'classic', "Expected claw_pkg = 'classic'"

    num_dim = 1
    rundata = data.ClawRunData(claw_pkg, num_dim)

    #------------------------------------------------------------------
    # Problem-specific parameters to be written to setprob.data:
    #------------------------------------------------------------------
    # none for this problem

    #------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    #------------------------------------------------------------------
    clawdata = rundata.clawdata  # initialized when rundata instantiated

    # ---------------
    # Spatial domain:
    # ---------------

    # Number of space dimensions:
    clawdata.num_dim = num_dim

    # Lower and upper edge of computational domain:
    clawdata.lower[0] = -1.000000e+00          # xlower
    clawdata.upper[0] = 4.000000e+00           # xupper

    # Number of grid cells:
    clawdata.num_cells[0] = 100      # mx

    # ---------------
    # Size of system:
    # ---------------

    # Number of equations in the system:
    clawdata.num_eqn = 1

    # Number of auxiliary variables in the aux array (initialized in setaux)
    clawdata.num_aux = 0

    # Index of aux array corresponding to capacity function, if there is one:
    # (0 means no capacity function)
    clawdata.capa_index = 0

    # -------------
    # Initial time:
    # -------------
    clawdata.t0 = 0.000000

    # Restart from checkpoint file of a previous run?
    # Note: If restarting, you must also change the Makefile to set:
    #   RESTART = True
    # If restarting, t0 above should be from original run, and the
    # restart_file 'fort.qNNNN' specified below should be in
    # the OUTDIR indicated in Makefile.
    clawdata.restart = False               # True to restart from prior results
    clawdata.restart_file = 'fort.q0006'   # File to use for restart data

    # -------------
    # Output times:
    #--------------

    # Specify at what times the results should be written to fort.q files.
    # Note that the time integration stops after the final output time.
    clawdata.output_style = 1

    if clawdata.output_style==1:
        # Output ntimes frames at equally spaced times up to tfinal:
        # Can specify num_output_times = 0 for no output
        clawdata.num_output_times = 20
        clawdata.tfinal = 1.000000
        clawdata.output_t0 = True  # output at initial (or restart) time?

    elif clawdata.output_style == 2:
        # Specify a list or numpy array of output times:
        # Include t0 if you want output at the initial time.
        clawdata.output_times = [0., 0.1]

    elif clawdata.output_style == 3:
        # Output every step_interval timesteps over total_steps timesteps:
        clawdata.output_step_interval = 1
        clawdata.total_steps = 1
        clawdata.output_t0 = True  # output at initial (or restart) time?

    clawdata.output_format = 'ascii'         # 'ascii', 'binary', 'netcdf'

    clawdata.output_q_components = 'all'     # could be list such as [True,True]
    clawdata.output_aux_components = 'none'  # could be list
    clawdata.output_aux_onlyonce = True      # output aux arrays only at t0

    # ---------------------------------------------------
    # Verbosity of messages to screen during integration:
    # ---------------------------------------------------

    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity.  Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = 0

    # --------------
    # Time stepping:
    # --------------

    # if dt_variable==True:  variable time steps used based on cfl_desired,
    # if dt_variable==False: fixed time steps dt = dt_initial always used.
    clawdata.dt_variable = True

    # Initial time step for variable dt.
    # (If dt_variable==0 then dt=dt_initial for all steps)
    clawdata.dt_initial = 1.000000e-01

    # Max time step to be allowed if variable dt used:
    clawdata.dt_max = 1.000000e+99

    # Desired Courant number if variable dt used
    clawdata.cfl_desired = 0.800000
    # max Courant number to allow without retaking step with a smaller dt:
    clawdata.cfl_max = 1.000000

    # Maximum number of time steps to allow between output times:
    clawdata.steps_max = 5000

    # ------------------
    # Method to be used:
    # ------------------

    # Order of accuracy:  1 => Godunov,  2 => Lax-Wendroff plus limiters
    clawdata.order = 2

    # Number of waves in the Riemann solution:
    clawdata.num_waves = 1

    # List of limiters to use for each wave family:
    # Required:  len(limiter) == num_waves
    # Some options:
    #   0 or 'none'     ==> no limiter (Lax-Wendroff)
    #   1 or 'minmod'   ==> minmod
    #   2 or 'superbee' ==> superbee
    #   3 or 'vanleer'  ==> van Leer
    #   4 or 'mc'       ==> MC limiter
    clawdata.limiter = ['mc']

    clawdata.use_fwaves = False    # True ==> use f-wave version of algorithms

    # Source terms splitting:
    #   src_split == 0 or 'none'    ==> no source term (src routine never called)
    #   src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
    #   src_split == 2 or 'strang'  ==> Strang (2nd order) splitting used, not recommended.
    clawdata.source_split = 0

    # --------------------
    # Boundary conditions:
    # --------------------

    # Number of ghost cells (usually 2)
    clawdata.num_ghost = 2

    # Choice of BCs at xlower and xupper:
    #   0 or 'user'     => user specified (must modify bcNamr.f to use this option)
    #   1 or 'extrap'   => extrapolation (non-reflecting outflow)
    #   2 or 'periodic' => periodic (must specify this at both boundaries)
    #   3 or 'wall'     => solid wall for systems where q(2) is normal velocity
    clawdata.bc_lower[0] = 'periodic'   # at xlower
    clawdata.bc_upper[0] = 'periodic'   # at xupper

    return rundata
    # end of function setrun
    # ----------------------
# ----------------------
if __name__ == '__main__':
    # Build the run-time parameters and write every Clawpack data file.
    import sys
    setrun(*sys.argv[1:]).write()
|
amath574w2015/am574-class
|
homeworks/hw3/expflux/setrun.py
|
Python
|
bsd-3-clause
| 6,996
|
[
"NetCDF"
] |
3747982baa1f5432da37b2ff847c3d993c0dc8749ae33f022ce58659003def74
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import incore
from . import outcore
from . import fft
from . import aft
from . import df
from . import mdf
from .df import DF, GDF
from .rsdf import RSDF, RSGDF
from .mdf import MDF
from .aft import AFTDF
from .fft import FFTDF
from pyscf.df.addons import aug_etb
# For backward compatibility
pwdf = aft
PWDF = AFTDF
|
sunqm/pyscf
|
pyscf/pbc/df/__init__.py
|
Python
|
apache-2.0
| 937
|
[
"PySCF"
] |
8602eefa6af2b122e730515557d994c1e977291f738014b4adaea152896e01b2
|
"""
Tests the imports and exports of the Molecule object.
"""
import json
import numpy as np
import pytest
import qcelemental as qcel
from . import portal as ptl
def test_molecule_constructors():
    """Build the same molecules from psimol, numpy, and JSON data and check they agree."""

    ### Water Dimer
    water_psi = ptl.data.get_molecule("water_dimer_minima.psimol")
    ele = np.array([8, 1, 1, 8, 1, 1]).reshape(-1, 1)
    # Geometry converted Bohr -> Angstrom to match the "numpy" constructor's units.
    npwater = np.hstack((ele, water_psi.geometry * qcel.constants.conversion_factor("Bohr", "angstrom")))
    water_from_np = ptl.Molecule.from_data(npwater, name="water dimer", dtype="numpy", frags=[3])
    assert water_psi.compare(water_from_np)
    assert water_psi.get_molecular_formula() == "H4O2"

    # Check the JSON construct/deconstruct
    water_from_json = ptl.Molecule(**water_psi.dict())
    assert water_psi.compare(water_from_json)

    ### Neon Tetramer
    neon_from_psi = ptl.data.get_molecule("neon_tetramer.psimol")
    ele = np.array([10, 10, 10, 10]).reshape(-1, 1)
    npneon = np.hstack((ele, neon_from_psi.geometry))
    neon_from_np = ptl.Molecule.from_data(npneon, name="neon tetramer", dtype="numpy", frags=[1, 2, 3], units="bohr")
    assert neon_from_psi.compare(neon_from_np)

    # Check the JSON construct/deconstruct
    neon_from_json = ptl.Molecule(**neon_from_psi.dict())
    assert neon_from_psi.compare(neon_from_json)
    assert neon_from_json.get_molecular_formula() == "Ne4"

    # Round-trip through the psi4 string format.
    assert water_psi.compare(ptl.Molecule.from_data(water_psi.to_string("psi4")))
def test_water_minima_data():
    """Spot-check fields of the bundled water-dimer minimum geometry."""
    mol = ptl.data.get_molecule("water_dimer_minima.psimol")

    # Symbols, charge/multiplicity, and real-atom flags all match the 6-atom dimer.
    assert sum(x == y for x, y in zip(mol.symbols, ["O", "H", "H", "O", "H", "H"])) == mol.geometry.shape[0]
    assert mol.molecular_charge == 0
    assert mol.molecular_multiplicity == 1
    assert np.sum(mol.real) == mol.geometry.shape[0]
    assert np.allclose(mol.fragments, [[0, 1, 2], [3, 4, 5]])
    assert np.allclose(mol.fragment_charges, [0, 0])
    assert np.allclose(mol.fragment_multiplicities, [1, 1])
    assert hasattr(mol, "provenance")
    assert np.allclose(
        mol.geometry,
        [
            [2.81211080, 0.1255717, 0.0],
            [3.48216664, -1.55439981, 0.0],
            [1.00578203, -0.1092573, 0.0],
            [-2.6821528, -0.12325075, 0.0],
            [-3.27523824, 0.81341093, 1.43347255],
            [-3.27523824, 0.81341093, -1.43347255],
        ],
    )  # yapf: disable
    # Pinned content hash guards against accidental changes to the fixture.
    assert mol.get_hash() == "3c4b98f515d64d1adc1648fe1fe1d6789e978d34"
def test_water_minima_fragment():
    """get_fragment() should carve out monomers (pinned hashes) and preserve
    symbols/masses, including reordered two-fragment combinations."""
    mol = ptl.data.get_molecule("water_dimer_minima.psimol")

    frag_0 = mol.get_fragment(0, orient=True)
    frag_1 = mol.get_fragment(1, orient=True)
    assert frag_0.get_hash() == "5f31757232a9a594c46073082534ca8a6806d367"
    assert frag_1.get_hash() == "bdc1f75bd1b7b999ff24783d7c1673452b91beb9"

    frag_0_1 = mol.get_fragment(0, 1)
    frag_1_0 = mol.get_fragment(1, 0)

    # Single fragment keeps the first monomer's atoms.
    assert np.array_equal(mol.symbols[:3], frag_0.symbols)
    assert np.allclose(mol.masses[:3], frag_0.masses)
    # (0, 1) keeps the original atom order...
    assert np.array_equal(mol.symbols, frag_0_1.symbols)
    assert np.allclose(mol.geometry, frag_0_1.geometry)
    # ...while (1, 0) swaps the two monomers.
    assert np.array_equal(np.hstack((mol.symbols[3:], mol.symbols[:3])), frag_1_0.symbols)
    assert np.allclose(np.hstack((mol.masses[3:], mol.masses[:3])), frag_1_0.masses)
def test_pretty_print():
    """pretty_print() must produce a plain string."""
    molecule = ptl.data.get_molecule("water_dimer_minima.psimol")
    assert isinstance(molecule.pretty_print(), str)
def test_to_string():
    """to_string("psi4") must produce a plain string."""
    molecule = ptl.data.get_molecule("water_dimer_minima.psimol")
    assert isinstance(molecule.to_string("psi4"), str)
def test_water_orient():
    """Orientation should canonicalize identical monomers; ghost fragments
    must keep distinct complexes distinct."""

    # These are identical molecules, should find the correct results
    mol = ptl.data.get_molecule("water_dimer_stretch.psimol")
    frag_0 = mol.get_fragment(0, orient=True)
    frag_1 = mol.get_fragment(1, orient=True)

    # Make sure the fragments match
    assert frag_0.get_hash() == frag_1.get_hash()

    # Make sure the complexes match
    frag_0_1 = mol.get_fragment(0, 1, orient=True, group_fragments=True)
    frag_1_0 = mol.get_fragment(1, 0, orient=True, group_fragments=True)
    assert frag_0_1.get_hash() == frag_1_0.get_hash()

    mol = ptl.data.get_molecule("water_dimer_stretch2.psimol")
    frag_0 = mol.get_fragment(0, orient=True)
    frag_1 = mol.get_fragment(1, orient=True)

    # Make sure the fragments match
    assert frag_0.molecular_multiplicity == 1
    assert frag_0.get_hash() == frag_1.get_hash()

    # Make sure the complexes match
    frag_0_1 = mol.get_fragment(0, 1, orient=True, group_fragments=True)
    frag_1_0 = mol.get_fragment(1, 0, orient=True, group_fragments=True)

    # Ghost fragments should prevent overlap
    assert frag_0_1.molecular_multiplicity == 1
    assert frag_0_1.get_hash() != frag_1_0.get_hash()
def test_molecule_errors():
    """An unrecognized field in the payload must raise ValueError."""
    payload = ptl.data.get_molecule("water_dimer_stretch.psimol").dict()
    payload["whatever"] = 5
    with pytest.raises(ValueError):
        ptl.Molecule(**payload)
def test_molecule_repeated_hashing():
    """JSON round-trips (without re-orientation) must not change the hash."""
    mol = ptl.Molecule(
        **{
            "symbols": ["H", "O", "O", "H"],
            "geometry": [
                1.73178198,
                1.29095807,
                1.03716028,
                1.31566305,
                -0.007440200000000001,
                -0.28074722,
                -1.3143081,
                0.00849608,
                -0.27416914,
                -1.7241109,
                -1.30793432,
                1.02770172,
            ],
        }
    )  # yapf: disable

    h1 = mol.get_hash()
    assert mol.get_molecular_formula() == "H2O2"

    # Two successive serialize/deserialize cycles; orient=False keeps geometry fixed.
    mol2 = ptl.Molecule(**json.loads(mol.json()), orient=False)
    assert h1 == mol2.get_hash()

    mol3 = ptl.Molecule(**json.loads(mol2.json()), orient=False)
    assert h1 == mol3.get_hash()
|
psi4/DatenQM
|
qcfractal/interface/tests/test_molecule.py
|
Python
|
bsd-3-clause
| 5,746
|
[
"Psi4"
] |
2c466d30ef1391e2a2bfad845ef2b0511ba13e1bb058e908759a4d22d96c8d93
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = '0.6.4'
from octopus.core import Octopus, TimeoutError, ResponseError # NOQA
from octopus.tornado_core import TornadoOctopus # NOQA
|
heynemann/octopus
|
octopus/__init__.py
|
Python
|
mit
| 196
|
[
"Octopus"
] |
8f68dad7cb68891ea0d7fdf509123c6681982427e060080ef59e62a26982a460
|
# this script starts a new AIMS calculation. Ethylene, SA2-CASSCF(2/2).
import numpy as np
import pyspawn
import pyspawn.general
import pyspawn.process_geometry as pg
import pyspawn.dictionaries as dicts
import sys
# terachemserver port: required first command-line argument.
# Fix: the original indexed sys.argv[1] unconditionally (IndexError when the
# argument was missing) and left the usage message as a bare, unprinted
# string expression.
if len(sys.argv) > 1:
    port = int(sys.argv[1])
else:
    print("Please provide a port number as a command line argument")
    sys.exit(1)
# Processing geometry.xyz file (positions are in hessian.hdf5 so we don't need them)
natoms, atoms, _, comment = pg.process_geometry('geometry.xyz')

# Getting atomic masses from the dictionary and converting to atomic units
# If specific isotopes are needed masses array can be set manually
mass_dict = dicts.get_atomic_masses()
# One entry per Cartesian coordinate (3 per atom); 1822.0 converts amu -> a.u.
masses = np.asarray([mass_dict[atom]*1822.0 for atom in atoms for i in range(3)])
# Frozen-Gaussian widths per element, also repeated for x, y, z of each atom.
widths_dict = {'C': 30.0, 'H': 6.0, 'N': 22.0}
widths = np.asarray([widths_dict[atom] for atom in atoms for i in range(3)])

# finite wigner temperature (0 here => zero-temperature Wigner sampling)
wigner_temp = 0

# random number seed
seed=87062

# Velocity Verlet classical propagator
clas_prop = "vv"

# adapative 2nd-order Runge-Kutta quantum propagator
qm_prop = "fulldiag"

# adiabtic NPI quantum Hamiltonian
qm_ham = "adiabatic"

# use TeraChem CASSCF or CASCI to compute potentials
potential = "terachem_cas"

# initial time
t0 = 0.0

# time step
ts = 10.0

# final simulation time
tfinal = 5000.0

# number of dimensions (3 Cartesian coordinates per atom)
numdims = natoms*3

# number of electronic states
numstates = 2

# TeraChem job options
tc_options = {
    "method": 'hf',
    "basis": '6-31g',
    "atoms": atoms,
    "charge": 0,
    "spinmult": 1,
    "closed_shell": True,
    "restricted": True,
    "precision": "double",
    "threall": 1.0e-20,
    "convthre": 1.0e-08,
    "casscf": "yes",
    "closed": 7,
    "active": 2,
    "cassinglets": numstates,
    "castargetmult": 1,
    "cas_energy_states": [0, 1],
    "cas_energy_mults": [1, 1],
}

# trajectory parameters
traj_params = {
    # terachem port
    "tc_port": port,
    # initial time
    "time": t0,
    # time step
    "timestep": ts,
    # final simulation time
    "maxtime": tfinal,
    # coupling threshhold
    "spawnthresh": (0.5 * np.pi) / ts / 20.0,
    # initial electronic state (indexed such that 0 is the ground state)
    "istate": 1,
    # Gaussian widths
    "widths": widths,
    # atom labels
    "atoms": tc_options["atoms"],
    # nuclear masses (in a.u)
    "masses": masses,
    # terachem options (above)
    "tc_options": tc_options
}

sim_params = {
    # initial time
    "quantum_time": traj_params["time"],
    # time step
    "timestep": traj_params["timestep"],
    # final simulation time
    "max_quantum_time": traj_params["maxtime"],
    # initial qm amplitudes
    "qm_amplitudes": np.ones(1,dtype=np.complex128),
    # energy shift used in quantum propagation
    "qm_energy_shift": 77.6,
}

# import routines needed for propagation
# HACK: exec builds the attribute path from the method-name strings above;
# trusted module-local input only, but a getattr-based lookup would be safer.
exec("pyspawn.import_methods.into_simulation(pyspawn.qm_integrator." + qm_prop + ")")
exec("pyspawn.import_methods.into_simulation(pyspawn.qm_hamiltonian." + qm_ham + ")")
exec("pyspawn.import_methods.into_traj(pyspawn.potential." + potential + ")")
exec("pyspawn.import_methods.into_traj(pyspawn.classical_integrator." + clas_prop + ")")

# check for the existence of files from a past run
pyspawn.general.check_files()

# set up first trajectory
traj1 = pyspawn.traj(numdims, numstates)
traj1.set_numstates(numstates)
traj1.set_numdims(numdims)
traj1.set_parameters(traj_params)

# sample initial position and momentum from Wigner distribution (requires hessian.hdf5)
traj1.initial_wigner(seed)

# set up simulation
sim = pyspawn.simulation()
sim.add_traj(traj1)
sim.set_parameters(sim_params)

# begin propagation
sim.propagate()
|
blevine37/pySpawn17
|
examples/ethylene_sacasscf/start_c2h4.py
|
Python
|
mit
| 4,064
|
[
"Gaussian",
"TeraChem"
] |
dce61c0a98063e5ab3a781a735cb36bf8674ef0eecf0282b1512c46b09b2c3ae
|
from geckopush import geckopush
import unittest
import json
# Declare initial dashboard widget and test values.
d = geckopush.Dashboard(api_key="api-key-123")
widget_key = "Widget-Key"
# Monkey patch urlopen method to append the url and headers to return dict.
def dummy_urlopen(url):
    """Offline stand-in for urllib.request.urlopen.

    Instead of hitting the network, echo the request's URL and headers back
    inside a JSON body so tests can inspect what would have been sent.
    """
    payload = json.dumps({
        'success': True,
        'url': url.full_url,
        'headers': url.headers,
    })

    # Widget.push() expects the object urlopen() returns, i.e. something
    # exposing a read() method that yields bytes.
    class Response(object):
        @staticmethod
        def read():
            return payload.encode('utf-8')

    return Response
# Monkey patch: route geckopush's HTTP calls through the offline dummy_urlopen
# stub so no test in this module touches the network.
geckopush.urllib.request.urlopen = dummy_urlopen
class Base(object):
    """
    Base class which sets up the payload and push tests.

    Concrete test classes mix this in with unittest.TestCase and must assign
    both self.widget and self.payload in their own setUp().
    """
    def setUp(self):
        # Overridden by every concrete test class.
        self.payload = None
        self.widget = None

    def testPayload(self):
        """
        Testing the payload
        """
        generated = self.widget.get_payload()
        self.assertEqual(generated, self.payload, "Testing payload structure")

    def testPush(self):
        """
        Testing push functionality
        """
        # push() goes through the monkey-patched urlopen, which echoes the
        # request's URL and headers back for inspection here.
        push_result = self.widget.push()
        url = push_result['url']
        headers = push_result['headers']
        self.assertEqual(
            url,
            'https://push.geckoboard.com/v1/send/Widget-Key',
            "Testing URL structure"
        )
        self.assertEqual(
            headers,
            {'Content-type': 'application/json'}
        )
class TestBarChart(unittest.TestCase, Base):
    """Bar chart widget: one data series plus axis label/format options."""
    def setUp(self):
        self.widget = geckopush.BarChart(dashboard=d, widget_key=widget_key)
        self.widget.add_data(data=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
        self.widget.x_axis_labels = ["one", "two", "three", "four", "five",
                                     "six", "seven", "eight", "nine", "ten"]
        self.widget.x_axis_type = "standard"
        self.widget.y_axis_format = "decimal"
        self.widget.y_axis_unit = "USD"
        # Expected JSON body for the widget configured above.
        self.payload = {'data': {'series': [{'data': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}], 'x_axis': {'labels': ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten'], 'type': 'standard'}, 'y_axis': {'unit': 'USD', 'format': 'decimal'}}, 'api_key': 'api-key-123'}
class TestBulletGraph(unittest.TestCase, Base):
    """Bullet graph widget: one item from the constructor plus one via add_data()."""
    def setUp(self):
        # Expected JSON body for the two bullet-graph items configured below.
        self.payload = {'data': {'item': [{'sublabel': 'A test Bullet graph', 'comparative': {'point': '200'}, 'range': {'red': {'start': 0, 'end': 100}, 'green': {'start': 601, 'end': 1000}, 'amber': {'start': 101, 'end': 600}}, 'axis': {'point': ['0', '200', '400', '600', '800', '1000']}, 'measure': {'current': {'start': '0', 'end': '500'}, 'projected': {'start': '100', 'end': '900'}}, 'label': 'Test Bullet Graph'}, {'sublabel': 'womp womp womp', 'comparative': {'point': '100'}, 'range': {'red': {'start': 0, 'end': 200}, 'green': {'start': 301, 'end': 1000}, 'amber': {'start': 201, 'end': 300}}, 'axis': {'point': ['0', '200', '400', '600', '800', '1000']}, 'measure': {'current': {'start': '0', 'end': '800'}, 'projected': {'start': '600', 'end': '900'}}, 'label': 'Second Bullet Graph'}], 'orientation': None}, 'api_key': 'api-key-123'}
        self.widget = geckopush.BulletGraph(dashboard=d,
                                            widget_key=widget_key,
                                            label='Test Bullet Graph',
                                            axis=["0", "200", "400", "600", "800", "1000"],
                                            comparative="200",
                                            measure_start="0",
                                            measure_end="500",
                                            red_start=0,
                                            red_end=100,
                                            amber_start=101,
                                            amber_end=600,
                                            green_start=601,
                                            green_end=1000,
                                            sublabel="A test Bullet graph",
                                            projected_start='100',
                                            projected_end='900',
                                            )
        self.widget.add_data(label='Second Bullet Graph',
                             axis=["0", "200", "400", "600", "800", "1000"],
                             comparative="100",
                             measure_start="0",
                             measure_end="800",
                             red_start=0,
                             red_end=200,
                             amber_start=201,
                             amber_end=300,
                             green_start=301,
                             green_end=1000,
                             sublabel="womp womp womp",
                             projected_start='600',
                             projected_end='900'
                             )
class TestFunnel(unittest.TestCase, Base):
    """Funnel widget: ordered (value, label) pairs."""
    def setUp(self):
        self.widget = geckopush.Funnel(dashboard=d, widget_key=widget_key)
        self.widget.add_data(100, "one hundred")
        self.widget.add_data(200, "two hundred")
        self.widget.add_data(300, "three hundred")
        self.widget.add_data(400, "four hundred")
        self.widget.add_data(500, "five hundred")
        self.widget.add_data(600, "six hundred")
        self.widget.add_data(700, "seven hundred")
        self.widget.add_data(800, "eight hundred")
        # Expected JSON body: items serialized in insertion order.
        self.payload = {'data': {'item': [{'value': 100, 'label': 'one hundred'}, {'value': 200, 'label': 'two hundred'}, {'value': 300, 'label': 'three hundred'}, {'value': 400, 'label': 'four hundred'}, {'value': 500, 'label': 'five hundred'}, {'value': 600, 'label': 'six hundred'}, {'value': 700, 'label': 'seven hundred'}, {'value': 800, 'label': 'eight hundred'}]}, 'api_key': 'api-key-123'}
class TestGecko(unittest.TestCase, Base):
    """Geck-o-Meter widget: single value with min/max bounds."""
    def setUp(self):
        self.payload = {'data': {'max': {'value': 50}, 'item': 26, 'min': {'value': 0}}, 'api_key': 'api-key-123'}
        self.widget = geckopush.GeckoMeter(
            dashboard=d,
            widget_key=widget_key,
            item=26,
            min_value=0,
            max_value=50
        )
class TestHighchart(unittest.TestCase, Base):
    """Highcharts widget: the raw chart-config string is passed through verbatim."""
    def setUp(self):
        self.payload = {'data': {'highchart': '{chart:{style: {color: "#b9bbbb"},renderTo:"container",backgroundColor:"transparent",lineColor:"rgba(35,37,38,100)",plotShadow: false,},credits:{enabled:false},title:{style: {color: "#b9bbbb"},text:"Monthly Average Temperature"},xAxis:{categories:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]},yAxis:{title:{style: {color: "#b9bbbb"}, text:"Temperature"}},legend:{itemStyle: {color: "#b9bbbb"}, layout:"vertical",align:"right",verticalAlign:"middle",borderWidth:0},series:[{color:"#108ec5",name:"NewYork",data:[17.0,22.0,24.8,24.1,20.1,14.1,8.6,2.5]},{color:"#52b238",name:"Berlin",data:[13.5,17.0,18.6,17.9,14.3,9.0,3.9,1.0]},{color:"#ee5728",name:"London",data:[11.9,15.2,17.0,16.6,14.2,10.3,6.6,4.8]}]}'}, 'api_key': 'api-key-123'}
        highchart_str = "{chart:{style: {color: \"#b9bbbb\"},renderTo:\"container\",backgroundColor:\"transparent\",lineColor:\"rgba(35,37,38,100)\",plotShadow: false,},credits:{enabled:false},title:{style: {color: \"#b9bbbb\"},text:\"Monthly Average Temperature\"},xAxis:{categories:[\"Jan\",\"Feb\",\"Mar\",\"Apr\",\"May\",\"Jun\",\"Jul\",\"Aug\",\"Sep\",\"Oct\",\"Nov\",\"Dec\"]},yAxis:{title:{style: {color: \"#b9bbbb\"}, text:\"Temperature\"}},legend:{itemStyle: {color: \"#b9bbbb\"}, layout:\"vertical\",align:\"right\",verticalAlign:\"middle\",borderWidth:0},series:[{color:\"#108ec5\",name:\"NewYork\",data:[17.0,22.0,24.8,24.1,20.1,14.1,8.6,2.5]},{color:\"#52b238\",name:\"Berlin\",data:[13.5,17.0,18.6,17.9,14.3,9.0,3.9,1.0]},{color:\"#ee5728\",name:\"London\",data:[11.9,15.2,17.0,16.6,14.2,10.3,6.6,4.8]}]}"
        self.widget = geckopush.HighCharts(dashboard=d,
                                           widget_key=widget_key,
                                           highchart=highchart_str)
class TestLeaderBoard(unittest.TestCase, Base):
    """Leaderboard widget: (label, value, previous_rank) entries."""
    def setUp(self):
        self.payload = {'data': {'items': [{'previous_rank': 200, 'value': 100, 'label': 'Jack'}, {'previous_rank': 50, 'value': 50, 'label': 'Bob'}, {'previous_rank': 20, 'value': 100, 'label': 'Renaldo'}, {'previous_rank': 0, 'value': 0, 'label': 'Barney'}, {'previous_rank': 4, 'value': 96, 'label': 'Farnsworth'}]}, 'api_key': 'api-key-123'}
        self.widget = geckopush.Leaderboard(dashboard=d, widget_key=widget_key)
        self.widget.add_data("Jack", 100, 200)
        self.widget.add_data("Bob", 50, 50)
        self.widget.add_data("Renaldo", 100, 20)
        self.widget.add_data("Barney", 0, 0)
        self.widget.add_data("Farnsworth", 96, 4)
class TestLineChart(unittest.TestCase, Base):
    """Line chart widget: named series plus axis options set via add()."""
    def setUp(self):
        self.payload = {'data': {'x_axis': {'type': 'datetime', 'labels': ['2015-10-01', '2015-10-02', '2015-10-03', '2015-10-04', '2015-10-06']}, 'series': [{'data': [400, 500, 900, 900, 1000], 'name': 'One'}, {'data': [1000, 900, 800, 200, 100], 'name': 'Two'}], 'y_axis': {'format': 'currency', 'unit': 'USD'}}, 'api_key': 'api-key-123'}
        self.widget = geckopush.LineChart(dashboard=d,
                                          widget_key=widget_key)
        self.widget.add_data(name="One", data=[400, 500, 900, 900, 1000])
        self.widget.add_data(name="Two", data=[1000, 900, 800, 200, 100])
        self.widget.add(x_axis_labels=["2015-10-01", "2015-10-02", "2015-10-03", "2015-10-04", "2015-10-06"])
        self.widget.add(x_axis_type="datetime")
        self.widget.add(y_axis_format="currency")
        self.widget.add(y_axis_unit="USD")
class TestList(unittest.TestCase, Base):
    """List widget: titled entries with colored labels and descriptions."""
    def setUp(self):
        self.payload = {'data': [{'title': {'text': '12345'}, 'description': 'These are numbers', 'label': {'color': '#ff2015', 'name': 'numbers'}}, {'title': {'text': 'abcde'}, 'description': 'These are letters', 'label': {'color': '#ffffff', 'name': 'letters'}}], 'api_key': 'api-key-123'}
        self.widget = geckopush.List(dashboard=d,
                                     widget_key=widget_key)
        self.widget.add_data(text="12345", name="numbers",
                             color="#ff2015", description="These are numbers")
        self.widget.add_data(text="abcde", name="letters", color= "#ffffff", description="These are letters")
class TestMap(unittest.TestCase, Base):
    """Map widget: points addressable by city, host, IP, or lat/long."""
    def setUp(self):
        self.payload = {'data': {'points': {'point': [{'city': {'country_code': 'US', 'city_name': 'New York'}, 'size': '10'}, {'host': 'google.com'}, {'ip': '46.228.47.115'}, {'latitude': 22.434355, 'longitude': 11.12345, 'color': '#ffffff', 'size': 5}]}}, 'api_key': 'api-key-123'}
        self.widget = geckopush.Map(dashboard=d, widget_key=widget_key)
        self.widget.add_data(city_name="New York", country_code="US", size="10")
        self.widget.add_data(host="google.com")
        self.widget.add_data(ip="46.228.47.115")
        self.widget.add_data(latitude=22.434355, longitude=11.12345, size=5, color="#ffffff")
class TestMonitoring(unittest.TestCase, Base):
    """Monitoring widget: status / downtime / response-time fields."""
    def setUp(self):
        self.payload = {'data': {'responseTime': '123 ms', 'status': 'up', 'downTime': 'Never'}, 'api_key': 'api-key-123'}
        self.widget = geckopush.Monitoring(dashboard=d, widget_key=widget_key)
        self.widget.add_data(status="up", downtime="Never", responsetime= "123 ms")
class TestPieChart(unittest.TestCase, Base):
    """Pie chart widget: (value, label, color) slices."""
    def setUp(self):
        self.payload = {'data': {'item': [{'color': '13699c', 'value': 100, 'label': 'Slice 1'}, {'color': '198acd', 'value': 200, 'label': 'Slice 2'}]}, 'api_key': 'api-key-123'}
        self.widget = geckopush.PieChart(dashboard=d, widget_key=widget_key)
        self.widget.add_data(100, "Slice 1", "13699c")
        self.widget.add_data(200, "Slice 2", "198acd")
class TestNumberAndSecondary1(unittest.TestCase, Base):
    """Number widget: primary value plus a secondary scalar value."""
    def setUp(self):
        self.payload = {'data': {'item': [{'value': 15}, {'value': 25}]}, 'api_key': 'api-key-123'}
        self.widget = geckopush.NumberAndSecondaryStat(dashboard=d, widget_key=widget_key)
        self.widget.add_data(primary_value=15, secondary_value=25)
class TestNumberAndSecondary2(unittest.TestCase, Base):
    """Number widget: primary value plus a text caption."""
    def setUp(self):
        self.payload = {'data': {'item': [{'text': 'Hola Amigo', 'value': 15}]}, 'api_key': 'api-key-123'}
        self.widget = geckopush.NumberAndSecondaryStat(dashboard=d, widget_key=widget_key)
        self.widget.add_data(primary_value=15, text="Hola Amigo")
class TestNumberAndSecondary3(unittest.TestCase, Base):
    """Number widget: primary value plus a trendline list as the secondary item."""
    def setUp(self):
        self.payload = {'data': {'item': [{'value': 15}, [12345, 12345, 15555, 12345, 12322]]}, 'api_key': 'api-key-123'}
        self.widget = geckopush.NumberAndSecondaryStat(dashboard=d, widget_key=widget_key)
        self.widget.add_data(primary_value=15, secondary_value=[12345, 12345, 15555, 12345, 12322])
class TestNumberAndSecondary4(unittest.TestCase, Base):
    """Number widget: primary value only."""
    def setUp(self):
        self.payload = {'data': {'item': [{'value': 15}]}, 'api_key': 'api-key-123'}
        self.widget = geckopush.NumberAndSecondaryStat(dashboard=d, widget_key=widget_key)
        self.widget.add_data(primary_value=15)
class TestRAG(unittest.TestCase, Base):
    """RAG widget: red/amber/green entries.

    NOTE(review): the expected payload omits the 'color' field and lists the
    items red-first -- presumably geckopush uses color only to order the
    items; confirm against the library before relying on this.
    """
    def setUp(self):
        self.payload = {'data': {'item': [{'prefix': '$', 'value': 150, 'text': 'Three'}, {'prefix': '$', 'value': 100, 'text': 'Two'}, {'prefix': '$', 'value': 50, 'text': 'One'}]}, 'api_key': 'api-key-123'}
        self.widget = geckopush.RAG(dashboard=d, widget_key=widget_key)
        self.widget.add_data(text="One", value=50, prefix="$", color="green")
        self.widget.add_data(text="Two", value=100, prefix="$", color="amber")
        self.widget.add_data(text="Three", value=150, prefix="$", color="red")
class TestText(unittest.TestCase, Base):
    """Text widget: plain messages.

    NOTE(review): the expected payload has 'type': None even though type=0/1
    is passed to add_data -- looks like the library ignores that keyword;
    verify against geckopush.Text.
    """
    def setUp(self):
        self.payload = {'data': {'item': [{'text': 'Hello There My Friend', 'type': None}, {'text': 'How are you doing?', 'type': None}]}, 'api_key': 'api-key-123'}
        self.widget = geckopush.Text(dashboard=d, widget_key=widget_key)
        self.widget.add_data(text="Hello There My Friend", type=0)
        self.widget.add_data(text="How are you doing?", type=1)
if __name__ == '__main__':
    # Discover and run every TestCase defined in this module.
    unittest.main()
|
patleeman/geckopush
|
tests/unit_test.py
|
Python
|
mit
| 14,360
|
[
"Amber"
] |
56eacea06e046ec9e63ee8901782cd124f21ad2b8942a4a86b0df3507c9cf8d1
|
"""
pybsm - python library for the BSM-SG pysical model.
"""
# Full release number as a (major, minor, patch) tuple.
RELEASE = (0, 1, 0)
# Short version: just (major, minor).
VERSION = RELEASE[0:2]
VERSION_STR = '.'.join((str(x) for x in VERSION))
# Dotted release string, e.g. "0.1.0"; RELEASE_STR and __version__ are aliases.
__version__ = RELEASE_STR = '.'.join((str(x) for x in RELEASE))
# __all__ = ['galaxy', 'particles']
|
supergravity-org/pybsm
|
pybsm/__init__.py
|
Python
|
gpl-3.0
| 260
|
[
"Galaxy"
] |
81fd79a18a7fa31be54126eecd46878e7cbdc5d8f7b736fd0a9bc11d0efbc933
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyCfgrib(PythonPackage):
    """Python interface to map GRIB files to the NetCDF Common Data Model
    following the CF Convention using ecCodes."""

    homepage = "https://github.com/ecmwf/cfgrib"
    pypi = "cfgrib/cfgrib-0.9.8.5.tar.gz"

    version('0.9.9.0', sha256='6ff0227df9c5ee34aa7d6ab1f7af3fbe6838523a8a9891c74040b419b03ad289')
    version('0.9.8.5', sha256='07c224d7ac823a1df5738b96b9d3621515538f51f67e55044f9cc8ec1668e1bd')

    # Warning: can create infinite dependency loop with xarray+io ^cfgrib+xarray
    variant('xarray', default=False, description='Add xarray support')

    depends_on('python@3.5:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    # Only the older release still uses pytest-runner / cffi at build time.
    depends_on('py-pytest-runner', when='@0.9.8.5', type='build')
    depends_on('py-attrs@19.2:', type=('build', 'run'))
    depends_on('py-cffi', when='@0.9.8.5', type=('build', 'run'))
    depends_on('py-click', type=('build', 'run'))
    depends_on('py-eccodes', type=('build', 'run'))
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-xarray@0.12.0:', when='+xarray', type=('build', 'run'))

    @property
    def import_modules(self):
        # Modules whose importability is checked after install; the cf2cdm
        # compatibility layer only exists when xarray support is enabled.
        modules = ['cfgrib']
        if '+xarray' in self.spec:
            modules.append('cf2cdm')
        return modules
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-cfgrib/package.py
|
Python
|
lgpl-2.1
| 1,507
|
[
"NetCDF"
] |
36c15334aa1dd909c14aa43386d3edfacd46635cee262e5700db38625f3dd54e
|
######## What the script does:
######## provides preprocessing functions for medical text
#########################
import re
import string
import pickle
printable = set(string.printable)
def sanitize(txt):
return filter(lambda c: c in printable, txt)
try:
stopwords=pickle.load(open('Stopwords.pk','rb'))
except:
stopwords = []
abrev=[
('\.\.\.','.'),
('\+',' + '),
('\-',' - '),
('phi_phi_phi\[\*\*.{0,50}\*\*\]phi_phi_phi',''),
('/','/'),
('\. ',' . '),
(', ',' , '),
('; ',' ; '),
('/',' / '),
(' +',' ')
]
def process(line):
res = line
for (a,b) in abrev:
res=re.sub(a,b,res)
return res
table = string.maketrans("","")
##Remove puntuation, transforms numbers (if not removed in process)
def processbis(line):
res=line
res = res.translate(table, string.punctuation)
res=re.sub('0',' zero ',res)
res=re.sub('1',' one ',res)
res=re.sub('2',' two ',res)
res=re.sub('3',' three ',res)
res=re.sub('4',' four ',res)
res=re.sub('5',' five ',res)
res=re.sub('6',' six ',res)
res=re.sub('7',' seven ',res)
res=re.sub('8',' eight ',res)
res=re.sub('9',' nine ',res)
res=re.sub(' +',' ',res).strip()
return res
fullstops=['.',';', '[', '-']
midstops=['+','but','and','pt','except','reports','alert','complains','has','states','secondary','per','did','aox3']
negwords=['no','not','denies','without','non','unable']
shortnegwords = ['-']
keywords = fullstops + midstops + negwords + shortnegwords
## returns list of scopes and annotated sentence.
#Exple: Patient presents no sign of fever but complains of headaches
#Returns: [(2,5)], Patient presents no negxxxsignxxxneg negxxxofxxxneg negxxxfeverxxxneg but complains of headaches
def annotate(x):
y=x.split()
z=''
flag=0
res=[]
for i in range(len(y)):
wrd=y[i]
if (wrd in fullstops or wrd in midstops) and flag==1:
flag=0
res+=[(a,i-1)]
elif flag==1 and not wrd in negwords:
y[i]='negxxx'+wrd+'xxxneg'
if flag == 2:
flag = 0
if wrd in negwords:
flag=1
a=i
if wrd in shortnegwords: #short negwords only last for one word
flag = 2
a = i
return res,string.join(y)
try:
stopwords=pickle.load(open('Stopwords.pk','rb'))
except:
stopwords = []
try:
bigramlist=pickle.load(open('Bigrams.pk','rb'))
bigramlist = filter(lambda b: not any([k+' ' in b or ' '+k in b for k in keywords]), bigramlist)
except:
bigramlist = []
def bigrammed(sen):
sent=' '+sen.lower()+' '
senlist=sen.split()
stop=set(filter(lambda x:x in sent,stopwords))
for w in stop:
sent=re.sub(' '+w+' ',' ',sent)
sent=re.sub(' +',' ',sent).strip()
i=0
res=''
big=set(filter(lambda x:x in sent,bigramlist))
while i<len(senlist):
if i<len(senlist)-1 and senlist[i]+' '+senlist[i+1] in big:
res+= 'bigram_'+senlist[i]+'_'+senlist[i+1]+' '
i+=2
elif i<len(senlist)-2 and senlist[i+1] in stop and senlist[i]+' '+senlist[i+2] in big:
res+='bigram_'+senlist[i]+'_'+senlist[i+1]+'_'+senlist[i+2]+' '
i+=3
else:
res+=senlist[i]+' '
i+=1
return res.strip()
def parse_text(orig_txt, prefix):
try:
orig_txt = re.sub('PHI_PHI_PHI.*?PHI_PHI_PHI', '',orig_txt)
except:
print 'cannot parse orig text', orig_txt
sys.exit()
orig_txt = sanitize(orig_txt)
orig_txt = re.sub('(['+re.escape(string.punctuation)+'])', ' \g<1> ', orig_txt)
txt = orig_txt.lower()
txt = process(txt)
txt = bigrammed(txt)
_, txt = annotate(txt)
txt = txt.split()
orig_txt = orig_txt.split()
tokens = []
for w in txt:
if 'bigram_' in w:
w = w.replace('bigram_', '')
if not '_' in w:
tokens.append((w.replace('negxxx', '').replace('xxxneg','') , [prefix+w]))
else:
for part in w.split('_'):
tokens.append((part.replace('negxxx', '').replace('xxxneg','') , [prefix+w.replace('_', ' '), prefix+part]))
return [{'disp':t[0], 'repr':t[1]} for t in tokens]
def readVisit(f):
visit_str = ""
index = None
l = f.readline()
if l == "":
return None
assert "<visit>" in l
visit_str += l
while not "</visit>" in l:
l = f.readline()
visit_str += l
if l=="":
assert 0, "misformed file!"
return visit_str
tagname_pat = re.compile('<([a-zA-Z0-9]+)')
def extract_tagval(l):
return l.split('>')[1].split('<')[0]
def extract_tagname(l):
empty = False
key = None
if '/>' in l:
empty = True
match_obj = tagname_pat.search(l)
if match_obj:
key = match_obj.groups(1)[0]
endtag_pat = "</"+str(key)
return key, endtag_pat, empty
def shallow_parse_XML(incoming_text):
data = {}
val = ""
key = None
empty = False
for l in incoming_text.split('\n')[1:-1]:
if (key==None):
key,endtag_pat,empty = extract_tagname(l)
val += l+'\n'
if empty or re.search(endtag_pat, l):
data[key] = val.strip('\n')
key = None
val = ""
# print data
return data
|
yhalpern/anchorExplorer
|
Parsing.py
|
Python
|
bsd-2-clause
| 5,309
|
[
"VisIt"
] |
ed0068b19bde9cd7b83157e923935f52ac8949c4581c1eb1db270081d056d538
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
import py
import pytest
from _pytest.config import PytestPluginManager
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.main import EXIT_OK
from _pytest.main import EXIT_USAGEERROR
@pytest.fixture(scope="module", params=["global", "inpackage"])
def basedir(request, tmpdir_factory):
tmpdir = tmpdir_factory.mktemp("basedir", numbered=True)
tmpdir.ensure("adir/conftest.py").write("a=1 ; Directory = 3")
tmpdir.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5")
if request.param == "inpackage":
tmpdir.ensure("adir/__init__.py")
tmpdir.ensure("adir/b/__init__.py")
return tmpdir
def ConftestWithSetinitial(path):
conftest = PytestPluginManager()
conftest_setinitial(conftest, [path])
return conftest
def conftest_setinitial(conftest, args, confcutdir=None):
class Namespace(object):
def __init__(self):
self.file_or_dir = args
self.confcutdir = str(confcutdir)
self.noconftest = False
self.pyargs = False
conftest._set_initial_conftests(Namespace())
class TestConftestValueAccessGlobal(object):
def test_basic_init(self, basedir):
conftest = PytestPluginManager()
p = basedir.join("adir")
assert conftest._rget_with_confmod("a", p)[1] == 1
def test_immediate_initialiation_and_incremental_are_the_same(self, basedir):
conftest = PytestPluginManager()
len(conftest._path2confmods)
conftest._getconftestmodules(basedir)
snap1 = len(conftest._path2confmods)
# assert len(conftest._path2confmods) == snap1 + 1
conftest._getconftestmodules(basedir.join("adir"))
assert len(conftest._path2confmods) == snap1 + 1
conftest._getconftestmodules(basedir.join("b"))
assert len(conftest._path2confmods) == snap1 + 2
def test_value_access_not_existing(self, basedir):
conftest = ConftestWithSetinitial(basedir)
with pytest.raises(KeyError):
conftest._rget_with_confmod("a", basedir)
def test_value_access_by_path(self, basedir):
conftest = ConftestWithSetinitial(basedir)
adir = basedir.join("adir")
assert conftest._rget_with_confmod("a", adir)[1] == 1
assert conftest._rget_with_confmod("a", adir.join("b"))[1] == 1.5
def test_value_access_with_confmod(self, basedir):
startdir = basedir.join("adir", "b")
startdir.ensure("xx", dir=True)
conftest = ConftestWithSetinitial(startdir)
mod, value = conftest._rget_with_confmod("a", startdir)
assert value == 1.5
path = py.path.local(mod.__file__)
assert path.dirpath() == basedir.join("adir", "b")
assert path.purebasename.startswith("conftest")
def test_conftest_in_nonpkg_with_init(tmpdir):
tmpdir.ensure("adir-1.0/conftest.py").write("a=1 ; Directory = 3")
tmpdir.ensure("adir-1.0/b/conftest.py").write("b=2 ; a = 1.5")
tmpdir.ensure("adir-1.0/b/__init__.py")
tmpdir.ensure("adir-1.0/__init__.py")
ConftestWithSetinitial(tmpdir.join("adir-1.0", "b"))
def test_doubledash_considered(testdir):
conf = testdir.mkdir("--option")
conf.ensure("conftest.py")
conftest = PytestPluginManager()
conftest_setinitial(conftest, [conf.basename, conf.basename])
values = conftest._getconftestmodules(conf)
assert len(values) == 1
def test_issue151_load_all_conftests(testdir):
names = "code proj src".split()
for name in names:
p = testdir.mkdir(name)
p.ensure("conftest.py")
conftest = PytestPluginManager()
conftest_setinitial(conftest, names)
d = list(conftest._conftestpath2mod.values())
assert len(d) == len(names)
def test_conftest_global_import(testdir):
testdir.makeconftest("x=3")
p = testdir.makepyfile(
"""
import py, pytest
from _pytest.config import PytestPluginManager
conf = PytestPluginManager()
mod = conf._importconftest(py.path.local("conftest.py"))
assert mod.x == 3
import conftest
assert conftest is mod, (conftest, mod)
subconf = py.path.local().ensure("sub", "conftest.py")
subconf.write("y=4")
mod2 = conf._importconftest(subconf)
assert mod != mod2
assert mod2.y == 4
import conftest
assert conftest is mod2, (conftest, mod)
"""
)
res = testdir.runpython(p)
assert res.ret == 0
def test_conftestcutdir(testdir):
conf = testdir.makeconftest("")
p = testdir.mkdir("x")
conftest = PytestPluginManager()
conftest_setinitial(conftest, [testdir.tmpdir], confcutdir=p)
values = conftest._getconftestmodules(p)
assert len(values) == 0
values = conftest._getconftestmodules(conf.dirpath())
assert len(values) == 0
assert conf not in conftest._conftestpath2mod
# but we can still import a conftest directly
conftest._importconftest(conf)
values = conftest._getconftestmodules(conf.dirpath())
assert values[0].__file__.startswith(str(conf))
# and all sub paths get updated properly
values = conftest._getconftestmodules(p)
assert len(values) == 1
assert values[0].__file__.startswith(str(conf))
def test_conftestcutdir_inplace_considered(testdir):
conf = testdir.makeconftest("")
conftest = PytestPluginManager()
conftest_setinitial(conftest, [conf.dirpath()], confcutdir=conf.dirpath())
values = conftest._getconftestmodules(conf.dirpath())
assert len(values) == 1
assert values[0].__file__.startswith(str(conf))
@pytest.mark.parametrize("name", "test tests whatever .dotdir".split())
def test_setinitial_conftest_subdirs(testdir, name):
sub = testdir.mkdir(name)
subconftest = sub.ensure("conftest.py")
conftest = PytestPluginManager()
conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir)
if name not in ("whatever", ".dotdir"):
assert subconftest in conftest._conftestpath2mod
assert len(conftest._conftestpath2mod) == 1
else:
assert subconftest not in conftest._conftestpath2mod
assert len(conftest._conftestpath2mod) == 0
def test_conftest_confcutdir(testdir):
testdir.makeconftest("assert 0")
x = testdir.mkdir("x")
x.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_addoption(parser):
parser.addoption("--xyz", action="store_true")
"""
)
)
result = testdir.runpytest("-h", "--confcutdir=%s" % x, x)
result.stdout.fnmatch_lines(["*--xyz*"])
assert "warning: could not load initial" not in result.stdout.str()
@pytest.mark.skipif(
not hasattr(py.path.local, "mksymlinkto"),
reason="symlink not available on this platform",
)
def test_conftest_symlink(testdir):
"""Ensure that conftest.py is used for resolved symlinks."""
real = testdir.tmpdir.mkdir("real")
realtests = real.mkdir("app").mkdir("tests")
testdir.tmpdir.join("symlinktests").mksymlinkto(realtests)
testdir.tmpdir.join("symlink").mksymlinkto(real)
testdir.makepyfile(
**{
"real/app/tests/test_foo.py": "def test1(fixture): pass",
"real/conftest.py": textwrap.dedent(
"""
import pytest
print("conftest_loaded")
@pytest.fixture
def fixture():
print("fixture_used")
"""
),
}
)
result = testdir.runpytest("-vs", "symlinktests")
result.stdout.fnmatch_lines(
[
"*conftest_loaded*",
"real/app/tests/test_foo.py::test1 fixture_used",
"PASSED",
]
)
assert result.ret == EXIT_OK
# Should not cause "ValueError: Plugin already registered" (#4174).
result = testdir.runpytest("-vs", "symlink")
assert result.ret == EXIT_OK
realtests.ensure("__init__.py")
result = testdir.runpytest("-vs", "symlinktests/test_foo.py::test1")
result.stdout.fnmatch_lines(
[
"*conftest_loaded*",
"real/app/tests/test_foo.py::test1 fixture_used",
"PASSED",
]
)
assert result.ret == EXIT_OK
def test_no_conftest(testdir):
testdir.makeconftest("assert 0")
result = testdir.runpytest("--noconftest")
assert result.ret == EXIT_NOTESTSCOLLECTED
result = testdir.runpytest()
assert result.ret == EXIT_USAGEERROR
def test_conftest_existing_resultlog(testdir):
x = testdir.mkdir("tests")
x.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_addoption(parser):
parser.addoption("--xyz", action="store_true")
"""
)
)
testdir.makefile(ext=".log", result="") # Writes result.log
result = testdir.runpytest("-h", "--resultlog", "result.log")
result.stdout.fnmatch_lines(["*--xyz*"])
def test_conftest_existing_junitxml(testdir):
x = testdir.mkdir("tests")
x.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_addoption(parser):
parser.addoption("--xyz", action="store_true")
"""
)
)
testdir.makefile(ext=".xml", junit="") # Writes junit.xml
result = testdir.runpytest("-h", "--junitxml", "junit.xml")
result.stdout.fnmatch_lines(["*--xyz*"])
def test_conftest_import_order(testdir, monkeypatch):
ct1 = testdir.makeconftest("")
sub = testdir.mkdir("sub")
ct2 = sub.join("conftest.py")
ct2.write("")
def impct(p):
return p
conftest = PytestPluginManager()
conftest._confcutdir = testdir.tmpdir
monkeypatch.setattr(conftest, "_importconftest", impct)
assert conftest._getconftestmodules(sub) == [ct1, ct2]
def test_fixture_dependency(testdir, monkeypatch):
ct1 = testdir.makeconftest("")
ct1 = testdir.makepyfile("__init__.py")
ct1.write("")
sub = testdir.mkdir("sub")
sub.join("__init__.py").write("")
sub.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def not_needed():
assert False, "Should not be called!"
@pytest.fixture
def foo():
assert False, "Should not be called!"
@pytest.fixture
def bar(foo):
return 'bar'
"""
)
)
subsub = sub.mkdir("subsub")
subsub.join("__init__.py").write("")
subsub.join("test_bar.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def bar():
return 'sub bar'
def test_event_fixture(bar):
assert bar == 'sub bar'
"""
)
)
result = testdir.runpytest("sub")
result.stdout.fnmatch_lines(["*1 passed*"])
def test_conftest_found_with_double_dash(testdir):
sub = testdir.mkdir("sub")
sub.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_addoption(parser):
parser.addoption("--hello-world", action="store_true")
"""
)
)
p = sub.join("test_hello.py")
p.write("def test_hello(): pass")
result = testdir.runpytest(str(p) + "::test_hello", "-h")
result.stdout.fnmatch_lines(
"""
*--hello-world*
"""
)
class TestConftestVisibility(object):
def _setup_tree(self, testdir): # for issue616
# example mostly taken from:
# https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html
runner = testdir.mkdir("empty")
package = testdir.mkdir("package")
package.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def fxtr():
return "from-package"
"""
)
)
package.join("test_pkgroot.py").write(
textwrap.dedent(
"""\
def test_pkgroot(fxtr):
assert fxtr == "from-package"
"""
)
)
swc = package.mkdir("swc")
swc.join("__init__.py").ensure()
swc.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def fxtr():
return "from-swc"
"""
)
)
swc.join("test_with_conftest.py").write(
textwrap.dedent(
"""\
def test_with_conftest(fxtr):
assert fxtr == "from-swc"
"""
)
)
snc = package.mkdir("snc")
snc.join("__init__.py").ensure()
snc.join("test_no_conftest.py").write(
textwrap.dedent(
"""\
def test_no_conftest(fxtr):
assert fxtr == "from-package" # No local conftest.py, so should
# use value from parent dir's
"""
)
)
print("created directory structure:")
for x in testdir.tmpdir.visit():
print(" " + x.relto(testdir.tmpdir))
return {"runner": runner, "package": package, "swc": swc, "snc": snc}
# N.B.: "swc" stands for "subdir with conftest.py"
# "snc" stands for "subdir no [i.e. without] conftest.py"
@pytest.mark.parametrize(
"chdir,testarg,expect_ntests_passed",
[
# Effective target: package/..
("runner", "..", 3),
("package", "..", 3),
("swc", "../..", 3),
("snc", "../..", 3),
# Effective target: package
("runner", "../package", 3),
("package", ".", 3),
("swc", "..", 3),
("snc", "..", 3),
# Effective target: package/swc
("runner", "../package/swc", 1),
("package", "./swc", 1),
("swc", ".", 1),
("snc", "../swc", 1),
# Effective target: package/snc
("runner", "../package/snc", 1),
("package", "./snc", 1),
("swc", "../snc", 1),
("snc", ".", 1),
],
)
@pytest.mark.issue616
def test_parsefactories_relative_node_ids(
self, testdir, chdir, testarg, expect_ntests_passed
):
dirs = self._setup_tree(testdir)
print("pytest run in cwd: %s" % (dirs[chdir].relto(testdir.tmpdir)))
print("pytestarg : %s" % (testarg))
print("expected pass : %s" % (expect_ntests_passed))
with dirs[chdir].as_cwd():
reprec = testdir.inline_run(testarg, "-q", "--traceconfig")
reprec.assertoutcome(passed=expect_ntests_passed)
@pytest.mark.parametrize(
"confcutdir,passed,error", [(".", 2, 0), ("src", 1, 1), (None, 1, 1)]
)
def test_search_conftest_up_to_inifile(testdir, confcutdir, passed, error):
"""Test that conftest files are detected only up to an ini file, unless
an explicit --confcutdir option is given.
"""
root = testdir.tmpdir
src = root.join("src").ensure(dir=1)
src.join("pytest.ini").write("[pytest]")
src.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def fix1(): pass
"""
)
)
src.join("test_foo.py").write(
textwrap.dedent(
"""\
def test_1(fix1):
pass
def test_2(out_of_reach):
pass
"""
)
)
root.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def out_of_reach(): pass
"""
)
)
args = [str(src)]
if confcutdir:
args = ["--confcutdir=%s" % root.join(confcutdir)]
result = testdir.runpytest(*args)
match = ""
if passed:
match += "*%d passed*" % passed
if error:
match += "*%d error*" % error
result.stdout.fnmatch_lines(match)
def test_issue1073_conftest_special_objects(testdir):
testdir.makeconftest(
"""\
class DontTouchMe(object):
def __getattr__(self, x):
raise Exception('cant touch me')
x = DontTouchMe()
"""
)
testdir.makepyfile(
"""\
def test_some():
pass
"""
)
res = testdir.runpytest()
assert res.ret == 0
def test_conftest_exception_handling(testdir):
testdir.makeconftest(
"""\
raise ValueError()
"""
)
testdir.makepyfile(
"""\
def test_some():
pass
"""
)
res = testdir.runpytest()
assert res.ret == 4
assert "raise ValueError()" in [line.strip() for line in res.errlines]
def test_hook_proxy(testdir):
"""Session's gethookproxy() would cache conftests incorrectly (#2016).
It was decided to remove the cache altogether.
"""
testdir.makepyfile(
**{
"root/demo-0/test_foo1.py": "def test1(): pass",
"root/demo-a/test_foo2.py": "def test1(): pass",
"root/demo-a/conftest.py": """\
def pytest_ignore_collect(path, config):
return True
""",
"root/demo-b/test_foo3.py": "def test1(): pass",
"root/demo-c/test_foo4.py": "def test1(): pass",
}
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
["*test_foo1.py*", "*test_foo3.py*", "*test_foo4.py*", "*3 passed*"]
)
def test_required_option_help(testdir):
testdir.makeconftest("assert 0")
x = testdir.mkdir("x")
x.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_addoption(parser):
parser.addoption("--xyz", action="store_true", required=True)
"""
)
)
result = testdir.runpytest("-h", x)
assert "argument --xyz is required" not in result.stdout.str()
assert "general:" in result.stdout.str()
|
pfctdayelise/pytest
|
testing/test_conftest.py
|
Python
|
mit
| 18,429
|
[
"VisIt"
] |
3cc3eafe7f2edeed67397a3ffc4b8b1637a56484de00730d81ce631648dd8d63
|
# Run tests with nosetests
import corex
import numpy as np
from functools import partial, update_wrapper
verbose = False
seed = 3
def generate_data(n_samples=100, group_sizes=[2], dim_hidden=2, missing=0):
Y_true = [np.random.randint(0, dim_hidden, n_samples) for _ in group_sizes]
X = np.hstack([np.repeat(Y_true[i][:,np.newaxis], size, axis=1) for i, size in enumerate(group_sizes)])
clusters = [i for i in range(len(group_sizes)) for _ in range(group_sizes[i])]
tcs = map(lambda z: (z-1)*np.log(dim_hidden), group_sizes)
X = np.where(np.random.random(X.shape) >= missing, X, -1)
return X, Y_true, clusters, tcs
def generate_noisy_data(n_samples=100, group_sizes=[2], erasure_p=0):
# Implement an erasure channel with erasure probability erasure_p
# The capacity of a single such channel is 1-erasure_p,
# So if we have group_size < 1/(1-p) , Shannon's bound forbids perfect recovery
# Or, 1 - 1/g < p
dim_hidden = 3
Y_true = [np.random.randint(0, 2, n_samples) for _ in group_sizes]
X = np.hstack([np.repeat(Y_true[i][:,np.newaxis], size, axis=1) for i, size in enumerate(group_sizes)])
X = np.where(np.random.random(X.shape) < erasure_p, 2, X) # Erasure channel
clusters = [i for i in range(len(group_sizes)) for _ in range(group_sizes[i])]
tcs = map(lambda z: (z-1)*np.log(2), group_sizes)
return X, Y_true, clusters, tcs
def check_correct(clusters, tcs, Y_true, X, corex):
assert np.array_equal(corex.transform(X), corex.labels) # Correctness of transform
assert np.array_equal(corex.clusters, clusters), str(zip(corex.clusters, clusters)) # Check connections
for j, tc in enumerate(tcs):
assert np.abs(corex.tcs[j]-tc)/tc < 0.1, "Values %f, %f" %(corex.tcs[j], tc) # TC relative error is small
assert len(set(map(tuple, zip(corex.labels.T[j], Y_true[j])))) == len(set(Y_true[j])), \
zip(corex.labels.T[j], Y_true[j]) # One-to-one correspondence of labels
def test_corex_all():
n_samples = 100
for group_sizes in [[2], [3, 2]]:
for dim_hidden in [2, 3]:
np.random.seed(seed)
X, Y_true, clusters, tcs = generate_data(n_samples=n_samples, group_sizes=group_sizes, dim_hidden=dim_hidden)
methods = [
corex.Corex(n_hidden=len(group_sizes), dim_hidden=dim_hidden, missing_values=-1, seed=seed, verbose=verbose).fit(X)
]
for i, method in enumerate(methods):
f = partial(check_correct, clusters, method.tcs, Y_true, X, method)
update_wrapper(f, check_correct)
f.description = 'method: ' + ['base', 'gaussian', 'discrete', 'discrete NT', 'gaussian NT', 'beta NT'][i] + \
', groups:' + str(group_sizes) + ', dim_hidden:' + str(dim_hidden) + ', seed: '+str(seed)
yield (f, )
def test_missing_values():
n_samples = 100
dim_hidden = 2
missing = 0.1
group_sizes = [10, 7] # Chance of entire row missing smaller than missing^n
np.random.seed(seed)
X, Y_true, clusters, tcs = generate_data(n_samples=n_samples, group_sizes=group_sizes,
dim_hidden=dim_hidden, missing=missing)
methods = [
corex.Corex(n_hidden=len(group_sizes), dim_hidden=dim_hidden, missing_values=-1, seed=seed, verbose=verbose).fit(X)
]
for i, method in enumerate(methods):
f = partial(check_correct, clusters, method.tcs, Y_true, X, method)
update_wrapper(f, check_correct)
f.description = 'missing values, '+ ['base', 'gaussian', 'discrete', 'discrete NT', 'gaussian NT'][i] + ', seed: '+str(seed)
yield (f, )
def test_near_shannon_limit():
X, Y_true, clusters, tcs = generate_noisy_data(n_samples=1000, group_sizes=[200], erasure_p=1.-3./200)
out = corex.Corex(n_hidden=1, seed=seed, verbose=verbose).fit(X)
assert max(np.mean(Y_true==out.labels.T), 1-np.mean(Y_true==out.labels.T)) > 0.95 # rate = 3*capacity, near perfect
X, Y_true, clusters, tcs = generate_noisy_data(n_samples=1000, group_sizes=[200], erasure_p=1.-1./200)
out = corex.Corex(n_hidden=1, seed=seed, verbose=verbose).fit(X)
assert max(np.mean(Y_true==out.labels.T), 1-np.mean(Y_true==out.labels.T)) < 0.9 # rate=capacity, not perfect
|
aaronplasek/CorEx
|
test_corex.py
|
Python
|
gpl-2.0
| 4,334
|
[
"Gaussian"
] |
b6c556fd0189c25ba53035dbe5024372bdf6501ba94fee2794ae61830b3047fc
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import time
import subprocess
from paver.easy import *
from os.path import islink, isfile,join,basename,dirname,exists,relpath,abspath
from paver.setuputils import setup
try:
from paver.virtual import bootstrap, virtualenv
except ImportError, e:
info(
"VirtualEnv must be installed to enable 'paver bootstrap'. If you need this command, run: pip install virtualenv"
)
# Import parameters from the setup file.
sys.path.append('.')
from setup import (
setup_dict, get_project_files, print_success_message,
print_failure_message, _lint, _test, _test_all,
CODE_DIRECTORY, DOCS_DIRECTORY, TESTS_DIRECTORY, PYTEST_FLAGS)
from paver.easy import options, task, needs, consume_args
from paver.setuputils import install_distutils_tasks
options(setup=setup_dict,
star=Bunch(
sdir=path('temposeqcount/download'),
bindir=path('temposeqcount/bin')
),
FastQC=Bunch(
url='http://www.bioinformatics.babraham.ac.uk/projects/fastqc/fastqc_v0.11.2.zip',
downloads=path('temposeqcount/download'),
installdir=join(sys.prefix,'lib')
),
fastx_lib=Bunch(
url='https://github.com/agordon/libgtextutils/releases/download/0.7/libgtextutils-0.7.tar.gz',
downloads=path('temposeqcount/download'),
installdir=join(sys.prefix, 'lib', 'libgtextutils')
),
fastx=Bunch(
url='https://github.com/agordon/fastx_toolkit/releases/download/0.0.14/fastx_toolkit-0.0.14.tar.bz2',
downloads=path('temposeqcount/download'),
installdir=join(sys.prefix, 'lib', 'fastx_toolkit')
),
ngsutils=Bunch(
url='https://github.com/ngsutils/ngsutils/archive/ngsutils-0.5.7.tar.gz',
downloads=path('temposeqcount/download'),
),
auto_barcode=Bunch(
url='https://github.com/mfcovington/auto_barcode/archive/2.1.2.tar.gz',
downloads=path('temposeqcount/download')
),
R=Bunch(
url='https://cran.r-project.org/src/base/R-3/R-3.2.3.tar.gz',
downloads=path('temposeqcount/download'),
installdir=join(sys.prefix, 'lib', "R-3.2.3")
),
rpy2=Bunch(
url='https://pypi.python.org/packages/source/r/rpy2/rpy2-2.7.8.tar.gz',
downloads=path('temposeqcount/download')
),
seqtk=Bunch(
url='https://github.com/lh3/seqtk.git',
downloads=path('temposeqcount/download')
),
environ=Bunch(
installdir=path('temposeqcount/lib')
),
settings=Bunch(
shell_file=path('temposeqcount/files/settings.sh')
),
samtools=Bunch(
sdir=path('temposeqcount/download'),
bindir=path('temposeqcount/bin')
),
help2man=Bunch(
sdir=path('temposeqcount/download'),
url='http://ftp.gnu.org/gnu/help2man/help2man-1.43.3.tar.gz',
),
libtool=Bunch(
sdir=path('temposeqcount/download'),
url='http://gnu.mirror.constant.com/libtool/libtool-2.4.tar.gz'
),
textinfo=Bunch(
sdir=path('temposeqcount/download'),
url='http://ftp.gnu.org/gnu/texinfo/texinfo-6.1.tar.gz'
),
graphviz=Bunch(
sdir=path('temposeqcount/download'),
url='https://github.com/ellson/graphviz/releases/download/Nightly/graphviz-2.41.20170103.1755.tar.gz'
),
virtualenv=Bunch(
packages_to_install=[],
no_site_packages=True)
)
INSTRUICTIONS = """
Run
$ source temposeqcount/bin/activate
to enter the virtual environment and
$ deactivate
to exit the environment.
"""
install_distutils_tasks()
## Miscellaneous helper functions
@task
def bootstrap(options):
"""Create virtualenv in ./bootstrap"""
try:
import virtualenv
except ImportError, e:
raise RuntimeError("Virtualenv is needed for bootstrap")
options.virtualenv.no_site_packages=False
options.bootstrap.no_site_packages=False
call_task("paver.virtualenv.boostrap")
@task
def make_download_dir(options):
currwd = os.getcwd()
sdir = path(currwd) / options.star.sdir
sh('mkdir -p %s' % (sdir))
@task
def download_compile_star(options):
"""installs the current package"""
starbin=join(sys.prefix,'bin','STAR')
if not exists(starbin):
info("Compiling STAR...")
currwd = os.getcwd()
sdir = path(currwd) / options.star.sdir
bdir = path(currwd) / options.star.bindir
dist = join(sys.prefix, 'bin', 'STAR')
if not islink(dist):
sh('(cd %s; wget https://github.com/alexdobin/STAR/archive/2.5.2b.tar.gz -O- | tar xzf -; cd STAR-2.5.2b; make; ln -s %s/STAR-2.5.2b/bin/Linux_x86_64/STAR %s; cd %s)' % (sdir, sdir, bdir, sdir))
@task
def installggplot():
"""install ggplot"""
try:
import ggplot
except ImportError:
cmd = 'pip install git+https://github.com/yhat/ggplot'
sh(cmd)
@task
def download_compile_seqtk(options):
"""Download and compile seqtk"""
appbin=join(sys.prefix, 'bin', 'seqtk')
srcdir = join(options.seqtk.downloads, "seqtk")
if not exists(appbin):
if exists(srcdir):
sh('cd %s ; cd seqtk;make' %(options.seqtk.downloads))
else:
sh('cd %s ;git clone %s ; cd seqtk ; make' %(options.seqtk.downloads, options.seqtk.url))
@task
def download_compile_samtools(options):
"""installs the current package"""
samtoolsbin=join(sys.prefix,'bin','samtools')
if not exists(samtoolsbin):
info("Compiling samtools....")
currwd = os.getcwd()
sdir = path(currwd) / options.samtools.sdir
sh('(cd %s; wget https://github.com/samtools/htslib/archive/1.1.tar.gz -O- | tar xzf -; mv htslib-* htslib;wget https://github.com/samtools/samtools/archive/1.1.tar.gz -O- | tar xzf -; mv samtools-* samtools; cd samtools;make; cd %s)' % (sdir, sdir))
@task
def install_fastax_lib(options):
"""Install lib required for fastx"""
info("Installing lib required for fastx ..." )
installdir = abspath(options.fastx_lib.installdir)
if not exists(installdir):
lbcmd = 'cd %s && wget %s && tar -xvf libgtextutils-0.7.tar.gz && cd libgtextutils-0.7 && ./configure --prefix=%s && make && make install' %(options.fastx_lib.downloads, options.fastx_lib.url, installdir)
sh(lbcmd)
@task
def install_fastx(options):
"""Install fastx toolkit ..."""
info("Installing fastx toolkit ...")
installdir = abspath(options.fastx.installdir)
libdir = abspath(options.fastx_lib.installdir)
if not exists(installdir):
lbcmd = 'cd %s && wget %s && tar -xjvf fastx_toolkit-0.0.14.tar.bz2 && cd fastx_toolkit-0.0.14 && export PKG_CONFIG_PATH=%s/lib/pkgconfig:$PKG_CONFIG_PATH && ./configure --prefix=%s && make && make install' %(options.fastx.downloads, options.fastx.url,libdir, installdir)
sh(lbcmd)
@task
def install_ngsutils(options):
"""Install ngsutils ..."""
info ("Installing ngsutils ...")
installdir = abspath(options.ngsutils.installdir)
srcdir = abspath(options.ngsutils.installdir)
if not exists(installdir):
lbcmd = 'cd %s && wget %s && tar -xvf ngsutils-0.5.7.tar.gz && cd ngsutils-ngsutils-0.5.7 && make' %(options.ngsutils.downloads, options.ngsutils.url)
sh(lbcmd)
@task
def install_R(options):
"""Install R64 ..."""
info("Installing R ...")
installdir=abspath(options.R.installdir)
dist = join(sys.prefix, 'bin', "R")
src = join(options.R.installdir, "bin", "R")
if not exists(dist):
lbcmd = 'cd %s && wget %s && tar -xvf R-3.2.3.tar.gz && cd R-3.2.3 && ./configure --enable-R-shlib --prefix=%s && make && make install ' %(options.R.downloads, options.R.url, installdir)
sh(lbcmd)
# make symbolic link to the bin dir
os.symlink(src, dist)
os.chmod(dist, 0755)
@task
@needs('install_R')
def setenviron(options):
"""Setup environment varaible"""
src = options.environ.installdir
rldpath = os.path.join(src, "R-3.2.3", "lib64", "R", "lib")
if 'LD_LIBRARY_PATH' not in os.environ:
os.environ['LD_LIBRARY_PATH']=rldpath
else:
os.environ['LD_LIBRARY_PATH']+=rldpath
@task
@needs('setenviron')
def install_rpy2(options):
"""Install rpy2 python package"""
info("Install rpy2 python package, require dependencies ...")
#rhome=join(sys.prefix, "lib", "R-2.12", "lib64", "R", "lib")
rinclude=join(sys.prefix, "lib", "R-3.2.3", "lib64", "R", "include")
dist = join(sys.prefix, 'download', 'rpy2-2.7.8')
if not exists(dist):
lbcmd = 'cd %s && wget %s && tar -xvf rpy2-2.7.8.tar.gz && cd rpy2-2.7.8 && export CFLAGS="-I%s" && python setup.py build install ' %(options.rpy2.downloads, options.rpy2.url, rinclude)
sh(lbcmd)
else:
lbcmd = 'cd %s && export CFLAGS="-I%s" && python setup.py build install ' %(dist,rinclude)
sh(lbcmd)
@task
def download_compile_textinfo(options):
"""installs the textinfo, required by graphviz"""
makeinfobin=join(sys.prefix, 'bin','makeinfo')
srcfile =join(sys.prefix, "download",'texinfo-6.1.tar.gz' )
if not exists(makeinfobin) and not exists(srcfile):
info("Installing textinfo...")
currwd = os.getcwd()
sdir = path(currwd) / options.textinfo.sdir
url=options.textinfo.url
info(sdir)
sh('(cd %s; wget %s; tar -xzvf texinfo-6.1.tar.gz;cd texinfo-6.1;./configure --prefix=%s/texinfo-6.1;make;make install)' %(sdir,url, sdir))
@task
def download_compile_help2man(options):
    """Download and build help2man 1.43.3, a prerequisite for the
    graphviz build. Skipped when the binary or the tarball is present."""
    help2manbin = join(sys.prefix, 'bin', 'help2man')
    info(help2manbin)
    srcfile = join(sys.prefix, "download", 'help2man-1.43.3.tar.gz')
    if exists(help2manbin) or exists(srcfile):
        return
    info("Installing help2man...")
    sdir = path(os.getcwd()) / options.help2man.sdir
    url = options.help2man.url
    prefix = join(sys.prefix, "download", 'help2man-1.43.3')
    info(sdir)
    sh('cd %s; wget %s;tar -xzvf help2man-1.43.3.tar.gz; cd help2man-1.43.3; ./configure CC="cc" --prefix=%s;make;make install' % (sdir, url, prefix))
@task
#@needs('download_compile_help2man', 'download_compile_textinfo')
def download_compile_libtool(options):
    """Download and build libtool 2.4, needed by graphviz. Skipped when
    the binary or the tarball is already present."""
    libtoolbin = join(sys.prefix, 'bin', 'libtool')
    srcfile = join(sys.prefix, "download", 'libtool-2.4.tar.gz')
    if exists(libtoolbin) or exists(srcfile):
        return
    info("Installing libtool, needed by graphviz ...")
    sdir = path(os.getcwd()) / options.libtool.sdir
    url = options.libtool.url
    info(sdir)
    sh('(cd %s; wget %s; tar -xzvf libtool-2.4.tar.gz;cd libtool-2.4;./configure CC="cc" --prefix=%s/libtool-2.4;make;make install)' % (sdir, url, sdir))
@task
#@needs('download_compile_libtool')
def download_compile_graphviz(options):
    """Download and build graphviz (provides ``dot``). Skipped when the
    dot binary already exists in the virtualenv."""
    graphvizbin = join(sys.prefix, 'bin', 'dot')
    ssdir = path(os.getcwd()) / options.graphviz.sdir / "graphviz-2.41.20170103.1755"
    info(graphvizbin)
    if exists(graphvizbin):
        return
    info("Installing graphviz...")
    info(ssdir)
    # Stream the tarball straight into tar, then configure/build/install.
    sh('(cd %s;rm -rf %s; wget %s -O- | tar xzf -;cd graphviz-2.41.20170103.1755;./configure --prefix=%s;make;make install)' % (options.graphviz.sdir, ssdir, options.graphviz.url, ssdir))
#export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:~/data/jimaprogramming/python/temposeqcount/temposeqcount/lib/R-3.2.3/lib64/R/lib/
@task
def insallRpackages(options):
    """Install R packages that cannot be installed using pip install
    (currently ggplot2, via the Bioconductor biocLite bootstrap)."""
    src = join(sys.prefix, "lib", "R-3.2.3", "lib64", "R", "lib")
    # NOTE(review): this export runs in a child shell, so it cannot affect
    # the current process's environment -- kept only for parity; confirm
    # whether os.environ should be updated instead.
    spe = "$"
    cmd = 'export LD_LIBRARY_PATH=%sLD_LIBRARY_PATH:%s' % (spe, src)
    sh(cmd)
    import rpy2.robjects.packages as rpackages
    # Bug fix: ('ggplot2') is just a parenthesized string, so iterating it
    # walked single characters ('g', 'g', 'p', ...). A one-element tuple
    # needs a trailing comma.
    packageNames = ('ggplot2',)
    have_packages = all(rpackages.isinstalled(x) for x in packageNames)
    if not have_packages:
        packnames_to_install = [x for x in packageNames if not rpackages.isinstalled(x)]
        if len(packnames_to_install) > 0:
            # Install through Bioconductor's biocLite installer rather than
            # plain CRAN install.packages.
            base = rpackages.importr('base')
            base.source("http://www.bioconductor.org/biocLite.R")
            biocinstaller = rpackages.importr("BiocInstaller")
            biocinstaller.biocLite("ggplot2", suppressUpdates=True)
@task
def download_install_fastqc(options):
    """Download FastQC, unpack the zip, and symlink the launcher into the
    virtualenv's bin directory. Each step is skipped if already done."""
    import zipfile
    from glob import glob
    dlpath = join(options.FastQC.downloads, 'fastqc_v*.zip')
    fastqczip = glob(dlpath)
    # No need to redownload
    if not len(fastqczip):
        info("Downloading FastQC from %s" % options.FastQC.url)
        dlcmd = 'cd %s && [ ! -e fastqc*.zip ] && wget %s' % (options.FastQC.downloads, options.FastQC.url)
        sh(dlcmd)
    else:
        info("FastQC Already downloaded")
    # Re-glob: the archive may have just been downloaded.
    fastqczip = glob(dlpath)
    fqcdir = join(options.FastQC.installdir, 'FastQC')
    # Check to see if it is extracted already
    if not exists(fqcdir):
        info("Unpacking FastQC")
        zfh = zipfile.ZipFile(fastqczip[-1])
        zfh.extractall(options.FastQC.installdir)
        zfh.close()
    else:
        info("FastQC already unpacked")
    # Make a relative symlink into bin so the tree stays relocatable.
    src = relpath(join(fqcdir, 'fastqc'), join(sys.prefix, 'bin'))
    dst = join(sys.prefix, 'bin', 'fastqc')
    if not exists(dst):
        info("Installing fastqc symlink")
        os.symlink(src, dst)
        # Fix: 0o755 instead of Python-2-only literal 0755 (valid on 2.6+/3).
        os.chmod(dst, 0o755)
    else:
        info("fastqc symlink already exists")
@task
def set_ld_path(options):
    """Create the settings shell file (exporting LD_LIBRARY_PATH for the
    bundled R) if it does not exist, then source it."""
    src = options.settings.shell_file
    # Bug fix: the original's "taken" branch was a bare `next`, which merely
    # evaluates the builtin and does nothing; use a guard clause instead.
    if not exists(src):
        with open(src, 'w') as myfile:
            rldpath = os.path.join(sys.prefix, "lib", "R-3.2.3", "lib64", "R", "lib")
            # '$' is injected via %-formatting so the template string
            # contains no literal dollar sign.
            sep = "$"
            ldpath = "export LD_LIBRARY_PATH=%sLD_LIBRARY_PATH:%s\n" % (sep, rldpath)
            myfile.write(ldpath)
    info(src)
    # NOTE(review): `sh('source ...')` runs in a child shell, so the export
    # cannot affect this paver process -- confirm the intent.
    sh('source %s' % (src))
def print_passed():
    """Print a big green(ish) PASSED banner via print_success_message."""
    # generated on http://patorjk.com/software/taag/#p=display&f=Small&t=PASSED
    print_success_message(r''' ___ _ ___ ___ ___ ___
| _ \/_\ / __/ __| __| \
| _/ _ \\__ \__ \ _|| |) |
|_|/_/ \_\___/___/___|___/
''')
def print_failed():
    """Print a big FAILED banner via print_failure_message."""
    # generated on http://patorjk.com/software/taag/#p=display&f=Small&t=FAILED
    print_failure_message(r''' ___ _ ___ _ ___ ___
| __/_\ |_ _| | | __| \
| _/ _ \ | || |__| _|| |) |
|_/_/ \_\___|____|___|___/
''')
class cwd(object):
    """Context manager that temporarily changes the working directory and
    restores the previous one on exit -- a `pushd`/`popd` pair.

    Entering yields the (resolved) new working directory.
    """
    def __init__(self, newcwd):
        """:param newcwd: directory to make the cwd
        :type newcwd: :class:`str`
        """
        self.newcwd = newcwd

    def __enter__(self):
        self.oldcwd = os.getcwd()
        os.chdir(self.newcwd)
        return os.getcwd()

    def __exit__(self, type_, value, traceback):
        # Runs whether or not the body raised, like a `finally` clause.
        os.chdir(self.oldcwd)
## Task-related functions
def _doc_make(*make_args):
    """Invoke ``make`` (or ``make.bat`` on Windows) inside sphinx's docs
    directory with the given targets.

    :return: exit code
    """
    make_cmd = ['make.bat'] if sys.platform == 'win32' else ['make']
    make_cmd.extend(make_args)
    # Run from inside DOCS_DIRECTORY; working around a Windows subprocess
    # quirk: <http://bugs.python.org/issue15533>
    with cwd(DOCS_DIRECTORY):
        return subprocess.call(make_cmd)
## Tasks
@task
def init():
    """Finish bootstrap: report success and print the usage instructions."""
    info("virtual environment successfully bootstrapped.")
    # NOTE(review): 'INSTRUICTIONS' looks misspelled, but it must match the
    # constant defined elsewhere in this file -- confirm before renaming.
    info(INSTRUICTIONS)
@task
@needs('install_python_dependencies','install_other_dependencies')
def install_dependencies():
    """Aggregate task: install both the Python and non-Python dependencies."""
    pass
@task
#@needs('download_compile_star', 'download_install_fastqc', 'download_compile_seqtk','download_compile_samtools','install_fastax_lib', 'install_fastx', 'in)
@needs('make_download_dir','download_compile_star', 'download_install_fastqc', 'download_compile_seqtk','download_compile_samtools','install_fastax_lib', 'install_fastx', 'download_compile_help2man', 'download_compile_textinfo','download_compile_libtool','download_compile_graphviz')
def install_other_dependencies():
    """Aggregate task: download and build every non-Python tool the
    pipeline needs (STAR, FastQC, seqtk, samtools, fastx, graphviz, ...)."""
    pass
@task
def install_python_dependencies():
    """Install the Python requirements; older pip gets an explicit
    download cache directory."""
    import pip
    # Bug fix: pip.__version__ is a string; comparing it to the float 6.0 is
    # meaningless (always True on Python 2's cross-type ordering, TypeError
    # on Python 3). Compare the parsed major version instead.
    if int(pip.__version__.split('.')[0]) >= 6:
        sh('pip install numpy')  # pip failed to install from requirement file
        sh('pip install -r requirements-dev.txt')
    else:
        sh('pip install numpy')
        sh('pip install -r requirements-dev.txt --cache-dir temposeqcount/download/.pip_cache')
@task
def install_python_dependencies_nodeps():
    """Install python package without installing dependencies"""
    import pip
    # Bug fix: same as install_python_dependencies -- pip.__version__ is a
    # string, so `> 6.0` never worked; parse the major version.
    if int(pip.__version__.split('.')[0]) >= 6:
        sh('pip install numpy')
        sh('pip install -r requirements_nodeps.txt')
    else:
        sh('pip install numpy')
        sh('pip install -r requirements_nodeps.txt --cache-dir temposeqcount/download/.pip_cache')
@task
@needs('install_dependencies')
def prepare():
    """Prepare the complete environment (installs all dependencies)."""
    pass
@task
@needs('prepare','setuptools.command.install')
def install():
    """Prepare the environment, then run setuptools' install command."""
    pass
@task
@needs('install')
def develop():
    """Alias for `install` (development setup)."""
    pass
@task
@needs('prepare','doc_html', 'setuptools.command.sdist')
def sdist():
    """Build the HTML docs and the tarball."""
    # All the work happens in the @needs chain above.
    pass
@task
def test():
    """Run the unit tests and exit with the runner's status code."""
    exit_code = _test()
    raise SystemExit(exit_code)
@task
def lint():
    # This refuses to format properly when running `paver help' unless
    # this ugliness is used.
    # (The bare parenthesized string below is the function's first statement,
    # so Python treats it as the docstring paver displays.)
    ('Perform PEP8 style check, run PyFlakes, and run McCabe complexity '
     'metrics on the code.')
    raise SystemExit(_lint())
@task
def test_all():
    """Perform a style check and run all unit tests, printing a banner
    for the overall result and exiting with the combined status."""
    retcode = _test_all()
    if retcode != 0:
        print_failed()
    else:
        print_passed()
    raise SystemExit(retcode)
@task
@consume_args
def run(args):
    """Run the package's main script. All arguments are passed to it."""
    # The main script expects to get the called executable's name as
    # argv[0]. However, paver doesn't provide that in args. Even if it did (or
    # we dove into sys.argv), it wouldn't be useful because it would be paver's
    # executable. So we just pass the package name in as the executable name,
    # since it's close enough. This should never be seen by an end user
    # installing through Setuptools anyway.
    from temposeqcount.main import main
    # Exit with whatever status main() returns.
    raise SystemExit(main([CODE_DIRECTORY] + args))
@task
def commit():
    """Run the full test suite and commit only when it passes."""
    if _test_all() != 0:
        print_failure_message('\nTests failed, not committing.')
    else:
        subprocess.check_call(['git', 'commit'])
@task
def coverage():
    """Run the tests under pytest-cov and print a terminal coverage report."""
    try:
        import pytest_cov  # NOQA
    except ImportError:
        print_failure_message(
            'Install the pytest coverage plugin to use this task, '
            "i.e., `pip install pytest-cov'.")
        raise SystemExit(1)
    import pytest
    cov_args = ['--cov', CODE_DIRECTORY,
                '--cov-report', 'term-missing',
                TESTS_DIRECTORY]
    pytest.main(PYTEST_FLAGS + cov_args)
@task  # NOQA
def doc_watch():
    """Watch for changes in the docs and rebuild HTML docs when changed."""
    try:
        from watchdog.events import FileSystemEventHandler
        from watchdog.observers import Observer
    except ImportError:
        print_failure_message('Install the watchdog package to use this task, '
                              "i.e., `pip install watchdog'.")
        raise SystemExit(1)

    class RebuildDocsEventHandler(FileSystemEventHandler):
        # Filesystem handler that rebuilds the docs whenever a watched
        # project file changes.
        def __init__(self, base_paths):
            # base_paths: iterable of path suffixes we care about.
            self.base_paths = base_paths

        def dispatch(self, event):
            """Dispatches events to the appropriate methods.
            :param event: The event object representing the file system event.
            :type event: :class:`watchdog.events.FileSystemEvent`
            """
            # Only forward events whose path matches one of the watched files.
            for base_path in self.base_paths:
                if event.src_path.endswith(base_path):
                    super(RebuildDocsEventHandler, self).dispatch(event)
                    # We found one that matches. We're done.
                    return

        def on_modified(self, event):
            print_failure_message('Modification detected. Rebuilding docs.')
            # # Strip off the path prefix.
            # import os
            # if event.src_path[len(os.getcwd()) + 1:].startswith(
            #         CODE_DIRECTORY):
            #     # sphinx-build doesn't always pick up changes on code files,
            #     # even though they are used to generate the documentation. As
            #     # a workaround, just clean before building.
            doc_html()
            print_success_message('Docs have been rebuilt.')

    print_success_message(
        'Watching for changes in project files, press Ctrl-C to cancel...')
    handler = RebuildDocsEventHandler(get_project_files())
    observer = Observer()
    observer.schedule(handler, path='.', recursive=True)
    observer.start()
    try:
        # Keep the main thread alive while the observer thread watches.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
@task
@needs('doc_html')
def doc_open():
    """Build the HTML docs and open them in a web browser."""
    doc_index = os.path.join(DOCS_DIRECTORY, 'build', 'html', 'index.html')
    if sys.platform == 'darwin':
        # Mac OS X
        subprocess.check_call(['open', doc_index])
    elif sys.platform == 'win32':
        # Windows ('start' is a cmd builtin, hence shell=True)
        subprocess.check_call(['start', doc_index], shell=True)
    elif sys.platform.startswith('linux'):
        # All freedesktop-compatible desktops. Bug fix: Python 3.3+ reports
        # 'linux' (not 'linux2'), so match the prefix instead of 'linux2'.
        subprocess.check_call(['xdg-open', doc_index])
    else:
        print_failure_message(
            "Unsupported platform. Please open `{0}' manually.".format(
                doc_index))
@task
def get_tasks():
    """List the short names of every paver-defined task."""
    from paver.tasks import environment
    # Renamed the loop variable: `task` would shadow the @task decorator.
    for each in environment.get_tasks():
        print(each.shortname)
@task
@needs('install_python_dependencies')
def doc_man():
    """Build the man page via sphinx's `man` make target."""
    rc = _doc_make('man')
    if rc != 0:
        raise SystemExit(rc)
@task
def doc_html():
    """Build the HTML docs via sphinx's `html` make target."""
    rc = _doc_make('html')
    if rc != 0:
        raise SystemExit(rc)
@task
def doc_clean():
    """Delete the built docs via sphinx's `clean` make target."""
    rc = _doc_make('clean')
    if rc != 0:
        raise SystemExit(rc)
|
demis001/raslpipe
|
pavement.py
|
Python
|
mit
| 23,467
|
[
"Bioconductor"
] |
fd14c2bcf2111b1a0f5d1be4ecad2e403dbe82db129e0007883302c6cce2a1dc
|
"""
Migration script to create a new 'sequencer' table
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
from sqlalchemy.exc import *
from galaxy.model.custom_types import *
import datetime
now = datetime.datetime.utcnow
import logging
log = logging.getLogger( __name__ )
metadata = MetaData()
# Table to add: a physical sequencer record. form_definition_id /
# form_values_id are foreign keys into the form_definition and form_values
# tables; 'deleted' is an indexed soft-delete flag defaulting to False.
Sequencer_table = Table( 'sequencer', metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "name", TrimmedString( 255 ), nullable=False ),
    Column( "description", TEXT ),
    Column( "sequencer_type_id", TrimmedString( 255 ), nullable=False ),
    Column( "version", TrimmedString( 255 ) ),
    Column( "form_definition_id", Integer, ForeignKey( "form_definition.id" ), index=True ),
    Column( "form_values_id", Integer, ForeignKey( "form_values.id" ), index=True ),
    Column( "deleted", Boolean, index=True, default=False ) )
def upgrade(migrate_engine):
    """Create the new 'sequencer' table on the given engine.

    Failures are logged (not raised) so the migration framework can proceed.
    """
    metadata.bind = migrate_engine
    # Portability fix: use the function-call print form instead of the
    # Python-2-only `print __doc__` statement.
    print(__doc__)
    metadata.reflect()
    # create the sequencer table
    try:
        Sequencer_table.create()
    except Exception as e:  # Py3-compatible spelling of `except Exception, e`
        log.debug("Creating 'sequencer' table failed: %s" % str(e))
def downgrade(migrate_engine):
    """Drop the 'sequencer' table if it exists.

    Failures are logged (not raised) so the migration framework can proceed.
    """
    metadata.bind = migrate_engine
    metadata.reflect()
    # Load the live table; use a local name so we do not shadow the
    # module-level Sequencer_table definition.
    try:
        sequencer_table = Table("sequencer", metadata, autoload=True)
    except NoSuchTableError:
        sequencer_table = None
        log.debug("Failed loading table sequencer")
    if sequencer_table is not None:
        try:
            sequencer_table.drop()
        except Exception as e:  # Py3-compatible spelling of `except Exception, e`
            log.debug("Deleting 'sequencer' table failed: %s" % str(e))
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/model/migrate/versions/0063_sequencer_table.py
|
Python
|
gpl-3.0
| 2,026
|
[
"Galaxy"
] |
d9acaf8b1e87cf54ba2822003f5e1bf0940df56d3d62e16fd09650000be89fe2
|
"""
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from openedx.core.lib.xblock_utils import wrap_xblock
from openedx.core.lib.url_utils import quote_slashes
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from openedx.core.djangoapps.course_groups.cohorts import get_course_cohorts, is_course_cohorted, DEFAULT_COHORT_NAME
from student.models import CourseEnrollment
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegCodeItem
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import (
CertificateGenerationConfiguration,
CertificateWhitelist,
GeneratedCertificate,
CertificateStatuses,
CertificateGenerationHistory,
CertificateInvalidation,
)
from certificates import api as certs_api
from bulk_email.models import BulkEmailFlag
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangolib.markup import HTML, Text
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
    """
    Defines the Instructor Dashboard view type that is shown as a course tab.
    """
    type = "instructor"
    title = ugettext_noop('Instructor')
    view_name = "instructor_dashboard"
    is_dynamic = True  # The "Instructor" tab is instead dynamically added when it is enabled

    @classmethod
    def is_enabled(cls, course, user=None):
        """
        Returns true if the specified user has staff access.
        """
        # bool(...) also maps a missing (None) user to False.
        return bool(user and has_access(user, 'staff', course, course.id))
def show_analytics_dashboard_message(course_key):
    """
    Decide whether the external analytics dashboard link should be shown.

    Arguments:
        course_key (CourseLocator): The course locator to display the analytics dashboard message on.

    For CCX courses the ENABLE_CCX_ANALYTICS_DASHBOARD_URL feature flag is
    additionally required; otherwise the configured ANALYTICS_DASHBOARD_URL
    alone decides.
    """
    if not hasattr(course_key, 'ccx'):
        return settings.ANALYTICS_DASHBOARD_URL
    ccx_flag = settings.FEATURES.get('ENABLE_CCX_ANALYTICS_DASHBOARD_URL', False)
    return settings.ANALYTICS_DASHBOARD_URL and ccx_flag
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
    """ Display the instructor dashboard for a course.

    Builds the per-role `access` dict, assembles the list of dashboard
    sections the requesting user may see (gated by access level and feature
    flags), and renders the dashboard template. Raises Http404 for
    non-staff users; returns a 500 for an unparsable course_id.
    """
    try:
        course_key = CourseKey.from_string(course_id)
    except InvalidKeyError:
        log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
        return HttpResponseServerError()
    course = get_course_by_id(course_key, depth=0)
    # Per-role flags consulted by the section builders below.
    access = {
        'admin': request.user.is_staff,
        'instructor': bool(has_access(request.user, 'instructor', course)),
        'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
        'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
        'staff': bool(has_access(request.user, 'staff', course)),
        'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
    }
    if not access['staff']:
        raise Http404()
    is_white_label = CourseMode.is_white_label(course_key)
    reports_enabled = configuration_helpers.get_value('SHOW_ECOMMERCE_REPORTS', False)
    # Sections every staff member sees; conditional ones are appended below.
    sections = [
        _section_course_info(course, access),
        _section_membership(course, access, is_white_label),
        _section_cohort_management(course, access),
        _section_student_admin(course, access),
        _section_data_download(course, access),
    ]
    analytics_dashboard_message = None
    if show_analytics_dashboard_message(course_key):
        # Construct a URL to the external analytics dashboard
        analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
        link_start = HTML("<a href=\"{}\" target=\"_blank\">").format(analytics_dashboard_url)
        analytics_dashboard_message = _(
            "To gain insights into student enrollment and participation {link_start}"
            "visit {analytics_dashboard_name}, our new course analytics product{link_end}."
        )
        analytics_dashboard_message = Text(analytics_dashboard_message).format(
            link_start=link_start, link_end=HTML("</a>"), analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
        # Temporarily show the "Analytics" section until we have a better way of linking to Insights
        sections.append(_section_analytics(course, access))
    # Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
    course_mode_has_price = False
    paid_modes = CourseMode.paid_modes_for_course(course_key)
    if len(paid_modes) == 1:
        course_mode_has_price = True
    elif len(paid_modes) > 1:
        log.error(
            u"Course %s has %s course modes with payment options. Course must only have "
            u"one paid course mode to enable eCommerce options.",
            unicode(course_key), len(paid_modes)
        )
    if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
        sections.insert(3, _section_extensions(course))
    # Gate access to course email by feature flag & by course-specific authorization
    if BulkEmailFlag.feature_enabled(course_key):
        sections.append(_section_send_email(course, access))
    # Gate access to Metrics tab by featue flag and staff authorization
    if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
        sections.append(_section_metrics(course, access))
    # Gate access to Ecommerce tab
    if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
        sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, reports_enabled))
    # Gate access to Special Exam tab depending if either timed exams or proctored exams
    # are enabled in the course
    # NOTE: For now, if we only have procotred exams enabled, then only platform Staff
    # (user.is_staff) will be able to view the special exams tab. This may
    # change in the future
    can_see_special_exams = (
        ((course.enable_proctored_exams and request.user.is_staff) or course.enable_timed_exams) and
        settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False)
    )
    if can_see_special_exams:
        sections.append(_section_special_exams(course, access))
    # Certificates panel
    # This is used to generate example certificates
    # and enable self-generated certificates for a course.
    # Note: This is hidden for all CCXs
    certs_enabled = CertificateGenerationConfiguration.current().enabled and not hasattr(course_key, 'ccx')
    if certs_enabled and access['admin']:
        sections.append(_section_certificates(course))
    disable_buttons = not _is_small_course(course_key)
    certificate_white_list = CertificateWhitelist.get_certificate_white_list(course_key)
    generate_certificate_exceptions_url = reverse(  # pylint: disable=invalid-name
        'generate_certificate_exceptions',
        kwargs={'course_id': unicode(course_key), 'generate_for': ''}
    )
    generate_bulk_certificate_exceptions_url = reverse(  # pylint: disable=invalid-name
        'generate_bulk_certificate_exceptions',
        kwargs={'course_id': unicode(course_key)}
    )
    certificate_exception_view_url = reverse(
        'certificate_exception_view',
        kwargs={'course_id': unicode(course_key)}
    )
    certificate_invalidation_view_url = reverse(  # pylint: disable=invalid-name
        'certificate_invalidation_view',
        kwargs={'course_id': unicode(course_key)}
    )
    certificate_invalidations = CertificateInvalidation.get_certificate_invalidations(course_key)
    context = {
        'course': course,
        'studio_url': get_studio_url(course, 'course'),
        'sections': sections,
        'disable_buttons': disable_buttons,
        'analytics_dashboard_message': analytics_dashboard_message,
        'certificate_white_list': certificate_white_list,
        'certificate_invalidations': certificate_invalidations,
        'generate_certificate_exceptions_url': generate_certificate_exceptions_url,
        'generate_bulk_certificate_exceptions_url': generate_bulk_certificate_exceptions_url,
        'certificate_exception_view_url': certificate_exception_view_url,
        'certificate_invalidation_view_url': certificate_invalidation_view_url,
    }
    return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
    """ Provide data for the corresponding dashboard section (E-Commerce):
    coupon listing, pricing, revenue totals, and the AJAX endpoint URLs the
    section's frontend calls. """
    course_key = course.id
    # Active coupons sort first.
    coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
    course_price = paid_mode.min_price
    total_amount = None
    if access['finance_admin']:
        # Revenue = single-seat purchases + bulk registration-code purchases.
        single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
        bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
        total_amount = single_purchase_total + bulk_purchase_total
    section_data = {
        'section_key': 'e-commerce',
        'section_display_name': _('E-Commerce'),
        'access': access,
        'course_id': unicode(course_key),
        'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
        'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
        'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
        'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
        'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
        'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
        'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
        'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
        'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
        'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
        'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
        'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
        'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
        'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
        'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
        'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
        'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
        'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
        'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
                                                       kwargs={'course_id': unicode(course_key)}),
        'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
        'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
        'coupons': coupons,
        'sales_admin': access['sales_admin'],
        'coupons_enabled': coupons_enabled,
        'reports_enabled': reports_enabled,
        'course_price': course_price,
        'total_amount': total_amount
    }
    return section_data
def _section_special_exams(course, access):
    """Build the context dict for the Special Exams dashboard section."""
    return {
        'section_key': 'special_exams',
        'section_display_name': _('Special Exams'),
        'access': access,
        'course_id': unicode(course.id)
    }
def _section_certificates(course):
    """Section information for the certificates panel.
    The certificates panel allows global staff to generate
    example certificates and enable self-generated certificates
    for a course.
    Arguments:
        course (Course)
    Returns:
        dict
    """
    example_cert_status = None
    html_cert_enabled = certs_api.has_html_certificates_enabled(course.id, course)
    if html_cert_enabled:
        # HTML certificates don't need example PDFs, so enabling is immediate.
        can_enable_for_course = True
    else:
        example_cert_status = certs_api.example_certificates_status(course.id)
        # Allow the user to enable self-generated certificates for students
        # *only* once a set of example certificates has been successfully generated.
        # If certificates have been misconfigured for the course (for example, if
        # the PDF template hasn't been uploaded yet), then we don't want
        # to turn on self-generated certificates for students!
        can_enable_for_course = (
            example_cert_status is not None and
            all(
                cert_status['status'] == 'success'
                for cert_status in example_cert_status
            )
        )
    instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
    # Map each distinct certificate status to how many certificates hold it.
    certificate_statuses_with_count = {
        certificate['status']: certificate['count']
        for certificate in GeneratedCertificate.get_unique_statuses(course_key=course.id)
    }
    return {
        'section_key': 'certificates',
        'section_display_name': _('Certificates'),
        'example_certificate_status': example_cert_status,
        'can_enable_for_course': can_enable_for_course,
        'enabled_for_course': certs_api.cert_generation_enabled(course.id),
        'is_self_paced': course.self_paced,
        'instructor_generation_enabled': instructor_generation_enabled,
        'html_cert_enabled': html_cert_enabled,
        'active_certificate': certs_api.get_active_web_certificate(course),
        'certificate_statuses_with_count': certificate_statuses_with_count,
        'status': CertificateStatuses,
        'certificate_generation_history':
            CertificateGenerationHistory.objects.filter(course_id=course.id).order_by("-created"),
        'urls': {
            'generate_example_certificates': reverse(
                'generate_example_certificates',
                kwargs={'course_id': course.id}
            ),
            'enable_certificate_generation': reverse(
                'enable_certificate_generation',
                kwargs={'course_id': course.id}
            ),
            'start_certificate_generation': reverse(
                'start_certificate_generation',
                kwargs={'course_id': course.id}
            ),
            'start_certificate_regeneration': reverse(
                'start_certificate_regeneration',
                kwargs={'course_id': course.id}
            ),
            'list_instructor_tasks_url': reverse(
                'list_instructor_tasks',
                kwargs={'course_id': course.id}
            ),
        }
    }
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
    """
    set the new course price and add new entry in the CourseModesArchive Table

    Expects POST params `course_price` (numeric) and `currency`; returns a
    JsonResponse, 400 on a non-numeric price or a missing honor mode.
    """
    try:
        course_price = int(request.POST['course_price'])
    except ValueError:
        return JsonResponse(
            {'message': _("Please Enter the numeric value for the course price")},
            status=400)  # status code 400: Bad Request
    currency = request.POST['currency']
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
    if not course_honor_mode:
        return JsonResponse(
            {'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
            status=400)  # status code 400: Bad Request
    # Archive the current honor-mode pricing before overwriting it.
    CourseModesArchive.objects.create(
        course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
        min_price=course_honor_mode[0].min_price, currency=course_honor_mode[0].currency,
        expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
    )
    course_honor_mode.update(
        min_price=course_price,
        currency=currency
    )
    return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
    """ Provide data for the corresponding dashboard section (Course Info):
    basic course metadata, optional enrollment counts, a grade-cutoff
    summary string, and any modulestore course errors. """
    course_key = course.id
    section_data = {
        'section_key': 'course_info',
        'section_display_name': _('Course Info'),
        'access': access,
        'course_id': course_key,
        'course_display_name': course.display_name,
        'has_started': course.has_started(),
        'has_ended': course.has_ended(),
        'start_date': course.start,
        'end_date': course.end,
        'num_sections': len(course.children),
        'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
    }
    if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
        section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
    if show_analytics_dashboard_message(course_key):
        # dashboard_link is already made safe in _get_dashboard_link
        dashboard_link = _get_dashboard_link(course_key)
        # so we can use Text() here so it's not double-escaped and rendering HTML on the front-end
        message = Text(_("Enrollment data is now available in {dashboard_link}.")).format(dashboard_link=dashboard_link)
        section_data['enrollment_message'] = message
    if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
        section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
    try:
        # Render cutoffs highest-score-first as "A: 0.9, B: 0.8, ..."
        # (the trailing ", " is sliced off).
        sorted_cutoffs = sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True)
        # NOTE(review): the tuple-unpacking lambda parameter "(letter, score)"
        # is Python-2-only syntax and will not parse on Python 3.
        advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
        section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
    except Exception:  # pylint: disable=broad-except
        section_data['grade_cutoffs'] = "Not Available"
    try:
        section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
    except Exception:  # pylint: disable=broad-except
        section_data['course_errors'] = [('Error fetching errors', '')]
    return section_data
def _section_membership(course, access, is_white_label):
    """Assemble the context dict for the Membership dashboard section."""
    course_key = course.id
    ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
    url_kwargs = {'course_id': unicode(course_key)}
    section_data = {
        'section_key': 'membership',
        'section_display_name': _('Membership'),
        'access': access,
        'ccx_is_enabled': ccx_enabled,
        'is_white_label': is_white_label,
    }
    # All membership endpoints share the same course_id kwarg; build them in one pass.
    url_names = (
        ('enroll_button_url', 'students_update_enrollment'),
        ('unenroll_button_url', 'students_update_enrollment'),
        ('upload_student_csv_button_url', 'register_and_enroll_students'),
        ('modify_beta_testers_button_url', 'bulk_beta_modify_access'),
        ('list_course_role_members_url', 'list_course_role_members'),
        ('modify_access_url', 'modify_access'),
        ('list_forum_members_url', 'list_forum_members'),
        ('update_forum_role_membership_url', 'update_forum_role_membership'),
    )
    for data_key, url_name in url_names:
        section_data[data_key] = reverse(url_name, kwargs=url_kwargs)
    return section_data
def _section_cohort_management(course, access):
    """Assemble the context dict for the Cohorts dashboard section."""
    course_key = course.id
    key_string = unicode(course_key)
    return {
        'section_key': 'cohort_management',
        'section_display_name': _('Cohorts'),
        'access': access,
        'ccx_is_enabled': hasattr(course_key, 'ccx'),
        'course_cohort_settings_url': reverse(
            'course_cohort_settings', kwargs={'course_key_string': key_string}),
        'cohorts_url': reverse('cohorts', kwargs={'course_key_string': key_string}),
        'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': key_string}),
        'discussion_topics_url': reverse(
            'cohort_discussion_topics', kwargs={'course_key_string': key_string}),
        'verified_track_cohorting_url': reverse(
            'verified_track_cohorting', kwargs={'course_key_string': key_string}),
    }
def _is_small_course(course_key):
    """Return True when enrollment is at or under MAX_ENROLLMENT_INSTR_BUTTONS."""
    enrollment = CourseEnrollment.objects.num_enrolled_in(course_key)
    limit = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
    # An unset limit means the course is never considered small.
    return limit is not None and enrollment <= limit
def _section_student_admin(course, access):
    """ Provide data for the corresponding dashboard section """
    course_key = course.id
    # Small courses (see _is_small_course) get additional per-student controls.
    is_small_course = _is_small_course(course_key)
    section_data = {
        'section_key': 'student_admin',
        'section_display_name': _('Student Admin'),
        'access': access,
        'is_small_course': is_small_course,
        'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
        'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
        'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
        'reset_student_attempts_for_entrance_exam_url': reverse(
            'reset_student_attempts_for_entrance_exam',
            kwargs={'course_id': unicode(course_key)},
        ),
        'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
        'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
        'student_can_skip_entrance_exam_url': reverse(
            'mark_student_can_skip_entrance_exam',
            kwargs={'course_id': unicode(course_key)},
        ),
        'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
        'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
                                                          kwargs={'course_id': unicode(course_key)}),
        'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
    }
    # Local import: Stanford extension overlays extra keys onto the section data.
    from openedx.stanford.lms.djangoapps.instructor.views.instructor_dashboard import student_admin_section_data
    section_data.update(student_admin_section_data(course_key))
    return section_data
def _section_extensions(course):
    """Assemble the context dict for the due-date Extensions dashboard section."""
    url_kwargs = {'course_id': unicode(course.id)}
    units = [(title_or_url(unit), unicode(unit.location))
             for unit in get_units_with_due_date(course)]
    return {
        'section_key': 'extensions',
        'section_display_name': _('Extensions'),
        'units_with_due_dates': units,
        'change_due_date_url': reverse('change_due_date', kwargs=url_kwargs),
        'reset_due_date_url': reverse('reset_due_date', kwargs=url_kwargs),
        'show_unit_extensions_url': reverse('show_unit_extensions', kwargs=url_kwargs),
        'show_student_extensions_url': reverse('show_student_extensions', kwargs=url_kwargs),
    }
def _section_data_download(course, access):
    """ Provide data for the corresponding dashboard section """
    course_key = course.id
    # Proctored-exam report only shows when both the feature flag and the
    # course-level setting are enabled.
    show_proctored_report_button = (
        settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
        course.enable_proctored_exams
    )
    section_data = {
        'section_key': 'data_download',
        'section_display_name': _('Data Download'),
        'access': access,
        'show_generate_proctored_exam_report_button': show_proctored_report_button,
        'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': unicode(course_key)}),
        'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
        'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
        'get_issued_certificates_url': reverse(
            'get_issued_certificates', kwargs={'course_id': unicode(course_key)}
        ),
        'get_students_who_may_enroll_url': reverse(
            'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
        ),
        'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
        'list_proctored_results_url': reverse('get_proctored_exam_results', kwargs={'course_id': unicode(course_key)}),
        'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
        'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
        'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
        'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
        'course_has_survey': True if course.course_survey_name else False,
        'course_survey_results_url': reverse('get_course_survey_results', kwargs={'course_id': unicode(course_key)}),
        'export_ora2_data_url': reverse('export_ora2_data', kwargs={'course_id': unicode(course_key)}),
    }
    # Local import: Stanford extension overlays extra keys onto the section data.
    from openedx.stanford.lms.djangoapps.instructor.views.instructor_dashboard import data_download_section_data
    section_data.update(data_download_section_data(course_key))
    return section_data
def null_applicable_aside_types(block):  # pylint: disable=unused-argument
    """
    Stand-in for ``applicable_aside_types``, monkey-patched in while rendering
    an HtmlDescriptor for email text editing: reports no applicable asides.
    """
    return []
def _section_send_email(course, access):
    """ Provide data for the corresponding bulk email section """
    course_key = course.id
    # Monkey-patch applicable_aside_types to return no asides for the duration of this render
    with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
        # This HtmlDescriptor is only being used to generate a nice text editor.
        html_module = HtmlDescriptor(
            course.system,
            DictFieldData({'data': ''}),
            ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
        )
        fragment = course.system.render(html_module, 'studio_view')
    # Wrap the rendered editor fragment so it behaves as an LMS xblock view.
    fragment = wrap_xblock(
        'LmsRuntime', html_module, 'studio_view', fragment, None,
        extra_data={"course-id": unicode(course_key)},
        usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
        # Generate a new request_token here at random, because this module isn't connected to any other
        # xblock rendering.
        request_token=uuid.uuid1().get_hex()
    )
    # Cohort targeting only applies when the course has cohorts enabled.
    cohorts = []
    if is_course_cohorted(course_key):
        cohorts = get_course_cohorts(course)
    email_editor = fragment.content
    section_data = {
        'section_key': 'send_email',
        'section_display_name': _('Email'),
        'access': access,
        'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
        'editor': email_editor,
        'cohorts': cohorts,
        'default_cohort_name': DEFAULT_COHORT_NAME,
        'list_instructor_tasks_url': reverse(
            'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
        ),
        'email_background_tasks_url': reverse(
            'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
        ),
        'email_content_history_url': reverse(
            'list_email_content', kwargs={'course_id': unicode(course_key)}
        ),
    }
    # Local import: Stanford extension overlays extra keys onto the section data.
    from openedx.stanford.lms.djangoapps.instructor.views.instructor_dashboard import send_email_section_data
    section_data.update(send_email_section_data())
    return section_data
def _get_dashboard_link(course_key):
    """Build a safe HTML anchor pointing at the external analytics dashboard."""
    dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
    return HTML(u"<a href=\"{0}\" target=\"_blank\">{1}</a>").format(
        dashboard_url, settings.ANALYTICS_DASHBOARD_NAME
    )
def _section_analytics(course, access):
    """Assemble the context dict for the Analytics dashboard section."""
    return {
        'section_key': 'instructor_analytics',
        'section_display_name': _('Analytics'),
        'access': access,
        'course_id': unicode(course.id),
    }
def _section_metrics(course, access):
    """Assemble the context dict for the Metrics dashboard section."""
    course_key = course.id
    section_data = {
        'section_key': 'metrics',
        'section_display_name': _('Metrics'),
        'access': access,
        'course_id': unicode(course_key),
        'sub_section_display_name': get_section_display_name(course_key),
        'section_has_problem': get_array_section_has_problem(course_key),
    }
    # These endpoints take no course kwarg; the course id is posted by the client.
    for data_key, url_name in (
            ('get_students_opened_subsection_url', 'get_students_opened_subsection'),
            ('get_students_problem_grades_url', 'get_students_problem_grades'),
            ('post_metrics_data_csv_url', 'post_metrics_data_csv')):
        section_data[data_key] = reverse(url_name)
    # Local import: Stanford extension overlays extra keys onto the section data.
    from openedx.stanford.lms.djangoapps.instructor.views.instructor_dashboard import metrics_section_data
    section_data.update(metrics_section_data(course_key))
    return section_data
|
caesar2164/edx-platform
|
lms/djangoapps/instructor/views/instructor_dashboard.py
|
Python
|
agpl-3.0
| 32,750
|
[
"VisIt"
] |
022063698dddc9c106bff4f72e0fb80cbd504758cbdb94ff0ead6c0d360b8227
|
"""Handle installation and updates of bcbio-nextgen, third party software and data.
Enables automated installation tool and in-place updates to install additional
data and software.
"""
from __future__ import print_function
import argparse
import collections
import contextlib
import datetime
import dateutil
from distutils.version import LooseVersion
import gzip
import json
import os
import shutil
import subprocess
import sys
import glob
import six
from six.moves import urllib
import toolz as tz
import yaml
from bcbio import broad, utils
from bcbio.cwl import create
from bcbio.pipeline import genome, version
from bcbio.variation import effects
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
# Remote endpoints used for code, tool and data installation/upgrades.
REMOTES = {
    "requirements": "https://raw.githubusercontent.com/bcbio/bcbio-nextgen/master/requirements-conda.txt",
    "gitrepo": "https://github.com/bcbio/bcbio-nextgen.git",
    "cloudbiolinux": "https://github.com/chapmanb/cloudbiolinux/archive/master.tar.gz",
    "genome_resources": "https://raw.githubusercontent.com/bcbio/bcbio-nextgen/master/config/genomes/%s-resources.yaml",
    "snpeff_dl_url": ("http://downloads.sourceforge.net/project/snpeff/databases/v{snpeff_ver}/"
                      "snpEff_v{snpeff_ver}_{genome}.zip")}
# Genome builds installable via the data upgrade machinery.
SUPPORTED_GENOMES = ["GRCh37", "hg19", "hg38", "hg38-noalt", "mm10", "mm9",
                     "rn6", "rn5", "canFam3", "dm3", "galGal4", "phix",
                     "pseudomonas_aeruginosa_ucbpp_pa14", "sacCer3", "TAIR10",
                     "WBcel235", "xenTro3", "GRCz10", "GRCz11", "Sscrofa11.1", "BDGP6"]
# Index directories packaged as tarballs for CWL runs (see _prepare_cwl_tarballs).
TARBALL_DIRECTORIES = ["bwa", "rtg", "hisat2"]
SUPPORTED_INDEXES = TARBALL_DIRECTORIES +\
    ["bbmap", "bowtie", "bowtie2", "minimap2", "novoalign", "twobit", "bismark",
     "snap", "star", "seq"]
# Indexes always installed in addition to whatever the user requests.
DEFAULT_INDEXES = ["rtg"]
# (name, fname) pair describing a --toolplus argument (e.g. a GATK jar).
Tool = collections.namedtuple("Tool", ["name", "fname"])
def upgrade_bcbio(args):
    """Perform upgrade of bcbio to latest release, or from GitHub development version.

    Handles bcbio, third party tools and data.  Depending on ``args.upgrade``
    this updates the bcbio code (conda release, or git code layered with pip
    for development), then optionally third party tools (``args.tooldir``),
    non-distributable extras (``args.toolplus``) and genome data
    (``args.install_data``).  Returns args with ``datadir`` filled in.
    """
    print("Upgrading bcbio")
    # Merge in defaults saved from previous installer runs.
    args = add_install_defaults(args)
    if args.upgrade in ["stable", "system", "deps", "development"]:
        if args.upgrade == "development":
            # Development: pull the latest conda package first, then layer the
            # git checkout on top with pip --no-deps.
            anaconda_dir = _update_conda_latest()
            _check_for_conda_problems()
            print("Upgrading bcbio-nextgen to latest development version")
            pip_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "pip")
            git_tag = "@%s" % args.revision if args.revision != "master" else ""
            _pip_safe_ssl([[pip_bin, "install", "--upgrade", "--no-deps",
                            "git+%s%s#egg=bcbio-nextgen" % (REMOTES["gitrepo"], git_tag)]], anaconda_dir)
            print("Upgrade of bcbio-nextgen development code complete.")
        else:
            _update_conda_packages()
            _check_for_conda_problems()
            print("Upgrade of bcbio-nextgen code complete.")
    if args.cwl and args.upgrade:
        _update_bcbiovm()
    try:
        # Headless machines need the non-interactive backend; best effort only.
        _set_matplotlib_default_backend()
    except OSError:
        pass
    if args.tooldir:
        with bcbio_tmpdir():
            print("Upgrading third party tools to latest versions")
            _symlink_bcbio(args, script="bcbio_nextgen.py")
            _symlink_bcbio(args, script="bcbio_setup_genome.py")
            _symlink_bcbio(args, script="bcbio_prepare_samples.py")
            _symlink_bcbio(args, script="bcbio_fastq_umi_prep.py")
            if args.cwl:
                _symlink_bcbio(args, "bcbio_vm.py", "bcbiovm")
                _symlink_bcbio(args, "python", "bcbiovm", "bcbiovm")
            upgrade_thirdparty_tools(args, REMOTES)
            print("Third party tools upgrade complete.")
    if args.toolplus:
        print("Installing additional tools")
        _install_toolplus(args)
    if args.install_data:
        # Always include the default indexes alongside user-requested aligners.
        for default in DEFAULT_INDEXES:
            if default not in args.aligners:
                args.aligners.append(default)
        if len(args.aligners) == 0:
            print("Warning: no aligners provided with `--aligners` flag")
        if len(args.genomes) == 0:
            print("Data not installed, no genomes provided with `--genomes` flag")
        else:
            with bcbio_tmpdir():
                print("Upgrading bcbio-nextgen data files")
                upgrade_bcbio_data(args, REMOTES)
                print("bcbio-nextgen data upgrade complete.")
    if args.isolate and args.tooldir:
        print("Isolated tool installation not automatically added to environmental variables")
        print(" Add:\n {t}/bin to PATH".format(t=args.tooldir))
    # Persist the chosen options so future upgrades can reuse them.
    save_install_defaults(args)
    args.datadir = _get_data_dir()
    _install_container_bcbio_system(args.datadir)
    print("Upgrade completed successfully.")
    return args
def _pip_safe_ssl(cmds, anaconda_dir):
    """Run pip commands, retrying with conda's SSL certificate on failure.

    Some hosts lack usable global certificates; on a failed first pass the
    conda CA bundle is exported via PIP_CERT and the commands are rerun.
    """
    def _run_all():
        for cmd in cmds:
            subprocess.check_call(cmd)
    try:
        _run_all()
    except subprocess.CalledProcessError:
        _set_pip_ssl(anaconda_dir)
        _run_all()
def _set_pip_ssl(anaconda_dir):
"""Set PIP SSL certificate to installed conda certificate to avoid SSL errors
"""
if anaconda_dir:
cert_file = os.path.join(anaconda_dir, "ssl", "cert.pem")
if os.path.exists(cert_file):
os.environ["PIP_CERT"] = cert_file
def _set_matplotlib_default_backend():
    """
    matplotlib will try to print to a display if it is available, but don't want
    to run it in interactive mode. we tried setting the backend to 'Agg'' before
    importing, but it was still resulting in issues. we replace the existing
    backend with 'agg' in the default matplotlibrc. This is a hack until we can
    find a better solution
    """
    if _matplotlib_installed():
        import matplotlib
        matplotlib.use('Agg', force=True)
        # Rewrite the active matplotlibrc in place (atomically, via
        # file_transaction) so future imports default to the headless backend.
        config = matplotlib.matplotlib_fname()
        if os.access(config, os.W_OK):
            with file_transaction(config) as tx_out_file:
                with open(config) as in_file, open(tx_out_file, "w") as out_file:
                    for line in in_file:
                        # Only replace the "backend:" setting; pass other lines through.
                        if line.split(":")[0].strip() == "backend":
                            out_file.write("backend: agg\n")
                        else:
                            out_file.write(line)
def _matplotlib_installed():
try:
import matplotlib
except ImportError:
return False
return True
def _symlink_bcbio(args, script="bcbio_nextgen.py", env_name=None, prefix=None):
    """Ensure a bcbio-nextgen script symlink in the final tool bin directory.

    With ``env_name`` the script is located inside that conda environment;
    ``prefix`` renames the destination link (e.g. ``bcbiovm_python``).
    """
    exe_dir = os.path.dirname(os.path.realpath(sys.executable))
    if env_name:
        # Named environments live under <anaconda>/envs/<name>/bin
        src = os.path.join(os.path.dirname(exe_dir), "envs", env_name, "bin", script)
    else:
        src = os.path.join(exe_dir, script)
    bindir = os.path.join(args.tooldir, "bin")
    if not os.path.exists(bindir):
        os.makedirs(bindir)
    link_name = "%s_%s" % (prefix, script) if prefix else script
    dest = os.path.join(bindir, link_name)
    if not os.path.exists(dest):
        # lexists catches dangling symlinks that exists() misses.
        if os.path.lexists(dest):
            subprocess.check_call(["rm", "-f", dest])
        subprocess.check_call(["ln", "-s", src, dest])
def _install_container_bcbio_system(datadir):
    """Install limited bcbio_system.yaml file for setting core and memory usage.

    Adds any non-specific programs to the exposed bcbio_system.yaml file, only
    when upgrade happening inside a docker container.
    """
    base_file = os.path.join(datadir, "config", "bcbio_system.yaml")
    if not os.path.exists(base_file):
        return
    expose_file = os.path.join(datadir, "galaxy", "bcbio_system.yaml")
    expose_keys = set(["memory", "cores", "jvm_opts"])
    with open(base_file) as in_handle:
        config = yaml.safe_load(in_handle)
    if os.path.exists(expose_file):
        with open(expose_file) as in_handle:
            expose_config = yaml.safe_load(in_handle)
    else:
        expose_config = {"resources": {}}
    # Copy only the resource-sizing keys, never overwriting user entries.
    for pname, vals in config["resources"].items():
        exposed = {k: v for k, v in vals.items() if k in expose_keys}
        if exposed and pname not in expose_config["resources"]:
            expose_config["resources"][pname] = exposed
    if expose_file and os.path.exists(os.path.dirname(expose_file)):
        with open(expose_file, "w") as out_handle:
            yaml.safe_dump(expose_config, out_handle, default_flow_style=False, allow_unicode=False)
    return expose_file
def _get_conda_bin():
conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "conda")
if os.path.exists(conda_bin):
return conda_bin
def _get_mamba_bin():
mamba_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "mamba")
if os.path.exists(mamba_bin):
return mamba_bin
def _check_for_conda_problems():
    """Identify post-install conda problems and fix.

    - libgcc upgrades can remove libquadmath, which moved to libgcc-ng
    """
    conda_bin = _get_conda_bin()
    channels = _get_conda_channels(conda_bin)
    lib_dir = os.path.join(os.path.dirname(conda_bin), os.pardir, "lib")
    for libname in ("libgomp.so.1", "libquadmath.so"):
        # Re-check each library; an earlier reinstall may have restored it.
        if not os.path.exists(os.path.join(lib_dir, libname)):
            subprocess.check_call([conda_bin, "install", "-f", "--yes"] + channels + ["libgcc-ng"])
def _update_bcbiovm():
    """Update or install a local bcbiovm install with tools and dependencies."""
    print("## CWL support with bcbio-vm")
    python_env = "python=3.6"
    conda_bin, env_name = _add_environment("bcbiovm", python_env)
    channels = _get_conda_channels(conda_bin)
    base_cmd = [conda_bin, "install", "--yes", "--name", env_name] + channels
    # Two install passes: core bcbio first, then the VM wrapper plus
    # tools that should track the latest release (cromwell).
    for packages in ([python_env, "nomkl", "bcbio-nextgen"],
                     [python_env, "bcbio-nextgen-vm", "cromwell"]):
        subprocess.check_call(base_cmd + packages)
def _get_envs(conda_bin):
    """Return conda environment paths that live under the conda install prefix.

    Queries ``conda info --envs --json`` and filters out environments
    created outside the managed anaconda directory.
    """
    # Argument-list invocation instead of shell=True with string
    # interpolation: a conda_bin path containing spaces or shell
    # metacharacters can no longer break (or inject into) the command.
    info = json.loads(subprocess.check_output([conda_bin, "info", "--envs", "--json"]))
    return [e for e in info["envs"] if e.startswith(info["conda_prefix"])]
def _add_environment(addenv, deps):
    """Create the named conda environment with the given dependencies if missing.

    ``deps`` is a space-separated package specification string (e.g.
    "python=3.6").  Returns the conda binary path and the environment name.
    """
    conda_bin = _get_conda_bin()
    conda_envs = _get_envs(conda_bin)
    if not any(x.endswith("/%s" % addenv) for x in conda_envs):
        # Argument-list invocation instead of shell=True string formatting:
        # immune to spaces/metacharacters in the conda path. Splitting deps
        # preserves the previous whitespace-separated semantics.
        subprocess.check_call([conda_bin, "create", "--no-default-packages", "-y",
                               "--name", addenv] + deps.split())
    return conda_bin, addenv
def _get_conda_channels(conda_bin):
    """Retrieve default conda channels, checking if they are pre-specified in config.

    This allows users to override defaults with specific mirrors in their .condarc
    """
    wanted = ["bioconda", "conda-forge"]
    config = yaml.safe_load(subprocess.check_output([conda_bin, "config", "--show"]))
    configured = config.get("channels") or []
    channel_args = []
    for channel in wanted:
        # Mirror URLs count as present when they end with the channel name.
        already_there = any(existing.endswith((channel, "%s/" % channel))
                            for existing in configured)
        if not already_there:
            channel_args += ["-c", channel]
    return channel_args
def _update_conda_packages():
    """If installed in an anaconda directory, upgrade conda packages.

    Prefers mamba for speed when available, falling back to conda.
    Returns the anaconda base directory.
    """
    conda_bin = _get_mamba_bin()
    if not conda_bin:
        conda_bin = _get_conda_bin()
    # Check for a usable binary BEFORE using it: previously the assert came
    # after the first subprocess call on conda_bin, so a missing install
    # surfaced as a TypeError instead of this message.
    assert conda_bin, ("Could not find anaconda distribution for upgrading bcbio.\n"
                       "Using python at %s but could not find conda." % (os.path.realpath(sys.executable)))
    channels = _get_conda_channels(conda_bin)
    req_file = "bcbio-update-requirements.txt"
    if os.path.exists(req_file):
        os.remove(req_file)
    subprocess.check_call(["wget", "-O", req_file, "--no-check-certificate", REMOTES["requirements"]])
    subprocess.check_call([conda_bin, "install", "--quiet", "--yes"] + channels +
                          ["--file", req_file])
    if os.path.exists(req_file):
        os.remove(req_file)
    return os.path.dirname(os.path.dirname(conda_bin))
def _update_conda_latest():
    """Update to the latest bcbio conda package.

    Queries bioconda via ``conda search`` and installs when the remote
    version is ahead of the local one.  Returns the anaconda base directory.
    """
    conda_bin = _get_conda_bin()
    output = subprocess.run([conda_bin, "search", "-c", "bioconda", "bcbio-nextgen"], stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE).stdout
    # NOTE(review): assumes the last non-empty line of `conda search` output
    # is the newest version row ("name version build channel") — confirm
    # against the installed conda's output format.
    lines = [l for l in output.decode().split("\n") if l]
    latest = lines.pop()
    tokens = latest.split()
    conda_version = tokens[1].strip()
    print(f"Detected {conda_version} as latest version of bcbio-nextgen on bioconda.")
    channels = _get_conda_channels(conda_bin)
    bcbio_version = version.__version__
    if LooseVersion(bcbio_version) < LooseVersion(conda_version):
        print(f"Installing bcbio {conda_version} from bioconda.")
        subprocess.check_call([conda_bin, "install", "--quiet", "--yes"] + channels +
                              [f"bcbio-nextgen>={conda_version}"])
    else:
        print(f"bcbio version {bcbio_version} is newer than the conda version {conda_version}, skipping upgrade from conda.")
    return os.path.dirname(os.path.dirname(conda_bin))
def _update_conda_devel():
    """Update to the latest development conda package.

    Returns the anaconda base directory.
    """
    conda_bin = _get_conda_bin()
    # Assert BEFORE first use: previously _get_conda_channels(conda_bin) ran
    # first, so a missing conda raised a TypeError instead of this message.
    assert conda_bin, "Could not find anaconda distribution for upgrading bcbio"
    channels = _get_conda_channels(conda_bin)
    # "a0" dev suffixes sort incorrectly for conda's matcher; normalize to "a".
    subprocess.check_call([conda_bin, "install", "--quiet", "--yes"] + channels +
                          ["bcbio-nextgen>=%s" % version.__version__.replace("a0", "a")])
    return os.path.dirname(os.path.dirname(conda_bin))
def get_genome_dir(gid, galaxy_dir, data):
    """Return the standard on-disk location of a genome's directory.

    With a galaxy directory, resolve via the reference fasta location;
    otherwise glob the shared genomes tree for a unique match.
    """
    if galaxy_dir:
        refs = genome.get_refs(gid, None, galaxy_dir, data)
        fasta = tz.get_in(["fasta", "base"], refs)
        if fasta and os.path.exists(fasta):
            return os.path.dirname(os.path.dirname(fasta))
    else:
        matches = glob.glob(os.path.join(_get_data_dir(), "genomes", "*", gid))
        if len(matches) == 1 and os.path.exists(matches[0]):
            return matches[0]
def _get_data_dir():
base_dir = os.path.realpath(os.path.dirname(os.path.dirname(os.path.realpath(sys.executable))))
if "anaconda" not in os.path.basename(base_dir) and "virtualenv" not in os.path.basename(base_dir):
raise ValueError("Cannot update data for bcbio-nextgen not installed by installer.\n"
"bcbio-nextgen needs to be installed inside an anaconda environment \n"
"located in the same directory as the `genomes` directory.")
return os.path.dirname(base_dir)
def get_gemini_dir(data=None):
    """Locate the gemini data directory, falling back to the galaxy layout.

    Returns None when no installer data directory exists and no sample
    data is available to derive the location from.
    """
    try:
        return os.path.join(_get_data_dir(), "gemini_data")
    except ValueError:
        if not data:
            return None
        galaxy_dir = dd.get_galaxy_dir(data)
        base_dir = os.path.realpath(os.path.dirname(os.path.dirname(galaxy_dir)))
        return os.path.join(base_dir, "gemini_data")
def upgrade_bcbio_data(args, remotes):
    """Upgrade required genome data files in place.

    Drives CloudBioLinux to install/update genome data, then refreshes
    genome resource YAMLs, snpEff databases and optional VEP/kraken data.
    """
    if hasattr(args, "datadir") and args.datadir and os.path.exists(args.datadir):
        data_dir = args.datadir
    else:
        data_dir = _get_data_dir()
    tooldir = args.tooldir or get_defaults().get("tooldir")
    galaxy_home = os.path.join(data_dir, "galaxy")
    cbl = get_cloudbiolinux(remotes)
    tool_data_table_conf_file = os.path.join(cbl["dir"], "installed_files", "tool_data_table_conf.xml")
    genome_opts = _get_biodata(cbl["biodata"], args)
    # CloudBioLinux modules are imported dynamically from the downloaded checkout.
    sys.path.insert(0, cbl["dir"])
    cbl_genomes = __import__("cloudbio.biodata.genomes", fromlist=["genomes"])
    cbl_genomes.install_data_local(genome_opts, tooldir, data_dir, galaxy_home, tool_data_table_conf_file,
                                   args.cores, ["ggd", "s3", "raw"])
    _upgrade_genome_resources(galaxy_home, remotes["genome_resources"])
    _upgrade_snpeff_data(galaxy_home, args, remotes)
    if "vep" in args.datatarget:
        _upgrade_vep_data(galaxy_home, tooldir)
    if "kraken" in args.datatarget:
        _install_kraken_db(_get_data_dir(), args)
    if args.cwl:
        _prepare_cwl_tarballs(data_dir)
def _prepare_cwl_tarballs(data_dir):
    """Create CWL ready tarballs for complex directories.

    Avoids need for CWL runners to pass and serialize complex directories
    of files, which is inconsistent between runners.
    """
    # genomes/<organism>/<build> layout; only tarball the known index dirs.
    for dbref_dir in filter(os.path.isdir, glob.glob(os.path.join(data_dir, "genomes", "*", "*"))):
        for indexdir in TARBALL_DIRECTORIES:
            cur_target = os.path.join(dbref_dir, indexdir)
            if not os.path.isdir(cur_target):
                continue
            # Some indices, like rtg, have a single nested directory
            subdirs = [x for x in os.listdir(cur_target) if os.path.isdir(os.path.join(cur_target, x))]
            if len(subdirs) == 1:
                cur_target = os.path.join(cur_target, subdirs[0])
            create.directory_tarball(cur_target)
def _upgrade_genome_resources(galaxy_dir, base_url):
    """Retrieve latest version of genome resource YAML configuration files.

    For each installed genome build, fetch the remote resources YAML and
    replace the local copy when the remote "version" field is newer,
    keeping the previous file as a ``.old<version>`` backup.
    """
    import requests
    for dbkey, ref_file in genome.get_builds(galaxy_dir):
        # Check for a remote genome resources file
        remote_url = base_url % dbkey
        # verify=False tolerates certificate problems on the download host;
        # the urllib3 warning is silenced to keep upgrade output readable.
        requests.packages.urllib3.disable_warnings()
        r = requests.get(remote_url, verify=False)
        if r.status_code == requests.codes.ok:
            local_file = os.path.join(os.path.dirname(ref_file), os.path.basename(remote_url))
            if os.path.exists(local_file):
                with open(local_file) as in_handle:
                    local_config = yaml.safe_load(in_handle)
                remote_config = yaml.safe_load(r.text)
                needs_update = remote_config["version"] > local_config.get("version", 0)
                if needs_update:
                    # Keep the outgoing config as a versioned backup.
                    shutil.move(local_file, local_file + ".old%s" % local_config.get("version", 0))
            else:
                needs_update = True
            if needs_update:
                print("Updating %s genome resources configuration" % dbkey)
                with open(local_file, "w") as out_handle:
                    out_handle.write(r.text)
def _upgrade_vep_data(galaxy_dir, tooldir):
    """Prepare VEP caches for every genome build configured in galaxy."""
    for build, ref_file in genome.get_builds(galaxy_dir):
        effects.prep_vep_cache(build, ref_file, tooldir)
def _upgrade_snpeff_data(galaxy_dir, args, remotes):
    """Install or upgrade snpEff databases, localized to reference directory.

    Looks up the snpEff database name for each installed genome from its
    resources YAML, removes databases built for older snpEff versions and
    downloads/unpacks the matching database release when missing.
    """
    snpeff_version = effects.snpeff_version(args)
    if not snpeff_version:
        return
    for dbkey, ref_file in genome.get_builds(galaxy_dir):
        resource_file = os.path.join(os.path.dirname(ref_file), "%s-resources.yaml" % dbkey)
        if os.path.exists(resource_file):
            with open(resource_file) as in_handle:
                resources = yaml.safe_load(in_handle)
            snpeff_db, snpeff_base_dir = effects.get_db({"genome_resources": resources,
                                                         "reference": {"fasta": {"base": ref_file}}})
            if snpeff_db:
                snpeff_db_dir = os.path.join(snpeff_base_dir, snpeff_db)
                # Databases from older snpEff releases are removed and rebuilt.
                if os.path.exists(snpeff_db_dir) and _is_old_database(snpeff_db_dir, args):
                    shutil.rmtree(snpeff_db_dir)
                if not os.path.exists(snpeff_db_dir):
                    print("Installing snpEff database %s in %s" % (snpeff_db, snpeff_base_dir))
                    dl_url = remotes["snpeff_dl_url"].format(
                        snpeff_ver=snpeff_version.replace(".", "_"),
                        genome=snpeff_db)
                    dl_file = os.path.basename(dl_url)
                    # wget -c resumes partial downloads of the large zip files.
                    with utils.chdir(snpeff_base_dir):
                        subprocess.check_call(["wget", "--no-check-certificate", "-c", "-O", dl_file, dl_url])
                        subprocess.check_call(["unzip", dl_file])
                        os.remove(dl_file)
                    # The zip unpacks to data/<db>; relocate into the final layout.
                    dl_dir = os.path.join(snpeff_base_dir, "data", snpeff_db)
                    shutil.move(dl_dir, snpeff_db_dir)
                    os.rmdir(os.path.join(snpeff_base_dir, "data"))
                    if args.cwl:
                        create.directory_tarball(snpeff_db_dir)
def _is_old_database(db_dir, args):
    """Check for old database versions, supported in snpEff 4.1.

    Returns True when the installed database predates the active snpEff
    release (or its predictor file is missing/unidentifiable).
    """
    snpeff_version = effects.snpeff_version(args)
    if LooseVersion(snpeff_version) < LooseVersion("4.1"):
        return False
    pred_file = os.path.join(db_dir, "snpEffectPredictor.bin")
    if not utils.file_exists(pred_file):
        return True
    # The (gzipped) predictor starts with a tab-separated "program\tversion" header.
    with utils.open_gzipsafe(pred_file, is_gz=True) as in_handle:
        version_info = in_handle.readline().strip().split("\t")
    program, db_version = version_info[:2]
    return program.lower() != "snpeff" or LooseVersion(snpeff_version) > LooseVersion(db_version)
def _get_biodata(base_file, args):
    """Retrieve biodata genome targets customized by install parameters."""
    with open(base_file) as in_handle:
        config = yaml.safe_load(in_handle)
    config["install_liftover"] = False
    config["genome_indexes"] = args.aligners
    # annotation_groups maps shorthand targets to concrete annotation lists.
    ann_groups = config.pop("annotation_groups", {})
    wanted = set(args.genomes)
    config["genomes"] = [_setup_genome_annotations(g, args, ann_groups)
                         for g in config["genomes"] if g["dbkey"] in wanted]
    return config
def _setup_genome_annotations(g, args, ann_groups):
"""Configure genome annotations to install based on datatarget.
"""
available_anns = g.get("annotations", []) + g.pop("annotations_available", [])
anns = []
for orig_target in args.datatarget:
if orig_target in ann_groups:
targets = ann_groups[orig_target]
else:
targets = [orig_target]
for target in targets:
if target in available_anns:
anns.append(target)
g["annotations"] = anns
if "variation" not in args.datatarget and "validation" in g:
del g["validation"]
return g
def upgrade_thirdparty_tools(args, remotes):
    """Install and update third party tools used in the pipeline.

    Creates a manifest directory with installed programs on the system.
    Uses CloudBioLinux's conda installer with either a user supplied tool
    configuration (``args.toolconf``) or the bundled minimal flavor.
    """
    # Prefer mamba for speed when available.
    conda_bin = _get_mamba_bin()
    if not conda_bin:
        conda_bin = _get_conda_bin()
    cbl = get_cloudbiolinux(remotes)
    if args.toolconf and os.path.exists(args.toolconf):
        package_yaml = args.toolconf
    else:
        package_yaml = os.path.join(cbl["dir"], "contrib", "flavor",
                                    "ngs_pipeline_minimal", "packages-conda.yaml")
    # CloudBioLinux modules are imported dynamically from the checkout.
    sys.path.insert(0, cbl["dir"])
    cbl_conda = __import__("cloudbio.package.conda", fromlist=["conda"])
    cbl_conda.install_in(conda_bin, args.tooldir, package_yaml)
    manifest_dir = os.path.join(_get_data_dir(), "manifest")
    print("Creating manifest of installed packages in %s" % manifest_dir)
    cbl_manifest = __import__("cloudbio.manifest", fromlist=["manifest"])
    if os.path.exists(manifest_dir):
        # Regenerate everything except toolplus manifests, which track
        # manually installed jars (see _install_toolplus).
        for fname in os.listdir(manifest_dir):
            if not fname.startswith("toolplus"):
                os.remove(os.path.join(manifest_dir, fname))
    cbl_manifest.create(manifest_dir, args.tooldir)
def _install_toolplus(args):
    """Install additional tools we cannot distribute, updating local manifest."""
    data_dir = _get_data_dir()
    manifest_dir = os.path.join(data_dir, "manifest")
    toolplus_manifest = os.path.join(manifest_dir, "toolplus-packages.yaml")
    system_config = os.path.join(data_dir, "galaxy", "bcbio_system.yaml")
    if not os.path.exists(system_config):
        # Handle toolplus installs inside Docker container
        docker_system_config = os.path.join(data_dir, "config", "bcbio_system.yaml")
        if os.path.exists(docker_system_config):
            system_config = docker_system_config
    toolplus_dir = os.path.join(data_dir, "toolplus")
    for tool in args.toolplus:
        if tool.name not in ("gatk", "mutect"):
            raise ValueError("Unexpected toolplus argument: %s %s" % (tool.name, tool.fname))
        print("Installing %s" % tool.name)
        _install_gatk_jar(tool.name, tool.fname, toolplus_manifest, system_config, toolplus_dir)
def get_gatk_jar_version(name, fname):
    """Return the version string embedded in a GATK or MuTect jar file.

    Raises ValueError for unrecognized tool names.
    """
    if name == "gatk":
        return broad.get_gatk_version(fname)
    if name == "mutect":
        return broad.get_mutect_version(fname)
    raise ValueError("Unexpected GATK input: %s" % name)
def _install_gatk_jar(name, fname, manifest, system_config, toolplus_dir):
    """Install a jar for GATK or associated tools like MuTect.

    Copies the jar into a versioned directory under ``toolplus_dir`` and
    records its location in both the system configuration and the toolplus
    manifest.

    :raises ValueError: if the provided file is not a .jar
    """
    if not fname.endswith(".jar"):
        raise ValueError("--toolplus argument for %s expects a jar file: %s" % (name, fname))
    version = get_gatk_jar_version(name, fname)
    # Layout: <toolplus_dir>/<tool>/<version>/<jarname>
    store_dir = utils.safe_makedir(os.path.join(toolplus_dir, name, version))
    shutil.copyfile(fname, os.path.join(store_dir, os.path.basename(fname)))
    _update_system_file(system_config, name, {"dir": store_dir})
    _update_manifest(manifest, name, version)
def _update_manifest(manifest_file, name, version):
    """Record *name* at *version* in the toolplus manifest YAML file.

    Loads the existing manifest when present (starting fresh otherwise),
    overwrites the entry for *name*, then writes the file back out.
    """
    entries = {}
    if os.path.exists(manifest_file):
        with open(manifest_file) as in_handle:
            entries = yaml.safe_load(in_handle)
    entries[name] = {"name": name, "version": version}
    with open(manifest_file, "w") as out_handle:
        yaml.safe_dump(entries, out_handle, default_flow_style=False, allow_unicode=False)
def _update_system_file(system_file, name, new_kvs):
    """Update the bcbio_system.yaml file with new resource information.

    Merges ``new_kvs`` into the resource entry for ``name``, creating the
    entry (and the file) when missing. An existing file is backed up with a
    timestamped ``.bak`` suffix before being rewritten.
    """
    if os.path.exists(system_file):
        # Keep a dated backup so a bad update can be reverted by hand.
        bak_file = system_file + ".bak%s" % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
        shutil.copyfile(system_file, bak_file)
        with open(system_file) as in_handle:
            config = yaml.safe_load(in_handle)
    else:
        utils.safe_makedir(os.path.dirname(system_file))
        config = {}
    new_rs = {}
    added = False
    # Rebuild the resources mapping, overlaying new_kvs onto the matching entry.
    for rname, r_kvs in config.get("resources", {}).items():
        if rname == name:
            for k, v in new_kvs.items():
                r_kvs[k] = v
            added = True
        new_rs[rname] = r_kvs
    if not added:
        new_rs[name] = new_kvs
    config["resources"] = new_rs
    with open(system_file, "w") as out_handle:
        yaml.safe_dump(config, out_handle, default_flow_style=False, allow_unicode=False)
def _install_kraken_db(datadir, args):
    """Install kraken minimal DB in genome folder.

    Downloads the minikraken tarball when the remote copy is newer than the
    locally installed database, unpacks it into ``<datadir>/genomes/kraken``
    and points the ``minikraken`` symlink at the new version. Requires the
    kraken binary to already be present in the tool directory.

    :raises argparse.ArgumentTypeError: when kraken is not installed
    """
    import requests
    kraken = os.path.join(datadir, "genomes/kraken")
    url = "https://ccb.jhu.edu/software/kraken/dl/minikraken.tgz"
    compress = os.path.join(kraken, os.path.basename(url))
    base, ext = utils.splitext_plus(os.path.basename(url))
    db = os.path.join(kraken, base)
    tooldir = args.tooldir or get_defaults()["tooldir"]
    requests.packages.urllib3.disable_warnings()
    # Compare the remote Last-Modified stamp against the mtime of the
    # currently installed database to decide whether to re-download.
    last_mod = urllib.request.urlopen(url).info().get('Last-Modified')
    last_mod = dateutil.parser.parse(last_mod).astimezone(dateutil.tz.tzutc())
    if os.path.exists(os.path.join(tooldir, "bin", "kraken")):
        if not os.path.exists(db):
            is_new_version = True
        else:
            cur_file = glob.glob(os.path.join(kraken, "minikraken_*"))[0]
            cur_version = datetime.datetime.utcfromtimestamp(os.path.getmtime(cur_file))
            is_new_version = last_mod.date() > cur_version.date()
            if is_new_version:
                # Keep the superseded database around under an 'old' prefix.
                shutil.move(cur_file, cur_file.replace('minikraken', 'old'))
        if not os.path.exists(kraken):
            utils.safe_makedir(kraken)
        if is_new_version:
            if not os.path.exists(compress):
                subprocess.check_call(["wget", "-O", compress, url, "--no-check-certificate"])
            cmd = ["tar", "-xzvf", compress, "-C", kraken]
            subprocess.check_call(cmd)
            last_version = glob.glob(os.path.join(kraken, "minikraken_*"))
            # Bug fix: glob already returns paths prefixed with `kraken`, so
            # the previous os.path.join(kraken, last_version[0]) duplicated
            # the directory prefix whenever datadir was a relative path.
            utils.symlink_plus(last_version[0], os.path.join(kraken, "minikraken"))
            utils.remove_safe(compress)
        else:
            print("You have the latest version %s." % last_mod)
    else:
        raise argparse.ArgumentTypeError("kraken not installed in tooldir %s." %
                                         os.path.join(tooldir, "bin", "kraken"))
# ## Store a local configuration file with upgrade details
def _get_install_config():
    """Return the path of the YAML file recording installation parameters.

    Returns None when no bcbio data directory can be located.
    """
    try:
        base = _get_data_dir()
    except ValueError:
        return None
    config_dir = utils.safe_makedir(os.path.join(base, "config"))
    return os.path.join(config_dir, "install-params.yaml")
def save_install_defaults(args):
    """Save installation information to make future upgrades easier.

    Merges the current command line choices (tool directory, isolation flag,
    genomes, aligners, data targets and named toolplus entries) into the
    persistent install-params.yaml configuration.
    """
    install_config = _get_install_config()
    if install_config is None:
        return
    if utils.file_exists(install_config):
        with open(install_config) as in_handle:
            cur_config = yaml.safe_load(in_handle)
    else:
        cur_config = {}
    if args.tooldir:
        cur_config["tooldir"] = args.tooldir
    cur_config["isolate"] = args.isolate
    # Accumulate list-valued options without duplicating existing entries.
    for attr in ["genomes", "aligners", "datatarget"]:
        if not cur_config.get(attr):
            cur_config[attr] = []
        for x in getattr(args, attr):
            if x not in cur_config[attr]:
                cur_config[attr].append(x)
    # toolplus -- save non-filename inputs
    attr = "toolplus"
    if not cur_config.get(attr):
        cur_config[attr] = []
    for x in getattr(args, attr):
        # Entries with a filename are machine-specific; only persist names.
        if not x.fname:
            if x.name not in cur_config[attr]:
                cur_config[attr].append(x.name)
    with open(install_config, "w") as out_handle:
        yaml.safe_dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False)
def add_install_defaults(args):
    """Add any saved installation defaults to the upgrade.

    Reads install-params.yaml (when present) and folds previously chosen
    tool directory, genomes, aligners, data targets and isolation settings
    into the parsed command line arguments. Returns the updated args.
    """
    # Ensure we install data if we've specified any secondary installation targets
    if len(args.genomes) > 0 or len(args.aligners) > 0 or len(args.datatarget) > 0:
        args.install_data = True
    install_config = _get_install_config()
    if install_config is None or not utils.file_exists(install_config):
        default_args = {}
    else:
        with open(install_config) as in_handle:
            default_args = yaml.safe_load(in_handle)
    # if we are upgrading to development, also upgrade the tools
    if args.upgrade in ["development"] and (args.tooldir or "tooldir" in default_args):
        args.tools = True
    if args.tools and args.tooldir is None:
        if "tooldir" in default_args:
            args.tooldir = str(default_args["tooldir"])
        else:
            raise ValueError("Default tool directory not yet saved in config defaults. "
                             "Specify the '--tooldir=/path/to/tools' to upgrade tools. "
                             "After a successful upgrade, the '--tools' parameter will "
                             "work for future upgrades.")
    for attr in ["genomes", "aligners"]:
        # don't upgrade default genomes if a genome was specified
        if attr == "genomes" and len(args.genomes) > 0:
            continue
        for x in default_args.get(attr, []):
            x = str(x)
            new_val = getattr(args, attr)
            if x not in getattr(args, attr):
                new_val.append(x)
            setattr(args, attr, new_val)
    args = _datatarget_defaults(args, default_args)
    # An explicit --isolate on the command line wins over the saved value.
    if "isolate" in default_args and args.isolate is not True:
        args.isolate = default_args["isolate"]
    return args
def _datatarget_defaults(args, default_args):
"""Set data installation targets, handling defaults.
Sets variation, rnaseq, smallrna as default targets if we're not
isolated to a single method.
Provides back compatibility for toolplus specifications.
"""
default_data = default_args.get("datatarget", [])
# back-compatible toolplus specifications
for x in default_args.get("toolplus", []):
val = None
if x == "data":
val = "gemini"
elif x in ["dbnsfp", "dbscsnv", "kraken", "gnomad"]:
val = x
if val and val not in default_data:
default_data.append(val)
new_val = getattr(args, "datatarget")
for x in default_data:
if x not in new_val:
new_val.append(x)
has_std_target = False
std_targets = ["variation", "rnaseq", "smallrna"]
for target in std_targets:
if target in new_val:
has_std_target = True
break
if not has_std_target:
new_val = new_val + std_targets
setattr(args, "datatarget", new_val)
return args
def get_defaults():
    """Load previously saved installation parameters, or {} if none exist."""
    path = _get_install_config()
    if path is None or not utils.file_exists(path):
        return {}
    with open(path) as in_handle:
        return yaml.safe_load(in_handle)
def _check_toolplus(x):
    """argparse type checker for --toolplus values of the form name=filename.

    :returns: a Tool(name, fname) with the filename fully resolved
    :raises argparse.ArgumentTypeError: on malformed input or missing file
    """
    parts = x.split("=")
    if len(parts) != 2:
        raise argparse.ArgumentTypeError("Unexpected --toolplus argument. Expect toolname=filename.")
    name, fname = parts
    # Resolve symlinks and normalize so the stored path is canonical.
    fname = os.path.normpath(os.path.realpath(fname))
    if not os.path.exists(fname):
        raise argparse.ArgumentTypeError("Unexpected --toolplus argument for %s. File does not exist: %s"
                                         % (name, fname))
    return Tool(name, fname)
def add_subparser(subparsers):
    """Register the 'upgrade' subcommand and its options on *subparsers*.

    Returns the configured ArgumentParser so callers can attach defaults.
    """
    parser = subparsers.add_parser("upgrade", help="Install or upgrade bcbio-nextgen")
    parser.add_argument("--cores", default=1,
                        help="Number of cores to use if local indexing is necessary.")
    parser.add_argument("--tooldir",
                        help="Directory to install 3rd party software tools. Leave unspecified for no tools",
                        type=lambda x: (os.path.abspath(os.path.expanduser(x))), default=None)
    parser.add_argument("--tools",
                        help="Boolean argument specifying upgrade of tools. Uses previously saved install directory",
                        action="store_true", default=False)
    parser.add_argument("-u", "--upgrade", help="Code version to upgrade",
                        choices=["stable", "development", "system", "deps", "skip"], default="skip")
    parser.add_argument("--toolconf", help="YAML configuration file of tools to install", default=None,
                        type=lambda x: (os.path.abspath(os.path.expanduser(x))))
    parser.add_argument("--revision", help="Specify a git commit hash or tag to install", default="master")
    # --toolplus values are validated and parsed into Tool tuples up front.
    parser.add_argument("--toolplus", help="Specify additional tool categories to install",
                        action="append", default=[], type=_check_toolplus)
    parser.add_argument("--datatarget", help="Data to install. Allows customization or install of extra data.",
                        action="append", default=[],
                        choices=["variation", "rnaseq", "smallrna", "gemini", "vep", "dbnsfp", "dbscsnv", "battenberg", "kraken", "ericscript", "gnomad"])
    parser.add_argument("--genomes", help="Genomes to download",
                        action="append", default=[], choices=SUPPORTED_GENOMES)
    parser.add_argument("--aligners", help="Aligner indexes to download",
                        action="append", default=[],
                        choices=SUPPORTED_INDEXES)
    parser.add_argument("--data", help="Upgrade data dependencies",
                        dest="install_data", action="store_true", default=False)
    parser.add_argument("--cwl", help="Install code and data for running CWL workflows",
                        dest="cwl", action="store_true", default=False)
    parser.add_argument("--isolate", help="Created an isolated installation without PATH updates",
                        dest="isolate", action="store_true", default=False)
    parser.add_argument("--distribution", help="Operating system distribution",
                        default="",
                        choices=["ubuntu", "debian", "centos", "scientificlinux", "macosx"])
    return parser
def get_cloudbiolinux(remotes):
    """Ensure a cloudbiolinux checkout exists in the working directory.

    Downloads and unpacks the repository tarball on first use, then returns
    a dict with the checkout path ("dir") and its biodata configuration
    file ("biodata").
    """
    base_dir = os.path.join(os.getcwd(), "cloudbiolinux")
    if not os.path.exists(base_dir):
        fetch_cmd = ("wget --progress=dot:mega --no-check-certificate -O- %s | tar xz && "
                     "(mv cloudbiolinux-master cloudbiolinux || mv master cloudbiolinux)"
                     % remotes["cloudbiolinux"])
        subprocess.check_call(fetch_cmd, shell=True)
    return {"dir": base_dir,
            "biodata": os.path.join(base_dir, "config", "biodata.yaml")}
@contextlib.contextmanager
def bcbio_tmpdir():
    """Context manager running the enclosed block inside a scratch directory.

    Creates ``tmpbcbio-install`` under the current directory, chdirs into it
    for the duration of the block, then restores the original working
    directory and removes the scratch space. The cleanup now runs in a
    ``finally`` clause so it also happens when the managed block raises
    (previously an exception left the process inside a stale directory).
    """
    orig_dir = os.getcwd()
    work_dir = os.path.join(os.getcwd(), "tmpbcbio-install")
    if not os.path.exists(work_dir):
        os.makedirs(work_dir)
    os.chdir(work_dir)
    try:
        yield work_dir
    finally:
        os.chdir(orig_dir)
        shutil.rmtree(work_dir)
|
a113n/bcbio-nextgen
|
bcbio/install.py
|
Python
|
mit
| 37,903
|
[
"BWA",
"Bioconda",
"Bowtie",
"Galaxy"
] |
11d7ea10686273fd0d6fdb007336352ea37a9763886a5555a4168e749e849a10
|
from __future__ import division
from menpo.transform import AlignmentSimilarity
from menpo.model.pdm import PDM, OrthoPDM
from menpo.fit.gradientdescent import RegularizedLandmarkMeanShift
from menpo.fit.gradientdescent.residual import SSD
from menpo.fitmultilevel.base import MultilevelFitter
from menpo.fitmultilevel.featurefunctions import compute_features
class CLMFitter(MultilevelFitter):
    r"""
    Mixin for Constrained Local Models Fitters.

    Exposes the multilevel fitting configuration of the wrapped CLM and
    prepares images (rescaling, pyramid construction, feature extraction)
    for fitting.

    Parameters
    -----------
    clm: :class:`menpo.fitmultilevel.clm.builder.CLM`
        The Constrained Local Model to be used.
    """
    def __init__(self, clm):
        self.clm = clm

    @property
    def reference_shape(self):
        return self.clm.reference_shape

    @property
    def feature_type(self):
        return self.clm.feature_type

    @property
    def n_levels(self):
        return self.clm.n_levels

    @property
    def downscale(self):
        return self.clm.downscale

    @property
    def scaled_levels(self):
        return self.clm.scaled_levels

    @property
    def interpolator(self):
        return self.clm.interpolator

    # TODO: Can this be moved up?
    def _prepare_image(self, image, initial_shape, gt_shape=None):
        r"""
        The image is first rescaled wrt the reference_landmarks, then
        smoothing or gaussian pyramid are computed and, finally, features
        are extracted from each pyramidal element.
        """
        image.landmarks['initial_shape'] = initial_shape
        image = image.rescale_to_reference_shape(
            self.reference_shape, group='initial_shape',
            interpolator=self.interpolator)
        if gt_shape:
            # Bug fix: previously `initial_shape` was stored under the
            # 'gt_shape' key, silently discarding the ground truth landmarks.
            image.landmarks['gt_shape'] = gt_shape
        if self.n_levels > 1:
            if self.scaled_levels:
                pyramid = image.gaussian_pyramid(
                    n_levels=self.n_levels, downscale=self.downscale)
            else:
                pyramid = image.smoothing_pyramid(
                    n_levels=self.n_levels, downscale=self.downscale)
            # Features are computed per pyramid level; reversed so the
            # coarsest level comes first.
            images = [compute_features(i, self.feature_type)
                      for i in pyramid]
            images.reverse()
        else:
            images = [compute_features(image, self.feature_type)]
        return images
# TODO: document me
# TODO: Residuals (SSD) is not used at the moment
class GradientDescentCLMFitter(CLMFitter):
    r"""
    Gradient Descent based Fitter for Constrained Local Models.

    Parameters
    -----------
    clm: :class:`menpo.fitmultilevel.clm.builder.CLM`
        The Constrained Local Model to be used.
    algorithm: :class:`menpo.fit.gradientdescent.base`, optional
        The Gradient Descent class to be used.

        Default: RegularizedLandmarkMeanShift
    residual: :class:`menpo.fit.gradientdescent.residual`, optional
        The residual class to be used

        Default: 'SSD'
    pdm_transform: :class:`menpo.transform.ModelDrivenTransform`, optional
        The point distribution transform class to be used.

        Default: OrthoPDM
    global_transform: :class:`menpo.transform.affine`, optional
        The global transform class to be used by the previous
        md_transform_cls. Currently, only
        :class:`menpo.transform.affine.Similarity` is supported.

        Default: Similarity
    n_shape: list, optional
        The number of shape components to be used per fitting level.
        If None, for each shape model n_active_components will be used.

        Default: None
    """
    def __init__(self, clm, algorithm=RegularizedLandmarkMeanShift,
                 residual=SSD, pdm_transform=OrthoPDM,
                 global_transform=AlignmentSimilarity, n_shape=None):
        super(GradientDescentCLMFitter, self).__init__(clm)
        self._set_up(algorithm=algorithm, residual=residual,
                     pdm_transform=pdm_transform,
                     global_transform=global_transform,
                     n_shape=n_shape)

    @property
    def algorithm(self):
        # Human-readable identifier combining the fitter family and the
        # per-level algorithm name.
        return 'GD-CLM-' + self._fitters[0].algorithm

    # TODO: Residuals (SSD) is not used at the moment
    def _set_up(self, algorithm=RegularizedLandmarkMeanShift, residual=SSD,
                pdm_transform=OrthoPDM,
                global_transform=AlignmentSimilarity, n_shape=None):
        r"""
        Sets up the gradient descent fitter object, building one fitter per
        pyramid level from the CLM's shape models and classifiers.

        Parameters are as documented on the class. ``n_shape`` may be None
        (use each model's active components), a single int (replicated per
        level), a one-element list (replicated per level) or a list with one
        entry per level.

        :raises ValueError: when ``n_shape`` has an unexpected length.
        """
        if n_shape is None:
            n_shape = [sm.n_active_components
                       for sm in self.clm.shape_models]
        # Bug fix: the original used `is` / `is not` to compare integer
        # lengths, which tests object identity and is only accidentally
        # correct for CPython's cached small integers. Use ==/!= instead.
        if isinstance(n_shape, int):
            n_shape = [n_shape for _ in range(self.clm.n_levels)]
        elif len(n_shape) == 1 and self.clm.n_levels > 1:
            n_shape = [n_shape[0] for _ in range(self.clm.n_levels)]
        elif len(n_shape) != self.clm.n_levels:
            raise ValueError('n_shape can be integer, integer list '
                             'containing 1 or {} elements or '
                             'None'.format(self.clm.n_levels))

        self._fitters = []
        for j, (sm, clf) in enumerate(zip(self.clm.shape_models,
                                          self.clm.classifiers)):
            if n_shape is not None:
                sm.n_active_components = n_shape[j]
            # PDM takes only the shape model; other transforms also take
            # the global (similarity) transform.
            if pdm_transform is not PDM:
                pdm_trans = pdm_transform(sm, global_transform)
            else:
                pdm_trans = pdm_transform(sm)
            self._fitters.append(algorithm(clf,
                                           self.clm.patch_shape,
                                           pdm_trans))
|
jabooth/menpo-archive
|
menpo/fitmultilevel/clm/base.py
|
Python
|
bsd-3-clause
| 7,116
|
[
"Gaussian"
] |
a918410aa3b379cdf83f759becf031f6a0bf3bb633754fd01d2cfc82c988cf7c
|
""" X509Certificate is a class for managing X509 certificates
    Proxy RFC: https://tools.ietf.org/html/rfc3820
X509RFC: https://tools.ietf.org/html/rfc5280
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import datetime
import os
import random
import time
import M2Crypto
from io import open
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import DErrno
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Core.Security.m2crypto import asn1_utils
from DIRAC.Core.Utilities.Decorators import executeOnlyIf
# Decorator to execute the method only if the certificate has been loaded
executeOnlyIfCertLoaded = executeOnlyIf('_certLoaded', S_ERROR(DErrno.ENOCERT))
class X509Certificate(object):
  """ The X509Certificate object represents ... a X509Certificate.

      It is a wrapper around a lower level implementation (M2Crypto in this case) of a certificate.
      In theory, it can be a host or user certificate. Also, a proxy certificate is a X509Certificate,
      however it is useless without all the chain of issuers.
      That's why one has the X509Chain.
      In practice, X509Certificate is just used for checking if the host certificate has expired.

      This class will most probably disappear once we get rid of pyGSI. After all, a X509Certificate
      is nothing but a X509Chain of length 1.

      Note that the SSL connection itself does not use this class, it gives directly the certificate to the library
  """

  def __init__(self, x509Obj=None, certString=None):
    """
    Constructor.
    You can give either nothing, or the x509Obj or the certString

    :param x509Obj: (optional) certificate instance
    :type x509Obj: M2Crypto.X509.X509
    :param certString: text representation of certificate
    :type certString: String

    """
    # _certLoaded gates every method decorated with executeOnlyIfCertLoaded
    self._certLoaded = False
    if x509Obj:
      self.__certObj = x509Obj
      self._certLoaded = True
    elif certString:
      self.loadFromString(certString)

  # Pylint is surprisingly picky here, so remove that warning
  # pylint: disable=protected-access
  @classmethod
  def generateProxyCertFromIssuer(cls, x509Issuer, x509ExtensionStack, proxyKey, lifetime=3600):
    """ This class method is meant to generate a new X509Certificate out of an existing one.
        Basically, it generates a proxy... However, you can't have a proxy certificate working on
        its own, you need all the chain of certificates. This method is meant to be called
        only by the X509Chain class.

        Inspired from https://github.com/eventbrite/m2crypto/blob/master/demo/x509/ca.py#L45

        :param x509Issuer: X509Certificate instance from which we generate the next one
        :param x509ExtensionStack: M2Crypto.X509.X509_Extension_Stack object to add to the new certificate.
                                  It contains all the X509 extensions needed for the proxy (e.g. DIRAC group).
                                  See ~X509Chain.__getProxyExtensionList
        :param proxyKey: a M2Crypto.EVP.PKey instance with private and public key
        :param lifetime: duration of the proxy in second. Default 3600

        :returns: a new X509Certificate
    """
    proxyCert = cls()
    proxyCert.__certObj = M2Crypto.X509.X509()
    # According to the proxy RFC, the serial
    # number just needs to be unique among the proxies generated by the issuer.
    # The random module of python will be good enough for that
    serial = int(random.random() * 10 ** 10)
    proxyCert.__certObj.set_serial_number(serial)

    # No easy way to deep-copy certificate subject, since they are swig object
    # We basically get a string like 'O=Dirac Computing, O=CERN, CN=MrUser'
    # So we split it, and then re-add each entry after the other.
    proxySubject = M2Crypto.X509.X509_Name()
    issuerSubjectObj = x509Issuer.__certObj.get_subject()
    issuerSubjectParts = issuerSubjectObj.as_text().split(', ')
    for isPart in issuerSubjectParts:
      nid, val = isPart.split('=', 1)
      proxySubject.add_entry_by_txt(field=nid, type=M2Crypto.ASN1.MBSTRING_ASC, entry=val, len=-1, loc=-1, set=0)
    # Finally we add a random Common Name component. And we might as well use the serial.. :)
    proxySubject.add_entry_by_txt(field="CN", type=M2Crypto.ASN1.MBSTRING_ASC,
                                  entry=str(serial), len=-1, loc=-1, set=0)
    proxyCert.__certObj.set_subject(proxySubject)

    # We now add all the extensions we wish to add
    for extension in x509ExtensionStack:
      proxyCert.__certObj.add_ext(extension)

    proxyCert.__certObj.set_issuer(issuerSubjectObj)
    # According to the X509 RFC, we are safe if we just copy the version
    # number from the issuer certificate
    proxyCert.__certObj.set_version(x509Issuer.__certObj.get_version())
    proxyCert.__certObj.set_pubkey(proxyKey)

    # Set the start of the validity a bit in the past
    # to be sure to be able to use it right now
    proxyNotBefore = M2Crypto.ASN1.ASN1_UTCTIME()
    proxyNotBefore.set_time(int(time.time()) - 900)
    proxyCert.__certObj.set_not_before(proxyNotBefore)

    # Set the end date of the validity according to the lifetime
    proxyNotAfter = M2Crypto.ASN1.ASN1_UTCTIME()
    proxyNotAfter.set_time(int(time.time()) + lifetime)
    proxyCert.__certObj.set_not_after(proxyNotAfter)

    # Finally set it as loaded. Care that it is not yet signed !!
    proxyCert._certLoaded = True
    return S_OK(proxyCert)

  def load(self, certificate):
    """ Load an x509 certificate either from a file or from a string

        :param certificate: path to the file or PEM encoded string

        :returns: S_OK on success, otherwise S_ERROR
    """
    if os.path.exists(certificate):
      return self.loadFromFile(certificate)
    return self.loadFromString(certificate)

  def loadFromFile(self, certLocation):
    """
    Load a x509 cert from a pem file

    :param certLocation: path to the certificate file

    :returns: S_OK / S_ERROR.
    """
    try:
      with open(certLocation, 'rb') as fd:
        pemData = fd.read()
      return self.loadFromString(pemData)
    except IOError:
      return S_ERROR(DErrno.EOF, "Can't open %s file" % certLocation)

  def loadFromString(self, pemData):
    """
    Load a x509 cert from a string containing the pem data

    :param pemData: pem encoded string

    :returns: S_OK / S_ERROR
    """
    try:
      self.__certObj = M2Crypto.X509.load_cert_string(pemData, M2Crypto.X509.FORMAT_PEM)
    except Exception as e:
      return S_ERROR(DErrno.ECERTREAD, "Can't load pem data: %s" % e)
    self._certLoaded = True
    return S_OK()

  @executeOnlyIfCertLoaded
  def hasExpired(self):
    """
    Check if the loaded certificate is still valid

    :returns: S_OK( True/False )/S_ERROR
    """
    res = self.getNotAfterDate()
    if not res['OK']:
      return res
    notAfter = res['Value']
    now = datetime.datetime.utcnow()
    return S_OK(notAfter < now)

  @executeOnlyIfCertLoaded
  def getNotAfterDate(self):
    """
    Get not after date of a certificate

    :returns: S_OK( datetime )/S_ERROR
    """
    notAfter = self.__certObj.get_not_after().get_datetime()
    # M2Crypto does things correctly by setting a timezone info in the datetime
    # However, we do not in DIRAC, and so we can't compare the dates.
    # We have to remove the timezone info from M2Crypto
    notAfter = notAfter.replace(tzinfo=None)
    return S_OK(notAfter)

  @executeOnlyIfCertLoaded
  def getNotBeforeDate(self):
    """
    Get not before date of a certificate

    :returns: S_OK( datetime )/S_ERROR
    """
    return S_OK(self.__certObj.get_not_before().get_datetime())

  # @executeOnlyIfCertLoaded
  # def setNotBefore(self, notbefore):
  #   """
  #   Set not before date of a certificate This method is not meant to be used, but to generate a proxy.

  #   :returns: S_OK/S_ERROR
  #   """
  #   self.__certObj.set_not_before(notbefore)
  #   return S_OK()

  @executeOnlyIfCertLoaded
  def getSubjectDN(self):
    """
    Get subject DN

    :returns: S_OK( string )/S_ERROR
    """
    return S_OK(str(self.__certObj.get_subject()))

  @executeOnlyIfCertLoaded
  def getIssuerDN(self):
    """
    Get issuer DN

    :returns: S_OK( string )/S_ERROR
    """
    return S_OK(str(self.__certObj.get_issuer()))

  @executeOnlyIfCertLoaded
  def getSubjectNameObject(self):
    """
    Get subject name object

    :returns: S_OK( X509Name )/S_ERROR
    """
    return S_OK(self.__certObj.get_subject())

  # The following method is in pyGSI,
  # but are only used by the pyGSI SSL implementation
  # So I do not really need them
  # @executeOnlyIfCertLoaded
  # def getIssuerNameObject(self):
  #   """
  #   Get issuer name object

  #   :returns: S_OK( X509Name )/S_ERROR
  #   """
  #   return S_OK(self.__certObj.get_issuer())

  @executeOnlyIfCertLoaded
  def getPublicKey(self):
    """
    Get the public key of the certificate

    :returns: S_OK(M2crypto.EVP.PKey)
    """
    return S_OK(self.__certObj.get_pubkey())

  @executeOnlyIfCertLoaded
  def getSerialNumber(self):
    """
    Get certificate serial number

    :returns: S_OK( serial )/S_ERROR
    """
    return S_OK(self.__certObj.get_serial_number())

  @executeOnlyIfCertLoaded
  def sign(self, key, algo):
    """
    Sign the certificate using provided key and algorithm.

    :param key: M2crypto.EVP.PKey object with private and public key
    :param algo: algorithm to sign the certificate

    :returns: S_OK/S_ERROR
    """
    try:
      self.__certObj.sign(key, algo)
    except Exception as e:
      return S_ERROR(repr(e))
    return S_OK()

  @executeOnlyIfCertLoaded
  def getDIRACGroup(self, ignoreDefault=False):
    """
    Get the dirac group if present

    If no group is found in the certificate, we query the CS to get the default group
    for the given user. This can be disabled using the ignoreDefault parameter

    Note that the lookup in the CS only can work for a proxy of first generation,
    since we search based on the issuer DN

    :param ignoreDefault: if True, do not lookup the CS

    :returns: S_OK(group name/bool)
    """
    try:
      return S_OK(asn1_utils.decodeDIRACGroup(self.__certObj))
    except LookupError:
      # No group extension in the certificate; fall back to the CS lookup below
      pass
    if ignoreDefault:
      return S_OK(False)
    # And here is the flaw :)
    result = self.getIssuerDN()
    if not result['OK']:
      return result
    return Registry.findDefaultGroupForDN(result['Value'])

  @executeOnlyIfCertLoaded
  def hasVOMSExtensions(self):
    """
    Has voms extensions

    :returns: S_OK(bool) if voms extensions are found
    """
    # `get_ext` would be the correct thing to do.
    # However, it does not work for the moment, as the extension
    # is not registered with an alias
    # https://gitlab.com/m2crypto/m2crypto/issues/231
    # try:
    #   self.__certObj.get_ext('vomsExtensions')
    #   return S_OK(True)
    # except LookupError:
    #   # no extension found
    #   pass
    return S_OK(asn1_utils.hasVOMSExtension(self.__certObj))

  @executeOnlyIfCertLoaded
  def getVOMSData(self):
    """
    Get voms extensions data

    :returns: S_ERROR/S_OK(dict). For the content of the dict,
        see :py:func:`~DIRAC.Core.Security.m2crypto.asn1_utils.decodeVOMSExtension`
    """
    try:
      vomsExt = asn1_utils.decodeVOMSExtension(self.__certObj)
      return S_OK(vomsExt)
    except LookupError:
      return S_ERROR(DErrno.EVOMS, "No VOMS data available")

  @executeOnlyIfCertLoaded
  def generateProxyRequest(self, bitStrength=1024, limited=False):
    """
    Generate a proxy request. See :py:class:`DIRAC.Core.Security.m2crypto.X509Request.X509Request`

    In principle, there is no reason to have this here, since the X509Request is independent of
    the X509Certificate when generating it. The only reason is to check whether the current Certificate
    is limited or not.

    :param bitStrength: strength of the key
    :param limited: if True or if the current certificate is limited (see proxy RFC),
                    creates a request for a limited proxy

    :returns: S_OK( :py:class:`DIRAC.Core.Security.m2crypto.X509Request.X509Request` ) / S_ERROR
    """
    if not limited:
      # We check whether "limited proxy" is in the subject
      subj = self.__certObj.get_subject()
      # M2Crypto does not understand the [-1] syntax...
      lastEntry = subj[len(subj) - 1]
      if lastEntry.get_data() == "limited proxy":
        limited = True

    # The import is done here to avoid circular import
    # X509Certificate -> X509Request -> X509Chain -> X509Certificate
    from DIRAC.Core.Security.m2crypto.X509Request import X509Request
    req = X509Request()
    req.generateProxyRequest(bitStrength=bitStrength, limited=limited)
    return S_OK(req)

  @executeOnlyIfCertLoaded
  def getRemainingSecs(self):
    """
    Get remaining lifetime in secs

    :returns: S_OK(remaining seconds)
    """
    notAfter = self.getNotAfterDate()['Value']
    now = datetime.datetime.utcnow()
    # Clamp at 0 so an expired certificate reports no remaining time
    remainingSeconds = max(0, int((notAfter - now).total_seconds()))
    return S_OK(remainingSeconds)

  @executeOnlyIfCertLoaded
  def getExtensions(self):
    """
    Get a decoded list of extensions

    :returns: S_OK( list of tuple (extensionName, extensionValue))
    """
    extList = []
    for i in range(self.__certObj.get_ext_count()):
      sn = self.__certObj.get_ext_at(i).get_name()
      try:
        value = self.__certObj.get_ext_at(i).get_value()
      except Exception:
        value = "Cannot decode value"
      extList.append((sn, value))
    return S_OK(sorted(extList))

  @executeOnlyIfCertLoaded
  def verify(self, pkey):
    """
    Verify the signature of the certificate using the public key provided

    :param pkey: ~M2Crypto.EVP.PKey object

    :returns: S_OK(bool) where the boolean shows the success of the verification
    """
    ret = self.__certObj.verify(pkey)
    return S_OK(ret == 1)

  @executeOnlyIfCertLoaded
  def asPem(self):
    """
    Return certificate as PEM string

    :returns: pem string
    """
    return self.__certObj.as_pem()

  @executeOnlyIfCertLoaded
  def getExtension(self, name):
    """
    Return X509 Extension with given name

    :param name: name of the extension

    :returns: S_OK with M2Crypto.X509.X509_Extension object, or S_ERROR
    """
    try:
      ext = self.__certObj.get_ext(name)
    except LookupError as e:
      return S_ERROR(e)
    return S_OK(ext)
|
yujikato/DIRAC
|
src/DIRAC/Core/Security/m2crypto/X509Certificate.py
|
Python
|
gpl-3.0
| 14,715
|
[
"DIRAC"
] |
71e1f156dc54c6df3b8fa7fd20e75a283b467cabf4220b42c81c2c1961d10090
|
"""Loader for MSG, netcdf format.
"""
from ConfigParser import ConfigParser
from mpop import CONFIG_PATH
import os
import numpy.ma as ma
from numpy import array as np_array
from numpy import nan as np_nan
from glob import glob
from mpop.projector import get_area_def
import datetime
try:
import h5py
except ImportError:
print "... module h5py needs to be installed"
quit()
from mipp.xrit.MSG import _Calibrator
import logging
LOG = logging.getLogger(__name__)
#from mpop.utils import debug_on
#debug_on()
# Mapping from Meteosat satellite number (both zero-padded and bare string
# forms accepted) to the spacecraft identifier used in the HDF5 metadata.
SatelliteIds = { '08': 321, # Meteosat 8
                 '8': 321, # Meteosat 8
                 '09': 322, # Meteosat 9
                 '9': 322, # Meteosat 9
                 '10': 323, # Meteosat 10
                 '11': 324 } # Meteosat 11
# SEVIRI channel name -> 1-based channel number.
channel_numbers = {"VIS006": 1,
                   "VIS008": 2,
                   "IR_016": 3,
                   "IR_039": 4,
                   "WV_062": 5,
                   "WV_073": 6,
                   "IR_087": 7,
                   "IR_097": 8,
                   "IR_108": 9,
                   "IR_120": 10,
                   "IR_134": 11,
                   "HRV": 12}
# SEVIRI channel name -> dataset group name used inside the HDF5 file.
dict_channel= {'VIS006':'Channel 01','VIS008':'Channel 02','IR_016':'Channel 03','IR_039':'Channel 04','WV_062':'Channel 05','WV_073':'Channel 06',\
               'IR_087':'Channel 07','IR_097':'Channel 08','IR_108':'Channel 09','IR_120':'Channel 10','IR_134':'Channel 11','HRV':'Channel 12'}
def load(satscene, calibrate=True, area_extent=None, **kwargs):
    """Load MSG SEVIRI data from hdf5 format.

    Fills every channel listed in ``satscene.channels_to_load`` with image
    data read from a U-MARF Level 1.5 HDF5 file (optionally converted from
    counts by mipp's ``_Calibrator``) and attaches a matching pyresample
    area definition to ``satscene.area``.

    :param satscene: mpop scene to fill; also supplies the time slot,
        satellite name/number and the channel list.
    :param calibrate: if True convert raw counts via mipp's _Calibrator,
        otherwise channels hold raw counts ("counts" unit).
    :param area_extent: unused here; kept for interface compatibility with
        the other mpop readers.
    :param kwargs: recognised keys are ``reader_level`` (config section to
        read dir/filename patterns from) and ``RSS`` (True for the rapid
        scan service).
    """
    # Read config file content
    conf = ConfigParser()
    conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
    # Substitution values for the dir/filename patterns in the config file.
    values = {"orbit": satscene.orbit,
              "satname": satscene.satname,
              "number": satscene.number,
              "instrument": satscene.instrument_name,
              "satellite": satscene.fullname
              }
    LOG.info("assume seviri-level4")
    print "... assume seviri-level4"
    satscene.add_to_history("hdf5 data read by mpop/msg_seviri_hdf.py")
    # Config section holding the dir/filename patterns (default seviri-level4).
    if "reader_level" in kwargs.keys():
        reader_level = kwargs["reader_level"]
    else:
        reader_level = "seviri-level4"
    # Scan duration in minutes: 4 for the rapid scan service (RSS), 12 for
    # the full-disk service.  Needed because file names carry the *end* time.
    if "RSS" in kwargs.keys():
        if kwargs["RSS"]:
            dt_end = 4
        else:
            dt_end = 12
    else:
        # Not given by the caller: derive RSS mode from satellite number
        # and time slot.
        from my_msg_module import check_RSS
        RSS = check_RSS(satscene.sat_nr(), satscene.time_slot)
        if RSS == None:
            print "*** Error in mpop/satin/msg_seviri_hdf.py"
            print " satellite MSG", satscene.sat_nr() ," is not active yet"
            quit()
        else:
            if RSS:
                dt_end = 4
            else:
                dt_end = 12
    print "... hdf file name is specified by observation end time"
    print " assume ", dt_end, " min between start and end time of observation"
    # end of scan time 4 min after start
    end_time = satscene.time_slot + datetime.timedelta(minutes=dt_end)
    filename = os.path.join( end_time.strftime(conf.get(reader_level, "dir", raw=True)),
                             end_time.strftime(conf.get(reader_level, "filename", raw=True)) % values )
    print "... search for file: ", filename
    filenames=glob(str(filename))
    if len(filenames) == 0:
        print "*** Error, no file found"
        return # just return without exit the program
    elif len(filenames) > 1:
        print "*** Warning, more than 1 datafile found: ", filenames
    filename = filenames[0]
    print("... read data from %s" % str(filename))
    # read data from hdf5 file
    data_folder='U-MARF/MSG/Level1.5/'
    # Load data from hdf file
    with h5py.File(filename,'r') as hf:
        # Line/column bounding box of the image subset stored in the file.
        subset_info=hf.get(data_folder+'METADATA/SUBSET')
        for i in range(subset_info.len()):
            #print subset_info[i]['EntryName'], subset_info[i]['Value']
            if subset_info[i]['EntryName'] == "VIS_IRSouthLineSelectedRectangle":
                VIS_IRSouthLine = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "VIS_IRNorthLineSelectedRectangle":
                VIS_IRNorthLine = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "VIS_IREastColumnSelectedRectangle":
                VIS_IREastColumn = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "VIS_IRWestColumnSelectedRectangle":
                VIS_IRWestColumn = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "HRVLowerNorthLineSelectedRectangle":
                HRVLowerNorthLine = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "HRVLowerSouthLineSelectedRectangle":
                HRVLowerSouthLine = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "HRVLowerEastColumnSelectedRectangle":
                HRVLowerEastColumn = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "HRVLowerWestColumnSelectedRectangle":
                HRVLowerWestColumn = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "HRVUpperSouthLineSelectedRectangle":
                HRVUpperSouthLine = int(subset_info[i]['Value']) # 0
            if subset_info[i]['EntryName'] == "HRVUpperNorthLineSelectedRectangle":
                HRVUpperNorthLine = int(subset_info[i]['Value']) # 0
            if subset_info[i]['EntryName'] == "HRVUpperEastColumnSelectedRectangle":
                HRVUpperEastColumn = int(subset_info[i]['Value']) # 0
            if subset_info[i]['EntryName'] == "HRVUpperWestColumnSelectedRectangle":
                HRVUpperWestColumn = int(subset_info[i]['Value']) # 0
        # Satellite nominal longitude, needed for the geos projection below.
        sat_status=hf.get(data_folder+'METADATA/HEADER/SatelliteStatus/SatelliteStatus_DESCR')
        # NOTE(review): the range is taken from subset_info but the loop
        # indexes sat_status -- presumably this should be sat_status.len();
        # it works only while the SUBSET table is at least as long as the
        # position of the longitude entry.  TODO confirm against real files.
        for i in range(subset_info.len()):
            if sat_status[i]['EntryName']=="SatelliteDefinition-NominalLongitude":
                sat_lon = sat_status[i]['Value']
                break
        #print 'VIS_IRSouthLine', VIS_IRSouthLine
        #print 'VIS_IRNorthLine', VIS_IRNorthLine
        #print 'VIS_IREastColumn', VIS_IREastColumn
        #print 'VIS_IRWestColumn', VIS_IRWestColumn
        #print 'sat_longitude', sat_lon, type(sat_lon), 'GEOS<'+'{:+06.1f}'.format(sat_lon)+'>'
        # Disabled branch kept for reference; see comments below.
        if 1 == 0:
            # works only if all pixels are on the disk
            from msg_pixcoord2area import msg_pixcoord2area
            print "VIS_IRNorthLine, VIS_IRWestColumn, VIS_IRSouthLine, VIS_IREastColumn: ", VIS_IRNorthLine, VIS_IRWestColumn, VIS_IRSouthLine, VIS_IREastColumn
            area_def = msg_pixcoord2area ( VIS_IRNorthLine, VIS_IRWestColumn, VIS_IRSouthLine, VIS_IREastColumn, "vis", sat_lon )
        else:
            # works also for pixels outside of the disk
            pname = 'GEOS<'+'{:+06.1f}'.format(sat_lon)+'>' # "GEOS<+009.5>"
            proj = {'proj': 'geos', 'a': '6378169.0', 'b': '6356583.8', 'h': '35785831.0', 'lon_0': str(sat_lon)}
            aex=(-5570248.4773392612, -5567248.074173444, 5567248.074173444, 5570248.4773392612)
            # define full disk projection
            from pyresample.geometry import AreaDefinition
            full_disk_def = AreaDefinition('full_disk',
                                           'full_disk',
                                           pname,
                                           proj,
                                           3712,
                                           3712,
                                           aex )
            # define name and calculate area for sub-demain
            area_name= 'MSG_'+'{:04d}'.format(VIS_IRNorthLine)+'_'+'{:04d}'.format(VIS_IRWestColumn)+'_'+'{:04d}'.format(VIS_IRSouthLine)+'_'+'{:04d}'.format(VIS_IREastColumn)
            aex = full_disk_def.get_area_extent_for_subset(3712-VIS_IRSouthLine,3712-VIS_IRWestColumn,3712-VIS_IRNorthLine,3712-VIS_IREastColumn)
            area_def = AreaDefinition(area_name,
                                      area_name,
                                      pname,
                                      proj,
                                      (VIS_IRWestColumn-VIS_IREastColumn)+1,
                                      (VIS_IRNorthLine-VIS_IRSouthLine)+1,
                                      aex )
        #print area_def
        #print "REGION:", area_def.area_id, "{"
        #print "\tNAME:\t", area_def.name
        #print "\tPCS_ID:\t", area_def.proj_id
        #print ("\tPCS_DEF:\tproj="+area_def.proj_dict['proj']+", lon_0=" + area_def.proj_dict['lon_0'] + ", a="+area_def.proj_dict['a']+", b="+area_def.proj_dict['b']+", h="+area_def.proj_dict['h'])
        #print "\tXSIZE:\t", area_def.x_size
        #print "\tYSIZE:\t", area_def.y_size
        #print "\tAREA_EXTENT:\t", area_def.area_extent
        #print "};"
        # copy area to satscene
        satscene.area = area_def
        # write information used by mipp.xrit.MSG._Calibrator in a fake header file
        hdr = dict()
        # satellite ID number
        hdr["SatelliteDefinition"] = dict()
        hdr["SatelliteDefinition"]["SatelliteId"] = SatelliteIds[str(satscene.sat_nr())]
        # processing
        hdr["Level 1_5 ImageProduction"] = dict()
        hdr["Level 1_5 ImageProduction"]["PlannedChanProcessing"] = np_array([2,2,2,2,2,2,2,2,2,2,2,2], int)
        # calibration factors
        Level15ImageCalibration = hf.get(data_folder+'METADATA/HEADER/RadiometricProcessing/Level15ImageCalibration_ARRAY')
        hdr["Level1_5ImageCalibration"] = dict()
        for chn_name in channel_numbers.keys():
            chn_nb = channel_numbers[chn_name]-1
            hdr["Level1_5ImageCalibration"][chn_nb] = dict()
            #print chn_name, chn_nb, Level15ImageCalibration[chn_nb]['Cal_Slope'], Level15ImageCalibration[chn_nb]['Cal_Offset']
            hdr["Level1_5ImageCalibration"][chn_nb]['Cal_Slope'] = Level15ImageCalibration[chn_nb]['Cal_Slope']
            hdr["Level1_5ImageCalibration"][chn_nb]['Cal_Offset'] = Level15ImageCalibration[chn_nb]['Cal_Offset']
        # loop over channels to load
        for chn_name in satscene.channels_to_load:
            dataset_name = data_folder+'DATA/'+dict_channel[chn_name]+'/IMAGE_DATA'
            if dataset_name in hf:
                data_tmp = hf.get(data_folder+'DATA/'+dict_channel[chn_name]+'/IMAGE_DATA')
                LOG.info('hdr["SatelliteDefinition"]["SatelliteId"]: '+str(hdr["SatelliteDefinition"]["SatelliteId"]))
                #LOG.info('hdr["Level 1_5 ImageProduction"]["PlannedChanProcessing"]', hdr["Level 1_5 ImageProduction"]["PlannedChanProcessing"])
                chn_nb = channel_numbers[chn_name]-1
                LOG.info('hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Slope"]:  '+str(hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Slope"]))
                LOG.info('hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Offset"]: '+str(hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Offset"]))
                if calibrate:
                    #Calibrator = _Calibrator(hdr, chn_name)
                    bits_per_pixel = 10 ### !!! I have no idea if this is correct !!!
                    Calibrator = _Calibrator(hdr, chn_name, bits_per_pixel) ## changed call in mipp/xrit/MSG.py
                    data, calibration_unit = Calibrator (data_tmp, calibrate=1)
                else:
                    data = data_tmp
                    calibration_unit = "counts"
                LOG.info(chn_name+ " min/max: "+str(data.min())+","+str(data.max())+" "+calibration_unit )
                satscene[chn_name] = ma.asarray(data)
                satscene[chn_name].info['units'] = calibration_unit
                satscene[chn_name].info['satname'] = satscene.satname
                satscene[chn_name].info['satnumber'] = satscene.number
                satscene[chn_name].info['instrument_name'] = satscene.instrument_name
                satscene[chn_name].info['time'] = satscene.time_slot
                satscene[chn_name].info['is_calibrated'] = True
            else:
                # Channel missing from the file: warn and leave it unset.
                print "*** Warning, no data for channel "+ chn_name+ " in file "+ filename
                data = np_nan
                calibration_unit = ""
                LOG.info("*** Warning, no data for channel "+ chn_name+" in file "+filename)
                # do not append the channel chn_name
|
mraspaud/mpop
|
mpop/satin/msg_seviri_hdf.py
|
Python
|
gpl-3.0
| 12,369
|
[
"NetCDF"
] |
8c21cd7d47e6ad82e18f85b25f6ba2129964824a98a011d08469070202695427
|
# -*- coding: utf-8 -*-
# This file is part of the Horus Project
__author__ = 'Jesús Arroyo Torrens <jesus.arroyo@bq.com>'
__copyright__ = 'Copyright (C) 2014-2016 Mundo Reader S.L.'
__license__ = 'GNU General Public License v2 http://www.gnu.org/licenses/gpl2.html'
import cv2
import math
import numpy as np
import scipy.ndimage
from horus import Singleton
from horus.engine.calibration.calibration_data import CalibrationData
from horus.engine.algorithms.point_cloud_roi import PointCloudROI
@Singleton
class LaserSegmentation(object):
    """Extract the laser stripe from camera images.

    For each image this produces the sub-pixel horizontal position (u) of
    the laser line for every row (v) with non-zero laser response, using a
    center-of-mass peak detector optionally refined by a segmented
    gaussian filter ('SGF') or a RANSAC line fit ('RANSAC').
    """

    def __init__(self):
        self.calibration_data = CalibrationData()
        self.point_cloud_roi = PointCloudROI()
        # Image plane used to isolate the laser; see _obtain_red_channel.
        self.red_channel = 'R (RGB)'
        self.threshold_enable = False
        self.threshold_value = 0
        self.blur_enable = False
        self.blur_value = 0
        self.window_enable = False
        self.window_value = 0
        # Refinement applied in compute_2d_points: 'SGF' or 'RANSAC'.
        self.refinement_method = 'SGF'

    def set_red_channel(self, value):
        self.red_channel = value

    def set_threshold_enable(self, value):
        self.threshold_enable = value

    def set_threshold_value(self, value):
        self.threshold_value = value

    def set_blur_enable(self, value):
        self.blur_enable = value

    def set_blur_value(self, value):
        # Stored as an odd kernel size (2*value + 1) for cv2.blur.
        self.blur_value = 2 * value + 1

    def set_window_enable(self, value):
        self.window_enable = value

    def set_window_value(self, value):
        self.window_value = value

    def set_refinement_method(self, value):
        self.refinement_method = value

    def compute_2d_points(self, image):
        """Return ((u, v), segmented_image) for the laser line in *image*.

        u holds the weighted-mean (center of mass) column position per
        image row v with non-zero response.  Returns None implicitly when
        *image* is None.
        """
        if image is not None:
            image = self.compute_line_segmentation(image)
            # Peak detection: center of mass
            s = image.sum(axis=1)
            v = np.where(s > 0)[0]
            u = (self.calibration_data.weight_matrix * image).sum(axis=1)[v] / s[v]
            if self.refinement_method == 'SGF':
                # Segmented gaussian filter
                u, v = self._sgf(u, v, s)
            elif self.refinement_method == 'RANSAC':
                # Random sample consensus
                u, v = self._ransac(u, v)
            return (u, v), image

    def compute_hough_lines(self, image):
        """Return the Hough lines detected in the segmented *image*."""
        if image is not None:
            image = self.compute_line_segmentation(image)
            lines = cv2.HoughLines(image, 1, np.pi / 180, 120)
            # if lines is not None:
            #     rho, theta = lines[0][0]
            #     ## Calculate coordinates
            #     u1 = rho / np.cos(theta)
            #     u2 = u1 - height * np.tan(theta)
            return lines

    def compute_line_segmentation(self, image, roi_mask=False):
        """Isolate the laser stripe: ROI mask, red channel, threshold, window."""
        if image is not None:
            # Apply ROI mask
            if roi_mask:
                image = self.point_cloud_roi.mask_image(image)
            # Obtain red channel
            image = self._obtain_red_channel(image)
            if image is not None:
                # Threshold image
                image = self._threshold_image(image)
                # Window mask
                image = self._window_mask(image)
            return image

    def _obtain_red_channel(self, image):
        """Return the single-channel plane configured in self.red_channel."""
        ret = None
        if self.red_channel == 'R (RGB)':
            # NOTE(review): takes the first plane of the image -- assumes the
            # frames arrive in RGB order.  TODO confirm against the capture
            # pipeline (OpenCV captures are commonly BGR).
            ret = cv2.split(image)[0]
        elif self.red_channel == 'Cr (YCrCb)':
            ret = cv2.split(cv2.cvtColor(image, cv2.COLOR_RGB2YCR_CB))[1]
        elif self.red_channel == 'U (YUV)':
            ret = cv2.split(cv2.cvtColor(image, cv2.COLOR_RGB2YUV))[1]
        return ret

    def _threshold_image(self, image):
        """Zero out pixels below the threshold; optionally blur and re-threshold."""
        if self.threshold_enable:
            image = cv2.threshold(
                image, self.threshold_value, 255, cv2.THRESH_TOZERO)[1]
            if self.blur_enable:
                # Blur spreads the stripe, so threshold again afterwards.
                image = cv2.blur(image, (self.blur_value, self.blur_value))
                image = cv2.threshold(
                    image, self.threshold_value, 255, cv2.THRESH_TOZERO)[1]
        return image

    def _window_mask(self, image):
        """Keep only a +/- window_value band around each row's peak column."""
        if self.window_enable:
            peak = image.argmax(axis=1)
            _min = peak - self.window_value
            _max = peak + self.window_value + 1
            mask = np.zeros_like(image)
            for i in xrange(self.calibration_data.height):
                mask[i, _min[i]:_max[i]] = 255
            # Apply mask
            image = cv2.bitwise_and(image, mask)
        return image

    # Segmented gaussian filter
    def _sgf(self, u, v, s):
        """Smooth u with a gaussian filter applied per contiguous stripe segment."""
        if len(u) > 1:
            i = 0
            sigma = 2.0
            f = np.array([])
            # Contiguous runs of non-zero rows in s are separate segments.
            segments = [s[_r] for _r in np.ma.clump_unmasked(np.ma.masked_equal(s, 0))]
            # Detect stripe segments
            for segment in segments:
                j = len(segment)
                # Apply gaussian filter
                fseg = scipy.ndimage.gaussian_filter(u[i:i + j], sigma=sigma)
                f = np.concatenate((f, fseg))
                i += j
            return f, v
        else:
            return u, v

    # RANSAC implementation: https://github.com/ahojnnes/numpy-snippets/blob/master/ransac.py
    def _ransac(self, u, v):
        """Replace u with the RANSAC-fitted line evaluated at rows v."""
        if len(u) > 1:
            data = np.vstack((v.ravel(), u.ravel())).T
            dr, thetar = self.ransac(data, self.LinearLeastSquares2D(), 2, 2)
            # Invert the hesse normal form d = u*sin(theta) + v*cos(theta)... wait:
            # solve d = x*sin(theta) + y*cos(theta) for the column coordinate.
            u = (dr - v * math.sin(thetar)) / math.cos(thetar)
        return u, v

    class LinearLeastSquares2D(object):
        '''
        2D linear least squares using the hesse normal form:
            d = x*sin(theta) + y*cos(theta)
        which allows you to have vertical lines.
        '''

        def fit(self, data):
            """Return (d, theta) of the least-squares line through *data* (Nx2)."""
            data_mean = data.mean(axis=0)
            x0, y0 = data_mean
            if data.shape[0] > 2:  # over determined
                u, v, w = np.linalg.svd(data - data_mean)
                vec = w[0]
                theta = math.atan2(vec[0], vec[1])
            elif data.shape[0] == 2:  # well determined
                theta = math.atan2(data[1, 0] - data[0, 0], data[1, 1] - data[0, 1])
            theta = (theta + math.pi * 5 / 2) % (2 * math.pi)
            d = x0 * math.sin(theta) + y0 * math.cos(theta)
            return d, theta

        def residuals(self, model, data):
            """Return the absolute distance of each data point to the line."""
            d, theta = model
            dfit = data[:, 0] * math.sin(theta) + data[:, 1] * math.cos(theta)
            return np.abs(d - dfit)

        def is_degenerate(self, sample):
            # Any 2-point sample defines a line, so no sample is degenerate.
            return False

    def ransac(self, data, model_class, min_samples, threshold, max_trials=100):
        '''
        Fits a model to data with the RANSAC algorithm.
        :param data: numpy.ndarray
            data set to which the model is fitted, must be of shape NxD where
            N is the number of data points and D the dimensionality of the data
        :param model_class: object
            object with the following methods implemented:
             * fit(data): return the computed model
             * residuals(model, data): return residuals for each data point
             * is_degenerate(sample): return boolean value if sample choice is
                degenerate
            see LinearLeastSquares2D class for a sample implementation
        :param min_samples: int
            the minimum number of data points to fit a model
        :param threshold: int or float
            maximum distance for a data point to count as an inlier
        :param max_trials: int, optional
            maximum number of iterations for random sample selection, default 100
        :returns: tuple
            best model returned by model_class.fit, best inlier indices
        '''
        best_model = None
        best_inlier_num = 0
        best_inliers = None
        data_idx = np.arange(data.shape[0])
        for _ in xrange(max_trials):
            sample = data[np.random.randint(0, data.shape[0], 2)]
            if model_class.is_degenerate(sample):
                continue
            sample_model = model_class.fit(sample)
            sample_model_residua = model_class.residuals(sample_model, data)
            sample_model_inliers = data_idx[sample_model_residua < threshold]
            inlier_num = sample_model_inliers.shape[0]
            if inlier_num > best_inlier_num:
                best_inlier_num = inlier_num
                best_inliers = sample_model_inliers
        # Refit on all inliers of the best trial for the final model.
        if best_inliers is not None:
            best_model = model_class.fit(data[best_inliers])
        return best_model
|
bqlabs/horus
|
src/horus/engine/algorithms/laser_segmentation.py
|
Python
|
gpl-2.0
| 8,419
|
[
"Gaussian"
] |
1c3287a2136b6692c9eded4eef2af1cfe57626d7bafa4b445eb07fb379eb21e1
|
#!/usr/bin/env python
__author__ = 'Mike McCann,Duane Edgington,Danelle Cline'
__copyright__ = '2018'
__license__ = 'GPL v3'
__contact__ = 'duane at mbari.org'
__doc__ = '''
Master loader for all CANON May-June Campaign 2019
Mike McCann, Duane Edgington, Danelle Cline
MBARI 22 May 2019
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
import datetime # needed for glider data
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir) # So that CANON is found
from CANON import CANONLoader
import timing
# Campaign loader object; all data source attributes below are attached to it
# and consumed by the cl.load*() calls at the bottom of this script.
cl = CANONLoader('stoqs_canon_may2019', 'CANON - May 2019',
                 description='May 2019 coordinated campaign observations in Monterey Bay',
                 x3dTerrains={
                     'https://stoqs.mbari.org/x3d/Monterey25_10x/Monterey25_10x_scene.x3d': {
                         'position': '-2822317.31255 -4438600.53640 3786150.85474',
                         'orientation': '0.89575 -0.31076 -0.31791 1.63772',
                         'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236',
                         'VerticalExaggeration': '10',
                     },
                     'https://stoqs.mbari.org/x3d/Monterey25_1x/Monterey25_1x_src_scene.x3d': {
                         'name': 'Monterey25_1x',
                         'position': '-2822317.31255 -4438600.53640 3786150.85474',
                         'orientation': '0.89575 -0.31076 -0.31791 1.63772',
                         'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236',
                         'VerticalExaggeration': '1',
                     },
                 },
                 grdTerrain=os.path.join(parentDir, 'Monterey25.grd')
                 )
# Set start and end dates for all loads from sources that contain data
# beyond the temporal bounds of the campaign
#
startdate = datetime.datetime(2019, 5, 20)  # Fixed start.May 20, 2019
enddate = datetime.datetime(2019, 6, 8)  # Fixed end. June 8, 2019.
# default location of thredds and dods data:
cl.tdsBase = 'http://odss.mbari.org/thredds/'
cl.dodsBase = cl.tdsBase + 'dodsC/'
#####################################################################
#  DORADO
#####################################################################
# Use the attributes built by loadDorado() using startdate and enddate
#####################################################################
#  LRAUV
#####################################################################
# Load netCDF files produced (binned, etc.) by Danelle Cline
# These binned files are created with the makeLRAUVNetCDFs.sh script in the
# toNetCDF directory. You must first edit and run that script once to produce
# the binned files before this will work
# Use the default parameters provided by loadLRAUV() calls below
######################################################################
#  GLIDERS
######################################################################
# Glider data files from CeNCOOS thredds server
# L_662a updated parameter names in netCDF file
cl.l_662a_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/Line67/'
cl.l_662a_files = [
    'OS_Glider_L_662_20190328_TS.nc',
]
cl.l_662a_parms = ['temperature', 'salinity', 'fluorescence','oxygen']
cl.l_662a_startDatetime = startdate
cl.l_662a_endDatetime = enddate
# NPS_34a updated parameter names in netCDF file
## The following loads decimated subset of data telemetered during deployment
cl.nps34a_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/MBARI/'
cl.nps34a_files = [ 'OS_Glider_NPS_G34_20180514_TS.nc' ]
cl.nps34a_parms = ['temperature', 'salinity','fluorescence']
cl.nps34a_startDatetime = startdate
cl.nps34a_endDatetime = enddate
# NPS_29 ##
cl.nps29_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/MBARI/'
cl.nps29_files = [ 'OS_Glider_NPS_G29_20190528_TS.nc' ]
cl.nps29_parms = ['TEMP', 'PSAL', 'FLU2', 'OXYG']
cl.nps29_startDatetime = startdate
cl.nps29_endDatetime = enddate
# Slocum Teledyne nemesis Glider
## from ioos site ## these files proved to be not compatible with python loader
## cl.slocum_nemesis_base = 'https://data.ioos.us/gliders/thredds/dodsC/deployments/mbari/Nemesis-20170412T0000/'
## cl.slocum_nemesis_files = [ 'Nemesis-20170412T0000.nc3.nc' ]
##   from cencoos directory, single non-aggregated files
cl.slocum_nemesis_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/MBARI/nemesis_201808/'
cl.slocum_nemesis_files = [
    'nemesis_20180912T155836_rt0.nc',
]
cl.slocum_nemesis_startDatetime = startdate
cl.slocum_nemesis_endDatetime = enddate
######################################################################
# Wavegliders
######################################################################
# WG Tex - All instruments combined into one file - one time coordinate
##cl.wg_tex_base = cl.dodsBase + 'CANON_september2013/Platforms/Gliders/WG_Tex/final/'
##cl.wg_tex_files = [ 'WG_Tex_all_final.nc' ]
##cl.wg_tex_parms = [ 'wind_dir', 'wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal', 'density', 'bb_470', 'bb_650', 'chl' ]
##cl.wg_tex_startDatetime = startdate
##cl.wg_tex_endDatetime = enddate
# WG Hansen - All instruments combined into one file - one time coordinate
cl.wg_Hansen_base = 'http://dods.mbari.org/opendap/data/waveglider/deployment_data/'
cl.wg_Hansen_files = [
    'wgHansen/20190522/realTime/20190522.nc',
]
cl.wg_Hansen_parms = [ 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp_float', 'sal_float', 'water_temp_sub',
                       'sal_sub', 'bb_470', 'bb_650', 'chl', 'beta_470', 'beta_650', 'pH', 'O2_conc_float','O2_conc_sub' ] # two ctds (_float, _sub), no CO2
cl.wg_Hansen_depths = [ 0 ]
cl.wg_Hansen_startDatetime = startdate
cl.wg_Hansen_endDatetime = enddate
# WG Sparky - All instruments combined into one file - one time coordinate
cl.wg_Sparky_base = 'http://dods.mbari.org/opendap/data/waveglider/deployment_data/'
cl.wg_Sparky_files = [
    'wgSparky/20180905/QC/20180905_QC.nc',
]
cl.wg_Sparky_parms = [ 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp_float', 'sal_float', 'water_temp_sub',
                       'sal_sub', 'bb_470', 'bb_650', 'chl', 'beta_470', 'beta_650', 'pH', 'O2_conc' ] # two ctds (_float, _sub), no CO2
cl.wg_Sparky_depths = [ 0 ]
cl.wg_Sparky_startDatetime = startdate
cl.wg_Sparky_endDatetime = enddate
# WG Tiny - All instruments combined into one file - one time coordinate
cl.wg_Tiny_base = 'http://dods.mbari.org/opendap/data/waveglider/deployment_data/'
cl.wg_Tiny_files = [
    'wgTiny/20190513/realTime/20190513.nc',
]
cl.wg_Tiny_parms = [ 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal',  'bb_470', 'bb_650', 'chl',
                     'beta_470', 'beta_650', 'pCO2_water', 'pCO2_air', 'pH', 'O2_conc' ]
cl.wg_Tiny_depths = [ 0 ]
cl.wg_Tiny_startDatetime = startdate
cl.wg_Tiny_endDatetime = enddate
# WG OA - All instruments combined into one file - one time coordinate
##cl.wg_oa_base = cl.dodsBase + 'CANON/2015_OffSeason/Platforms/Waveglider/wgOA/'
##cl.wg_oa_files = [ 'Sept_2013_OAWaveglider_final.nc' ]
##cl.wg_oa_parms = [ 'distance', 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal', 'O2_conc',
##                   'O2_sat', 'beta_470', 'bb_470', 'beta_700', 'bb_700', 'chl', 'pCO2_water', 'pCO2_air', 'pH' ]
##cl.wg_oa_startDatetime = startdate
##cl.wg_oa_endDatetime = enddate
######################################################################
#  MOORINGS
######################################################################
cl.m1_base = 'http://dods.mbari.org/opendap/data/ssdsdata/deployments/m1/'
cl.m1_files = [
    '201808/OS_M1_20180806hourly_CMSTV.nc',]
cl.m1_parms = [
    'eastward_sea_water_velocity_HR', 'northward_sea_water_velocity_HR',
    'SEA_WATER_SALINITY_HR', 'SEA_WATER_TEMPERATURE_HR', 'SW_FLUX_HR', 'AIR_TEMPERATURE_HR',
    'EASTWARD_WIND_HR', 'NORTHWARD_WIND_HR', 'WIND_SPEED_HR'
]
cl.m1_startDatetime = startdate
cl.m1_endDatetime = enddate
# Mooring 0A1
cl.oa1_base = 'http://dods.mbari.org/opendap/data/oa_moorings/deployment_data/OA1/201810/realTime/'
cl.oa1_files = [
    'OA1_201810.nc'
]
cl.oa1_parms = [
    'wind_dir', 'avg_wind_spd', 'atm_press', 'air_temp', 'water_temp',
    'sal', 'O2_conc', 'chl', 'pCO2_water', 'pCO2_air', 'pH',
]
cl.oa1_startDatetime = startdate
cl.oa1_endDatetime = enddate
# Mooring 0A2
cl.oa2_base = 'http://dods.mbari.org/opendap/data/oa_moorings/deployment_data/OA2/201812/'
cl.oa2_files = [
    'realTime/OA2_201812.nc'
]
cl.oa2_parms = [
    'wind_dir', 'avg_wind_spd', 'atm_press', 'air_temp', 'water_temp',
    'sal', 'O2_conc', 'chl', 'pCO2_water', 'pCO2_air', 'pH',
]
cl.oa2_startDatetime = startdate
cl.oa2_endDatetime = enddate
######################################################################
#  RACHEL CARSON: Jan 2017 --
######################################################################
# UCTD
cl.rcuctd_base = cl.dodsBase + 'Other/routine/Platforms/Ships/RachelCarson/uctd/'
cl.rcuctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'wetstar' ]
cl.rcuctd_files = [
    #                  '00917plm01.nc',
]
# PCTD
cl.rcpctd_base = cl.dodsBase + 'Other/routine/Platforms/Ships/RachelCarson/pctd/'
cl.rcpctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'ecofl', 'oxygen' ]
cl.rcpctd_files = [
    #                  '00917c01.nc',
]
######################################################################
#  WESTERN FLYER: Apr 2017 --
######################################################################
# UCTD
cl.wfuctd_base = cl.dodsBase + 'Other/routine/Platforms/Ships/WesternFlyer/uctd/'
cl.wfuctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'wetstar' ]
cl.wfuctd_files = [
    'CN19SM01.nc',
    'CN19SM02.nc',
    'CN19SM03.nc',
    'CN19SM04.nc',
    'CN19SM05.nc',
    'CN19SM06.nc',
]
# PCTD
cl.wfpctd_base = cl.dodsBase + 'Other/routine/Platforms/Ships/WesternFlyer/pctd/'
cl.wfpctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'ecofl', 'oxygen' ]
cl.wfpctd_files = [
    'CN19SC01.nc',
    'CN19SC02.nc',
    'CN19SC03.nc',
    'CN19SC04.nc',
    'CN19SC05.nc',
    'CN19SC06.nc',
    'CN19SC07.nc',
    'CN19SC08.nc',
    'CN19SC09.nc',
    'CN19SC10.nc',
    'CN19SC11.nc',
    'CN19SC12.nc',
    'CN19SC13.nc',
    'CN19SC14.nc',
    'CN19SC15.nc',
    'CN19SC16.nc',
    'CN19SC17.nc',
    'CN19SC18.nc',
    'CN19SC19.nc',
    'CN19SC20.nc',
    'CN19SC21.nc',
    'CN19SC22.nc',
    'CN19SC23.nc',
    'CN19SC24.nc',
    'CN19SC25.nc',
    'CN19SC26.nc',
    'CN19SC27.nc',
    'CN19SC28.nc',
    'CN19SC29.nc',
    'CN19SC30.nc',
    'CN19SC31.nc',
    'CN19SC32.nc',
    'CN19SC33.nc',
    'CN19SC34.nc',
    'CN19SC35.nc',
    'CN19SC36.nc',
    'CN19SC37.nc',
    'CN19SC38.nc',
    'CN19SC39.nc',
]
# DEIMOS
cl.deimos_base = cl.dodsBase + 'Other/routine/Platforms/DEIMOS/netcdf/'
cl.deimos_parms = [ 'Sv_mean' ]
cl.deimos_files = [ 'deimos-2019-CANON-Spring.nc' ]
###################################################################################################
# SubSample data files from /mbari/BOG_Archive/ReportsForSTOQS/
#   copied to local BOG_Data/N18F
###################################################################################################
# NOTE(review): directory says CN18F while the files below are STOQS_CN19S_*;
# presumably the local copy was placed under the CN18F folder -- verify the
# directory before running loadSubSamples().
cl.subsample_csv_base = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'BOG_Data/CN18F/CN18F/')
cl.subsample_csv_files = [
    'STOQS_CN19S_CARBON_GFF.csv',
    'STOQS_CN19S_ALK.csv',
    'STOQS_CN19S_TCO2.csv',
    'STOQS_CN19S_TRANSBEAM.csv',
    'STOQS_CN19S_COND2.csv',
    'STOQS_CN19S_TEMP2.csv',
    'STOQS_CN19S_SAL2.csv',
    'STOQS_CN19S_OXY_PS.csv',
    'STOQS_CN19S_OXY_ML.csv',
    'STOQS_CN19S_NH4.csv',
    'STOQS_CN19S_PHAEO_5U.csv',
    'STOQS_CN19S_CHL_1U.csv',
    'STOQS_CN19S_CHL_5U.csv',
    'STOQS_CN19S_O2.csv',
    'STOQS_CN19S_PHAEO_1U.csv',
    'STOQS_CN19S_TRANSMISS.csv',
    'STOQS_CN19S_CHLA.csv',
    'STOQS_CN19S_FLUOR.csv',
    'STOQS_CN19S_CONDUCT.csv',
    'STOQS_CN19S_SIG_T.csv',
    'STOQS_CN19S_SAL.csv',
    'STOQS_CN19S_TMP.csv',
    'STOQS_CN19S_PHAEO_GFF.csv',
    'STOQS_CN19S_CHL_GFF.csv',
]
# Execute the load
cl.process_command_line()
if cl.args.test:
    cl.stride = 100
elif cl.args.stride:
    cl.stride = cl.args.stride
# Lots of debugging in this Campaign - keep these lines here commented out (to be used if problems crop up)
##startdate = datetime.datetime(2019, 5, 23, 18)
##enddate = datetime.datetime(2019, 5, 24, 1)
##cl.daphne_base = 'http://dods.mbari.org/opendap/data/lrauv/daphne/missionlogs/2019/20190528_20190606/20190529T194310'
##cl.daphne_base = 'http://dods.mbari.org/opendap/data/lrauv/daphne/missionlogs/2019/20190528_20190606/20190529T194310'
##cl.daphne_base = 'http://dods.mbari.org/opendap/data/lrauv/daphne/missionlogs/2019/20190528_20190606/20190601T142912'
##cl.daphne_base = 'http://dods.mbari.org/opendap/data/lrauv/daphne/missionlogs/2019/20190528_20190606/20190602T025849'
##cl.daphne_base = 'http://dods.mbari.org/opendap/data/lrauv/daphne/missionlogs/2019/20190528_20190606/20190529T115150'
##cl.daphne_base = 'http://dods.mbari.org/opendap/data/lrauv/daphne/missionlogs/2019/20190528_20190606/20190604T233636'
##cl.daphne_base = 'http://dods.mbari.org/opendap/data/lrauv/daphne/missionlogs/2019/20190528_20190606/20190602T170120/'
##cl.daphne_files = ['201905291943_201905300048_2S_scieng.nc']
##cl.daphne_files = ['201905291943_201905300048_2S_scieng.nc']
##cl.daphne_files = ['201906011429_201906020258_2S_scieng.nc']
##cl.daphne_files = ['201906020259_201906021701_2S_scieng.nc']
##cl.daphne_files = ['201905291151_201905291713_2S_scieng.nc']
##cl.daphne_files = ['201906042336_201906052235_2S_scieng.nc']
##cl.daphne_files = ['201906021701_201906030901_2S_scieng.nc']
##cl.daphne_parms = ['temperature']
##cl.loadLRAUV('daphne', startdate, enddate, build_attrs=False)
##cl.makai_base = 'http://dods.mbari.org/opendap/data/lrauv/makai/missionlogs/2019/20190528_20190530/20190528T183638'
##cl.makai_base = 'http://dods.mbari.org/opendap/data/lrauv/makai/missionlogs/2019/20190528_20190530/20190529T081339'
##cl.makai_base = 'http://dods.mbari.org/opendap/data/lrauv/makai/missionlogs/2019/20190528_20190530/20190530T005113'
##cl.makai_files = ['201905281836_201905281946_2S_scieng.nc']
##cl.makai_files = ['201905290813_201905300051_2S_scieng.nc']
##cl.makai_files = ['201905300051_201905301829_2S_scieng.nc']
##cl.makai_parms = ['temperature']
##cl.loadLRAUV('makai', startdate, enddate, build_attrs=False)
##sys.exit()
# Run the platform loaders configured above (each reads the cl.* attributes
# set earlier in this script).
cl.loadM1()
cl.loadDEIMOS(startdate, enddate)
cl.loadL_662a()
cl.load_NPS29()
##cl.load_NPS34a() ## not in this campaign
##cl.load_slocum_nemesis() ## not in this campaign
cl.load_wg_Tiny()
##cl.load_wg_Sparky() ## not in this campaign
cl.load_wg_Hansen() ## new for this campaign
#cl.load_oa1() ## no data during this campaign
cl.load_oa2()
#cl.loadDorado(startdate, enddate, build_attrs=True) ## waiting for first data
cl.loadLRAUV('makai', startdate, enddate)
cl.loadLRAUV('daphne', startdate, enddate)
cl.loadLRAUV('tethys', startdate, enddate)
#cl.loadRCuctd() ## waiting for first data
#cl.loadRCpctd() ## waiting for first data
cl.loadWFuctd()
cl.loadWFpctd()
cl.loadSubSamples()
# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
cl.addTerrainResources()
print("All Done.")
|
stoqs/stoqs
|
stoqs/loaders/CANON/loadCANON_may2019.py
|
Python
|
gpl-3.0
| 16,982
|
[
"NetCDF"
] |
8864f918c780803b0407fbbb3a26d7a949799a97600d0fb256994429b0a55380
|
import pytest
from pysisyphus.benchmarks import Benchmark
from pysisyphus.calculators.PySCF import PySCF
# from pysisyphus.helpers_pure import filter_fixture_store
from pysisyphus.optimizers.RFOptimizer import RFOptimizer
from pysisyphus.testing import using
def calc_getter(charge, mult):
    """Build a PySCF STO-3G calculator for the given charge and multiplicity."""
    settings = {"basis": "sto3g", "pal": 4, "charge": charge, "mult": mult}
    return PySCF(**settings)
# Baker ground-state benchmark set, set up in redundant internal coordinates.
BakerBm = Benchmark("baker", coord_type="redund", calc_getter=calc_getter)


@pytest.mark.benchmark
@using("pyscf")
@pytest.mark.parametrize("fn, geom, ref_energy", BakerBm)
def test_baker_gs_opt(fn, geom, ref_energy, results_bag):
    """Optimize one Baker benchmark geometry and record per-case statistics."""
    optimizer = RFOptimizer(geom, thresh="baker")
    optimizer.run()
    # Stash statistics for the synthesis test that runs afterwards.
    results_bag.cycles = optimizer.cur_cycle + 1
    results_bag.is_converged = optimizer.is_converged
    results_bag.energy = geom.energy
    results_bag.ref_energy = ref_energy
    assert geom.energy == pytest.approx(ref_energy)
# @filter_fixture_store("test_baker_gs_opt")
@pytest.mark.benchmark
def test_baker_gs_opt_synthesis(fixture_store):
    """Summarize the results bags collected by test_baker_gs_opt runs."""
    for index, fixture in enumerate(fixture_store):
        print(index, fixture)
    tot_cycles = 0
    converged = 0
    bags = fixture_store["results_bag"]
    for name, bag in bags.items():
        # Only consider bags produced by the Baker optimization test.
        if not name.startswith("test_baker_gs_opt"):
            continue
        print(name)
        try:
            tot_cycles += bag["cycles"]
            energy_matches = bag["energy"] == pytest.approx(bag["ref_energy"])
            converged += 1 if bag["is_converged"] and energy_matches else 0
            for key, val in bag.items():
                print("\t", key, val)
        except KeyError:
            # Bag is incomplete when the optimization errored out early.
            print("\tFailed!")
    print(f"Total cycles: {tot_cycles}")
    print(f"Converged: {converged}/{len(bags)}")
|
eljost/pysisyphus
|
tests/test_baker/test_baker.py
|
Python
|
gpl-3.0
| 1,721
|
[
"PySCF"
] |
e29b694ca19cb5e7c8d22f5b512ef603f52ab95811c7e77f68b5353bf0351d12
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class F3d(CMakePackage):
    """A fast and minimalist scriptable 3D viewer."""

    # Project landing page and release tarball URL template.
    homepage = "https://f3d-app.github.io"
    url = "https://github.com/f3d-app/f3d/archive/refs/tags/v1.1.1.tar.gz"

    version('1.1.1', sha256='68bdbe3a90f2cd553d5e090a95d3c847e2a2f06abbe225ffecd47d3d29978b0a')

    # f3d is built on top of VTK 9 or newer; link-time dependency only.
    depends_on('vtk@9:', type='link')
|
LLNL/spack
|
var/spack/repos/builtin/packages/f3d/package.py
|
Python
|
lgpl-2.1
| 559
|
[
"VTK"
] |
c6a732071b632c06359de31d85ecc827e583bd038f9f78953342e23191729410
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''The <output> and <file> elements.
'''
from __future__ import print_function
import os
from grit import xtb_reader
from grit.node import base
class FileNode(base.Node):
  '''A <file> element.

  References an XTB translation file.  RunPostSubstitutionGatherer() parses
  the file and feeds its messages into the UberClique, unless loading has
  been disabled via DisableLoading().
  '''

  def __init__(self):
    super(FileNode, self).__init__()
    self.re = None
    # Whether RunPostSubstitutionGatherer() should actually parse the file.
    self.should_load_ = True

  def IsTranslation(self):
    return True

  def GetLang(self):
    '''Returns the language code from the 'lang' attribute.'''
    return self.attrs['lang']

  def DisableLoading(self):
    '''Prevents the referenced XTB file from being parsed.'''
    self.should_load_ = False

  def MandatoryAttributes(self):
    return ['path', 'lang']

  def RunPostSubstitutionGatherer(self, debug=False):
    '''Parses the referenced XTB file into the UberClique.

    Re-raises any parse error after logging which file failed.
    '''
    if not self.should_load_:
      return

    root = self.GetRoot()
    defs = getattr(root, 'defines', {})
    target_platform = getattr(root, 'target_platform', '')

    # Use a context manager so the file handle is closed even if
    # xtb_reader.Parse() raises (the original leaked the handle on error).
    with open(self.ToRealPath(self.GetInputPath()), 'rb') as xtb_file:
      try:
        lang = xtb_reader.Parse(xtb_file,
                                self.UberClique().GenerateXtbParserCallback(
                                    self.attrs['lang'], debug=debug),
                                defs=defs,
                                target_platform=target_platform)
      except:
        print("Exception during parsing of %s" % self.GetInputPath())
        raise
    # Translation console uses non-standard language codes 'iw' and 'no' for
    # Hebrew and Norwegian Bokmal instead of 'he' and 'nb' used in Chrome.
    # Note that some Chrome's .grd still use 'no' instead of 'nb', but 'nb' is
    # always used for generated .pak files.
    ALTERNATIVE_LANG_CODE_MAP = { 'he': 'iw', 'nb': 'no' }
    # Use .get() so a 'lang' value outside the map triggers the assertion
    # message below instead of an unrelated KeyError.
    assert (lang == self.attrs['lang'] or
            lang == ALTERNATIVE_LANG_CODE_MAP.get(self.attrs['lang'])), (
        'The XTB file you reference must contain messages in the language '
        'specified\nby the \'lang\' attribute.')

  def GetInputPath(self):
    return os.path.expandvars(self.attrs['path'])
class OutputNode(base.Node):
  '''An <output> element.'''

  def MandatoryAttributes(self):
    '''Both 'filename' and 'type' must always be supplied.'''
    return ['filename', 'type']

  def DefaultAttributes(self):
    '''Optional attributes and their defaults.'''
    defaults = {}
    defaults['lang'] = ''  # empty lang indicates all languages
    defaults['language_section'] = 'neutral'  # defines a language neutral section
    defaults['context'] = ''
    defaults['fallback_to_default_layout'] = 'true'
    return defaults

  def GetType(self):
    return self.attrs['type']

  def GetLanguage(self):
    '''Returns the language ID, default 'en'.'''
    return self.attrs['lang']

  def GetContext(self):
    return self.attrs['context']

  def GetFilename(self):
    return self.attrs['filename']

  def GetOutputFilename(self):
    # An explicitly assigned output_filename wins over the attribute.
    chosen = getattr(self, 'output_filename', self.attrs['filename'])
    return os.path.expandvars(chosen)

  def GetFallbackToDefaultLayout(self):
    return 'true' == self.attrs['fallback_to_default_layout'].lower()

  def _IsValidChild(self, child):
    return isinstance(child, EmitNode)
class EmitNode(base.ContentNode):
  ''' An <emit> element.'''

  def DefaultAttributes(self):
    return { 'emit_type' : 'prepend'}

  def GetEmitType(self):
    '''Returns the emit_type for this node. Default is 'prepend'.'''
    return self.attrs['emit_type']
|
scheib/chromium
|
tools/grit/grit/node/node_io.py
|
Python
|
bsd-3-clause
| 3,329
|
[
"xTB"
] |
30f224ef57ac90bd3a5a628f9b443f324c2613b6cf547adf7abfe9195e00acc4
|
########################################################################
# $HeadURL$
########################################################################
"""
:mod: FTSMonitorAgent
=====================
The FTSMonitorAgent takes FTS Requests from the TransferDB and monitors their execution
using FTSRequest helper class.
:deprecated:
"""
# # imports
import time
import re
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.DataManagementSystem.DB.TransferDB import TransferDB
from DIRAC.DataManagementSystem.Client.FTSRequest import FTSRequest
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
# # RCSID
__RCSID__ = "$Id$"
# # agent's name
AGENT_NAME = 'DataManagement/FTSMonitorAgent'
class FTSMonitorAgent( AgentModule ):
  """
  .. class:: FTSMonitorAgent

  Monitor submitted FTS jobs.
  """
  # # transfer DB handle
  transferDB = None
  # # thread pool
  threadPool = None
  # # min threads
  minThreads = 1
  # # max threads
  maxThreads = 10

  # # missing source regexp patterns
  missingSourceErrors = [
    re.compile( r"SOURCE error during TRANSFER_PREPARATION phase: \[INVALID_PATH\] Failed" ),
    re.compile( r"SOURCE error during TRANSFER_PREPARATION phase: \[INVALID_PATH\] No such file or directory" ),
    re.compile( r"SOURCE error during PREPARATION phase: \[INVALID_PATH\] Failed" ),
    re.compile( r"SOURCE error during PREPARATION phase: \[INVALID_PATH\] The requested file either does not exist" ),
    re.compile( r"TRANSFER error during TRANSFER phase: \[INVALID_PATH\] the server sent an error response: 500 500"\
                " Command failed. : open error: No such file or directory" ),
    re.compile( r"SOURCE error during TRANSFER_PREPARATION phase: \[USER_ERROR\] source file doesnt exist" ) ]

  def initialize( self ):
    """ agent's initialisation """
    self.transferDB = TransferDB()
    self.am_setOption( "shifterProxy", "DataManager" )
    self.minThreads = self.am_getOption( "MinThreads", self.minThreads )
    self.maxThreads = self.am_getOption( "MaxThreads", self.maxThreads )
    # Normalise the configured limits: take absolute values and make sure
    # min <= max regardless of how they were configured.
    minmax = ( abs( self.minThreads ), abs( self.maxThreads ) )
    self.minThreads, self.maxThreads = min( minmax ), max( minmax )
    self.log.info( "ThreadPool min threads = %s" % self.minThreads )
    self.log.info( "ThreadPool max threads = %s" % self.maxThreads )
    self.threadPool = ThreadPool( self.minThreads, self.maxThreads )
    self.threadPool.daemonize()
    return S_OK()

  def execute( self ):
    """ push jobs to the thread pool """
    self.log.info( "Obtaining requests to monitor" )
    res = self.transferDB.getFTSReq()
    if not res["OK"]:
      self.log.error( "Failed to get FTS requests", res['Message'] )
      return res
    if not res["Value"]:
      self.log.info( "No FTS requests found to monitor." )
      return S_OK()
    ftsReqs = res["Value"]
    self.log.info( "Found %s FTS jobs" % len( ftsReqs ) )
    i = 1
    for ftsJob in ftsReqs:
      # Retry queueing until the thread pool accepts the job.
      while True:
        self.log.debug( "submitting FTS Job %s FTSReqID=%s to monitor" % ( i, ftsJob["FTSReqID"] ) )
        ret = self.threadPool.generateJobAndQueueIt( self.monitorTransfer, args = ( ftsJob, ), )
        if ret["OK"]:
          i += 1
          break
        # # sleep 1 second to proceed
        time.sleep( 1 )
    self.threadPool.processAllResults()
    return S_OK()

  def ftsJobExpired( self, ftsReqID, channelID ):
    """ clean up when FTS job had expired on the server side

    :param int ftsReqID: FTSReq.FTSReqID
    :param int channelID: FTSReq.ChannelID
    """
    log = gLogger.getSubLogger( "@%s" % str( ftsReqID ) )
    fileIDs = self.transferDB.getFTSReqFileIDs( ftsReqID )
    if not fileIDs["OK"]:
      log.error( "Unable to retrieve FileIDs associated to %s request" % ftsReqID )
      return fileIDs
    fileIDs = fileIDs["Value"]
    # # update FileToFTS table, this is just a clean up, no worry if something goes wrong
    for fileID in fileIDs:
      fileStatus = self.transferDB.setFileToFTSFileAttribute( ftsReqID, fileID,
                                                              "Status", "Failed" )
      if not fileStatus["OK"]:
        log.error( "Unable to set FileToFTS status to 'Failed' for FileID %s: %s" % ( fileID,
                                                                                      fileStatus["Message"] ) )
      failReason = self.transferDB.setFileToFTSFileAttribute( ftsReqID, fileID,
                                                              "Reason", "FTS job expired on server" )
      if not failReason["OK"]:
        log.error( "Unable to set FileToFTS reason for FileID %s: %s" % ( fileID,
                                                                          failReason["Message"] ) )
    # # update Channel table
    resetChannels = self.transferDB.resetFileChannelStatus( channelID, fileIDs )
    if not resetChannels["OK"]:
      log.error( "Failed to reset Channel table for files to retry" )
      return resetChannels
    # # update FTSReq table
    log.info( "Setting FTS request status to 'Finished'" )
    ftsReqStatus = self.transferDB.setFTSReqStatus( ftsReqID, "Finished" )
    if not ftsReqStatus["OK"]:
      log.error( "Failed update FTS Request status", ftsReqStatus["Message"] )
      return ftsReqStatus
    # # if we land here, everything should be OK
    return S_OK()

  def monitorTransfer( self, ftsReqDict ):
    """ monitors transfer obtained from TransferDB

    :param dict ftsReqDict: FTS job dictionary
    """
    ftsReqID = ftsReqDict.get( "FTSReqID" )
    ftsGUID = ftsReqDict.get( "FTSGuid" )
    ftsServer = ftsReqDict.get( "FTSServer" )
    channelID = ftsReqDict.get( "ChannelID" )
    sourceSE = ftsReqDict.get( "SourceSE" )
    targetSE = ftsReqDict.get( "TargetSE" )

    oFTSRequest = FTSRequest()
    oFTSRequest.setFTSServer( ftsServer )
    oFTSRequest.setFTSGUID( ftsGUID )
    oFTSRequest.setSourceSE( sourceSE )
    oFTSRequest.setTargetSE( targetSE )

    log = gLogger.getSubLogger( "@%s" % str( ftsReqID ) )

    #########################################################################
    # Perform summary update of the FTS Request and update FTSReq entries.
    log.info( "Perform summary update of the FTS Request" )
    infoStr = [ "glite-transfer-status -s %s -l %s" % ( ftsServer, ftsGUID ) ]
    infoStr.append( "FTS GUID: %s" % ftsGUID )
    infoStr.append( "FTS Server: %s" % ftsServer )
    log.info( "\n".join( infoStr ) )
    res = oFTSRequest.summary()
    self.transferDB.setFTSReqLastMonitor( ftsReqID )
    if not res["OK"]:
      log.error( "Failed to update the FTS request summary", res["Message"] )
      if "getTransferJobSummary2: Not authorised to query request" in res["Message"]:
        log.error( "FTS job is not existing at the FTS server anymore, will clean it up on TransferDB side" )
        cleanUp = self.ftsJobExpired( ftsReqID, channelID )
        if not cleanUp["OK"]:
          log.error( cleanUp["Message"] )
        return cleanUp
      return res

    res = oFTSRequest.dumpSummary()
    if not res['OK']:
      log.error( "Failed to get FTS request summary", res["Message"] )
      return res
    log.info( res['Value'] )
    res = oFTSRequest.getPercentageComplete()
    if not res['OK']:
      log.error( "Failed to get FTS percentage complete", res["Message"] )
      return res
    log.info( 'FTS Request found to be %.1f percent complete' % res["Value"] )
    self.transferDB.setFTSReqAttribute( ftsReqID, "PercentageComplete", res["Value"] )
    self.transferDB.addLoggingEvent( ftsReqID, res["Value"] )

    #########################################################################
    # Update the information in the TransferDB if the transfer is terminal.
    res = oFTSRequest.isRequestTerminal()
    if not res["OK"]:
      log.error( "Failed to determine whether FTS request terminal", res["Message"] )
      return res
    if not res["Value"]:
      return S_OK()
    # # request is terminal
    return self.terminalRequest( oFTSRequest, ftsReqID, channelID, sourceSE )

  def terminalRequest( self, oFTSRequest, ftsReqID, channelID, sourceSE ):
    """ process terminal FTS job

    :param FTSRequest oFTSRequest: FTSRequest instance
    :param int ftsReqID: FTSReq.FTSReqID
    :param int channelID: FTSReq.ChannelID
    :param str sourceSE: FTSReq.SourceSE
    """
    log = gLogger.getSubLogger( "@%s" % ftsReqID )

    log.info( "FTS Request found to be terminal, updating file states" )
    #########################################################################
    # Get the LFNS associated to the FTS request
    log.info( "Obtaining the LFNs associated to this request" )
    res = self.transferDB.getFTSReqLFNs( ftsReqID, channelID, sourceSE )
    if not res["OK"]:
      log.error( "Failed to obtain FTS request LFNs", res['Message'] )
      return res
    files = res["Value"]
    if not files:
      log.error( "No files present for transfer" )
      return S_ERROR( "No files were found in the DB" )

    lfns = files.keys()
    log.debug( "Obtained %s files" % len( lfns ) )
    for lfn in lfns:
      oFTSRequest.setLFN( lfn )

    res = oFTSRequest.monitor()
    if not res["OK"]:
      log.error( "Failed to perform detailed monitoring of FTS request", res["Message"] )
      return res
    res = oFTSRequest.getFailed()
    if not res["OK"]:
      log.error( "Failed to obtained failed files for FTS request", res["Message"] )
      return res
    failedFiles = res["Value"]
    res = oFTSRequest.getDone()
    if not res["OK"]:
      log.error( "Failed to obtained successful files for FTS request", res["Message"] )
      return res
    completedFiles = res["Value"]

    # An LFN can be included more than once if it was entered into more than one Request.
    # FTS will only do the transfer once. We need to identify all FileIDs
    res = self.transferDB.getFTSReqFileIDs( ftsReqID )
    if not res["OK"]:
      log.error( "Failed to get FileIDs associated to FTS Request", res["Message"] )
      return res
    fileIDs = res["Value"]
    res = self.transferDB.getAttributesForFilesList( fileIDs, ["LFN"] )
    if not res["OK"]:
      log.error( "Failed to get LFNs associated to FTS Request", res["Message"] )
      return res
    fileIDDict = res["Value"]

    fileToFTSUpdates = []
    completedFileIDs = []
    filesToRetry = []
    filesToFail = []

    for fileID, fileDict in fileIDDict.items():
      lfn = fileDict['LFN']
      if lfn in completedFiles:
        completedFileIDs.append( fileID )
        transferTime = 0
        res = oFTSRequest.getTransferTime( lfn )
        if res["OK"]:
          transferTime = res["Value"]
        fileToFTSUpdates.append( ( fileID, "Completed", "", 0, transferTime ) )

      if lfn in failedFiles:
        failReason = ""
        res = oFTSRequest.getFailReason( lfn )
        if res["OK"]:
          failReason = res["Value"]
        if "Source file/user checksum mismatch" in failReason:
          filesToFail.append( fileID )
          continue
        if self.missingSource( failReason ):
          log.error( "The source SURL does not exist.", "%s %s" % ( lfn, oFTSRequest.getSourceSURL( lfn ) ) )
          filesToFail.append( fileID )
        else:
          filesToRetry.append( fileID )
        log.error( "Failed to replicate file on channel.", "%s %s" % ( channelID, failReason ) )
        fileToFTSUpdates.append( ( fileID, "Failed", failReason, 0, 0 ) )

    # # update TransferDB.FileToFTS table
    updateFileToFTS = self.updateFileToFTS( ftsReqID, channelID,
                                            filesToRetry, filesToFail,
                                            completedFileIDs, fileToFTSUpdates )

    if updateFileToFTS["OK"] and updateFileToFTS["Value"]:
      res = oFTSRequest.finalize()
      if not res["OK"]:
        log.error( "Failed to perform the finalization for the FTS request", res["Message"] )
        return res

      log.info( 'Adding logging event for FTS request' )
      # Now set the FTSReq status to terminal so that it is not monitored again
      res = self.transferDB.addLoggingEvent( ftsReqID, 'Finished' )
      if not res['OK']:
        log.error( 'Failed to add logging event for FTS Request', res['Message'] )

      # update TransferDB.FileToCat table
      updateFileToCat = self.updateFileToCat( oFTSRequest, channelID, fileIDDict, completedFiles, filesToFail )
      if not updateFileToCat["OK"]:
        log.error( updateFileToCat["Message"] )

      log.debug( "Updating FTS request status" )
      res = self.transferDB.setFTSReqStatus( ftsReqID, 'Finished' )
      if not res['OK']:
        log.error( 'Failed update FTS Request status', res['Message'] )
    return S_OK()

  def updateFileToFTS( self, ftsReqID, channelID, filesToRetry, filesToFail, completedFileIDs, fileToFTSUpdates ):
    """ update TransferDB.FileToFTS table for finished request

    :param int ftsReqID: FTSReq.FTSReqID
    :param int channelID: FTSReq.ChannelID
    :param list filesToRetry: FileIDs to retry
    :param list filesToFail: FileIDs for failed files
    :param list completedFileIDs: files completed
    :param list fileToFTSUpdates: ( fileID, status, reason, retries, transferTime ) tuples
    """
    log = gLogger.getSubLogger( "@%s" % ftsReqID )

    allUpdated = True

    res = self.transferDB.resetFileChannelStatus( channelID, filesToRetry ) if filesToRetry else S_OK()
    if not res["OK"]:
      log.error( "Failed to update the Channel table for file to retry.", res["Message"] )
      allUpdated = False

    for fileID in filesToFail:
      log.info( "Updating the Channel table for files to reschedule" )
      res = self.transferDB.setFileToReschedule( fileID )
      if not res["OK"]:
        log.error( "Failed to update Channel table for failed files.", res["Message"] )
        allUpdated = False
      elif res["Value"] == "max reschedule attempt reached":
        # BUG FIX: the original format string had no %s placeholder, which
        # raises TypeError ("not all arguments converted") at runtime.
        log.error( "setting Channel status to 'Failed' : %s" % res["Value"] )
        res = self.transferDB.setFileChannelStatus( channelID, fileID, 'Failed' )
        if not res["OK"]:
          log.error( "Failed to update Channel table for failed files.", res["Message"] )
          allUpdated = False

    if completedFileIDs:
      res = self.transferDB.updateCompletedChannelStatus( channelID, completedFileIDs )
      if not res["OK"]:
        log.error( "Failed to update the Channel table for successful files.", res["Message"] )
        allUpdated = False
      res = self.transferDB.updateAncestorChannelStatus( channelID, completedFileIDs )
      if not res["OK"]:
        log.error( 'Failed to update the Channel table for ancestors of successful files.', res['Message'] )
        allUpdated = False

    if fileToFTSUpdates:
      res = self.transferDB.setFileToFTSFileAttributes( ftsReqID, channelID, fileToFTSUpdates )
      if not res["OK"]:
        log.error( "Failed to update the FileToFTS table for files.", res["Message"] )
        allUpdated = False

    return S_OK( allUpdated )

  def updateFileToCat( self, oFTSRequest, channelID, fileIDDict, completedFiles, filesToFail ):
    """ update TransferDB.FileToCat table for finished request

    :param FTSRequest oFTSRequest: FTSRequest instance
    :param int ftsReqID: FTSReq.FTSReqID
    :param dict fileIDDict: fileIDs dictionary
    :param int channelID: FTSReq.ChannelID
    """
    res = oFTSRequest.getFailedRegistrations()
    if not res["OK"]:
      # Without the failed-registration list nothing below can be
      # classified reliably, so propagate the error instead of crashing
      # on a missing "Value" key.
      return res
    failedRegistrations = res["Value"]
    regFailedFileIDs = []
    regDoneFileIDs = []
    regForgetFileIDs = []

    for fileID, fileDict in fileIDDict.items():
      lfn = fileDict['LFN']
      if lfn in failedRegistrations:
        regFailedFileIDs.append( fileID )
        # if the LFN appears more than once, FileToCat needs to be reset only once
        del failedRegistrations[lfn]
      elif lfn in completedFiles:
        regDoneFileIDs.append( fileID )
      elif fileID in filesToFail:
        regForgetFileIDs.append( fileID )

    res = self.transferDB.setRegistrationWaiting( channelID, regFailedFileIDs ) if regFailedFileIDs else S_OK()
    if not res["OK"]:
      res["Message"] = "Failed to reset entries in FileToCat: %s" % res["Message"]
      return res

    res = self.transferDB.setRegistrationDone( channelID, regDoneFileIDs ) if regDoneFileIDs else S_OK()
    if not res["OK"]:
      res["Message"] = "Failed to set entries Done in FileToCat: %s" % res["Message"]
      return res

    # This entries could also be set to Failed, but currently there is no method to do so.
    res = self.transferDB.setRegistrationDone( channelID, regForgetFileIDs ) if regForgetFileIDs else S_OK()
    if not res["OK"]:
      res["Message"] = "Failed to set entries Done in FileToCat: %s" % res["Message"]
      return res

    return S_OK()

  @classmethod
  def missingSource( cls, failReason ):
    """ check if message sent by FTS server is concerning missing source file

    :param str failReason: message sent by FTS server
    """
    for error in cls.missingSourceErrors:
      if error.search( failReason ):
        return 1
    return 0
|
avedaee/DIRAC
|
DataManagementSystem/Agent/FTSMonitorAgent.py
|
Python
|
gpl-3.0
| 17,035
|
[
"DIRAC"
] |
88f90422a87c6f67f7cb88814bf22b494dcc0c45d1f2ddb83386c4d6ee5a48e6
|
# Copyright 2017 Antoine Miech All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a collection of models which operate on variable-length sequences.
"""
import math
import models
import video_level_models
import tensorflow as tf
import model_utils as utils
import tensorflow.contrib.slim as slim
from tensorflow import flags
import scipy.io as sio
import numpy as np
# Command-line flags consumed by the frame-level model classes defined in
# this module (pooling/cluster sizes, activations, RNN dimensions, ...).
FLAGS = flags.FLAGS

# --- gating / NetVLAD-variant selection ---
flags.DEFINE_bool("gating_remove_diag", False,
                  "Remove diag for self gating")
flags.DEFINE_bool("lightvlad", False,
                  "Light or full NetVLAD")
flags.DEFINE_bool("vlagd", False,
                  "vlagd of vlad")

# --- DBoF options ---
flags.DEFINE_integer("iterations", 30,
                     "Number of frames per batch for DBoF.")
flags.DEFINE_bool("dbof_add_batch_norm", True,
                  "Adds batch normalization to the DBoF model.")
flags.DEFINE_bool(
    "sample_random_frames", True,
    "If true samples random frames (for frame level models). If false, a random"
    "sequence of frames is sampled instead.")
flags.DEFINE_integer("dbof_cluster_size", 16384,
                     "Number of units in the DBoF cluster layer.")
flags.DEFINE_integer("dbof_hidden_size", 2048,
                     "Number of units in the DBoF hidden layer.")
flags.DEFINE_bool("dbof_relu", True, 'add ReLU to hidden layer')
flags.DEFINE_integer("dbof_var_features", 0,
                     "Variance features on top of Dbof cluster layer.")
flags.DEFINE_string("dbof_activation", "relu", 'dbof activation')
flags.DEFINE_bool("softdbof_maxpool", False, 'add max pool to soft dbof')

# --- NetVLAD options ---
flags.DEFINE_integer("netvlad_cluster_size", 64,
                     "Number of units in the NetVLAD cluster layer.")
flags.DEFINE_bool("netvlad_relu", True, 'add ReLU to hidden layer')
flags.DEFINE_integer("netvlad_dimred", -1,
                     "NetVLAD output dimension reduction")
flags.DEFINE_integer("gatednetvlad_dimred", 1024,
                     "GatedNetVLAD output dimension reduction")
flags.DEFINE_bool("gating", False,
                  "Gating for NetVLAD")
flags.DEFINE_integer("hidden_size", 1024,
                     "size of hidden layer for BasicStatModel.")
flags.DEFINE_integer("netvlad_hidden_size", 1024,
                     "Number of units in the NetVLAD hidden layer.")
flags.DEFINE_integer("netvlad_hidden_size_video", 1024,
                     "Number of units in the NetVLAD video hidden layer.")
flags.DEFINE_integer("netvlad_hidden_size_audio", 64,
                     "Number of units in the NetVLAD audio hidden layer.")
flags.DEFINE_bool("netvlad_add_batch_norm", True,
                  "Adds batch normalization to the DBoF model.")

# --- NetFV options ---
flags.DEFINE_integer("fv_cluster_size", 64,
                     "Number of units in the NetVLAD cluster layer.")
flags.DEFINE_integer("fv_hidden_size", 2048,
                     "Number of units in the NetVLAD hidden layer.")
flags.DEFINE_bool("fv_relu", True,
                  "ReLU after the NetFV hidden layer.")
flags.DEFINE_bool("fv_couple_weights", True,
                  "Coupling cluster weights or not")
flags.DEFINE_float("fv_coupling_factor", 0.01,
                   "Coupling factor")

# --- pooling / classifier selection ---
flags.DEFINE_string("dbof_pooling_method", "max",
                    "The pooling method used in the DBoF cluster layer. "
                    "Choices are 'average' and 'max'.")
flags.DEFINE_string("video_level_classifier_model", "MoeModel",
                    "Some Frame-Level models can be decomposed into a "
                    "generalized pooling operation followed by a "
                    "classifier layer")

# --- recurrent model options ---
flags.DEFINE_integer("lstm_cells", 1024, "Number of LSTM cells.")
flags.DEFINE_integer("lstm_layers", 2, "Number of LSTM layers.")
flags.DEFINE_integer("lstm_cells_video", 1024, "Number of LSTM cells (video).")
flags.DEFINE_integer("lstm_cells_audio", 128, "Number of LSTM cells (audio).")
flags.DEFINE_integer("gru_cells", 1024, "Number of GRU cells.")
flags.DEFINE_integer("gru_cells_video", 1024, "Number of GRU cells (video).")
flags.DEFINE_integer("gru_cells_audio", 128, "Number of GRU cells (audio).")
flags.DEFINE_integer("gru_layers", 2, "Number of GRU layers.")
flags.DEFINE_bool("lstm_random_sequence", False,
                  "Random sequence input for lstm.")
flags.DEFINE_bool("gru_random_sequence", False,
                  "Random sequence input for gru.")
flags.DEFINE_bool("gru_backward", False, "BW reading for GRU")
flags.DEFINE_bool("lstm_backward", False, "BW reading for LSTM")
flags.DEFINE_bool("fc_dimred", True, "Adding FC dimred after pooling")
class LightVLAD():
  """Lightweight NetVLAD variant: soft-assigns frames to clusters but skips
  the residual (anchor-subtraction) step of full NetVLAD.
  """

  def __init__(self, feature_size, max_frames, cluster_size, add_batch_norm, is_training):
    self.feature_size = feature_size
    self.max_frames = max_frames
    self.is_training = is_training
    self.add_batch_norm = add_batch_norm
    self.cluster_size = cluster_size

  def forward(self, reshaped_input):
    """Pools `reshaped_input` (frames x feature_size) into one
    cluster_size * feature_size descriptor per video.
    """
    cluster_weights = tf.get_variable("cluster_weights",
        [self.feature_size, self.cluster_size],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
    activation = tf.matmul(reshaped_input, cluster_weights)
    if self.add_batch_norm:
      activation = slim.batch_norm(
          activation,
          center=True,
          scale=True,
          is_training=self.is_training,
          scope="cluster_bn")
    else:
      # BUG FIX: original referenced the undefined local `cluster_size`
      # (NameError when add_batch_norm is False); use the instance field.
      cluster_biases = tf.get_variable("cluster_biases",
          [self.cluster_size],
          initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
      tf.summary.histogram("cluster_biases", cluster_biases)
      activation += cluster_biases
    activation = tf.nn.softmax(activation)
    activation = tf.reshape(activation, [-1, self.max_frames, self.cluster_size])
    activation = tf.transpose(activation, perm=[0, 2, 1])
    reshaped_input = tf.reshape(reshaped_input, [-1, self.max_frames, self.feature_size])
    # Weighted sum of frame features per cluster, then intra- and
    # global L2 normalization.
    vlad = tf.matmul(activation, reshaped_input)
    vlad = tf.transpose(vlad, perm=[0, 2, 1])
    vlad = tf.nn.l2_normalize(vlad, 1)
    vlad = tf.reshape(vlad, [-1, self.cluster_size * self.feature_size])
    vlad = tf.nn.l2_normalize(vlad, 1)
    return vlad
class NetVLAD():
  """Full NetVLAD pooling: soft assignment of frames to clusters followed by
  residual aggregation against learned cluster anchors.
  """

  def __init__(self, feature_size, max_frames, cluster_size, add_batch_norm, is_training):
    self.feature_size = feature_size
    self.max_frames = max_frames
    self.is_training = is_training
    self.add_batch_norm = add_batch_norm
    self.cluster_size = cluster_size

  def forward(self, reshaped_input):
    """Pools `reshaped_input` (frames x feature_size) into one
    cluster_size * feature_size VLAD descriptor per video.
    """
    cluster_weights = tf.get_variable("cluster_weights",
        [self.feature_size, self.cluster_size],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
    tf.summary.histogram("cluster_weights", cluster_weights)
    activation = tf.matmul(reshaped_input, cluster_weights)
    if self.add_batch_norm:
      activation = slim.batch_norm(
          activation,
          center=True,
          scale=True,
          is_training=self.is_training,
          scope="cluster_bn")
    else:
      # BUG FIX: original referenced the undefined local `cluster_size`
      # (NameError when add_batch_norm is False); use the instance field.
      cluster_biases = tf.get_variable("cluster_biases",
          [self.cluster_size],
          initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
      tf.summary.histogram("cluster_biases", cluster_biases)
      activation += cluster_biases
    activation = tf.nn.softmax(activation)
    tf.summary.histogram("cluster_output", activation)
    activation = tf.reshape(activation, [-1, self.max_frames, self.cluster_size])
    # a = soft-count of each cluster times its anchor; subtracted below to
    # form the VLAD residuals.
    a_sum = tf.reduce_sum(activation, -2, keep_dims=True)
    cluster_weights2 = tf.get_variable("cluster_weights2",
        [1, self.feature_size, self.cluster_size],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
    a = tf.multiply(a_sum, cluster_weights2)
    activation = tf.transpose(activation, perm=[0, 2, 1])
    reshaped_input = tf.reshape(reshaped_input, [-1, self.max_frames, self.feature_size])
    vlad = tf.matmul(activation, reshaped_input)
    vlad = tf.transpose(vlad, perm=[0, 2, 1])
    vlad = tf.subtract(vlad, a)
    # Intra-normalization followed by global L2 normalization.
    vlad = tf.nn.l2_normalize(vlad, 1)
    vlad = tf.reshape(vlad, [-1, self.cluster_size * self.feature_size])
    vlad = tf.nn.l2_normalize(vlad, 1)
    return vlad
class NetVLAGD():
  """NetVLAD variant with a learned sigmoid gate applied to the aggregated
  cluster descriptors (no residual subtraction).
  """

  def __init__(self, feature_size, max_frames, cluster_size, add_batch_norm, is_training):
    self.feature_size = feature_size
    self.max_frames = max_frames
    self.is_training = is_training
    self.add_batch_norm = add_batch_norm
    self.cluster_size = cluster_size

  def forward(self, reshaped_input):
    """Pools `reshaped_input` (frames x feature_size) into one gated
    cluster_size * feature_size descriptor per video.
    """
    cluster_weights = tf.get_variable("cluster_weights",
        [self.feature_size, self.cluster_size],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
    activation = tf.matmul(reshaped_input, cluster_weights)
    if self.add_batch_norm:
      activation = slim.batch_norm(
          activation,
          center=True,
          scale=True,
          is_training=self.is_training,
          scope="cluster_bn")
    else:
      # BUG FIX: original referenced the undefined local `cluster_size`
      # (NameError when add_batch_norm is False), and never applied the
      # bias it created.  This branch previously crashed, so applying the
      # bias (as the sibling NetVLAD classes do) cannot break working code.
      cluster_biases = tf.get_variable("cluster_biases",
          [self.cluster_size],
          initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
      activation += cluster_biases
    activation = tf.nn.softmax(activation)
    activation = tf.reshape(activation, [-1, self.max_frames, self.cluster_size])
    gate_weights = tf.get_variable("gate_weights",
        [1, self.cluster_size, self.feature_size],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
    gate_weights = tf.sigmoid(gate_weights)
    activation = tf.transpose(activation, perm=[0, 2, 1])
    reshaped_input = tf.reshape(reshaped_input, [-1, self.max_frames, self.feature_size])
    vlagd = tf.matmul(activation, reshaped_input)
    vlagd = tf.multiply(vlagd, gate_weights)
    vlagd = tf.transpose(vlagd, perm=[0, 2, 1])
    vlagd = tf.nn.l2_normalize(vlagd, 1)
    vlagd = tf.reshape(vlagd, [-1, self.cluster_size * self.feature_size])
    vlagd = tf.nn.l2_normalize(vlagd, 1)
    return vlagd
class GatedDBoF():
  """Deep Bag-of-Frames pooling with a learned sigmoid gate on the summed
  cluster activations.
  """

  def __init__(self, feature_size, max_frames, cluster_size, max_pool, add_batch_norm, is_training):
    self.feature_size = feature_size
    self.max_frames = max_frames
    self.is_training = is_training
    self.add_batch_norm = add_batch_norm
    self.cluster_size = cluster_size
    self.max_pool = max_pool

  def forward(self, reshaped_input):
    """Pools `reshaped_input` (frames x feature_size) into a gated
    cluster_size descriptor per video.
    """
    feature_size = self.feature_size
    cluster_size = self.cluster_size
    add_batch_norm = self.add_batch_norm
    max_frames = self.max_frames
    is_training = self.is_training
    max_pool = self.max_pool

    cluster_weights = tf.get_variable("cluster_weights",
        [feature_size, cluster_size],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
    tf.summary.histogram("cluster_weights", cluster_weights)
    activation = tf.matmul(reshaped_input, cluster_weights)
    if add_batch_norm:
      activation = slim.batch_norm(
          activation,
          center=True,
          scale=True,
          is_training=is_training,
          scope="cluster_bn")
    else:
      # BUG FIX: original passed tf.random_normal(stddev=...) as the
      # initializer, which is a TypeError; use tf.random_normal_initializer.
      cluster_biases = tf.get_variable("cluster_biases",
          [cluster_size],
          initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
      tf.summary.histogram("cluster_biases", cluster_biases)
      activation += cluster_biases
    activation = tf.nn.softmax(activation)
    activation = tf.reshape(activation, [-1, max_frames, cluster_size])

    activation_sum = tf.reduce_sum(activation, 1)
    activation_max = tf.reduce_max(activation, 1)
    activation_max = tf.nn.l2_normalize(activation_max, 1)

    # Gate computed from the max-pooled activations through a dimension
    # reduction and a second cluster projection.
    dim_red = tf.get_variable("dim_red",
        [cluster_size, feature_size],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
    cluster_weights_2 = tf.get_variable("cluster_weights_2",
        [feature_size, cluster_size],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
    tf.summary.histogram("cluster_weights_2", cluster_weights_2)
    activation = tf.matmul(activation_max, dim_red)
    activation = tf.matmul(activation, cluster_weights_2)
    if add_batch_norm:
      activation = slim.batch_norm(
          activation,
          center=True,
          scale=True,
          is_training=is_training,
          scope="cluster_bn_2")
    else:
      # BUG FIX: same tf.random_normal -> tf.random_normal_initializer fix.
      cluster_biases = tf.get_variable("cluster_biases_2",
          [cluster_size],
          initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
      tf.summary.histogram("cluster_biases_2", cluster_biases)
      activation += cluster_biases
    activation = tf.sigmoid(activation)

    activation = tf.multiply(activation, activation_sum)
    activation = tf.nn.l2_normalize(activation, 1)
    return activation
class SoftDBoF():
  """Deep Bag-of-Frames pooling with softmax assignments, sum pooling and an
  optional max-pooled branch concatenated to the output.
  """

  def __init__(self, feature_size, max_frames, cluster_size, max_pool, add_batch_norm, is_training):
    self.feature_size = feature_size
    self.max_frames = max_frames
    self.is_training = is_training
    self.add_batch_norm = add_batch_norm
    self.cluster_size = cluster_size
    self.max_pool = max_pool

  def forward(self, reshaped_input):
    """Pools `reshaped_input` (frames x feature_size) into a cluster_size
    (or 2*cluster_size with max_pool) descriptor per video.
    """
    feature_size = self.feature_size
    cluster_size = self.cluster_size
    add_batch_norm = self.add_batch_norm
    max_frames = self.max_frames
    is_training = self.is_training
    max_pool = self.max_pool

    cluster_weights = tf.get_variable("cluster_weights",
        [feature_size, cluster_size],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
    tf.summary.histogram("cluster_weights", cluster_weights)
    activation = tf.matmul(reshaped_input, cluster_weights)
    if add_batch_norm:
      activation = slim.batch_norm(
          activation,
          center=True,
          scale=True,
          is_training=is_training,
          scope="cluster_bn")
    else:
      # BUG FIX: original passed tf.random_normal(stddev=...) as the
      # initializer, which is a TypeError; use tf.random_normal_initializer.
      cluster_biases = tf.get_variable("cluster_biases",
          [cluster_size],
          initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
      tf.summary.histogram("cluster_biases", cluster_biases)
      activation += cluster_biases
    activation = tf.nn.softmax(activation)
    activation = tf.reshape(activation, [-1, max_frames, cluster_size])

    activation_sum = tf.reduce_sum(activation, 1)
    activation_sum = tf.nn.l2_normalize(activation_sum, 1)

    if max_pool:
      activation_max = tf.reduce_max(activation, 1)
      activation_max = tf.nn.l2_normalize(activation_max, 1)
      activation = tf.concat([activation_sum, activation_max], 1)
    else:
      activation = activation_sum
    return activation
class DBoF():
  """Deep Bag-of-Frames pooling with a configurable activation ('glu' or
  'relu') and concatenated average/max frame pooling.
  """

  def __init__(self, feature_size, max_frames, cluster_size, activation, add_batch_norm, is_training):
    self.feature_size = feature_size
    self.max_frames = max_frames
    self.is_training = is_training
    self.add_batch_norm = add_batch_norm
    self.cluster_size = cluster_size
    # Activation name ('glu' or 'relu') selected via the dbof_activation flag.
    self.activation = activation

  def forward(self, reshaped_input):
    """Pools `reshaped_input` (frames x feature_size) into a
    2*cluster_size descriptor (avg-pool concat max-pool) per video.
    """
    feature_size = self.feature_size
    cluster_size = self.cluster_size
    add_batch_norm = self.add_batch_norm
    max_frames = self.max_frames
    is_training = self.is_training

    cluster_weights = tf.get_variable("cluster_weights",
        [feature_size, cluster_size],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
    tf.summary.histogram("cluster_weights", cluster_weights)
    activation = tf.matmul(reshaped_input, cluster_weights)
    if add_batch_norm:
      activation = slim.batch_norm(
          activation,
          center=True,
          scale=True,
          is_training=is_training,
          scope="cluster_bn")
    else:
      # BUG FIX: original passed tf.random_normal(stddev=...) as the
      # initializer, which is a TypeError; use tf.random_normal_initializer.
      cluster_biases = tf.get_variable("cluster_biases",
          [cluster_size],
          initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
      tf.summary.histogram("cluster_biases", cluster_biases)
      activation += cluster_biases
    # BUG FIX: the original compared the local *tensor* `activation` to the
    # strings 'glu'/'relu' (the tensor shadowed the constructor argument),
    # so no activation was ever applied; compare the configured name instead.
    if self.activation == 'glu':
      # NOTE(review): gated linear unit splits the cluster dimension in half;
      # the range-based slicing below is kept from the original — confirm it
      # works with the installed TF version before enabling 'glu'.
      space_ind = range(cluster_size // 2)
      gate_ind = range(cluster_size // 2, cluster_size)
      gates = tf.sigmoid(activation[:, gate_ind])
      activation = tf.multiply(activation[:, space_ind], gates)
    elif self.activation == 'relu':
      activation = tf.nn.relu6(activation)
    tf.summary.histogram("cluster_output", activation)

    activation = tf.reshape(activation, [-1, max_frames, cluster_size])

    avg_activation = utils.FramePooling(activation, 'average')
    avg_activation = tf.nn.l2_normalize(avg_activation, 1)
    max_activation = utils.FramePooling(activation, 'max')
    max_activation = tf.nn.l2_normalize(max_activation, 1)
    return tf.concat([avg_activation, max_activation], 1)
class NetFV():
    """Trainable Fisher-Vector-style pooling (NetFV).

    Emulates a Gaussian-mixture Fisher Vector: frames are softly assigned to
    clusters and both first-order (fv1) and second-order (fv2) residual
    statistics w.r.t. learned cluster means and variances are aggregated.
    """

    def __init__(self, feature_size, max_frames, cluster_size,
                 add_batch_norm, is_training):
        self.feature_size = feature_size
        self.max_frames = max_frames
        self.is_training = is_training
        self.add_batch_norm = add_batch_norm
        self.cluster_size = cluster_size

    def forward(self, reshaped_input):
        """Returns [batch, 2 * cluster_size * feature_size]: concatenation of
        L2-normalized first- and second-order FV statistics."""
        cluster_weights = tf.get_variable(
            "cluster_weights",
            [self.feature_size, self.cluster_size],
            initializer=tf.random_normal_initializer(
                stddev=1 / math.sqrt(self.feature_size)))
        covar_weights = tf.get_variable(
            "covar_weights",
            [self.feature_size, self.cluster_size],
            initializer=tf.random_normal_initializer(
                mean=1.0, stddev=1 / math.sqrt(self.feature_size)))

        # Keep per-cluster variances strictly positive.
        covar_weights = tf.square(covar_weights)
        eps = tf.constant([1e-6])
        covar_weights = tf.add(covar_weights, eps)

        tf.summary.histogram("cluster_weights", cluster_weights)
        activation = tf.matmul(reshaped_input, cluster_weights)
        if self.add_batch_norm:
            activation = slim.batch_norm(
                activation,
                center=True,
                scale=True,
                is_training=self.is_training,
                scope="cluster_bn")
        else:
            # BUG FIX: tf.random_normal is an op, not an initializer;
            # tf.random_normal_initializer is required here.
            cluster_biases = tf.get_variable(
                "cluster_biases",
                [self.cluster_size],
                initializer=tf.random_normal_initializer(
                    stddev=1 / math.sqrt(self.feature_size)))
            tf.summary.histogram("cluster_biases", cluster_biases)
            activation += cluster_biases

        activation = tf.nn.softmax(activation)
        tf.summary.histogram("cluster_output", activation)
        activation = tf.reshape(
            activation, [-1, self.max_frames, self.cluster_size])

        # Per-cluster soft counts, kept as [batch, 1, cluster_size].
        a_sum = tf.reduce_sum(activation, -2, keep_dims=True)

        if not FLAGS.fv_couple_weights:
            cluster_weights2 = tf.get_variable(
                "cluster_weights2",
                [1, self.feature_size, self.cluster_size],
                initializer=tf.random_normal_initializer(
                    stddev=1 / math.sqrt(self.feature_size)))
        else:
            # Couple the FV "means" to the assignment weights by a scalar.
            cluster_weights2 = tf.scalar_mul(
                FLAGS.fv_coupling_factor, cluster_weights)

        a = tf.multiply(a_sum, cluster_weights2)

        activation = tf.transpose(activation, perm=[0, 2, 1])
        reshaped_input = tf.reshape(
            reshaped_input, [-1, self.max_frames, self.feature_size])

        fv1 = tf.matmul(activation, reshaped_input)
        fv1 = tf.transpose(fv1, perm=[0, 2, 1])

        # Second-order FV statistics.
        a2 = tf.multiply(a_sum, tf.square(cluster_weights2))
        b2 = tf.multiply(fv1, cluster_weights2)
        fv2 = tf.matmul(activation, tf.square(reshaped_input))
        fv2 = tf.transpose(fv2, perm=[0, 2, 1])
        fv2 = tf.add_n([a2, fv2, tf.scalar_mul(-2, b2)])
        fv2 = tf.divide(fv2, tf.square(covar_weights))
        fv2 = tf.subtract(fv2, a_sum)
        # NOTE: the original repeated the reshape + l2_normalize pair twice;
        # the second pass was an idempotent no-op and has been removed.
        fv2 = tf.reshape(fv2, [-1, self.cluster_size * self.feature_size])
        fv2 = tf.nn.l2_normalize(fv2, 1)

        fv1 = tf.subtract(fv1, a)
        fv1 = tf.divide(fv1, covar_weights)
        fv1 = tf.nn.l2_normalize(fv1, 1)
        fv1 = tf.reshape(fv1, [-1, self.cluster_size * self.feature_size])
        fv1 = tf.nn.l2_normalize(fv1, 1)

        return tf.concat([fv1, fv2], 1)
class NetVLADModelLF(models.BaseModel):
    """Creates a NetVLAD based model.

    Video and audio features are pooled separately with a (Light/Gated)
    NetVLAD layer, concatenated, projected through a hidden layer, optionally
    gated, and fed to a configurable video-level classifier.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
                   input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
                  frames for each video (before padding).
    Returns:
      A dictionary with a tensor containing the probability predictions of the
      model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """

    def create_model(self,
                     model_input,
                     vocab_size,
                     num_frames,
                     iterations=None,
                     add_batch_norm=None,
                     sample_random_frames=None,
                     cluster_size=None,
                     hidden_size=None,
                     is_training=True,
                     **unused_params):
        iterations = iterations or FLAGS.iterations
        add_batch_norm = add_batch_norm or FLAGS.netvlad_add_batch_norm
        random_frames = sample_random_frames or FLAGS.sample_random_frames
        cluster_size = cluster_size or FLAGS.netvlad_cluster_size
        hidden1_size = hidden_size or FLAGS.netvlad_hidden_size
        relu = FLAGS.netvlad_relu
        dimred = FLAGS.netvlad_dimred
        gating = FLAGS.gating
        remove_diag = FLAGS.gating_remove_diag
        lightvlad = FLAGS.lightvlad
        vlagd = FLAGS.vlagd

        num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
        if random_frames:
            model_input = utils.SampleRandomFrames(model_input, num_frames,
                                                   iterations)
        else:
            model_input = utils.SampleRandomSequence(model_input, num_frames,
                                                     iterations)

        max_frames = model_input.get_shape().as_list()[1]
        feature_size = model_input.get_shape().as_list()[2]
        reshaped_input = tf.reshape(model_input, [-1, feature_size])

        # Audio gets half the clusters of video. Integer division keeps the
        # variable shape an int under Python 3.
        if lightvlad:
            video_NetVLAD = LightVLAD(1024, max_frames, cluster_size,
                                      add_batch_norm, is_training)
            audio_NetVLAD = LightVLAD(128, max_frames, cluster_size // 2,
                                      add_batch_norm, is_training)
        elif vlagd:
            video_NetVLAD = NetVLAGD(1024, max_frames, cluster_size,
                                     add_batch_norm, is_training)
            audio_NetVLAD = NetVLAGD(128, max_frames, cluster_size // 2,
                                     add_batch_norm, is_training)
        else:
            video_NetVLAD = NetVLAD(1024, max_frames, cluster_size,
                                    add_batch_norm, is_training)
            audio_NetVLAD = NetVLAD(128, max_frames, cluster_size // 2,
                                    add_batch_norm, is_training)

        if add_batch_norm:  # and not lightvlad:
            reshaped_input = slim.batch_norm(
                reshaped_input,
                center=True,
                scale=True,
                is_training=is_training,
                scope="input_bn")

        # First 1024 features are video, the remaining 128 are audio.
        with tf.variable_scope("video_VLAD"):
            vlad_video = video_NetVLAD.forward(reshaped_input[:, 0:1024])
        with tf.variable_scope("audio_VLAD"):
            vlad_audio = audio_NetVLAD.forward(reshaped_input[:, 1024:])

        vlad = tf.concat([vlad_video, vlad_audio], 1)
        vlad_dim = vlad.get_shape().as_list()[1]

        hidden1_weights = tf.get_variable(
            "hidden1_weights",
            [vlad_dim, hidden1_size],
            initializer=tf.random_normal_initializer(
                stddev=1 / math.sqrt(cluster_size)))
        activation = tf.matmul(vlad, hidden1_weights)

        if add_batch_norm and relu:
            activation = slim.batch_norm(
                activation,
                center=True,
                scale=True,
                is_training=is_training,
                scope="hidden1_bn")
        else:
            hidden1_biases = tf.get_variable(
                "hidden1_biases",
                [hidden1_size],
                initializer=tf.random_normal_initializer(stddev=0.01))
            tf.summary.histogram("hidden1_biases", hidden1_biases)
            activation += hidden1_biases

        if relu:
            activation = tf.nn.relu6(activation)

        if gating:
            # Context gating on the hidden representation.
            gating_weights = tf.get_variable(
                "gating_weights_2",
                [hidden1_size, hidden1_size],
                initializer=tf.random_normal_initializer(
                    stddev=1 / math.sqrt(hidden1_size)))
            gates = tf.matmul(activation, gating_weights)

            if remove_diag:
                # Removes diagonal coefficients so a unit cannot gate itself.
                diagonals = tf.matrix_diag_part(gating_weights)
                gates = gates - tf.multiply(diagonals, activation)

            if add_batch_norm:
                gates = slim.batch_norm(
                    gates,
                    center=True,
                    scale=True,
                    is_training=is_training,
                    scope="gating_bn")
            else:
                # BUG FIX: the original used tf.random_normal (an op, not an
                # initializer) and shape [cluster_size]; gates have width
                # hidden1_size, so the bias must match that shape.
                gating_biases = tf.get_variable(
                    "gating_biases",
                    [hidden1_size],
                    initializer=tf.random_normal_initializer(
                        stddev=1 / math.sqrt(hidden1_size)))
                gates += gating_biases

            gates = tf.sigmoid(gates)
            activation = tf.multiply(activation, gates)

        aggregated_model = getattr(video_level_models,
                                   FLAGS.video_level_classifier_model)
        return aggregated_model().create_model(
            model_input=activation,
            vocab_size=vocab_size,
            is_training=is_training,
            **unused_params)
class DbofModelLF(models.BaseModel):
    """Creates a Deep Bag of Frames model.

    The model projects the features for each frame into a higher dimensional
    'clustering' space, pools across frames in that space, and then
    uses a configurable video-level model to classify the now aggregated
    features. The model will randomly sample either frames or sequences of
    frames during training to speed up convergence.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
                   input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
                  frames for each video (before padding).
    Returns:
      A dictionary with a tensor containing the probability predictions of the
      model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """

    def create_model(self,
                     model_input,
                     vocab_size,
                     num_frames,
                     iterations=None,
                     add_batch_norm=None,
                     sample_random_frames=None,
                     cluster_size=None,
                     hidden_size=None,
                     is_training=True,
                     **unused_params):
        iterations = iterations or FLAGS.iterations
        add_batch_norm = add_batch_norm or FLAGS.dbof_add_batch_norm
        random_frames = sample_random_frames or FLAGS.sample_random_frames
        cluster_size = cluster_size or FLAGS.dbof_cluster_size
        hidden1_size = hidden_size or FLAGS.dbof_hidden_size
        relu = FLAGS.dbof_relu
        cluster_activation = FLAGS.dbof_activation

        num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
        if random_frames:
            model_input = utils.SampleRandomFrames(model_input, num_frames,
                                                   iterations)
        else:
            model_input = utils.SampleRandomSequence(model_input, num_frames,
                                                     iterations)

        max_frames = model_input.get_shape().as_list()[1]
        feature_size = model_input.get_shape().as_list()[2]
        reshaped_input = tf.reshape(model_input, [-1, feature_size])
        tf.summary.histogram("input_hist", reshaped_input)

        if cluster_activation == 'glu':
            # GLU halves the width, so double the projection size up front.
            cluster_size = 2 * cluster_size

        # BUG FIX: cluster_size/8 is a float under Python 3; tf.get_variable
        # shapes require ints, so use integer division.
        video_Dbof = DBoF(1024, max_frames, cluster_size,
                          cluster_activation, add_batch_norm, is_training)
        audio_Dbof = DBoF(128, max_frames, cluster_size // 8,
                          cluster_activation, add_batch_norm, is_training)

        if add_batch_norm:
            reshaped_input = slim.batch_norm(
                reshaped_input,
                center=True,
                scale=True,
                is_training=is_training,
                scope="input_bn")

        # First 1024 features are video, the remaining 128 are audio.
        with tf.variable_scope("video_DBOF"):
            dbof_video = video_Dbof.forward(reshaped_input[:, 0:1024])
        with tf.variable_scope("audio_DBOF"):
            dbof_audio = audio_Dbof.forward(reshaped_input[:, 1024:])

        dbof = tf.concat([dbof_video, dbof_audio], 1)
        dbof_dim = dbof.get_shape().as_list()[1]

        hidden1_weights = tf.get_variable(
            "hidden1_weights",
            [dbof_dim, hidden1_size],
            initializer=tf.random_normal_initializer(
                stddev=1 / math.sqrt(cluster_size)))
        tf.summary.histogram("hidden1_weights", hidden1_weights)
        activation = tf.matmul(dbof, hidden1_weights)

        if add_batch_norm and relu:
            activation = slim.batch_norm(
                activation,
                center=True,
                scale=True,
                is_training=is_training,
                scope="hidden1_bn")
        else:
            hidden1_biases = tf.get_variable(
                "hidden1_biases",
                [hidden1_size],
                initializer=tf.random_normal_initializer(stddev=0.01))
            tf.summary.histogram("hidden1_biases", hidden1_biases)
            activation += hidden1_biases

        if relu:
            activation = tf.nn.relu6(activation)
        tf.summary.histogram("hidden1_output", activation)

        # NOTE(review): unlike the sibling models, the classifier call here
        # does not forward is_training — presumably intentional; confirm.
        aggregated_model = getattr(video_level_models,
                                   FLAGS.video_level_classifier_model)
        return aggregated_model().create_model(
            model_input=activation,
            vocab_size=vocab_size,
            **unused_params)
class GatedDbofModelLF(models.BaseModel):
    """Creates a Gated Deep Bag of Frames model.

    The model projects the features for each frame into a higher dimensional
    'clustering' space, pools across frames in that space, and then
    uses a configurable video-level model to classify the now aggregated
    features. The model will randomly sample either frames or sequences of
    frames during training to speed up convergence.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
                   input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
                  frames for each video (before padding).
    Returns:
      A dictionary with a tensor containing the probability predictions of the
      model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """

    def create_model(self,
                     model_input,
                     vocab_size,
                     num_frames,
                     iterations=None,
                     add_batch_norm=None,
                     sample_random_frames=None,
                     cluster_size=None,
                     hidden_size=None,
                     is_training=True,
                     **unused_params):
        iterations = iterations or FLAGS.iterations
        add_batch_norm = add_batch_norm or FLAGS.dbof_add_batch_norm
        random_frames = sample_random_frames or FLAGS.sample_random_frames
        cluster_size = cluster_size or FLAGS.dbof_cluster_size
        hidden1_size = hidden_size or FLAGS.dbof_hidden_size
        fc_dimred = FLAGS.fc_dimred
        relu = FLAGS.dbof_relu
        max_pool = FLAGS.softdbof_maxpool

        num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
        if random_frames:
            model_input = utils.SampleRandomFrames(model_input, num_frames,
                                                   iterations)
        else:
            model_input = utils.SampleRandomSequence(model_input, num_frames,
                                                     iterations)

        max_frames = model_input.get_shape().as_list()[1]
        feature_size = model_input.get_shape().as_list()[2]
        reshaped_input = tf.reshape(model_input, [-1, feature_size])
        tf.summary.histogram("input_hist", reshaped_input)

        # Gated pooling for video, soft pooling for audio (1/8 the clusters).
        # BUG FIX: cluster_size/8 is a float under Python 3; variable shapes
        # require ints, so use integer division.
        video_Dbof = GatedDBoF(1024, max_frames, cluster_size,
                               max_pool, add_batch_norm, is_training)
        audio_Dbof = SoftDBoF(128, max_frames, cluster_size // 8,
                              max_pool, add_batch_norm, is_training)

        if add_batch_norm:
            reshaped_input = slim.batch_norm(
                reshaped_input,
                center=True,
                scale=True,
                is_training=is_training,
                scope="input_bn")

        # First 1024 features are video, the remaining 128 are audio.
        with tf.variable_scope("video_DBOF"):
            dbof_video = video_Dbof.forward(reshaped_input[:, 0:1024])
        with tf.variable_scope("audio_DBOF"):
            dbof_audio = audio_Dbof.forward(reshaped_input[:, 1024:])

        dbof = tf.concat([dbof_video, dbof_audio], 1)
        dbof_dim = dbof.get_shape().as_list()[1]

        if fc_dimred:
            # Optional fully-connected dimensionality reduction.
            hidden1_weights = tf.get_variable(
                "hidden1_weights",
                [dbof_dim, hidden1_size],
                initializer=tf.random_normal_initializer(
                    stddev=1 / math.sqrt(cluster_size)))
            tf.summary.histogram("hidden1_weights", hidden1_weights)
            activation = tf.matmul(dbof, hidden1_weights)

            if add_batch_norm and relu:
                activation = slim.batch_norm(
                    activation,
                    center=True,
                    scale=True,
                    is_training=is_training,
                    scope="hidden1_bn")
            else:
                hidden1_biases = tf.get_variable(
                    "hidden1_biases",
                    [hidden1_size],
                    initializer=tf.random_normal_initializer(stddev=0.01))
                tf.summary.histogram("hidden1_biases", hidden1_biases)
                activation += hidden1_biases

            if relu:
                activation = tf.nn.relu6(activation)
            tf.summary.histogram("hidden1_output", activation)
        else:
            activation = dbof

        aggregated_model = getattr(video_level_models,
                                   FLAGS.video_level_classifier_model)
        return aggregated_model().create_model(
            model_input=activation,
            vocab_size=vocab_size,
            is_training=is_training,
            **unused_params)
class SoftDbofModelLF(models.BaseModel):
    """Creates a Soft Deep Bag of Frames model.

    The model projects the features for each frame into a higher dimensional
    'clustering' space, pools across frames in that space, and then
    uses a configurable video-level model to classify the now aggregated
    features. The model will randomly sample either frames or sequences of
    frames during training to speed up convergence.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
                   input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
                  frames for each video (before padding).
    Returns:
      A dictionary with a tensor containing the probability predictions of the
      model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """

    def create_model(self,
                     model_input,
                     vocab_size,
                     num_frames,
                     iterations=None,
                     add_batch_norm=None,
                     sample_random_frames=None,
                     cluster_size=None,
                     hidden_size=None,
                     is_training=True,
                     **unused_params):
        iterations = iterations or FLAGS.iterations
        add_batch_norm = add_batch_norm or FLAGS.dbof_add_batch_norm
        random_frames = sample_random_frames or FLAGS.sample_random_frames
        cluster_size = cluster_size or FLAGS.dbof_cluster_size
        hidden1_size = hidden_size or FLAGS.dbof_hidden_size
        fc_dimred = FLAGS.fc_dimred
        relu = FLAGS.dbof_relu
        max_pool = FLAGS.softdbof_maxpool

        num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
        if random_frames:
            model_input = utils.SampleRandomFrames(model_input, num_frames,
                                                   iterations)
        else:
            model_input = utils.SampleRandomSequence(model_input, num_frames,
                                                     iterations)

        max_frames = model_input.get_shape().as_list()[1]
        feature_size = model_input.get_shape().as_list()[2]
        reshaped_input = tf.reshape(model_input, [-1, feature_size])
        tf.summary.histogram("input_hist", reshaped_input)

        # Soft pooling for both streams; audio gets 1/8 the clusters.
        # BUG FIX: cluster_size/8 is a float under Python 3; variable shapes
        # require ints, so use integer division.
        video_Dbof = SoftDBoF(1024, max_frames, cluster_size,
                              max_pool, add_batch_norm, is_training)
        audio_Dbof = SoftDBoF(128, max_frames, cluster_size // 8,
                              max_pool, add_batch_norm, is_training)

        if add_batch_norm:
            reshaped_input = slim.batch_norm(
                reshaped_input,
                center=True,
                scale=True,
                is_training=is_training,
                scope="input_bn")

        # First 1024 features are video, the remaining 128 are audio.
        with tf.variable_scope("video_DBOF"):
            dbof_video = video_Dbof.forward(reshaped_input[:, 0:1024])
        with tf.variable_scope("audio_DBOF"):
            dbof_audio = audio_Dbof.forward(reshaped_input[:, 1024:])

        dbof = tf.concat([dbof_video, dbof_audio], 1)
        dbof_dim = dbof.get_shape().as_list()[1]

        if fc_dimred:
            # Optional fully-connected dimensionality reduction.
            hidden1_weights = tf.get_variable(
                "hidden1_weights",
                [dbof_dim, hidden1_size],
                initializer=tf.random_normal_initializer(
                    stddev=1 / math.sqrt(cluster_size)))
            tf.summary.histogram("hidden1_weights", hidden1_weights)
            activation = tf.matmul(dbof, hidden1_weights)

            if add_batch_norm and relu:
                activation = slim.batch_norm(
                    activation,
                    center=True,
                    scale=True,
                    is_training=is_training,
                    scope="hidden1_bn")
            else:
                hidden1_biases = tf.get_variable(
                    "hidden1_biases",
                    [hidden1_size],
                    initializer=tf.random_normal_initializer(stddev=0.01))
                tf.summary.histogram("hidden1_biases", hidden1_biases)
                activation += hidden1_biases

            if relu:
                activation = tf.nn.relu6(activation)
            tf.summary.histogram("hidden1_output", activation)
        else:
            activation = dbof

        aggregated_model = getattr(video_level_models,
                                   FLAGS.video_level_classifier_model)
        return aggregated_model().create_model(
            model_input=activation,
            vocab_size=vocab_size,
            is_training=is_training,
            **unused_params)
class LstmModel(models.BaseModel):

    def create_model(self, model_input, vocab_size, num_frames,
                     is_training=True, **unused_params):
        """Creates a model which uses a stack of LSTMs to represent the video.

        Args:
          model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
                       input features.
          vocab_size: The number of classes in the dataset.
          num_frames: A vector of length 'batch' which indicates the number of
                      frames for each video (before padding).
        Returns:
          A dictionary with a tensor containing the probability predictions of
          the model in the 'predictions' key. The dimensions of the tensor are
          'batch_size' x 'num_classes'.
        """
        lstm_size = FLAGS.lstm_cells
        number_of_layers = FLAGS.lstm_layers
        random_frames = FLAGS.lstm_random_sequence
        iterations = FLAGS.iterations
        backward = FLAGS.lstm_backward

        if random_frames:
            num_frames_2 = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
            model_input = utils.SampleRandomFrames(model_input, num_frames_2,
                                                   iterations)
        if backward:
            # Feed the frames in reverse temporal order.
            model_input = tf.reverse_sequence(model_input, num_frames,
                                              seq_axis=1)

        stacked_lstm = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.BasicLSTMCell(
                    lstm_size, forget_bias=1.0, state_is_tuple=False)
                for _ in range(number_of_layers)
            ], state_is_tuple=False)

        # Only the final state is used; per-step outputs are discarded.
        # (The unused `loss = 0.0` local from the original has been removed.)
        with tf.variable_scope("RNN"):
            _, state = tf.nn.dynamic_rnn(stacked_lstm, model_input,
                                         sequence_length=num_frames,
                                         dtype=tf.float32)

        aggregated_model = getattr(video_level_models,
                                   FLAGS.video_level_classifier_model)
        return aggregated_model().create_model(
            model_input=state,
            vocab_size=vocab_size,
            is_training=is_training,
            **unused_params)
class GruModel(models.BaseModel):

    def create_model(self, model_input, vocab_size, num_frames,
                     is_training=True, **unused_params):
        """Creates a model which uses a stack of GRUs to represent the video.

        Args:
          model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
                       input features.
          vocab_size: The number of classes in the dataset.
          num_frames: A vector of length 'batch' which indicates the number of
                      frames for each video (before padding).
        Returns:
          A dictionary with a tensor containing the probability predictions of
          the model in the 'predictions' key. The dimensions of the tensor are
          'batch_size' x 'num_classes'.
        """
        gru_size = FLAGS.gru_cells
        number_of_layers = FLAGS.gru_layers
        backward = FLAGS.gru_backward
        random_frames = FLAGS.gru_random_sequence
        iterations = FLAGS.iterations

        if random_frames:
            num_frames_2 = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
            model_input = utils.SampleRandomFrames(model_input, num_frames_2,
                                                   iterations)
        if backward:
            # Feed the frames in reverse temporal order.
            model_input = tf.reverse_sequence(model_input, num_frames,
                                              seq_axis=1)

        stacked_GRU = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.GRUCell(gru_size)
                for _ in range(number_of_layers)
            ], state_is_tuple=False)

        # Only the final state is used; per-step outputs are discarded.
        # (The unused `loss = 0.0` local from the original has been removed.)
        with tf.variable_scope("RNN"):
            _, state = tf.nn.dynamic_rnn(stacked_GRU, model_input,
                                         sequence_length=num_frames,
                                         dtype=tf.float32)

        aggregated_model = getattr(video_level_models,
                                   FLAGS.video_level_classifier_model)
        return aggregated_model().create_model(
            model_input=state,
            vocab_size=vocab_size,
            is_training=is_training,
            **unused_params)
class NetFVModelLF(models.BaseModel):
    """Creates a NetFV based model.

    It emulates a Gaussian Mixture Fisher Vector pooling operation.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
                   input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
                  frames for each video (before padding).
    Returns:
      A dictionary with a tensor containing the probability predictions of the
      model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """

    def create_model(self,
                     model_input,
                     vocab_size,
                     num_frames,
                     iterations=None,
                     add_batch_norm=None,
                     sample_random_frames=None,
                     cluster_size=None,
                     hidden_size=None,
                     is_training=True,
                     **unused_params):
        iterations = iterations or FLAGS.iterations
        add_batch_norm = add_batch_norm or FLAGS.netvlad_add_batch_norm
        random_frames = sample_random_frames or FLAGS.sample_random_frames
        cluster_size = cluster_size or FLAGS.fv_cluster_size
        hidden1_size = hidden_size or FLAGS.fv_hidden_size
        relu = FLAGS.fv_relu
        gating = FLAGS.gating

        num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
        if random_frames:
            model_input = utils.SampleRandomFrames(model_input, num_frames,
                                                   iterations)
        else:
            model_input = utils.SampleRandomSequence(model_input, num_frames,
                                                     iterations)

        max_frames = model_input.get_shape().as_list()[1]
        feature_size = model_input.get_shape().as_list()[2]
        reshaped_input = tf.reshape(model_input, [-1, feature_size])
        tf.summary.histogram("input_hist", reshaped_input)

        # Audio gets half the clusters of video. Integer division keeps the
        # variable shape an int under Python 3.
        video_NetFV = NetFV(1024, max_frames, cluster_size,
                            add_batch_norm, is_training)
        audio_NetFV = NetFV(128, max_frames, cluster_size // 2,
                            add_batch_norm, is_training)

        if add_batch_norm:
            reshaped_input = slim.batch_norm(
                reshaped_input,
                center=True,
                scale=True,
                is_training=is_training,
                scope="input_bn")

        # First 1024 features are video, the remaining 128 are audio.
        with tf.variable_scope("video_FV"):
            fv_video = video_NetFV.forward(reshaped_input[:, 0:1024])
        with tf.variable_scope("audio_FV"):
            fv_audio = audio_NetFV.forward(reshaped_input[:, 1024:])

        fv = tf.concat([fv_video, fv_audio], 1)
        fv_dim = fv.get_shape().as_list()[1]

        hidden1_weights = tf.get_variable(
            "hidden1_weights",
            [fv_dim, hidden1_size],
            initializer=tf.random_normal_initializer(
                stddev=1 / math.sqrt(cluster_size)))
        activation = tf.matmul(fv, hidden1_weights)

        if add_batch_norm and relu:
            activation = slim.batch_norm(
                activation,
                center=True,
                scale=True,
                is_training=is_training,
                scope="hidden1_bn")
        else:
            hidden1_biases = tf.get_variable(
                "hidden1_biases",
                [hidden1_size],
                initializer=tf.random_normal_initializer(stddev=0.01))
            tf.summary.histogram("hidden1_biases", hidden1_biases)
            activation += hidden1_biases

        if relu:
            activation = tf.nn.relu6(activation)

        if gating:
            # Context gating on the hidden representation.
            gating_weights = tf.get_variable(
                "gating_weights_2",
                [hidden1_size, hidden1_size],
                initializer=tf.random_normal_initializer(
                    stddev=1 / math.sqrt(hidden1_size)))
            gates = tf.matmul(activation, gating_weights)

            if add_batch_norm:
                gates = slim.batch_norm(
                    gates,
                    center=True,
                    scale=True,
                    is_training=is_training,
                    scope="gating_bn")
            else:
                # BUG FIX: the original used tf.random_normal (an op, not an
                # initializer) and shape [cluster_size]; gates have width
                # hidden1_size, so the bias must match that shape.
                gating_biases = tf.get_variable(
                    "gating_biases",
                    [hidden1_size],
                    initializer=tf.random_normal_initializer(
                        stddev=1 / math.sqrt(hidden1_size)))
                gates += gating_biases

            gates = tf.sigmoid(gates)
            activation = tf.multiply(activation, gates)

        aggregated_model = getattr(video_level_models,
                                   FLAGS.video_level_classifier_model)
        return aggregated_model().create_model(
            model_input=activation,
            vocab_size=vocab_size,
            is_training=is_training,
            **unused_params)
|
antoine77340/Youtube-8M-WILLOW
|
frame_level_models.py
|
Python
|
apache-2.0
| 48,254
|
[
"Gaussian"
] |
d3aa58dd9ea710a5a4754436f0001308c7d6f9cda548e65d41c673d825567f3c
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Returns (matching, non_matching): the items of `iterator` for which
    `predicate` is true, then those for which it is false, each in their
    original relative order.
    """
    matching: List[Any] = []
    non_matching: List[Any] = []
    for item in iterator:
        (matching if predicate(item) else non_matching).append(item)
    return matching, non_matching
class computeCallTransformer(cst.CSTTransformer):
    """Rewrites compute client method calls into the request-object form.

    For each recognized API method, the flattened positional/keyword arguments
    are folded into a single ``request={...}`` dict argument, while the
    client-side control parameters (``CTRL_PARAMS``) are kept as real keyword
    arguments.  ``METHOD_TO_PARAMS`` maps a method name to the ordered tuple of
    request-field names its positional arguments bind to.
    """
    # Keyword arguments that are NOT request fields and must stay as-is.
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # method name -> ordered request-field names (required fields first).
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'abandon_instances': ('instance_group_manager', 'instance_group_managers_abandon_instances_request_resource', 'project', 'zone', 'request_id', ),
        'add_access_config': ('access_config_resource', 'instance', 'network_interface', 'project', 'zone', 'request_id', ),
        'add_association': ('firewall_policy', 'firewall_policy_association_resource', 'replace_existing_association', 'request_id', ),
        'add_health_check': ('project', 'region', 'target_pool', 'target_pools_add_health_check_request_resource', 'request_id', ),
        'add_instance': ('project', 'region', 'target_pool', 'target_pools_add_instance_request_resource', 'request_id', ),
        'add_instances': ('instance_group', 'instance_groups_add_instances_request_resource', 'project', 'zone', 'request_id', ),
        'add_nodes': ('node_group', 'node_groups_add_nodes_request_resource', 'project', 'zone', 'request_id', ),
        'add_peering': ('network', 'networks_add_peering_request_resource', 'project', 'request_id', ),
        'add_resource_policies': ('disk', 'disks_add_resource_policies_request_resource', 'project', 'zone', 'request_id', ),
        'add_rule': ('firewall_policy', 'firewall_policy_rule_resource', 'request_id', ),
        'add_signed_url_key': ('backend_bucket', 'project', 'signed_url_key_resource', 'request_id', ),
        'aggregated_list': ('project', 'filter', 'include_all_scopes', 'max_results', 'order_by', 'page_token', 'return_partial_success', ),
        'apply_updates_to_instances': ('instance_group_manager', 'instance_group_managers_apply_updates_request_resource', 'project', 'zone', ),
        'attach_disk': ('attached_disk_resource', 'instance', 'project', 'zone', 'force_attach', 'request_id', ),
        'attach_network_endpoints': ('global_network_endpoint_groups_attach_endpoints_request_resource', 'network_endpoint_group', 'project', 'request_id', ),
        'bulk_insert': ('bulk_insert_instance_resource_resource', 'project', 'zone', 'request_id', ),
        'clone_rules': ('firewall_policy', 'request_id', 'source_firewall_policy', ),
        'create_instances': ('instance_group_manager', 'instance_group_managers_create_instances_request_resource', 'project', 'zone', 'request_id', ),
        'create_snapshot': ('disk', 'project', 'snapshot_resource', 'zone', 'guest_flush', 'request_id', ),
        'delete': ('address', 'project', 'region', 'request_id', ),
        'delete_access_config': ('access_config', 'instance', 'network_interface', 'project', 'zone', 'request_id', ),
        'delete_instances': ('instance_group_manager', 'instance_group_managers_delete_instances_request_resource', 'project', 'zone', 'request_id', ),
        'delete_nodes': ('node_group', 'node_groups_delete_nodes_request_resource', 'project', 'zone', 'request_id', ),
        'delete_per_instance_configs': ('instance_group_manager', 'instance_group_managers_delete_per_instance_configs_req_resource', 'project', 'zone', ),
        'delete_signed_url_key': ('backend_bucket', 'key_name', 'project', 'request_id', ),
        'deprecate': ('deprecation_status_resource', 'image', 'project', 'request_id', ),
        'detach_disk': ('device_name', 'instance', 'project', 'zone', 'request_id', ),
        'detach_network_endpoints': ('global_network_endpoint_groups_detach_endpoints_request_resource', 'network_endpoint_group', 'project', 'request_id', ),
        'disable_xpn_host': ('project', 'request_id', ),
        'disable_xpn_resource': ('project', 'projects_disable_xpn_resource_request_resource', 'request_id', ),
        'enable_xpn_host': ('project', 'request_id', ),
        'enable_xpn_resource': ('project', 'projects_enable_xpn_resource_request_resource', 'request_id', ),
        'expand_ip_cidr_range': ('project', 'region', 'subnetwork', 'subnetworks_expand_ip_cidr_range_request_resource', 'request_id', ),
        'get': ('accelerator_type', 'project', 'zone', ),
        'get_association': ('firewall_policy', 'name', ),
        'get_diagnostics': ('interconnect', 'project', ),
        'get_effective_firewalls': ('instance', 'network_interface', 'project', 'zone', ),
        'get_from_family': ('family', 'project', ),
        'get_guest_attributes': ('instance', 'project', 'zone', 'query_path', 'variable_key', ),
        'get_health': ('backend_service', 'project', 'resource_group_reference_resource', ),
        'get_iam_policy': ('project', 'resource', 'zone', 'options_requested_policy_version', ),
        'get_nat_mapping_info': ('project', 'region', 'router', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ),
        'get_router_status': ('project', 'region', 'router', ),
        'get_rule': ('firewall_policy', 'priority', ),
        'get_screenshot': ('instance', 'project', 'zone', ),
        'get_serial_port_output': ('instance', 'project', 'zone', 'port', 'start', ),
        'get_shielded_instance_identity': ('instance', 'project', 'zone', ),
        'get_status': ('project', 'region', 'vpn_gateway', ),
        'get_xpn_host': ('project', ),
        'get_xpn_resources': ('project', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ),
        'insert': ('address_resource', 'project', 'region', 'request_id', ),
        'invalidate_cache': ('cache_invalidation_rule_resource', 'project', 'url_map', 'request_id', ),
        'list': ('project', 'zone', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ),
        'list_associations': ('target_resource', ),
        'list_available_features': ('project', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ),
        'list_errors': ('instance_group_manager', 'project', 'zone', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ),
        'list_instances': ('instance_group', 'instance_groups_list_instances_request_resource', 'project', 'zone', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ),
        'list_managed_instances': ('instance_group_manager', 'project', 'zone', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ),
        'list_network_endpoints': ('network_endpoint_group', 'project', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ),
        'list_nodes': ('node_group', 'project', 'zone', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ),
        'list_peering_routes': ('network', 'project', 'direction', 'filter', 'max_results', 'order_by', 'page_token', 'peering_name', 'region', 'return_partial_success', ),
        'list_per_instance_configs': ('instance_group_manager', 'project', 'zone', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ),
        'list_preconfigured_expression_sets': ('project', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ),
        'list_referrers': ('instance', 'project', 'zone', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ),
        'list_usable': ('project', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ),
        'list_xpn_hosts': ('project', 'projects_list_xpn_hosts_request_resource', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ),
        'move': ('firewall_policy', 'parent_id', 'request_id', ),
        'move_disk': ('disk_move_request_resource', 'project', 'request_id', ),
        'move_instance': ('instance_move_request_resource', 'project', 'request_id', ),
        'patch': ('autoscaler_resource', 'project', 'zone', 'autoscaler', 'request_id', ),
        'patch_per_instance_configs': ('instance_group_manager', 'instance_group_managers_patch_per_instance_configs_req_resource', 'project', 'zone', 'request_id', ),
        'patch_rule': ('firewall_policy', 'firewall_policy_rule_resource', 'priority', 'request_id', ),
        'preview': ('project', 'region', 'router', 'router_resource', ),
        'recreate_instances': ('instance_group_manager', 'instance_group_managers_recreate_instances_request_resource', 'project', 'zone', 'request_id', ),
        'remove_association': ('firewall_policy', 'name', 'request_id', ),
        'remove_health_check': ('project', 'region', 'target_pool', 'target_pools_remove_health_check_request_resource', 'request_id', ),
        'remove_instance': ('project', 'region', 'target_pool', 'target_pools_remove_instance_request_resource', 'request_id', ),
        'remove_instances': ('instance_group', 'instance_groups_remove_instances_request_resource', 'project', 'zone', 'request_id', ),
        'remove_peering': ('network', 'networks_remove_peering_request_resource', 'project', 'request_id', ),
        'remove_resource_policies': ('disk', 'disks_remove_resource_policies_request_resource', 'project', 'zone', 'request_id', ),
        'remove_rule': ('firewall_policy', 'priority', 'request_id', ),
        'reset': ('instance', 'project', 'zone', 'request_id', ),
        'resize': ('disk', 'disks_resize_request_resource', 'project', 'zone', 'request_id', ),
        'resume': ('instance', 'project', 'zone', 'request_id', ),
        'send_diagnostic_interrupt': ('instance', 'project', 'zone', ),
        'set_backend_service': ('project', 'target_ssl_proxies_set_backend_service_request_resource', 'target_ssl_proxy', 'request_id', ),
        'set_backup': ('project', 'region', 'target_pool', 'target_reference_resource', 'failover_ratio', 'request_id', ),
        'set_common_instance_metadata': ('metadata_resource', 'project', 'request_id', ),
        'set_default_network_tier': ('project', 'projects_set_default_network_tier_request_resource', 'request_id', ),
        'set_deletion_protection': ('project', 'resource', 'zone', 'deletion_protection', 'request_id', ),
        'set_disk_auto_delete': ('auto_delete', 'device_name', 'instance', 'project', 'zone', 'request_id', ),
        'set_edge_security_policy': ('backend_bucket', 'project', 'security_policy_reference_resource', 'request_id', ),
        'set_iam_policy': ('project', 'resource', 'zone', 'zone_set_policy_request_resource', ),
        'set_instance_template': ('instance_group_manager', 'instance_group_managers_set_instance_template_request_resource', 'project', 'zone', 'request_id', ),
        'set_labels': ('project', 'resource', 'zone', 'zone_set_labels_request_resource', 'request_id', ),
        'set_machine_resources': ('instance', 'instances_set_machine_resources_request_resource', 'project', 'zone', 'request_id', ),
        'set_machine_type': ('instance', 'instances_set_machine_type_request_resource', 'project', 'zone', 'request_id', ),
        'set_metadata': ('instance', 'metadata_resource', 'project', 'zone', 'request_id', ),
        'set_min_cpu_platform': ('instance', 'instances_set_min_cpu_platform_request_resource', 'project', 'zone', 'request_id', ),
        'set_named_ports': ('instance_group', 'instance_groups_set_named_ports_request_resource', 'project', 'zone', 'request_id', ),
        'set_node_template': ('node_group', 'node_groups_set_node_template_request_resource', 'project', 'zone', 'request_id', ),
        'set_private_ip_google_access': ('project', 'region', 'subnetwork', 'subnetworks_set_private_ip_google_access_request_resource', 'request_id', ),
        'set_proxy_header': ('project', 'target_ssl_proxies_set_proxy_header_request_resource', 'target_ssl_proxy', 'request_id', ),
        'set_quic_override': ('project', 'target_https_proxies_set_quic_override_request_resource', 'target_https_proxy', 'request_id', ),
        'set_scheduling': ('instance', 'project', 'scheduling_resource', 'zone', 'request_id', ),
        'set_security_policy': ('backend_service', 'project', 'security_policy_reference_resource', 'request_id', ),
        'set_service_account': ('instance', 'instances_set_service_account_request_resource', 'project', 'zone', 'request_id', ),
        'set_shielded_instance_integrity_policy': ('instance', 'project', 'shielded_instance_integrity_policy_resource', 'zone', 'request_id', ),
        'set_ssl_certificates': ('project', 'region', 'region_target_https_proxies_set_ssl_certificates_request_resource', 'target_https_proxy', 'request_id', ),
        'set_ssl_policy': ('project', 'ssl_policy_reference_resource', 'target_https_proxy', 'request_id', ),
        'set_tags': ('instance', 'project', 'tags_resource', 'zone', 'request_id', ),
        'set_target': ('forwarding_rule', 'project', 'region', 'target_reference_resource', 'request_id', ),
        'set_target_pools': ('instance_group_manager', 'instance_group_managers_set_target_pools_request_resource', 'project', 'zone', 'request_id', ),
        'set_url_map': ('project', 'region', 'target_http_proxy', 'url_map_reference_resource', 'request_id', ),
        'set_usage_export_bucket': ('project', 'usage_export_location_resource', 'request_id', ),
        'simulate_maintenance_event': ('instance', 'project', 'zone', ),
        'start': ('instance', 'project', 'zone', 'request_id', ),
        'start_with_encryption_key': ('instance', 'instances_start_with_encryption_key_request_resource', 'project', 'zone', 'request_id', ),
        'stop': ('instance', 'project', 'zone', 'request_id', ),
        'suspend': ('instance', 'project', 'zone', 'request_id', ),
        'switch_to_custom_mode': ('network', 'project', 'request_id', ),
        'test_iam_permissions': ('project', 'resource', 'test_permissions_request_resource', 'zone', ),
        'update': ('autoscaler_resource', 'project', 'zone', 'autoscaler', 'request_id', ),
        'update_access_config': ('access_config_resource', 'instance', 'network_interface', 'project', 'zone', 'request_id', ),
        'update_display_device': ('display_device_resource', 'instance', 'project', 'zone', 'request_id', ),
        'update_network_interface': ('instance', 'network_interface', 'network_interface_resource', 'project', 'zone', 'request_id', ),
        'update_peering': ('network', 'networks_update_peering_request_resource', 'project', 'request_id', ),
        'update_per_instance_configs': ('instance_group_manager', 'instance_group_managers_update_per_instance_configs_req_resource', 'project', 'zone', 'request_id', ),
        'update_shielded_instance_config': ('instance', 'project', 'shielded_instance_config_resource', 'zone', 'request_id', ),
        'validate': ('project', 'region', 'region_url_maps_validate_request_resource', 'url_map', ),
        'wait': ('operation', 'project', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Rewrite a single call site; returns it unchanged if unrecognized."""
        try:
            # Only attribute calls (client.method(...)) have .func.attr.
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated
        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated
        # Split off the control-plane keywords (retry/timeout/metadata); only
        # the remaining keywords belong inside the request dict.
        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs
        )
        # Positional args beyond the request fields can only be control params
        # passed positionally; re-emit them as explicit keywords, in order.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
        # Build request={'field': value, ...} pairing field names with args.
        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )
        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=computeCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    for root, _, files in os.walk(in_dir):
        for fname in files:
            # Only Python sources are rewritten; everything else is skipped.
            if os.path.splitext(fname)[1] != ".py":
                continue
            src_path = pathlib.Path(os.path.join(root, fname))
            with open(src_path, 'r') as handle:
                source = handle.read()
            # Parse the code and insert method call fixes.
            rewritten = cst.parse_module(source).visit(transformer)
            # Mirror the relative path under out_dir, creating parents as needed.
            dest_path = out_dir.joinpath(src_path.relative_to(in_dir))
            dest_path.parent.mkdir(parents=True, exist_ok=True)
            with open(dest_path, 'w') as handle:
                handle.write(rewritten.code)
if __name__ == '__main__':
    # Command-line entry point: validate the two directories, then delegate
    # all rewriting to fix_files().
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the compute client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    # Fail fast, before touching any files, if either directory is unusable.
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    # Refuse a non-empty output directory: this tool never overwrites files.
    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)
    fix_files(input_dir, output_dir)
|
googleapis/python-compute
|
scripts/fixup_compute_v1_keywords.py
|
Python
|
apache-2.0
| 20,432
|
[
"VisIt"
] |
22498df3d5f36426ae065d68cd60594444496b60ced452dc1818650a45c340e1
|
#!/usr/bin/env python
"""
replicate file in the FileCatalog
Can work in two modes.
In the first mode, yuser provides the destination SE with option "-D".
In the second mode, when no destination is given, drepl will look for COMDIRAC configuration options "replication_scheme" and "replication_ses".
If found, those variables will define a list of SEs where to put replicas.
If not found drepl will fallback to configuration option "default_se".
Supported schemes for automated replication (in option "replication_scheme") are:
* all() - replicate file to all SEs listed in option "replication_ses"
* first(N) - replicate file to N first SEs listed in option "replication_ses"
* random(N) - replicatefile to N randomly chosen SEs from the list in option "replication_ses"
"""
import os
import DIRAC
from DIRAC import S_OK, S_ERROR
from COMDIRAC.Interfaces import critical
from COMDIRAC.Interfaces import DSession
from COMDIRAC.Interfaces import DCatalog
from COMDIRAC.Interfaces import pathFromArguments
if __name__ == "__main__":
  import sys
  from DIRAC.Core.Base import Script

  # Collects the command line switch values registered with Script below.
  class Params:
    def __init__ ( self ):
      # False means "not provided"; when set, each holds an SE name string
      # (destinationSE may be a comma separated list).
      self.destinationSE = False
      self.sourceSE = False
    def setDestinationSE( self, arg ):
      # Callback for the -D/--destination-se switch.
      self.destinationSE = arg
      return S_OK()
    def getDestinationSE( self ):
      return self.destinationSE
    def setSourceSE( self, arg ):
      # Callback for the -S/--source-se switch.
      self.sourceSE = arg
      return S_OK()
    def getSourceSE( self ):
      return self.sourceSE

  params = Params()

  # The first line of the module docstring doubles as the usage summary.
  Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                       'Usage:',
                                       ' %s [options] lfn...' % Script.scriptName,
                                       'Arguments:',
                                       ' lfn: file entry in the FileCatalog',
                                       '', 'Examples',
                                       ' $ drepl ./some_lfn_file',
                                       ' $ drepl -D SOME-DESTINATION-SE-disk ./some_lfn_file',
                                       ] )
                          )
  Script.registerSwitch( "D:", "destination-se=", "Storage Element where to put replica (or a comma separated list)", params.setDestinationSE )
  Script.registerSwitch( "S:", "source-se=", "source Storage Element for replication", params.setSourceSE )
  Script.parseCommandLine( ignoreErrors = True )
  args = Script.getPositionalArgs()

  session = DSession()
  catalog = DCatalog()

  if len( args ) < 1:
    # NOTE: Python 2 print statement -- this script targets the Python 2 DIRAC stack.
    print "Error: No argument provided\n%s:" % Script.scriptName
    Script.showHelp()
    DIRAC.exit( -1 )

  # default lfn: same file name as local_path
  lfns = pathFromArguments( session, args )

  # Destination SE selection: an explicit -D list wins; otherwise fall back to
  # the COMDIRAC replication SEs; finally to the configured default_se.
  dsts = [ ]
  if params.destinationSE:
    dsts = params.destinationSE.split( "," )
  else:
    dsts = session.getReplicationSEs()
    if not dsts:
      dsts = [ session.getEnv( "default_se", "DIRAC-USER" )[ "Value" ] ]

  # Optional source SE is appended to each do_replicate command line.
  srcopt = ""
  if params.sourceSE:
    srcopt = " " + params.sourceSE

  Script.enableCS()
  from DIRAC.DataManagementSystem.Client.FileCatalogClientCLI import FileCatalogClientCLI
  fccli = FileCatalogClientCLI( catalog.catalog )

  # Replicate every requested LFN to every destination SE via the CLI helper,
  # which expects a single space-separated argument string.
  for lfn in lfns:
    for dst in dsts:
      # print "replicating", lfn, "to SE", dst
      fccli.do_replicate( lfn + " " + dst + srcopt )
|
pigay/COMDIRAC
|
Interfaces/scripts/drepl.py
|
Python
|
gpl-3.0
| 3,366
|
[
"DIRAC"
] |
e8e93bbeccbdbba13f8b357951cce8a907e4f6987bccbfc3fef5e3af61398654
|
#
# Dalton Burke, CU Denver
#
# CONUS = [-124.7844079,-66.9513812,24.7433195,49.3457868]
from __future__ import absolute_import
from utils import ensure_dir, symlink_unless_exists
from .downloader import download_url, DownloadError, get_dList
# fast searching of dList
from bisect import bisect
from datetime import datetime, timedelta
from pyhdf import SD
import pytz
import requests
import os
import os.path as osp
import sys
import logging
from six.moves import map
from six.moves import range
class data_sourceError(Exception):
    """Raised when a data source cannot retrieve the requested files."""
    pass
class data_source(object):
    """
    Parent class of all data sources that implement common functionality, for example
    - local validation (file size check)
    - HDF retrieval with retries (smart check whether server implements http-range)

    The compute_* / retrieve_* methods other than retrieve_data are abstract
    stubs meant to be overridden by concrete sources (e.g. MODIS_TERRA).
    """

    def __init__(self, ingest_dir):
        """
        Initialize level0 source with ingest directory (where level0 files are stored).

        :param ingest_dir: root of level0 storage
        """
        self.ingest_dir = osp.abspath(osp.expanduser(ingest_dir))

    def retrieve_data(self, from_utc, to_utc, lonlat):
        """
        Retrieves all data (geo and active fire) in the given time range and
        longitude/latitude box.  This function is what end users will use to get data.

        :param from_utc: start time
        :param to_utc: end time
        :param lonlat: list of form [lowlon, highlon, lowlat, highlat] describing longitude/latitude box
        :return: list of paths to local files that were retrieved
        """
        # The level0 archive only reaches back about two weeks; older data has
        # to come from the geolocation + active fire archives instead.
        two_weeks_ago = datetime.utcnow() - timedelta(days=14)
        manifest = []
        if from_utc > two_weeks_ago:
            # Entire request is recent: level0 only.
            manifest.extend(self.retrieve_l0(from_utc, to_utc))
        elif to_utc < two_weeks_ago:
            # Entire request is old: geolocation + active fire, with geo files
            # filtered on intersection with the lonlat box.
            # NOTE(review): this branch calls geo_intersects() while the mixed
            # branch below calls geo_intersect(); neither helper is defined in
            # this module, so at least one spelling must be wrong -- confirm
            # the helper's real name before relying on either branch.
            geo_list = [x for x in self.retrieve_geo(from_utc, to_utc) if geo_intersects(self.ingest_dir + '/' + x, lonlat)]
            manifest.extend(geo_list)
            manifest.extend(self.retrieve_af(geo_list))
        else:
            # Request straddles the two-week boundary: level0 for the recent
            # part, geo + active fire for the older part.
            manifest.extend(self.retrieve_l0(two_weeks_ago + timedelta(minutes=10), to_utc))
            geo_list = [x for x in self.retrieve_geo(from_utc, two_weeks_ago) if geo_intersect(self.ingest_dir + '/' + x, lonlat)]
            manifest.extend(geo_list)
            manifest.extend(self.retrieve_af(geo_list))
        return manifest

    def retrieve_geo(self, from_utc, to_utc, ref_utc = None):
        """
        Attempts to retrieve geolocation files in the time range.
        First, check if they're available locally; if unavailable proceed to download.
        Abstract: subclasses must override.

        :param from_utc: start time
        :param to_utc: end time
        :return: a list of paths to local geolocation files
        """
        pass

    def compute_geo_manifest(self, from_utc, to_utc):
        """
        Get list of geolocation file names for the given time frame.
        Abstract: subclasses must override.

        :param from_utc: start time UTC
        :param to_utc: end time UTC
        :return: list of file names as strings
        """
        # BUG FIX: this stub (and compute_af_manifest below) was missing the
        # 'self' parameter, so instance calls would have raised TypeError.
        pass

    def retrieve_af(self, geo_list):
        """
        Attempts to retrieve active fire files matching the given geolocation files.
        Abstract: subclasses must override.

        :param geo_list: list containing the relevant geolocation file names
        :return: a list of paths to the local active fire files
        """
        pass

    def compute_af_manifest(self, geo_list):
        """
        Get list of active fire file names from a set of geolocation files.
        Abstract: subclasses must override.

        :param geo_list: list containing geolocation file names
        """
        pass

    def retrieve_l0(self, from_utc, to_utc, ref_utc = None):
        """
        Attempts to retrieve the firedata files for the time range; verifies local
        availability first and downloads anything missing.
        Abstract: subclasses must override.

        :param from_utc: start time
        :param to_utc: end time
        :return: a list of paths to local level0 files
        """
        pass

    def compute_l0_manifest(self, from_utc, to_utc):
        """
        Compute list of files in the source for the given time frame.
        Abstract: subclasses must override.

        :param from_utc: time UTC format
        :param to_utc: time UTC format
        :return: list of file names as strings
        """
        pass

    def manifest_from_geo(self, geo_list, granule_name):
        """
        Build the list of product file names (e.g. active fire granules) that
        correspond to a set of geolocation files.

        :param geo_list: list containing geolocation file names
        :param granule_name: product prefix of the target granule (e.g. 'MOD14')
        :return: list of product file names as strings
        """
        # prefix later tells us what url we should be looking at
        prefix = ''
        file_list = []
        # Pull the directory listing of each relevant page (determined by the
        # 'prefix' of each geo file).  This yields a superset of the product
        # files we care about; it is refined below.
        for g in geo_list:
            if g[:19] != prefix:
                prefix = g[:19]
                file_list.extend(get_dList(self.url_base_hdf + '/' + self.filepath_af + '/' + str(prefix[7:11]) + '/' + str(prefix[11:14])))
        # manifest contains the final set of exact filenames we care about
        manifest = []
        # Search for what the name should look like and use that index to add
        # that name to the manifest -- n*log(n) overall via bisect.
        for g in geo_list:
            manifest.append(file_list[bisect(file_list, granule_name + g[5:24] + '99999999999999.hdf') - 1])
        return manifest

    def download_file(self, url_base, rel_path, max_retries=3):
        """
        Download a file and stream to <rel_path> in ingest_dir.

        :param url_base: the base URL where the file is hosted
        :param rel_path: the relative path of the file
        :param max_retries: how many times we may retry to download the file
        :raises data_sourceError: if the download ultimately fails
        """
        url = url_base + '/' + rel_path
        path = osp.join(self.ingest_dir, rel_path)
        try:
            download_url(url, path, max_retries)
        except DownloadError as e:
            raise data_sourceError('data_source: failed to download file %s' % url)

    def available_locally(self, path):
        """
        Check if a file is available locally and its size matches the recorded
        content size (stored alongside it in a '<path>.size' file).

        :param path: the local file path
        :return: True if the file exists and its size checks out, else False
        """
        info_path = path + '.size'
        if osp.exists(path) and osp.exists(info_path):
            content_size = int(open(info_path).read())
            return osp.getsize(path) == content_size
        else:
            return False
class MODIS_TERRA(data_source):
"""
750m data from the MODIS instrument on the Terra satellite
"""
def __init__(self, ingest_dir):
# if(satellite = 'terra'):
# self.geo_gran = 'MOD03'
# self.af_gran = 'MOD14'
# elif(satellite = 'aqua'):
# self.geo_gran = 'MYD03'
# self.af_gran = 'MYD14'
# else:
# raise Exception(ValueError)
super(MODIS_TERRA, self).__init__(ingest_dir)
def retrieve_geo(self, from_utc, to_utc):
"""
Attempts to retrieve geolocation files in the time range
First, check if they're available locally, if unavailable proceed to download
:param from_utc: start time
:param to_utc: end time
:return: a list of paths to local geolocation files
"""
manifest = self.compute_geo_manifest(from_utc, to_utc)
nonlocals = [x for x in manifest if not self.available_locally(osp.join(self.ingest_dir, x))]
logging.info('Retrieving geolocation data from %s' % (self.url_base_hdf + '/' + self.filepath_geo))
list(map(lambda x: self.download_file(self.url_base_hdf + '/' + self.filepath_geo + '/' + x[7:11] + '/' + x[11:14], x), nonlocals))
return manifest
def compute_geo_manifest(self, from_utc, to_utc):
"""
Get list of geolocation file names for the given time frame
:param from_utc: start time UTC
:param to_utc: end time UTC
:return: list of file names as strings
"""
# I don't really want to deal with splitting it on years, so we'll recurse on that
# from now on we can assume that to and from occur in the same year
start_year = from_utc.year
if start_year != to_utc.year:
return compute_geo_manifest(from_utc, datetime(year=start_year, month=12,day=31,hour=23,minute=59)) + \
compute_geo_manifest(datetime(year=start_year+1, month=1, day=1, hour=0, minute=0), to_utc)
# The source has data for different days in different folders, we'll need to get their paths for each day
start_day = (from_utc - datetime(start_year, 1,1)).days + 1
end_day = (to_utc - datetime(start_year, 1, 1)).days + 1
file_list = []
for day in range(start_day, end_day + 1):
file_list.extend(get_dList(self.url_base_hdf + '/' + self.filepath_geo + '/' + str(start_year) + '/' + str(day)))
# we now have a list with all of the filenames during the days that the query requested, so now we'll trim the stuff at the front and back we don't need
# invent a sample filename for the start time, they look like this:
# MOD03.AYYYYDDDD.HHMM.006.#############.hdf
start_filename = 'MOD03.A%04d%03d.%02d%02d.006.9999999999999.hdf' % (start_year, start_day, from_utc.hour, from_utc.minute)
# bisect searches for that sample name and returns the index of where that file should go
# to make sure we get that data we start at the file before it (-1)
start_index = bisect(file_list, start_filename) - 1
# we'll do the same for the last one
end_filename = 'MOD03.A%04d%03d.%02d%02d.006.9999999999999.hdf' % (start_year, end_day, to_utc.hour, to_utc.minute)
end_index = bisect(file_list, end_filename)
return file_list[start_index:end_index]
def retrieve_af(self, geo_list):
"""
Attempts to retrieve active fire files in the time range and latitude/longitude box
:param geo_list: list containing the relevant geolocation file names
:return: a list of paths to the local active fire files
"""
manifest = self.compute_af_manifest(geo_list)
nonlocals = [x for x in manifest if not self.available_locally(osp.join(self.ingest_dir, x))]
logging.info('Retrieving active fire data from %s' % (self.url_base_hdf + '/' + self.filepath_af))
list(map(lambda x: self.download_file(self.url_base_hdf + '/' + self.filepath_af + '/' + x[7:11] + '/' + x[11:14], x), nonlocals))
return manifest
def compute_af_manifest(self, geo_list):
"""
get list of active fire file names from a set of geolocation files
:param geo_list: list containing geolocation file names
"""
prefix = ''
file_list = []
for g in geo_list:
if g[:19] != prefix:
prefix = g[:19]
file_list.extend(get_dList(self.url_base_hdf + '/' + self.filepath_af + '/' + str(prefix[7:11]) + '/' + str(prefix[11:14])))
manifest = []
# Search for what the name should look like and use that index to add that name to the manifest
# this takes n*log(n) time, which I think is pretty good
for g in geo_list:
manifest.append(file_list[bisect(file_list, 'MOD14' + g[5:24] + '99999999999999.hdf') - 1])
return manifest
def retrieve_l0(self, from_utc, to_utc):
"""
Attempts to retrieve the files to satisfy the simulation request from from_utc to to_utc.
:param from_utc: start time
:param to_utc: end time
:return: list of paths to local level0 files
"""
# This only works for requests going back about the last two weeks
# can add a source for older data later, but I don't think it's needed,
# given the purpose of the project.
if from_utc < datetime.utcnow() - timedelta(days=14):
raise data_sourceError('Requested data older than two weeks')
manifest = self.compute_l0_manifest(from_utc, to_utc)
nonlocals = [x for x in manifest if not self.available_locally(osp.join(self.ingest_dir, x))]
logging.info('Retrieving level0s from %s' % (self.url_base_l0 + '/' + self.filepath_l0))
list(map(lambda x:self.download_file(self.url_base_l0 + '/' + self.filepath_l0, x), nonlocals))
return manifest
    def compute_l0_manifest(self, from_utc, to_utc):
        """
        Compute list of files in the source for the given time frame

        :param from_utc: time UTC format
        :param to_utc: time UTC format
        :return: list of file names as strings
        """
        # Retrieve the directory listing
        # NOTE(review): the bisect calls below assume get_dList returns a
        # lexicographically sorted listing -- verify against get_dList
        dList = get_dList(self.url_base_l0 + '/' + self.filepath_l0)
        # We want a list of all of the filenames which land between from_utc and to_utc
        # Gameplan:
        # What would a file that starts exactly at from_utc look like?
        # Filenames have this pattern: P0420064AAAAAAAAAAAAAAyyDDDhhmmss000.PDS
        # (yy = two-digit year, DDD = 1-based day of year, hhmmss = start time)
        current_time = from_utc
        days = (current_time - datetime(current_time.year, 1, 1)).days + 1
        year = current_time.year % 100
        filename = 'P0420064AAAAAAAAAAAAAA%02d%03d%02d%02d%02d000.PDS' % (year, days,
                                                                          current_time.hour,
                                                                          current_time.minute,
                                                                          current_time.second)
        # Then, we find out where that filename would go in the dList
        # This call binary searches dList for filename, and returns it's index (pretty efficient)
        # If the filename is not found, it returns the index of the first file larger than it
        index = bisect(dList, filename)
        # If the filename we made up is not in the list (very likely), we actually want the first file
        # smaller than the filename, so we still get the data for that time period
        # (-2 since the files come in pairs, one that ends in 000.PDS and one that ends in 001.PDS)
        if index == len(dList):
            index = index - 2
        elif dList[index] != filename:
            index = index - 2
        level0manifest = []
        # Now that we know where to start, we'll begin filling the manifest with relevant files
        while current_time < to_utc:
            # Add 000.PDS file to manifest
            level0manifest.append(dList[index])
            # Add 001.PDS file to manifest
            level0manifest.append(dList[index+1])
            # Move the index to the next pair, if we run out of files just break
            index = index + 2
            if index >= len(dList):
                break
            current_file = dList[index]
            # Change time to match the next file, use that time to compare to to_utc
            # If the time that we get from this exceeds to_utc, we have all the data we want
            # year is stored as two digits at [22:24] -- files before 2000 not expected
            current_time = current_time.replace(year = 2000 + int(current_file[22:24]))
            # reset to Jan 1 before adding the day-of-year offset from [24:27]
            current_time = current_time.replace(day=1, month=1)
            current_time = current_time + timedelta(days=int(current_file[24:27]) - 1)
            current_time = current_time.replace(hour=int(current_file[27:29]),
                                                minute=int(current_file[29:31]),
                                                second=int(current_file[31:33]))
        return level0manifest
url_base_l0 = 'ftp://is.sci.gsfc.nasa.gov'
filepath_l0 = 'gsfcdata/terra/modis/level0'
url_base_hdf = 'ftp://ladsweb.nascom.nasa.gov'
filepath_geo = 'allData/6/MOD03'
filepath_af = 'allData/6/MOD14'
# Near clone of MODIS_TERRA, only changes to url and filename
class MODIS_AQUA(data_source):
    """
    750m data from the MODIS instrument on the Aqua satellite

    Uniqueness- Requires data from two directories on the source server,
    modis data denoted with _m, and gbad data denoted with _g
    """

    def __init__(self, ingest_dir):
        super(MODIS_AQUA, self).__init__(ingest_dir)

    def retrieve_geo(self, from_utc, to_utc):
        """
        Attempts to retrieve geolocation files in the time range.
        First, check if they're available locally, if unavailable proceed to download.

        :param from_utc: start time
        :param to_utc: end time
        :return: a list of paths to local geolocation files
        """
        manifest = self.compute_geo_manifest(from_utc, to_utc)
        nonlocals = [x for x in manifest if not self.available_locally(osp.join(self.ingest_dir, x))]
        logging.info('Retrieving geolocation data from %s' % (self.url_base_hdf + '/' + self.filepath_geo))
        # file names carry the 4-digit year at [7:11] and the day-of-year at [11:14]
        list(map(lambda x: self.download_file(self.url_base_hdf + '/' + self.filepath_geo + '/' + x[7:11] + '/' + x[11:14], x), nonlocals))
        return manifest

    def compute_geo_manifest(self, from_utc, to_utc):
        """
        Get list of geolocation file names for the given time frame.

        :param from_utc: start time UTC
        :param to_utc: end time UTC
        :return: list of file names as strings
        """
        # Requests spanning a year boundary are split and handled recursively,
        # so below from_utc and to_utc can be assumed to fall in the same year.
        start_year = from_utc.year
        if start_year != to_utc.year:
            # BUG FIX: the recursive calls were missing 'self.' and raised NameError
            return self.compute_geo_manifest(from_utc, datetime(year=start_year, month=12, day=31, hour=23, minute=59)) + \
                   self.compute_geo_manifest(datetime(year=start_year + 1, month=1, day=1, hour=0, minute=0), to_utc)
        # The source has data for different days in different folders; list each day
        start_day = (from_utc - datetime(start_year, 1, 1)).days + 1
        end_day = (to_utc - datetime(start_year, 1, 1)).days + 1
        file_list = []
        for day in range(start_day, end_day + 1):
            file_list.extend(get_dList(self.url_base_hdf + '/' + self.filepath_geo + '/' + str(start_year) + '/' + str(day)))
        # (dead code that assembled an unused list of geoMeta txt URLs was removed here)
        # file_list now holds every file for the requested days; trim both ends.
        # Invent a sample filename for the start time; real names look like:
        # MYD03.AYYYYDDD.HHMM.006.#############.hdf
        start_filename = 'MYD03.A%04d%03d.%02d%02d.006.9999999999999.hdf' % (start_year, start_day, from_utc.hour, from_utc.minute)
        # bisect returns the insertion point of the sample name; start one file
        # earlier (-1) so the granule covering from_utc itself is included
        start_index = bisect(file_list, start_filename) - 1
        # the same trick bounds the other end
        end_filename = 'MYD03.A%04d%03d.%02d%02d.006.9999999999999.hdf' % (start_year, end_day, to_utc.hour, to_utc.minute)
        end_index = bisect(file_list, end_filename)
        return file_list[start_index:end_index]

    def retrieve_af(self, geo_list):
        """
        Attempts to retrieve active fire files matching the given geolocation files.

        :param geo_list: list containing the relevant geolocation file names
        :return: a list of paths to the local active fire files
        """
        manifest = self.compute_af_manifest(geo_list)
        nonlocals = [x for x in manifest if not self.available_locally(osp.join(self.ingest_dir, x))]
        logging.info('Retrieving active fire data from %s' % (self.url_base_hdf + '/' + self.filepath_af))
        list(map(lambda x: self.download_file(self.url_base_hdf + '/' + self.filepath_af + '/' + x[7:11] + '/' + x[11:14], x), nonlocals))
        return manifest

    def compute_af_manifest(self, geo_list):
        """
        Get list of active fire file names from a set of geolocation files.

        :param geo_list: list containing geolocation file names
        :return: list of MYD14 file names as strings
        """
        prefix = ''
        file_list = []
        for g in geo_list:
            # only fetch a new directory listing when the year/day prefix changes
            if g[:19] != prefix:
                prefix = g[:19]
                file_list.extend(get_dList(self.url_base_hdf + '/' + self.filepath_af + '/' + str(prefix[7:11]) + '/' + str(prefix[11:14])))
        manifest = []
        # Search for what the name should look like and use that index to add
        # that name to the manifest; n binary searches, n*log(n) total
        for g in geo_list:
            manifest.append(file_list[bisect(file_list, 'MYD14' + g[5:24] + '99999999999999.hdf') - 1])
        return manifest

    def retrieve_l0(self, from_utc, to_utc):
        """
        Attempts to retrieve the files to satisfy the simulation request from from_utc to to_utc.

        :param from_utc: start time
        :param to_utc: end time
        :return: list of paths to local level0 files
        """
        # This only works for requests going back about the last two weeks;
        # a source for older data could be added later if ever needed.
        if from_utc < datetime.utcnow() - timedelta(days=14):
            raise data_sourceError('Requested data older than two weeks')
        manifest_m = self.compute_l0_manifest_m(from_utc, to_utc)
        manifest_g = self.compute_l0_manifest_g(from_utc, to_utc)
        nonlocals_m = [x for x in manifest_m if not self.available_locally(osp.join(self.ingest_dir, x))]
        nonlocals_g = [x for x in manifest_g if not self.available_locally(osp.join(self.ingest_dir, x))]
        logging.info('Retrieving level0s from %s' % (self.url_base_l0 + '/' + self.filepath_l0_m))
        list(map(lambda x: self.download_file(self.url_base_l0 + '/' + self.filepath_l0_m, x), nonlocals_m))
        logging.info('Retrieving level0s from %s' % (self.url_base_l0 + '/' + self.filepath_l0_g))
        list(map(lambda x: self.download_file(self.url_base_l0 + '/' + self.filepath_l0_g, x), nonlocals_g))
        return manifest_m + manifest_g

    def compute_l0_manifest_m(self, from_utc, to_utc):
        """
        Compute list of MODIS files in the source for the given time frame.

        :param from_utc: time UTC format
        :param to_utc: time UTC format
        :return: list of file names as strings
        """
        # We want every filename landing between from_utc and to_utc
        dList = get_dList(self.url_base_l0 + '/' + self.filepath_l0_m)
        # Invent the name a file starting exactly at from_utc would have;
        # names follow the pattern P1540064AAAAAAAAAAAAAAyyDDDhhmmss000.PDS
        current_time = from_utc
        days = (current_time - datetime(current_time.year, 1, 1)).days + 1
        year = current_time.year % 100
        filename = 'P1540064AAAAAAAAAAAAAA%02d%03d%02d%02d%02d000.PDS' % (year, days,
                                                                          current_time.hour,
                                                                          current_time.minute,
                                                                          current_time.second)
        # Binary search for where that name would be inserted in the listing
        index = bisect(dList, filename)
        # If the invented name is absent (very likely), step back one pair so the
        # file covering from_utc is included (-2: files come in 000/001 pairs)
        if index == len(dList):
            index = index - 2
        elif dList[index] != filename:
            index = index - 2
        level0manifest = []
        while current_time < to_utc:
            # Add 000.PDS file
            level0manifest.append(dList[index])
            # Add 001.PDS file
            level0manifest.append(dList[index + 1])
            # Move index to start of next pair
            index = index + 2
            if index >= len(dList):
                break
            current_file = dList[index]
            # Decode the start time of the next file and compare it to to_utc;
            # once it exceeds to_utc we have all of the files we care about
            current_time = current_time.replace(year=2000 + int(current_file[22:24]))
            current_time = current_time.replace(day=1, month=1)
            current_time = current_time + timedelta(days=int(current_file[24:27]) - 1)
            current_time = current_time.replace(hour=int(current_file[27:29]),
                                                minute=int(current_file[29:31]),
                                                second=int(current_file[31:33]))
        return level0manifest

    def compute_l0_manifest_g(self, from_utc, to_utc):
        """
        Compute list of GBAD files (AQUA specific) in the source for the given time frame.

        :param from_utc: time UTC format
        :param to_utc: time UTC format
        :return: list of file names as strings
        """
        dList = get_dList(self.url_base_l0 + '/' + self.filepath_l0_g)
        # Invent the name a file starting exactly at from_utc would have;
        # GBAD names follow the pattern P1540957AAAAAAAAAAAAAAyyDDDhhmmss000.PDS
        current_time = from_utc
        days = (current_time - datetime(current_time.year, 1, 1)).days + 1
        year = current_time.year % 100
        filename = 'P1540957AAAAAAAAAAAAAA%02d%03d%02d%02d%02d000.PDS' % (year, days,
                                                                          current_time.hour,
                                                                          current_time.minute,
                                                                          current_time.second)
        index = bisect(dList, filename)
        # If the invented name is absent, step back so the file covering from_utc
        # is included (-4: each time has 4 GBAD files, of which only 2 are wanted)
        if index == len(dList):
            index = index - 4
        elif dList[index] != filename:
            index = index - 4
        level0manifest = []
        while current_time < to_utc:
            # Add 000.PDS file
            level0manifest.append(dList[index])
            # Add 001.PDS file
            level0manifest.append(dList[index + 1])
            # Skip ahead 4 entries: of each time's 4 GBAD files only 2 matter.
            # Running out of filenames before reaching to_utc is fine, just stop.
            index = index + 4
            if index >= len(dList):
                break
            current_file = dList[index]
            # Decode the start time of the next file; once it exceeds to_utc
            # we have all of the files we care about
            current_time = current_time.replace(year=2000 + int(current_file[22:24]))
            current_time = current_time.replace(day=1, month=1)
            current_time = current_time + timedelta(days=int(current_file[24:27]) - 1)
            current_time = current_time.replace(hour=int(current_file[27:29]),
                                                minute=int(current_file[29:31]),
                                                second=int(current_file[31:33]))
        return level0manifest

    # FTP server hosting the raw level0 PDS files
    url_base_l0 = 'ftp://is.sci.gsfc.nasa.gov'
    filepath_l0_m = 'gsfcdata/aqua/modis/level0'
    filepath_l0_g = 'gsfcdata/aqua/gbad'
    # FTP server hosting the geolocation and active fire hdf archives
    url_base_hdf = 'ftp://ladsweb.nascom.nasa.gov'
    filepath_geo = 'allData/6/MYD03'
    filepath_af = 'allData/6/MYD14'
class VIIRS_NPP(data_source):
    """
    375m data from VIIRS instrument on the NPP satellite
    """
    def __init__(self, ingest_dir):
        # inherit ingest-directory handling from data_source
        super(VIIRS_NPP, self).__init__(ingest_dir)
    def retrieve_l0(self, from_utc, to_utc):
        """
        Attempts to retrieve the files to satisfy the simulation request from from_utc to to_utc.

        :param from_utc: start time
        :param to_utc: end time
        :return: list of paths to local level0 files
        """
        # This only works for requests going back about the last two weeks
        # can add a source for older data later, but I don't think it's needed,
        # given the purpose of the project.
        if from_utc < datetime.utcnow() - timedelta(days=14):
            raise data_sourceError('Requested data older than two weeks')
        manifest = self.compute_l0_manifest(from_utc, to_utc)
        nonlocals = [x for x in manifest if not self.available_locally(osp.join(self.ingest_dir, x))]
        # note: '%' binds before '+', so the filepath is appended to the already
        # formatted message -- the resulting log line is still the full URL
        logging.info('Retrieving level0s from %s' % self.url_base_l0 + '/' + self.filepath_l0)
        list(map(lambda x:self.download_file(self.url_base_l0 + '/' + self.filepath_l0, x), nonlocals))
        return manifest
    def compute_l0_manifest(self, from_utc, to_utc):
        """
        Compute list of files in the source for the given time frame

        :param from_utc: time UTC format
        :param to_utc: time UTC format
        :return: list of file names as strings
        """
        # We want a list of all of the filenames which land between from_utc and to_utc
        # Retrieve the directory listing
        # NOTE(review): the bisect below assumes get_dList returns a sorted listing; verify
        dList = get_dList(self.url_base_l0 + '/' + self.filepath_l0)
        # Gameplan:
        # What would a file that starts exactly at from_utc look like?
        # format: RNSCA-RVIRS_npp_dYYYYMMdd_thhmmssS_ehhmmssS_bnnnnn_cnnnnnnnnnnnnnnnnnnnn_aaaa_aaa.h5
        filename = 'RNSCA-RVIRS_npp_d%04d%02d%02d_t%02d00000_e000000_b00000_c00000000000000000000_aaaa_aaa.h5' % (from_utc.year,
                                                                                                                  from_utc.month,
                                                                                                                  from_utc.day,
                                                                                                                  from_utc.hour)
        # Then, we find out where that filename would go in the dList
        # This call binary searches dList for filename, and returns it's index (pretty efficient)
        # If the filename is not found, it returns the index of the first file larger than it
        index = bisect(dList, filename)
        # If the filename we made up is not in the list (very likely), we actually want the first file
        # smaller than the filename, so we still get the data for that time period
        if index == len(dList):
            index = index - 1
        elif dList[index] != filename:
            index = index - 1
        current_time = from_utc
        level0manifest = []
        # there are strange gaps in times between files that I can't reconcile
        # so I just take the start of the next file as current_time
        while current_time < to_utc:
            # Get the file
            level0manifest.append(dList[index])
            index = index + 1
            if index >= len(dList):
                break
            current_file = dList[index]
            # Change time to match the next file, use that time to compare to to_utc
            # If the time of the next file is bigger than to_utc, then we have all of the files we care about
            # date/time fields live at fixed offsets of the filename (see format above)
            current_time = current_time.replace(year=int(current_file[17:21]),
                                                month=int(current_file[21:23]),
                                                day=int(current_file[23:25]),
                                                hour=int(current_file[27:29]),
                                                minute=int(current_file[29:31]),
                                                second=int(current_file[31:33]))
        return level0manifest
    # FTP server and directory for NPP VIIRS level0 data
    url_base_l0 = 'ftp://is.sci.gsfc.nasa.gov'
    filepath_l0 = 'gsfcdata/npp/viirs/level0'
def geo_intersects(filename, lonlat):
    """
    Checks a geolocation file for overlap with a latitude/longitude box.

    :param filename: name of the hdf geolocation file to check
    :param lonlat: list, [leftlon, rightlon, botlat, toplat]
    :return: boolean, True if there was overlap
    """
    logging.info("Checking %s for intersection with given lonlat" % filename)
    if filename[-4:] != '.hdf':
        logging.info("ERROR: %s is not an hdf file" % filename)
        return False
    try:
        hdf = SD.SD(filename)
    except:
        logging.info("ERROR: failed to load file: %s" % filename)
        return False
    lon = hdf.select('Longitude')
    lat = hdf.select('Latitude')
    dim1 = len(lon[:])
    dim2 = len(lon[0])
    # corner values -- assumes the granule is stored north-to-south /
    # west-to-east (TODO confirm against the MODIS geolocation layout)
    minlon = float(lon[0][0])
    maxlon = float(lon[dim1 - 1][dim2 - 1])
    minlat = float(lat[dim1 - 1][dim2 - 1])
    maxlat = float(lat[0][0])
    if minlon > maxlon:
        logging.info("File %s crosses dateline (not currently supported), skipping..." % filename)
        return False
    lonoverlap = minlon < lonlat[1] and maxlon > lonlat[0]
    latoverlap = minlat < lonlat[3] and maxlat > lonlat[2]
    intersects = lonoverlap and latoverlap
    # BUG FIX: both messages below were missing the '% filename' argument and
    # logged the literal '%s' placeholder
    if intersects:
        logging.info("File %s intersects given lonlat" % filename)
    else:
        logging.info("File %s does not intersect given lonlat" % filename)
    return intersects
def manifest_from_geo(geo_list, granule_name, url_base_hdf='ftp://ladsweb.nascom.nasa.gov', filepath=None):
    """
    Build a manifest of granule file names matching a set of geolocation files.

    BUG FIX: the original referenced ``self`` although this is a module-level
    function, so any call raised a NameError.  The server location is now taken
    from keyword arguments defaulting to the LAADS archive used elsewhere in
    this module, keeping the original two-argument call signature working.

    :param geo_list: sorted list of geolocation file names
    :param granule_name: granule prefix to look up, e.g. 'MOD14' or 'MYD14'
    :param url_base_hdf: base URL of the hdf archive
    :param filepath: directory of the granule on the server,
                     defaults to 'allData/6/<granule_name>'
    :return: list of matching granule file names
    """
    if filepath is None:
        filepath = 'allData/6/' + granule_name
    prefix = ''
    file_list = []
    for g in geo_list:
        # fetch a new directory listing whenever the year/day prefix changes
        if g[:19] != prefix:
            prefix = g[:19]
            file_list.extend(get_dList(url_base_hdf + '/' + filepath + '/' + str(prefix[7:11]) + '/' + str(prefix[11:14])))
    manifest = []
    # Search for what the name should look like and use that index to add that
    # name to the manifest; n binary searches, n*log(n) total
    for g in geo_list:
        manifest.append(file_list[bisect(file_list, granule_name + g[5:24] + '99999999999999.hdf') - 1])
    return manifest
# wisdom src/vis, postprocessor
# def compute_af_manifest(self, geo_list):
# """
# get list of active fire file names from a set of geolocation files
#
# :param geo_list: list containing geolocation file names
# """
#
# prefix = ''
# file_list = []
#
# for g in geo_list:
# if g[:19] != prefix:
# prefix = g[:19]
# file_list.extend(get_dList(self.url_base_hdf + '/' + self.filepath_af + '/' + str(prefix[7:11]) + '/' + str(prefix[11:14])))
#
# manifest = []
#
# # Search for what the name should look like and use that index to add that name to the manifest
# # this takes n*log(n) time, which I think is pretty good
# for g in geo_list:
# manifest.append(file_list[bisect(file_list, 'MYD14' + g[5:24] + '99999999999999.hdf') - 1])
#
# return manifest
|
vejmelkam/wrfxpy
|
src/ingest/level0_source.py
|
Python
|
mit
| 36,889
|
[
"Dalton"
] |
81050ef567626c360040df0f1b3305f45edf6f89e6348d1a91c0170d28d1b3df
|
# $HeadURL$
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
gBaseLocalSiteSection = "/LocalSite"
def gridEnv():
  """
  Return location of gridenv file to get a UI environment
  """
  # the GridEnv option lives under /LocalSite; fall back to an empty string
  optionPath = cfgPath( gBaseLocalSiteSection, 'GridEnv' )
  return gConfig.getValue( optionPath, '' )
|
Sbalbp/DIRAC
|
ConfigurationSystem/Client/Helpers/Local.py
|
Python
|
gpl-3.0
| 392
|
[
"DIRAC"
] |
817d10cda55ceb7df721330604ba1a4554397d5650e2b04ac87c320df7845394
|
#
# Copyright 2014-2015 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
from astropy.modeling.models import Gaussian2D
from ...constants import FWHM_G
from ..fwhm import compute_fwhm_2d_simple
from ..fwhm import compute_fwhm_1d_simple
def test_fwhm_2d_simple():
    """Recover the FWHM of a 2D Gaussian with known widths."""
    x0, y0 = 120.2, 122.3
    sx, sy = 25.2, 12.8
    model = Gaussian2D(amplitude=1.0, x_mean=x0, y_mean=y0,
                       x_stddev=sx, y_stddev=sy)
    y, x = np.mgrid[:250, :250]
    img = model(x, y)
    _peak, fwhmx, fwhmy = compute_fwhm_2d_simple(img, x0, y0)
    # FWHM_G converts a Gaussian sigma to its full width at half maximum
    assert_allclose(fwhmx, FWHM_G * sx, rtol=1e-3)
    assert_allclose(fwhmy, FWHM_G * sy, rtol=1e-3)
def test_fwhm_1d_simple():
    """Recover peak and FWHM from box and Gaussian 1D profiles."""
    # Square box profile: FWHM equals the box width
    ref_peak = 1.0
    ref_center = 6.0
    profile = np.zeros((15,))
    profile[4:9] = ref_peak
    peak, fwhm = compute_fwhm_1d_simple(profile, ref_center)
    assert_almost_equal(peak, ref_peak)
    assert_allclose(fwhm, 5.0)
    # Gaussian profile sampled on an abscissa starting at 0
    xs = np.arange(0, 250, 1.0)
    mu = 120.2
    sig = 25.3
    profile = np.exp(-0.5 * ((xs - mu) / sig) ** 2)
    peak, fwhm = compute_fwhm_1d_simple(profile, mu)
    assert_allclose(fwhm, FWHM_G * sig, rtol=1e-4)
    # Same Gaussian, abscissa not starting at 0 (passed explicitly)
    xs = np.arange(10, 260, 1.0)
    mu = 130.2
    sig = 25.3
    profile = np.exp(-0.5 * ((xs - mu) / sig) ** 2)
    peak, fwhm = compute_fwhm_1d_simple(profile, mu, xs)
    assert_allclose(fwhm, FWHM_G * sig, rtol=1e-4)
|
nicocardiel/numina
|
numina/array/tests/test_fwhm.py
|
Python
|
gpl-3.0
| 1,818
|
[
"Gaussian"
] |
03881d42e149319c07630a2b6a582e2ed7178a7d241b3c8615f95d0d5447dab1
|
r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set and therefore,
the corresponding Mahalanobis distances are. One would better have to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standards estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purpose, the cubic root of the Mahalanobis distances
are represented in the boxplot, as Wilson and Hilferty suggest [2]
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)

import matplotlib.pyplot as plt
import numpy as np

from sklearn.covariance import EmpiricalCovariance, MinCovDet

# experiment dimensions: 125 2-D samples, of which the last 25 are outliers
n_samples = 125
n_outliers = 25
n_features = 2

# generate data
# inliers are drawn from N(0, gen_cov), with variance 2 along the first axis
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
# the contaminating distribution has a much larger variance (7) off the first axis
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)

# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)

# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)

###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)

# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
                              color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
                               color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")

# Show contours of the distance functions over a grid covering the plot
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
                     np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]

# Mahalanobis distances under the (non-robust) maximum likelihood estimate
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
                                  cmap=plt.cm.PuBu_r,
                                  linestyles='dashed')

# Mahalanobis distances under the robust (MCD) estimate
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
                                 cmap=plt.cm.YlOrBr_r, linestyles='dotted')

subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
                inlier_plot, outlier_plot],
               ['MLE dist', 'robust dist', 'inliers', 'outliers'],
               loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())

# Plot the scores for each point
# the cube root of the distances is shown (Wilson and Hilferty normalization)
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
             emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
             emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())

# same boxplot, but with MCD-based distances centered on the robust location
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
                widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
             robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
             robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())

plt.show()
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/examples/covariance/plot_mahalanobis_distances.py
|
Python
|
mit
| 6,231
|
[
"Gaussian"
] |
9dfb0b518c4842d31aafc1b85a1b20a78b6c7c47f7bd4b0124dc0207d5a2e9ea
|
""" This module loads all the classes from the VTK VolumeRendering library into
its namespace. This is an optional module."""
from vtkVolumeRenderingPython import *
|
b3c/VTK-5.8
|
Wrapping/Python/vtk/volumerendering.py
|
Python
|
bsd-3-clause
| 167
|
[
"VTK"
] |
6a910a27e71081b812cb7c765d37d58be5067d928e8a10501c2cf6cc464e7d28
|
"""Use first Brillouin zone (Wigner–Seitz cell) to locate q-points."""
# Copyright (C) 2013 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from phonopy.structure.cells import get_reduced_bases
# The 27 candidate lattice translations (each component in {-1, 0, 1}) tried
# when looking for the shortest image of a q-point; [0, 0, 0] comes first.
search_space = np.array(
    [
        [0, 0, 0],
        [0, 0, 1],
        [0, 1, -1],
        [0, 1, 0],
        [0, 1, 1],
        [1, -1, -1],
        [1, -1, 0],
        [1, -1, 1],
        [1, 0, -1],
        [1, 0, 0],
        [1, 0, 1],
        [1, 1, -1],
        [1, 1, 0],
        [1, 1, 1],
        [-1, -1, -1],
        [-1, -1, 0],
        [-1, -1, 1],
        [-1, 0, -1],
        [-1, 0, 0],
        [-1, 0, 1],
        [-1, 1, -1],
        [-1, 1, 0],
        [-1, 1, 1],
        [0, -1, -1],
        [0, -1, 0],
        [0, -1, 1],
        [0, 0, -1],
    ],
    dtype="intc",
)
def get_qpoints_in_Brillouin_zone(
    reciprocal_lattice, qpoints, only_unique=False, tolerance=0.01
):
    """Move qpoints to first Brillouin zone by lattice translation.

    Parameters
    ----------
    reciprocal_lattice : array_like
        Reciprocal primitive cell basis vectors given in column vectors.
        shape=(3,3), dtype=float
    qpoints : array_like
        q-points in fractional coordinates of the reciprocal lattice.
    only_unique : bool, optional
        When True, return exactly one representative image per q-point.
        Default is False.
    tolerance : float, optional
        Relative tolerance used to decide distance ties on the BZ surface.
        Default is 0.01.

    """
    # BUG FIX: ``tolerance`` was accepted but never forwarded to BrillouinZone,
    # so callers could not actually change the tie-breaking tolerance.
    bz = BrillouinZone(reciprocal_lattice, tolerance=tolerance)
    bz.run(qpoints)
    if only_unique:
        return np.array(
            [pts[0] for pts in bz.shortest_qpoints], dtype="double", order="C"
        )
    else:
        return bz.shortest_qpoints
class BrillouinZone:
    """Translate q-points into the first Brillouin zone.

    Attributes
    ----------
    shortest_qpoints : list
        One entry per input q-point.  Strictly inside the BZ an entry holds a
        single q-point; on the BZ surface it holds every image (related by a
        non-zero reciprocal lattice translation) at numerically minimal
        distance.

    """

    def __init__(self, reciprocal_lattice, tolerance=0.01):
        """Prepare transformation matrices between input and reduced bases.

        Parameters
        ----------
        reciprocal_lattice : array_like
            Reciprocal primitive cell basis vectors given in column vectors.
            shape=(3,3), dtype=float
        tolerance : float, optional
            Relative tolerance for distance ties. Default = 0.01

        """
        self._reciprocal_lattice = np.array(reciprocal_lattice)
        # absolute tolerance: scale by the squared length of the shortest basis vector
        self._tolerance = min(np.sum(reciprocal_lattice ** 2, axis=0)) * tolerance
        # reduced basis via get_reduced_bases -- presumably this is what makes
        # the 27-translation search_space sufficient; verify against that helper
        self._reduced_bases = get_reduced_bases(reciprocal_lattice.T)
        # change-of-basis matrix (input basis -> reduced basis) and its inverse
        self._tmat = np.dot(
            np.linalg.inv(self._reciprocal_lattice), self._reduced_bases.T
        )
        self._tmat_inv = np.linalg.inv(self._tmat)
        self.shortest_qpoints = None

    def run(self, qpoints):
        """Find q-points inside Wigner–Seitz cell."""
        # express q-points in the reduced basis and wrap into [-0.5, 0.5)
        reduced = np.dot(qpoints, self._tmat_inv.T)
        reduced -= np.rint(reduced)
        self.shortest_qpoints = []
        for q in reduced:
            # squared Cartesian distances of the 27 candidate images
            dist2 = (np.dot(q + search_space, self._reduced_bases) ** 2).sum(axis=1)
            winners = np.where(dist2 < min(dist2) + self._tolerance)[0]
            # map the shortest image(s) back to the original basis
            self.shortest_qpoints.append(
                np.dot(search_space[winners] + q, self._tmat.T)
            )
|
atztogo/phonopy
|
phonopy/structure/brillouin_zone.py
|
Python
|
bsd-3-clause
| 4,584
|
[
"phonopy"
] |
30e341a0c285beaaa89bfbc292e945e9951848f0803f33ec9bbcbe9d89d9bdcf
|
###
# Copyright (c) 2010, Daniel Folkinshteyn
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
    """Register the OTCOrderBook plugin and its global configuration values.

    Called by supybot's configuration wizard; ``advanced`` indicates whether
    the user identified as an advanced user (unused here).
    """
    # This will be called by supybot to configure this module. advanced is
    # a bool that specifies whether the user identified himself as an advanced
    # user or not. You should effect your configuration by manipulating the
    # registry as appropriate.
    from supybot.questions import expect, anything, something, yn
    conf.registerPlugin('OTCOrderBook', True)
    OTCOrderBook = conf.registerPlugin('OTCOrderBook')
    # This is where your configuration variables (if any) should go. For example:
    # conf.registerGlobalValue(OTCOrderBook, 'someConfigVariableName',
    # registry.Boolean(False, """Help for someConfigVariableName."""))
    conf.registerGlobalValue(OTCOrderBook, 'orderExpiry',
    registry.NonNegativeInteger(604800, """Time until order expiry. Unless a user
    calls 'refresh', orders will expire after this many seconds. Set to 0 for no
    expiry. It's a good idea to have this set to avoid seeing your database
    overgrow with old cruft."""))
    conf.registerGlobalValue(OTCOrderBook, 'minTrustForLongOrders',
    registry.NonNegativeInteger(15, """Minimum total level 1 and level 2
    trust from nanotube to be able to place long duration orders."""))
    conf.registerGlobalValue(OTCOrderBook, 'longOrderDuration',
    registry.NonNegativeInteger(7776000, """Extra time on top of standard
    order expiry, allotted to long-duration orders. Time in seconds."""))
    conf.registerGlobalValue(OTCOrderBook, 'maxUserOpenOrders',
    registry.NonNegativeInteger(4, """Only allow this many open orders per user.
    It's a good idea to have this on, to avoid order flooding from a rogue
    user."""))
    conf.registerGlobalValue(OTCOrderBook, 'maxOrdersInBookList',
    registry.NonNegativeInteger(4, """Only allow this many orders in a currency
    order book to be spit out to channel. If more than that exist, suggest to
    visit the nice website."""))
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
tecan/xchat-rt
|
plugins/scripts/encryption/supybot-bitcoin-marketmonitor-master/OTCOrderBook/config.py
|
Python
|
gpl-2.0
| 3,613
|
[
"VisIt"
] |
b351dacb4bd075b795e6be0fbd446fc4b9b433056797317d029b09c19846daed
|
#!/usr/bin/env python
"""
Install.py tool to build the GPU library
used to automate the steps described in the README file in this dir
"""
from __future__ import print_function
import sys, os, subprocess, shutil
from argparse import ArgumentParser
sys.path.append('..')
from install_helpers import get_cpus
parser = ArgumentParser(prog='Install.py',
                        description="LAMMPS library build wrapper script")
# help message
HELP = """
Syntax from src dir: make lib-gpu args="-m machine -h hdir -a arch -p precision -e esuffix -b -o osuffix"
Syntax from lib dir: python Install.py -m machine -h hdir -a arch -p precision -e esuffix -b -o osuffix
specify one or more options, order does not matter
copies an existing Makefile.machine in lib/gpu to Makefile.auto
optionally edits these variables in Makefile.auto:
  CUDA_HOME, CUDA_ARCH, CUDA_PRECISION, EXTRAMAKE
optionally uses Makefile.auto to build the GPU library -> libgpu.a
  and to copy a Makefile.lammps.esuffix -> Makefile.lammps
optionally copies Makefile.auto to a new Makefile.osuffix
See lib/gpu/README and the LAMMPS manual for more information
on which settings to use and how to build.
Examples:
make lib-gpu args="-b" # build GPU lib with default Makefile.linux
make lib-gpu args="-m xk7 -p single -o xk7.single" # create new Makefile.xk7.single, altered for single-precision
make lib-gpu args="-m mpi -a sm_35 -p single -o mpi.mixed -b" # create new Makefile.mpi.mixed, also build GPU lib with these settings
"""
# parse and process arguments
parser.add_argument("-b", "--build", action="store_true",
                    help="build the GPU library from scratch from a customized Makefile.auto")
parser.add_argument("-m", "--machine", default='linux',
                    help="suffix of Makefile.machine used as base for customizing Makefile.auto")
parser.add_argument("-a", "--arch", default='sm_50',
                    choices=['sm_12', 'sm_13', 'sm_20', 'sm_21', 'sm_30', 'sm_35', 'sm_37',
                             'sm_50', 'sm_52', 'sm_60', 'sm_61', 'sm_70', 'sm_75', 'sm_80'],
                    help="set GPU architecture and instruction set (default: 'sm_50')")
parser.add_argument("-p", "--precision", default='mixed', choices=['single', 'mixed', 'double'],
                    help="set GPU kernel precision mode (default: mixed)")
parser.add_argument("-e", "--extramake", default='standard',
                    help="set EXTRAMAKE variable in Makefile.auto to Makefile.lammps.<extramake>")
parser.add_argument("-c", "--cuda",
                    help="set CUDA_HOME variable in Makefile.auto. Will be used if $CUDA_HOME environment variable is not set")
parser.add_argument("-o", "--output",
                    help="if set, copy final Makefile.auto to Makefile.<output> for later re-use")
args = parser.parse_args()
# print help message and exit, if neither build nor output options are given
if not args.build and not args.output:
    parser.print_help()
    sys.exit(HELP)
# Flags controlling which Makefile.auto substitutions are applied below.
hflag = 0
# NOTE(review): eflag is never set to 1 anywhere in this script, so the
# EXTRAMAKE substitution guarded by it below is dead code and the
# -e/--extramake option is effectively ignored -- confirm intended behavior
# against upstream before "fixing".
eflag = 0
makeflag = 0
outflag = 0
if args.build:
    makeflag = 1
isuffix = args.machine
arch = args.arch
# Map the precision choice onto the preprocessor define used by the GPU lib.
if args.precision == "double":
    precstr = "-D_DOUBLE_DOUBLE"
elif args.precision == "mixed":
    precstr = "-D_SINGLE_DOUBLE"
else:
    precstr = "-D_SINGLE_SINGLE"
lmpsuffix = args.extramake
if args.cuda:
    hflag = 1
    hdir = args.cuda
if args.output:
    outflag = 1
    osuffix = args.output
# create Makefile.auto
# reset EXTRAMAKE, CUDA_HOME, CUDA_ARCH, CUDA_PRECISION if requested
if not os.path.exists("Makefile.%s" % isuffix):
    sys.exit("lib/gpu/Makefile.%s does not exist" % isuffix)
lines = open("Makefile.%s" % isuffix, 'r').readlines()
fp = open("Makefile.auto", 'w')
for line in lines:
    words = line.split()
    # Only "VAR = value" lines (exactly 3 tokens) are candidates for edits;
    # everything else is copied through unchanged.
    if len(words) != 3:
        fp.write(line)
        continue
    if hflag and words[0] == "CUDA_HOME" and words[1] == '=':
        line = line.replace(words[2], hdir)
    if words[0] == "CUDA_ARCH" and words[1] == '=':
        line = line.replace(words[2], "-arch=%s" % arch)
    if words[0] == "CUDA_PRECISION" and words[1] == '=':
        line = line.replace(words[2], precstr)
    if eflag and words[0] == "EXTRAMAKE" and words[1] == '=':
        line = line.replace(words[2], "Makefile.lammps.%s" % lmpsuffix)
    fp.write(line)
fp.close()
# perform make
# make operations copies EXTRAMAKE file to Makefile.lammps
if makeflag:
    print("Building libgpu.a ...")
    if os.path.exists("libgpu.a"):
        os.remove("libgpu.a")
    n_cpus = get_cpus()
    cmd = "make -f Makefile.auto clean; make -f Makefile.auto -j%d" % n_cpus
    try:
        txt = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        print(txt.decode('UTF-8'))
    except subprocess.CalledProcessError as e:
        print("Make failed with:\n %s" % e.output.decode('UTF-8'))
        sys.exit(1)
    # Sanity-check the build artifacts that the make step must produce.
    if not os.path.exists("libgpu.a"):
        sys.exit("Build of lib/gpu/libgpu.a was NOT successful")
    if not os.path.exists("Makefile.lammps"):
        sys.exit("lib/gpu/Makefile.lammps was NOT created")
# copy new Makefile.auto to Makefile.osuffix
if outflag:
    print("Creating new Makefile.%s" % osuffix)
    shutil.copyfile("Makefile.auto", "Makefile.%s" % osuffix)
|
jeremiahyan/lammps
|
lib/gpu/Install.py
|
Python
|
gpl-2.0
| 5,134
|
[
"LAMMPS"
] |
589abb464fde2768584fe1d7e16d03ad3d5947588acb7d476b365861d75f5d34
|
"""
Base classes for job runner plugins.
"""
import os
import time
import string
import logging
import datetime
import threading
import subprocess
from Queue import Queue, Empty
import galaxy.jobs
from galaxy.jobs.command_factory import build_command
from galaxy import model
from galaxy.util import DATABASE_MAX_STRING_SIZE, shrink_stream_by_size
from galaxy.util import in_directory
from galaxy.util import ParamsWithSpecs
from galaxy.util.bunch import Bunch
from galaxy.jobs.runners.util.job_script import job_script
from galaxy.jobs.runners.util.env import env_to_statement
from .state_handler_factory import build_state_handlers
log = logging.getLogger( __name__ )
STOP_SIGNAL = object()
JOB_RUNNER_PARAMETER_UNKNOWN_MESSAGE = "Invalid job runner parameter for this plugin: %s"
JOB_RUNNER_PARAMETER_MAP_PROBLEM_MESSAGE = "Job runner parameter '%s' value '%s' could not be converted to the correct type"
JOB_RUNNER_PARAMETER_VALIDATION_FAILED_MESSAGE = "Job runner parameter %s failed validation"
class RunnerParams( ParamsWithSpecs ):
    """Runner parameter container that raises loudly on bad parameters."""

    def _param_unknown_error( self, name ):
        # The destination supplied a parameter this plugin does not declare.
        message = JOB_RUNNER_PARAMETER_UNKNOWN_MESSAGE % name
        raise Exception( message )

    def _param_map_error( self, name, value ):
        # The value could not be converted by the spec's map function.
        message = JOB_RUNNER_PARAMETER_MAP_PROBLEM_MESSAGE % ( name, value )
        raise Exception( message )

    def _param_vaildation_error( self, name, value ):
        # The converted value failed the spec's validation function.
        # NOTE: the misspelled method name is kept -- presumably it is looked
        # up by this exact name in the ParamsWithSpecs base class.
        message = JOB_RUNNER_PARAMETER_VALIDATION_FAILED_MESSAGE % name
        raise Exception( message )
class BaseJobRunner( object ):
    """Base class for job runner plugins.

    Provides the worker-thread pool, job preparation helpers and shared
    utilities; subclasses implement ``queue_job``, ``stop_job`` and
    ``recover``.
    """
    # Default runner parameter specs; subclasses extend these by passing
    # ``runner_param_specs`` in kwargs.
    DEFAULT_SPECS = dict( recheck_missing_job_retries=dict( map=int, valid=lambda x: x >= 0, default=0 ) )
    def __init__( self, app, nworkers, **kwargs ):
        """Start the job runner
        """
        self.app = app
        self.sa_session = app.model.context
        self.nworkers = nworkers
        # Merge subclass/destination-provided specs over the defaults, then
        # validate the remaining kwargs against the merged specs.
        runner_param_specs = self.DEFAULT_SPECS.copy()
        if 'runner_param_specs' in kwargs:
            runner_param_specs.update( kwargs.pop( 'runner_param_specs' ) )
        if kwargs:
            log.debug( 'Loading %s with params: %s', self.runner_name, kwargs )
        self.runner_params = RunnerParams( specs=runner_param_specs, params=kwargs )
        self.runner_state_handlers = build_state_handlers()
    def _init_worker_threads(self):
        """Start ``nworkers`` worker threads.
        """
        self.work_queue = Queue()
        self.work_threads = []
        log.debug('Starting %s %s workers' % (self.nworkers, self.runner_name))
        for i in range(self.nworkers):
            # Daemon threads so a hung worker cannot block interpreter exit.
            worker = threading.Thread( name="%s.work_thread-%d" % (self.runner_name, i), target=self.run_next )
            worker.setDaemon( True )
            worker.start()
            self.work_threads.append( worker )
    def run_next(self):
        """Run the next item in the work queue (a job waiting to run)
        """
        while 1:
            ( method, arg ) = self.work_queue.get()
            if method is STOP_SIGNAL:
                return
            # id and name are collected first so that the call of method() is the last exception.
            try:
                # arg should be a JobWrapper/TaskWrapper
                job_id = arg.get_id_tag()
            except:
                job_id = 'unknown'
            try:
                name = method.__name__
            except:
                name = 'unknown'
            try:
                # Any exception here is logged but must not kill the worker
                # thread, which loops forever until STOP_SIGNAL.
                method(arg)
            except:
                log.exception( "(%s) Unhandled exception calling %s" % ( job_id, name ) )
    # Causes a runner's `queue_job` method to be called from a worker thread
    def put(self, job_wrapper):
        """Add a job to the queue (by job identifier), indicate that the job is ready to run.
        """
        # Change to queued state before handing to worker thread so the runner won't pick it up again
        job_wrapper.change_state( model.Job.states.QUEUED )
        # Persist the destination so that the job will be included in counts if using concurrency limits
        job_wrapper.set_job_destination( job_wrapper.job_destination, None )
        self.mark_as_queued(job_wrapper)
    def mark_as_queued(self, job_wrapper):
        # Hand the wrapper to a worker thread, which will call queue_job().
        self.work_queue.put( ( self.queue_job, job_wrapper ) )
    def shutdown( self ):
        """Attempts to gracefully shut down the worker threads
        """
        log.info( "%s: Sending stop signal to %s worker threads" % ( self.runner_name, len( self.work_threads ) ) )
        # One STOP_SIGNAL per worker; each worker exits after consuming one.
        for i in range( len( self.work_threads ) ):
            self.work_queue.put( ( STOP_SIGNAL, None ) )
    # Most runners should override the legacy URL handler methods and destination param method
    def url_to_destination(self, url):
        """
        Convert a legacy URL to a JobDestination.
        Job runner URLs are deprecated, JobDestinations should be used instead.
        This base class method converts from a URL to a very basic
        JobDestination without destination params.
        """
        return galaxy.jobs.JobDestination(runner=url.split(':')[0])
    def parse_destination_params(self, params):
        """Parse the JobDestination ``params`` dict and return the runner's native representation of those params.
        """
        raise NotImplementedError()
    def prepare_job(self, job_wrapper, include_metadata=False, include_work_dir_outputs=True):
        """Some sanity checks that all runners' queue_job() methods are likely to want to do

        Returns False (after cleanup/failure handling) when the job should
        not be executed, True when preparation succeeded.
        """
        job_id = job_wrapper.get_id_tag()
        job_state = job_wrapper.get_state()
        job_wrapper.is_ready = False
        job_wrapper.runner_command_line = None
        # Make sure the job hasn't been deleted
        if job_state == model.Job.states.DELETED:
            log.debug( "(%s) Job deleted by user before it entered the %s queue" % ( job_id, self.runner_name ) )
            if self.app.config.cleanup_job in ( "always", "onsuccess" ):
                job_wrapper.cleanup()
            return False
        elif job_state != model.Job.states.QUEUED:
            log.info( "(%s) Job is in state %s, skipping execution" % ( job_id, job_state ) )
            # cleanup may not be safe in all states
            return False
        # Prepare the job
        try:
            job_wrapper.prepare()
            job_wrapper.runner_command_line = self.build_command_line(
                job_wrapper,
                include_metadata=include_metadata,
                include_work_dir_outputs=include_work_dir_outputs
            )
        except:
            log.exception("(%s) Failure preparing job" % job_id)
            job_wrapper.fail( "failure preparing job", exception=True )
            return False
        # An empty command line means there is nothing to run; finish the
        # job immediately with empty stdout/stderr.
        if not job_wrapper.runner_command_line:
            job_wrapper.finish( '', '' )
            return False
        return True
    # Runners must override the job handling methods
    def queue_job(self, job_wrapper):
        """Submit the prepared job for execution (subclass responsibility)."""
        raise NotImplementedError()
    def stop_job(self, job):
        """Stop a running job (subclass responsibility)."""
        raise NotImplementedError()
    def recover(self, job, job_wrapper):
        """Recover a job after a server restart (subclass responsibility)."""
        raise NotImplementedError()
    def build_command_line( self, job_wrapper, include_metadata=False, include_work_dir_outputs=True ):
        """Build the shell command line for the job via the command factory."""
        # TODO: Eliminate extra kwds no longer used (since LWR skips
        # abstraction and calls build_command directly).
        container = self._find_container( job_wrapper )
        return build_command(
            self,
            job_wrapper,
            include_metadata=include_metadata,
            include_work_dir_outputs=include_work_dir_outputs,
            container=container
        )
    def get_work_dir_outputs( self, job_wrapper, job_working_directory=None ):
        """
        Returns list of pairs (source_file, destination) describing path
        to work_dir output file and ultimate destination.
        """
        if not job_working_directory:
            job_working_directory = os.path.abspath( job_wrapper.working_directory )
        # Set up dict of dataset id --> output path; output path can be real or
        # false depending on outputs_to_working_directory
        output_paths = {}
        for dataset_path in job_wrapper.get_output_fnames():
            path = dataset_path.real_path
            if self.app.config.outputs_to_working_directory:
                path = dataset_path.false_path
            output_paths[ dataset_path.dataset_id ] = path
        output_pairs = []
        # Walk job's output associations to find and use from_work_dir attributes.
        job = job_wrapper.get_job()
        job_tool = job_wrapper.tool
        for dataset_assoc in job.output_datasets + job.output_library_datasets:
            for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations:
                if isinstance( dataset, self.app.model.HistoryDatasetAssociation ):
                    joda = self.sa_session.query( self.app.model.JobToOutputDatasetAssociation ).filter_by( job=job, dataset=dataset ).first()
                    if joda and job_tool:
                        hda_tool_output = job_tool.outputs.get( joda.name, None )
                        if hda_tool_output and hda_tool_output.from_work_dir:
                            # Copy from working dir to HDA.
                            # TODO: move instead of copy to save time?
                            source_file = os.path.join( job_working_directory, hda_tool_output.from_work_dir )
                            destination = job_wrapper.get_output_destination( output_paths[ dataset.dataset_id ] )
                            if in_directory( source_file, job_working_directory ):
                                output_pairs.append( ( source_file, destination ) )
                            else:
                                # Security violation.
                                log.exception( "from_work_dir specified a location not in the working directory: %s, %s" % ( source_file, job_wrapper.working_directory ) )
        return output_pairs
    def _handle_metadata_externally( self, job_wrapper, resolve_requirements=False ):
        """
        Set metadata externally. Used by the local and lwr job runners where this
        shouldn't be attached to command-line to execute.
        """
        #run the metadata setting script here
        #this is terminate-able when output dataset/job is deleted
        #so that long running set_meta()s can be canceled without having to reboot the server
        if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and job_wrapper.output_paths:
            external_metadata_script = job_wrapper.setup_external_metadata( output_fnames=job_wrapper.get_output_fnames(),
                                                                            set_extension=True,
                                                                            tmp_dir=job_wrapper.working_directory,
                                                                            #we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
                                                                            kwds={ 'overwrite' : False } )
            if resolve_requirements:
                # Prepend the dependency setup commands so the external
                # script runs with the right environment.
                dependency_shell_commands = self.app.datatypes_registry.set_external_metadata_tool.build_dependency_shell_commands()
                if dependency_shell_commands:
                    if isinstance( dependency_shell_commands, list ):
                        dependency_shell_commands = "&&".join( dependency_shell_commands )
                    external_metadata_script = "%s&&%s" % ( dependency_shell_commands, external_metadata_script )
            log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
            # Run in its own process group (os.setpgrp) so it can be killed
            # independently of the server process.
            external_metadata_proc = subprocess.Popen( args=external_metadata_script,
                                                       shell=True,
                                                       env=os.environ,
                                                       preexec_fn=os.setpgrp )
            job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid, self.sa_session )
            external_metadata_proc.wait()
            log.debug( 'execution of external set_meta for job %d finished' % job_wrapper.job_id )
    def get_job_file(self, job_wrapper, **kwds):
        """Render the job script for this job via ``job_script``."""
        job_metrics = job_wrapper.app.job_metrics
        job_instrumenter = job_metrics.job_instrumenters[ job_wrapper.job_destination.id ]
        # Collect env setup statements from kwds and the job destination.
        env_setup_commands = kwds.get( 'env_setup_commands', [] )
        env_setup_commands.append( job_wrapper.get_env_setup_clause() or '' )
        destination = job_wrapper.job_destination or {}
        envs = destination.get( "env", [] )
        for env in envs:
            env_setup_commands.append( env_to_statement( env ) )
        command_line = job_wrapper.runner_command_line
        options = dict(
            job_instrumenter=job_instrumenter,
            galaxy_lib=job_wrapper.galaxy_lib_dir,
            env_setup_commands=env_setup_commands,
            working_directory=os.path.abspath( job_wrapper.working_directory ),
            command=command_line,
        )
        ## Additional logging to enable if debugging from_work_dir handling, metadata
        ## commands, etc... (or just peak in the job script.)
        job_id = job_wrapper.job_id
        log.debug( '(%s) command is: %s' % ( job_id, command_line ) )
        options.update(**kwds)
        return job_script(**options)
    def _complete_terminal_job( self, ajs, **kwargs ):
        # Hand finished jobs off to a worker unless the user deleted the job.
        if ajs.job_wrapper.get_state() != model.Job.states.DELETED:
            self.work_queue.put( ( self.finish_job, ajs ) )
    def _find_container(
        self,
        job_wrapper,
        compute_working_directory=None,
        compute_tool_directory=None,
        compute_job_directory=None
    ):
        """Resolve the (optional) container this job should run in."""
        if not compute_working_directory:
            compute_working_directory = job_wrapper.working_directory
        if not compute_tool_directory:
            compute_tool_directory = job_wrapper.tool.tool_dir
        tool = job_wrapper.tool
        from galaxy.tools.deps import containers
        tool_info = containers.ToolInfo(tool.containers, tool.requirements)
        job_info = containers.JobInfo(compute_working_directory, compute_tool_directory, compute_job_directory)
        destination_info = job_wrapper.job_destination.params
        return self.app.container_finder.find_container(
            tool_info,
            destination_info,
            job_info
        )
    def _handle_runner_state( self, runner_state, job_state ):
        """Dispatch ``job_state`` to the registered handlers for ``runner_state``."""
        try:
            for handler in self.runner_state_handlers.get(runner_state, []):
                handler(self.app, self, job_state)
                # Stop at the first handler that marks the state as handled.
                if job_state.runner_state_handled:
                    break
        except:
            log.exception('Caught exception in runner state handler:')
    def mark_as_resubmitted( self, job_state ):
        """Requeue a job for another execution attempt."""
        job_state.job_wrapper.mark_as_resubmitted()
        if not self.app.config.track_jobs_in_database:
            job_state.job_wrapper.change_state( model.Job.states.QUEUED )
            self.app.job_manager.job_handler.dispatcher.put( job_state.job_wrapper )
class JobState( object ):
    """
    Encapsulate the state of a single job as tracked by a job runner.
    """
    # Symbolic names for the ways a runner may terminate a job early.
    runner_states = Bunch(
        WALLTIME_REACHED = 'walltime_reached',
        GLOBAL_WALLTIME_REACHED = 'global_walltime_reached',
        OUTPUT_SIZE_LIMIT = 'output_size_limit'
    )

    def __init__( self ):
        self.runner_state_handled = False

    def set_defaults( self, files_dir ):
        """Derive standard job file paths and a sanitized job name."""
        if self.job_wrapper is None:
            return
        id_tag = self.job_wrapper.get_id_tag()
        if files_dir is not None:
            self.job_file = JobState.default_job_file( files_dir, id_tag )
            self.output_file = os.path.join( files_dir, 'galaxy_%s.o' % id_tag )
            self.error_file = os.path.join( files_dir, 'galaxy_%s.e' % id_tag )
            self.exit_code_file = os.path.join( files_dir, 'galaxy_%s.ec' % id_tag )
        job_name = 'g%s' % id_tag
        if self.job_wrapper.tool.old_id:
            job_name += '_%s' % self.job_wrapper.tool.old_id
        if self.job_wrapper.user:
            job_name += '_%s' % self.job_wrapper.user
        # Replace any character that is not alphanumeric or underscore, so
        # the name is safe to hand to a DRM.
        allowed_chars = string.letters + string.digits + '_'
        self.job_name = ''.join( c if c in allowed_chars else '_' for c in job_name )

    @staticmethod
    def default_job_file( files_dir, id_tag ):
        return os.path.join( files_dir, 'galaxy_%s.sh' % id_tag )

    @staticmethod
    def default_exit_code_file( files_dir, id_tag ):
        return os.path.join( files_dir, 'galaxy_%s.ec' % id_tag )
class AsynchronousJobState( JobState ):
    """
    Encapsulate the state of an asynchronous job, this should be subclassed as
    needed for various job runners to capture additional information needed
    to communicate with distributed resource manager.
    """
    def __init__( self, files_dir=None, job_wrapper=None, job_id=None, job_file=None, output_file=None, error_file=None, exit_code_file=None, job_name=None, job_destination=None ):
        super( AsynchronousJobState, self ).__init__()
        self.old_state = None
        self._running = False
        # Counts state checks between limit evaluations; see check_limits().
        self.check_count = 0
        self.start_time = None
        self.job_wrapper = job_wrapper
        # job_id is the DRM's job id, not the Galaxy job id
        self.job_id = job_id
        self.job_destination = job_destination
        self.job_file = job_file
        self.output_file = output_file
        self.error_file = error_file
        self.exit_code_file = exit_code_file
        self.job_name = job_name
        # Fill in any unset file paths / job name from files_dir defaults.
        self.set_defaults( files_dir )
        # Files removed by cleanup(); extend via register_cleanup_file_attribute().
        self.cleanup_file_attributes = [ 'job_file', 'output_file', 'error_file', 'exit_code_file' ]
    @property
    def running( self ):
        return self._running
    @running.setter
    def running( self, is_running ):
        # Record the first time the job is observed running as its start time.
        self._running = is_running
        # This will be invalid for job recovery
        if self.start_time is None:
            self.start_time = datetime.datetime.now()
    def check_limits( self, runtime=None ):
        """Check job limits every 20th call while running.

        Returns True (and sets runner_state/fail_message/stop_job) when a
        limit has been exceeded, False otherwise.
        """
        limit_state = None
        if self.job_wrapper.has_limits():
            self.check_count += 1
            if self.running and (self.check_count % 20 == 0):
                if runtime is None:
                    runtime = datetime.datetime.now() - (self.start_time or datetime.datetime.now())
                self.check_count = 0
                limit_state = self.job_wrapper.check_limits( runtime=runtime )
        if limit_state is not None:
            # Set up the job for failure, but the runner will do the actual work
            self.runner_state, self.fail_message = limit_state
            self.stop_job = True
            return True
        return False
    def cleanup( self ):
        """Remove the job's temporary files; failures are logged, not raised."""
        for file in [ getattr( self, a ) for a in self.cleanup_file_attributes if hasattr( self, a ) ]:
            try:
                os.unlink( file )
            except Exception, e:
                log.debug( "(%s/%s) Unable to cleanup %s: %s" % ( self.job_wrapper.get_id_tag(), self.job_id, file, str( e ) ) )
    def register_cleanup_file_attribute( self, attribute ):
        """Add an attribute name to the set of files removed by cleanup()."""
        if attribute not in self.cleanup_file_attributes:
            self.cleanup_file_attributes.append( attribute )
class AsynchronousJobRunner( BaseJobRunner ):
    """Parent class for any job runner that runs jobs asynchronously (e.g. via
    a distributed resource manager). Provides general methods for having a
    thread to monitor the state of asynchronous jobs and submitting those jobs
    to the correct methods (queue, finish, cleanup) at appropriate times..
    """
    def __init__( self, app, nworkers, **kwargs ):
        super( AsynchronousJobRunner, self ).__init__( app, nworkers, **kwargs )
        # 'watched' and 'queue' are both used to keep track of jobs to watch.
        # 'queue' is used to add new watched jobs, and can be called from
        # any thread (usually by the 'queue_job' method). 'watched' must only
        # be modified by the monitor thread, which will move items from 'queue'
        # to 'watched' and then manage the watched jobs.
        self.watched = []
        self.monitor_queue = Queue()
    def _init_monitor_thread(self):
        """Start the single daemon thread that polls watched jobs."""
        self.monitor_thread = threading.Thread( name="%s.monitor_thread" % self.runner_name, target=self.monitor )
        self.monitor_thread.setDaemon( True )
        self.monitor_thread.start()
    def handle_stop(self):
        # DRMAA and SGE runners should override this and disconnect.
        pass
    def monitor( self ):
        """
        Watches jobs currently in the monitor queue and deals with state
        changes (queued to running) and job completion.
        """
        while 1:
            # Take any new watched jobs and put them on the monitor list
            try:
                while 1:
                    async_job_state = self.monitor_queue.get_nowait()
                    if async_job_state is STOP_SIGNAL:
                        # TODO: This is where any cleanup would occur
                        self.handle_stop()
                        return
                    self.watched.append( async_job_state )
            except Empty:
                pass
            # Iterate over the list of watched jobs and check state
            try:
                self.check_watched_items()
            except Exception:
                # Keep the monitor thread alive; a single bad check must not
                # stop monitoring of all jobs.
                log.exception('Unhandled exception checking active jobs')
            # Sleep a bit before the next state check
            time.sleep( 1 )
    def monitor_job(self, job_state):
        """Add a job state to the monitor queue (thread-safe)."""
        self.monitor_queue.put( job_state )
    def shutdown( self ):
        """Attempts to gracefully shut down the monitor thread"""
        log.info( "%s: Sending stop signal to monitor thread" % self.runner_name )
        self.monitor_queue.put( STOP_SIGNAL )
        # Call the parent's shutdown method to stop workers
        super( AsynchronousJobRunner, self ).shutdown()
    def check_watched_items(self):
        """
        This method is responsible for iterating over self.watched and handling
        state changes and updating self.watched with a new list of watched job
        states. Subclasses can opt to override this directly (as older job runners will
        initially) or just override check_watched_item and allow the list processing to
        reuse the logic here.
        """
        new_watched = []
        for async_job_state in self.watched:
            # check_watched_item returns None to drop a job from watching.
            new_async_job_state = self.check_watched_item(async_job_state)
            if new_async_job_state:
                new_watched.append(new_async_job_state)
        self.watched = new_watched
    # Subclasses should implement this unless they override check_watched_items all together.
    def check_watched_item(self, job_state):
        """Check one watched job; return the (possibly updated) state or None."""
        raise NotImplementedError()
    def finish_job( self, job_state ):
        """
        Get the output/error for a finished job, pass to `job_wrapper.finish`
        and cleanup all the job's temporary files.
        """
        galaxy_id_tag = job_state.job_wrapper.get_id_tag()
        external_job_id = job_state.job_id
        # To ensure that files below are readable, ownership must be reclaimed first
        job_state.job_wrapper.reclaim_ownership()
        # wait for the files to appear
        which_try = 0
        while which_try < (self.app.config.retry_job_output_collection + 1):
            try:
                # Shrink oversized outputs so they fit in the database column.
                stdout = shrink_stream_by_size( file( job_state.output_file, "r" ), DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
                stderr = shrink_stream_by_size( file( job_state.error_file, "r" ), DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
                which_try = (self.app.config.retry_job_output_collection + 1)
            except Exception, e:
                if which_try == self.app.config.retry_job_output_collection:
                    stdout = ''
                    stderr = 'Job output not returned from cluster'
                    log.error( '(%s/%s) %s: %s' % ( galaxy_id_tag, external_job_id, stderr, str( e ) ) )
                else:
                    time.sleep(1)
                which_try += 1
        try:
            # This should be an 8-bit exit code, but read ahead anyway:
            exit_code_str = file( job_state.exit_code_file, "r" ).read(32)
        except:
            # By default, the exit code is 0, which typically indicates success.
            exit_code_str = "0"
        try:
            # Decode the exit code. If it's bogus, then just use 0.
            exit_code = int(exit_code_str)
        except:
            log.warning( "(%s/%s) Exit code '%s' invalid. Using 0." % ( galaxy_id_tag, external_job_id, exit_code_str ) )
            exit_code = 0
        # clean up the job files
        if self.app.config.cleanup_job == "always" or ( not stderr and self.app.config.cleanup_job == "onsuccess" ):
            job_state.cleanup()
        try:
            job_state.job_wrapper.finish( stdout, stderr, exit_code )
        except:
            log.exception( "(%s/%s) Job wrapper finish method failed" % ( galaxy_id_tag, external_job_id ) )
            job_state.job_wrapper.fail( "Unable to finish job", exception=True )
    def fail_job( self, job_state ):
        """Fail a job, invoking registered failure handlers first."""
        if getattr( job_state, 'stop_job', True ):
            self.stop_job( self.sa_session.query( self.app.model.Job ).get( job_state.job_wrapper.job_id ) )
        self._handle_runner_state( 'failure', job_state )
        # Not convinced this is the best way to indicate this state, but
        # something necessary
        if not job_state.runner_state_handled:
            job_state.job_wrapper.fail( getattr( job_state, 'fail_message', 'Job failed' ) )
            if self.app.config.cleanup_job == "always":
                job_state.cleanup()
    def mark_as_finished(self, job_state):
        """Queue finish_job() for this job on a worker thread."""
        self.work_queue.put( ( self.finish_job, job_state ) )
    def mark_as_failed(self, job_state):
        """Queue fail_job() for this job on a worker thread."""
        self.work_queue.put( ( self.fail_job, job_state ) )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/jobs/runners/__init__.py
|
Python
|
gpl-3.0
| 26,277
|
[
"Galaxy"
] |
1190ab253a8d609b474f9673e45411c3c320f3d12c3953c325181a81eb52817e
|
# encoding: utf-8
import json
import nose
import datetime
import sqlalchemy.orm as orm
import ckan.plugins as p
import ckan.lib.create_test_data as ctd
import ckan.model as model
import ckan.tests.legacy as tests
import ckan.tests.helpers as helpers
import ckan.tests.factories as factories
from ckan.common import config
import ckanext.datastore.db as db
from ckanext.datastore.tests.helpers import rebuild_all_dbs, set_url_type
# Short alias for nose's equality assertion, used throughout this test module.
assert_equal = nose.tools.assert_equal
class TestDatastoreUpsertNewTests(object):
    """Regression tests for datastore_upsert handling of 'json'-typed fields."""
    @classmethod
    def setup_class(cls):
        # Load the datastore plugin once for the whole class.
        if not p.plugin_loaded('datastore'):
            p.load('datastore')
    @classmethod
    def teardown_class(cls):
        p.unload('datastore')
        helpers.reset_db()
    def test_upsert_doesnt_crash_with_json_field(self):
        """Insert a record whose 'json' field holds a dict (with non-ASCII text)."""
        resource = factories.Resource()
        # Create a datastore table with a 'json' column.
        data = {
            'resource_id': resource['id'],
            'force': True,
            'primary_key': 'id',
            'fields': [{'id': 'id', 'type': 'text'},
                       {'id': 'book', 'type': 'json'},
                       {'id': 'author', 'type': 'text'}],
        }
        helpers.call_action('datastore_create', **data)
        # Upsert (method=insert) must not raise on a dict-valued json field.
        data = {
            'resource_id': resource['id'],
            'force': True,
            'method': 'insert',
            'records': [
                {'id': '1',
                 'book': {'code': 'A', 'title': u'ñ'},
                 'author': 'tolstoy'}],
        }
        helpers.call_action('datastore_upsert', **data)
    def test_upsert_doesnt_crash_with_json_field_with_string_value(self):
        """Insert a record whose 'json' field holds a bare (non-ASCII) string."""
        resource = factories.Resource()
        # Create a datastore table with a 'json' column.
        data = {
            'resource_id': resource['id'],
            'force': True,
            'primary_key': 'id',
            'fields': [{'id': 'id', 'type': 'text'},
                       {'id': 'book', 'type': 'json'},
                       {'id': 'author', 'type': 'text'}],
        }
        helpers.call_action('datastore_create', **data)
        # Upsert (method=insert) must not raise on a string-valued json field.
        data = {
            'resource_id': resource['id'],
            'force': True,
            'method': 'insert',
            'records': [
                {'id': '1',
                 'book': u'ñ',
                 'author': 'tolstoy'}],
        }
        helpers.call_action('datastore_upsert', **data)
class TestDatastoreUpsert(tests.WsgiAppCase):
    """Tests for datastore_upsert with method='upsert' (legacy WSGI style).

    setup_class creates a datastore table keyed on u'b\xfck' with two seed
    records; the tests then exercise insert-or-update semantics through the
    /api/action/datastore_upsert endpoint.
    """
    # Populated in setup_class.
    sysadmin_user = None
    normal_user = None
    @classmethod
    def setup_class(cls):
        """Create CKAN test data and a datastore table with two seed records."""
        if not tests.is_datastore_supported():
            raise nose.SkipTest("Datastore not supported")
        p.load('datastore')
        ctd.CreateTestData.create()
        cls.sysadmin_user = model.User.get('testsysadmin')
        cls.normal_user = model.User.get('annafan')
        set_url_type(
            model.Package.get('annakarenina').resources, cls.sysadmin_user)
        resource = model.Package.get('annakarenina').resources[0]
        cls.data = {
            'resource_id': resource.id,
            'fields': [{'id': u'b\xfck', 'type': 'text'},
                       {'id': 'author', 'type': 'text'},
                       {'id': 'nested', 'type': 'json'},
                       {'id': 'characters', 'type': 'text[]'},
                       {'id': 'published'}],
            'primary_key': u'b\xfck',
            'records': [{u'b\xfck': 'annakarenina', 'author': 'tolstoy',
                        'published': '2005-03-01', 'nested': ['b', {'moo': 'moo'}]},
                        {u'b\xfck': 'warandpeace', 'author': 'tolstoy',
                        'nested': {'a':'b'}}
                       ]
        }
        postparams = '%s=1' % json.dumps(cls.data)
        auth = {'Authorization': str(cls.sysadmin_user.apikey)}
        res = cls.app.post('/api/action/datastore_create', params=postparams,
                           extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True
        # Direct DB session against the datastore write URL so tests can
        # verify table contents with raw SQL.
        engine = db._get_engine(
            {'connection_url': config['ckan.datastore.write_url']})
        cls.Session = orm.scoped_session(orm.sessionmaker(bind=engine))
    @classmethod
    def teardown_class(cls):
        rebuild_all_dbs(cls.Session)
        p.unload('datastore')
    def test_upsert_requires_auth(self):
        """An unauthenticated upsert request is rejected with 403."""
        data = {
            'resource_id': self.data['resource_id']
        }
        postparams = '%s=1' % json.dumps(data)
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            status=403)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False
    def test_upsert_empty_fails(self):
        """An empty payload is a validation error (409)."""
        postparams = '%s=1' % json.dumps({})
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False
    def test_upsert_basic(self):
        """Full upsert flow: insert new row, partial update, NULL-out a column, new key."""
        c = self.Session.connection()
        results = c.execute('select 1 from "{0}"'.format(self.data['resource_id']))
        assert results.rowcount == 2
        self.Session.remove()
        hhguide = u"hitchhiker's guide to the galaxy"
        # FIX: the original literal had a duplicate 'nested' key
        # ({'a': 2, 'b': {'c': 'd'}} followed by {'foo': 'bar'}); Python keeps
        # only the last value, so the first entry was dead and misleading.
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{
                'author': 'adams',
                'characters': ['Arthur Dent', 'Marvin'],
                'nested': {'foo': 'bar'},
                u'b\xfck': hhguide}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True
        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        assert results.rowcount == 3
        records = results.fetchall()
        assert records[2][u'b\xfck'] == hhguide
        assert records[2].author == 'adams'
        assert records[2].characters == ['Arthur Dent', 'Marvin']
        assert json.loads(records[2].nested.json) == {'foo': 'bar'}
        self.Session.remove()
        c = self.Session.connection()
        results = c.execute("select * from \"{0}\" where author='{1}'".format(self.data['resource_id'], 'adams'))
        assert results.rowcount == 1
        self.Session.remove()
        # upsert only the publish date
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{'published': '1979-1-1', u'b\xfck': hhguide}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True
        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        assert results.rowcount == 3
        records = results.fetchall()
        assert records[2][u'b\xfck'] == hhguide
        assert records[2].author == 'adams'
        assert records[2].published == datetime.datetime(1979, 1, 1)
        self.Session.remove()
        # delete publish date
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{u'b\xfck': hhguide, 'published': None}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True
        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        assert results.rowcount == 3
        records = results.fetchall()
        assert records[2][u'b\xfck'] == hhguide
        assert records[2].author == 'adams'
        # FIX: compare to None with 'is' (identity), not '=='.
        assert records[2].published is None
        self.Session.remove()
        # A previously-unknown primary key inserts a new row.
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{'author': 'tolkien', u'b\xfck': 'the hobbit'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True
        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        assert results.rowcount == 4
        records = results.fetchall()
        assert records[3][u'b\xfck'] == 'the hobbit'
        assert records[3].author == 'tolkien'
        self.Session.remove()
        # test % in records
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{'author': 'tol % kien', u'b\xfck': 'the % hobbit'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True
    def test_upsert_missing_key(self):
        """A record without the primary key is a 409."""
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{'author': 'tolkien'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False
    def test_upsert_non_existing_field(self):
        """A record referencing an unknown field is a 409."""
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{u'b\xfck': 'annakarenina', 'dummy': 'tolkien'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False
    def test_upsert_works_with_empty_list_in_json_field(self):
        """An empty list stored in a 'json' field round-trips intact."""
        hhguide = u"hitchhiker's guide to the galaxy"
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'upsert',
            'records': [{
                'nested': [],
                u'b\xfck': hhguide}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True, res_dict
        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(data['resource_id']))
        record = [r for r in results.fetchall() if r[2] == hhguide]
        self.Session.remove()
        assert len(record) == 1, record
        assert_equal(json.loads(record[0][4].json),
                     data['records'][0]['nested'])
class TestDatastoreInsert(tests.WsgiAppCase):
    """Tests for datastore_upsert with method='insert' (legacy WSGI style)."""
    # Populated in setup_class.
    sysadmin_user = None
    normal_user = None
    @classmethod
    def setup_class(cls):
        """Create CKAN test data and a datastore table with two seed records."""
        if not tests.is_datastore_supported():
            raise nose.SkipTest("Datastore not supported")
        p.load('datastore')
        ctd.CreateTestData.create()
        cls.sysadmin_user = model.User.get('testsysadmin')
        cls.normal_user = model.User.get('annafan')
        set_url_type(
            model.Package.get('annakarenina').resources, cls.sysadmin_user)
        resource = model.Package.get('annakarenina').resources[0]
        cls.data = {
            'resource_id': resource.id,
            'fields': [{'id': u'b\xfck', 'type': 'text'},
                       {'id': 'author', 'type': 'text'},
                       {'id': 'nested', 'type': 'json'},
                       {'id': 'characters', 'type': 'text[]'},
                       {'id': 'published'}],
            'primary_key': u'b\xfck',
            'records': [{u'b\xfck': 'annakarenina', 'author': 'tolstoy',
                        'published': '2005-03-01', 'nested': ['b', {'moo': 'moo'}]},
                        {u'b\xfck': 'warandpeace', 'author': 'tolstoy',
                        'nested': {'a':'b'}}
                       ]
        }
        postparams = '%s=1' % json.dumps(cls.data)
        auth = {'Authorization': str(cls.sysadmin_user.apikey)}
        res = cls.app.post('/api/action/datastore_create', params=postparams,
                           extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True
        # Direct DB session against the datastore write URL for raw-SQL checks.
        engine = db._get_engine(
            {'connection_url': config['ckan.datastore.write_url']})
        cls.Session = orm.scoped_session(orm.sessionmaker(bind=engine))
    @classmethod
    def teardown_class(cls):
        p.unload('datastore')
        rebuild_all_dbs(cls.Session)
    def test_insert_non_existing_field(self):
        """Inserting a record with an unknown field is a 409."""
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'insert',
            'records': [{u'b\xfck': 'the hobbit', 'dummy': 'tolkien'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False
    def test_insert_with_index_violation(self):
        """Inserting a duplicate primary key is a 409."""
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'insert',
            'records': [{u'b\xfck': 'annakarenina'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False
    def test_insert_basic(self):
        """A valid insert adds a third row to the table."""
        hhguide = u"hitchhiker's guide to the galaxy"
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'insert',
            'records': [{
                'author': 'adams',
                'characters': ['Arthur Dent', 'Marvin'],
                'nested': {'foo': 'bar', 'baz': 3},
                u'b\xfck': hhguide}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True
        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        self.Session.remove()
        assert results.rowcount == 3
class TestDatastoreUpdate(tests.WsgiAppCase):
    """Tests for datastore_upsert with method='update' (legacy WSGI style)."""
    # Populated in setup_class.
    sysadmin_user = None
    normal_user = None
    @classmethod
    def setup_class(cls):
        """Create CKAN test data and a datastore table with three seed records."""
        if not tests.is_datastore_supported():
            raise nose.SkipTest("Datastore not supported")
        p.load('datastore')
        ctd.CreateTestData.create()
        cls.sysadmin_user = model.User.get('testsysadmin')
        cls.normal_user = model.User.get('annafan')
        set_url_type(
            model.Package.get('annakarenina').resources, cls.sysadmin_user)
        resource = model.Package.get('annakarenina').resources[0]
        hhguide = u"hitchhiker's guide to the galaxy"
        cls.data = {
            'resource_id': resource.id,
            'fields': [{'id': u'b\xfck', 'type': 'text'},
                       {'id': 'author', 'type': 'text'},
                       {'id': 'nested', 'type': 'json'},
                       {'id': 'characters', 'type': 'text[]'},
                       {'id': 'published'}],
            'primary_key': u'b\xfck',
            'records': [{u'b\xfck': 'annakarenina', 'author': 'tolstoy',
                        'published': '2005-03-01', 'nested': ['b', {'moo': 'moo'}]},
                        {u'b\xfck': 'warandpeace', 'author': 'tolstoy',
                        'nested': {'a':'b'}},
                        {'author': 'adams',
                        'characters': ['Arthur Dent', 'Marvin'],
                        'nested': {'foo': 'bar'},
                        u'b\xfck': hhguide}
                       ]
        }
        postparams = '%s=1' % json.dumps(cls.data)
        auth = {'Authorization': str(cls.sysadmin_user.apikey)}
        res = cls.app.post('/api/action/datastore_create', params=postparams,
                           extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True
        # Direct DB session against the datastore write URL for raw-SQL checks.
        engine = db._get_engine(
            {'connection_url': config['ckan.datastore.write_url']})
        cls.Session = orm.scoped_session(orm.sessionmaker(bind=engine))
    @classmethod
    def teardown_class(cls):
        p.unload('datastore')
        rebuild_all_dbs(cls.Session)
    def test_update_basic(self):
        """Update by primary key; partial records leave other columns untouched."""
        c = self.Session.connection()
        results = c.execute('select 1 from "{0}"'.format(self.data['resource_id']))
        assert results.rowcount == 3, results.rowcount
        self.Session.remove()
        hhguide = u"hitchhiker's guide to the galaxy"
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'update',
            'records': [{
                'author': 'adams',
                'characters': ['Arthur Dent', 'Marvin'],
                'nested': {'baz': 3},
                u'b\xfck': hhguide}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True
        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        assert results.rowcount == 3
        records = results.fetchall()
        assert records[2][u'b\xfck'] == hhguide
        assert records[2].author == 'adams'
        self.Session.remove()
        c = self.Session.connection()
        results = c.execute("select * from \"{0}\" where author='{1}'".format(self.data['resource_id'], 'adams'))
        assert results.rowcount == 1
        self.Session.remove()
        # update only the publish date
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'update',
            'records': [{'published': '1979-1-1', u'b\xfck': hhguide}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True
        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        self.Session.remove()
        assert results.rowcount == 3
        records = results.fetchall()
        assert records[2][u'b\xfck'] == hhguide
        assert records[2].author == 'adams'
        assert records[2].published == datetime.datetime(1979, 1, 1)
        # delete publish date
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'update',
            'records': [{u'b\xfck': hhguide, 'published': None}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is True
        c = self.Session.connection()
        results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
        self.Session.remove()
        assert results.rowcount == 3
        records = results.fetchall()
        assert records[2][u'b\xfck'] == hhguide
        assert records[2].author == 'adams'
        assert records[2].published == None
    def test_update_missing_key(self):
        """A record without the primary key is a 409."""
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'update',
            'records': [{'author': 'tolkien'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False
    def test_update_non_existing_key(self):
        """Updating a key that does not exist in the table is a 409."""
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'update',
            'records': [{u'b\xfck': '', 'author': 'tolkien'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False
    def test_update_non_existing_field(self):
        """Updating an unknown field is a 409."""
        data = {
            'resource_id': self.data['resource_id'],
            'method': 'update',
            'records': [{u'b\xfck': 'annakarenina', 'dummy': 'tolkien'}]
        }
        postparams = '%s=1' % json.dumps(data)
        auth = {'Authorization': str(self.sysadmin_user.apikey)}
        res = self.app.post('/api/action/datastore_upsert', params=postparams,
                            extra_environ=auth, status=409)
        res_dict = json.loads(res.body)
        assert res_dict['success'] is False
|
NicoVarg99/daf-recipes
|
ckan/ckan/ckan/ckanext/datastore/tests/test_upsert.py
|
Python
|
gpl-3.0
| 22,942
|
[
"Galaxy"
] |
7b9c9a6ce175c69f40d894ba6b492ebaae8c20d7e93c32456c5aa4ae1d051df7
|
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import sys
try:
from setuptools import setup
except:
from distutils.core import setup
def get_static_files(path):
    """Build package_data glob patterns for every directory under *path*.

    For each directory discovered by walking *path*, emit one glob per
    static-asset extension (html/js/css/png).  Any ``luigi/`` prefix is
    stripped so the resulting paths are relative to the ``luigi`` package.
    """
    patterns = ("*.html", "*.js", "*.css", "*.png")
    collected = []
    for dirpath, _dirnames, _filenames in os.walk(path):
        relative_dir = dirpath.replace("luigi/", "")
        collected.extend(os.path.join(relative_dir, pattern) for pattern in patterns)
    return collected
# Aggregate all static-asset glob patterns for package_data.
luigi_package_data = sum(map(get_static_files, ["luigi/static", "luigi/templates"]), [])
# Banner prepended to the README so PyPI points readers at GitHub.
readme_note = """\
.. note::
For the latest source, discussion, etc, please visit the
`GitHub repository <https://github.com/spotify/luigi>`_\n\n
"""
with open('README.rst') as fobj:
    long_description = readme_note + fobj.read()
install_requires = [
    'pyparsing',
    'tornado',
    'python-daemon',
]
if os.environ.get('READTHEDOCS', None) == 'True':
    install_requires.append('sqlalchemy')
    # So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla
if sys.version_info[:2] < (2, 7):
    # Backports of stdlib modules that only exist from Python 2.7 onwards.
    install_requires.extend(['argparse', 'ordereddict', 'importlib'])
setup(
    name='luigi',
    version='1.1.3',
    description='Workflow mgmgt + task scheduling + dependency resolution',
    long_description=long_description,
    author='Erik Bernhardsson',
    author_email='erikbern@spotify.com',
    url='https://github.com/spotify/luigi',
    license='Apache License 2.0',
    packages=[
        'luigi',
        'luigi.contrib',
        'luigi.tools'
    ],
    package_data={
        'luigi': luigi_package_data
    },
    entry_points={
        'console_scripts': [
            'luigi = luigi.cmdline:luigi_run',
            'luigid = luigi.cmdline:luigid',
            'luigi-grep = luigi.tools.luigi_grep:main',
            'luigi-deps = luigi.tools.deps:main',
        ]
    },
    install_requires=install_requires,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: System :: Monitoring',
    ],
)
|
hadesbox/luigi
|
setup.py
|
Python
|
apache-2.0
| 2,911
|
[
"VisIt"
] |
2053dfaccf63cc6276f799f3924276606f2ae12a45be32bc01e681dc952f6f19
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import exp
import sys
import ConfigParser as cfg
import os
import numpy as n
import numpy.random as nr
from math import ceil, floor
from collections import OrderedDict
from os import linesep as NL
from python_util.options import OptionsParser
import re
class LayerParsingError(Exception):
    """Raised when a layer definition or layer parameter file is invalid."""
    pass
# A neuron that doesn't take parameters
class NeuronParser:
    """Parser for neuron type strings that carry no parameters (e.g. ``relu``)."""
    def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
        self.type = type
        self.func_str = func_str
        self.uses_acts = uses_acts
        self.uses_inputs = uses_inputs
    def parse(self, type):
        """Return a neuron descriptor dict when *type* matches exactly, else None."""
        if type != self.type:
            return None
        return {'type': self.type,
                'params': {},
                'usesActs': self.uses_acts,
                'usesInputs': self.uses_inputs}
# A neuron that takes parameters
class ParamNeuronParser(NeuronParser):
    """Parser for parameterised neuron specs such as ``abstanh[a,b]``.

    The *type* template given at construction names the base type and the
    parameter list; ``parse`` then matches concrete strings like
    ``abstanh[1.7,0.6]`` and converts each value to float.
    """
    neuron_regex = re.compile(r'^\s*(\w+)\s*\[\s*(\w+(\s*,\w+)*)\s*\]\s*$')
    def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
        NeuronParser.__init__(self, type, func_str, uses_acts, uses_inputs)
        m = self.neuron_regex.match(type)
        self.base_type = m.group(1)
        self.param_names = m.group(2).split(',')
        # Parameter names within one spec must be unique.
        assert len(set(self.param_names)) == len(self.param_names)
    def parse(self, type):
        """Return a neuron descriptor dict when *type* matches this template, else None."""
        m = re.match(r'^%s\s*\[([\d,\.\s\-]*)\]\s*$' % self.base_type, type)
        if m:
            try:
                param_vals = [float(v.strip()) for v in m.group(1).split(',')]
                if len(param_vals) == len(self.param_names):
                    return {'type': self.base_type,
                            'params': dict(zip(self.param_names, param_vals)),
                            'usesActs': self.uses_acts,
                            'usesInputs': self.uses_inputs}
            # NOTE(review): float() on a bad token raises ValueError, not
            # TypeError, so this handler may never fire -- confirm intent.
            except TypeError:
                pass
        return None
class AbsTanhNeuronParser(ParamNeuronParser):
    """Parser for the ``abstanh[a,b]`` neuron: f(x) = a * |tanh(b * x)|."""
    def __init__(self):
        ParamNeuronParser.__init__(self, 'abstanh[a,b]', 'f(x) = a * |tanh(b * x)|')
    def parse(self, type):
        """Parse like ParamNeuronParser, then normalise the sign of b."""
        dic = ParamNeuronParser.parse(self, type)
        # Make b positive, since abs(tanh(bx)) = abs(tanh(-bx)) and the C++ code
        # assumes b is positive.
        if dic:
            dic['params']['b'] = abs(dic['params']['b'])
        return dic
class ParamParser:
    """Parse strings like ``name[a=1;b=2.5]`` against a spec ``name[ia;fb]``.

    Each parameter name in the spec is prefixed with a one-letter type code
    ('i' -> int, 'f' -> float) selecting the converter applied to the value.
    ``parse`` returns ``{'type': ..., 'params': {...}}`` on a match, else None.
    """
    lrs_regex = re.compile(r'^\s*(\w+)\s*(?:\[\s*(\w+(\s*;\w+)*)\s*\])?\s*$')
    param_converters = {'i': int,
                        'f': float}
    def __init__(self, type):
        spec = self.lrs_regex.match(type)
        self.base_type = spec.group(1)
        typed_names = spec.group(2).split(';') if spec.group(2) is not None else []
        # Strip the leading type-code character to get the bare names.
        self.param_names = [tn[1:] for tn in typed_names]
        self.param_types = [self.param_converters[tn[0]] for tn in typed_names]
        self.param_regex_inner = ";".join(r'\s*%s\s*=\s*[^;,\s=]+\s*' % name
                                          for name in self.param_names)
        self.regex_str = r'^%s\s*(?:\[(%s)\])?\s*$' % (self.base_type, self.param_regex_inner)
        assert len(set(self.param_names)) == len(self.param_names)
    def parse(self, type):
        """Match *type* (case-insensitively) and convert its parameter values."""
        m = re.match(self.regex_str, type, flags=re.IGNORECASE)
        if not m:
            return None
        try:
            if m.group(1) is None:
                values = []
            else:
                values = [conv(item.split('=')[1].strip())
                          for conv, item in zip(self.param_types, m.group(1).split(';'))]
            if len(values) == len(self.param_names):
                return {'type': self.base_type,
                        'params': dict(zip(self.param_names, values))}
        except TypeError:
            pass
        return None
# Subclass that throws more convnet-specific exceptions than the default
class MyConfigParser(cfg.SafeConfigParser):
    """SafeConfigParser wrapper that raises LayerParsingError with layer context.

    The ``safe_get*`` helpers translate missing options and conversion
    failures into messages that name the offending layer section.
    """
    def safe_get(self, section, option, f=cfg.SafeConfigParser.get, typestr=None, default=None):
        """Fetch *option* via getter *f*; fall back to *default*, else raise."""
        try:
            return f(self, section, option)
        except cfg.NoOptionError, e:
            if default is not None:
                return default
            raise LayerParsingError("Layer '%s': required parameter '%s' missing" % (section, option))
        except ValueError, e:
            if typestr is None:
                raise e
            raise LayerParsingError("Layer '%s': parameter '%s' must be %s" % (section, option, typestr))
    def safe_get_list(self, section, option, f=str, typestr='strings', default=None):
        """Fetch a ','-delimited option and convert each item with *f*."""
        v = self.safe_get(section, option, default=default)
        # A default supplied as a list is passed through unchanged.
        if type(v) == list:
            return v
        try:
            return [f(x.strip()) for x in v.split(',')]
        except:
            raise LayerParsingError("Layer '%s': parameter '%s' must be ','-delimited list of %s" % (section, option, typestr))
    def safe_get_int(self, section, option, default=None):
        return self.safe_get(section, option, f=cfg.SafeConfigParser.getint, typestr='int', default=default)
    def safe_get_float(self, section, option, default=None):
        return self.safe_get(section, option, f=cfg.SafeConfigParser.getfloat, typestr='float', default=default)
    def safe_get_bool(self, section, option, default=None):
        return self.safe_get(section, option, f=cfg.SafeConfigParser.getboolean, typestr='bool', default=default)
    def safe_get_float_list(self, section, option, default=None):
        return self.safe_get_list(section, option, float, typestr='floats', default=default)
    def safe_get_int_list(self, section, option, default=None):
        return self.safe_get_list(section, option, int, typestr='ints', default=default)
    def safe_get_bool_list(self, section, option, default=None):
        return self.safe_get_list(section, option, lambda x: x.lower() in ('true', '1'), typestr='bools', default=default)
# A class that implements part of the interface of MyConfigParser
class FakeConfigParser(object):
    """Dict-backed stand-in exposing a subset of MyConfigParser's interface.

    The *section* argument is accepted for signature compatibility but
    ignored: lookups go straight to the backing dict by option name.
    """
    def __init__(self, dic):
        self.dic = dic
    def safe_get(self, section, option, default=None):
        """Return the stored value for *option*, or *default* when absent."""
        return self.dic.get(option, default)
    def safe_get_int(self, section, option, default=None):
        """Return the value for *option* coerced to int."""
        return int(self.safe_get(section, option, default))
    def safe_get_int_list(self, section, option, default=None):
        """Return the value for *option* materialised as a list."""
        return list(self.safe_get(section, option, default))
class LayerParser:
    """Base class for parsing one layer section of a net definition file.

    Each parser instance builds ``self.dic``, the dictionary describing a
    layer; subclasses refine ``parse``/``add_params``/``optimize``.  The
    static helpers drive parsing of whole layer-definition and
    layer-parameter files into a name->dict mapping.
    """
    def __init__(self):
        self.dic = {}
        self.set_defaults()
    # Post-processing step -- this is called after all layers have been initialized
    def optimize(self, layers):
        self.dic['actsTarget'] = -1
        self.dic['actsGradTarget'] = -1
        # All layers that consume this layer's output must agree on replica count.
        if len(set(len(l['gpu']) for l in layers.values() if 'inputs' in l and self.dic['name'] in l['inputs'])) > 1:
            # print set(len(l['gpu']) for l in layers.values())
            raise LayerParsingError("Layer '%s': all next layers must have equal number of replicas." % (self.dic['name']))
    def parse_params(self, vals, parsers, param_name, human_name, num_params=1):
        """Parse each string in *vals* with the first matching parser.

        A single value is broadcast to *num_params* entries; any other
        length mismatch or unparseable value raises LayerParsingError.
        """
        dic, name = self.dic, self.dic['name']
        # print vals
        if len(vals) != num_params and len(vals) != 1:
            raise LayerParsingError("Layer '%s': expected list of length %d for %s but got list of length %d."% (name, num_params, param_name, len(vals)))
        parsed = []
        # print vals
        for v in vals:
            for p in parsers:
                parsedv = p.parse(v)
                if parsedv:
                    parsed += [parsedv]
                    break
        if len(parsed) == 1 and num_params > 1:
            parsed = parsed * num_params
        if len(parsed) == num_params:
            return parsed
        # print parsed, vals
        raise LayerParsingError("Layer '%s': unable to parse %s %s=%s." % (name, human_name, param_name, ",".join(vals)))
    # Add parameters from layer parameter file
    def add_params(self, mcp):
        pass
        # self.dic['conserveMem'] = mcp.convnet.op.get_value('conserve_mem') if mcp.convnet is not None else 0
    def init(self, dic):
        """Re-bind this parser to an existing layer dict; returns self for chaining."""
        self.dic = dic
        return self
    def set_defaults(self):
        self.dic['outputs'] = 0
        self.dic['parser'] = self
        self.dic['requiresParams'] = False
        # Does this layer use its own activity matrix
        # for some purpose other than computing its output?
        # Usually, this will only be true for layers that require their
        # own activity matrix for gradient computations. For example, layers
        # with logistic units must compute the gradient y * (1 - y), where y is
        # the activity matrix.
        #
        # Layers that do not not use their own activity matrix should advertise
        # this, since this will enable memory-saving matrix re-use optimizations.
        #
        # The default value of this property is True, for safety purposes.
        # If a layer advertises that it does not use its own activity matrix when
        # in fact it does, bad things will happen.
        self.dic['usesActs'] = True
        # Does this layer use the activity matrices of its input layers
        # for some purpose other than computing its output?
        #
        # Again true by default for safety
        self.dic['usesInputs'] = True
        # Force this layer to use its own activity gradient matrix,
        # instead of borrowing one from one of its inputs.
        #
        # This should be true for layers where the mapping from output
        # gradient to input gradient is non-elementwise.
        self.dic['forceOwnActs'] = True
        # Does this layer need the gradient at all?
        # Should only be true for layers with parameters (weights).
        self.dic['gradConsumer'] = False
        # The gpu indices on which this layer runs
        self.dic['gpu'] = [-1]
    def parse(self, name, mcp, prev_layers, model=None):
        """Fill in the generic fields (name/type/id) from config section *name*."""
        self.prev_layers = prev_layers
        self.dic['name'] = name
        self.dic['type'] = mcp.safe_get(name, 'type')
        self.dic['id'] = len(prev_layers)
        return self.dic
    def verify_float_range(self, v, param_name, _min, _max):
        self.verify_num_range(v, param_name, _min, _max, strconv=lambda x: '%.3f' % x)
    def verify_num_range(self, v, param_name, _min, _max, strconv=lambda x:'%d' % x):
        """Range-check a scalar or each element of a list."""
        if type(v) == list:
            for i,vv in enumerate(v):
                self._verify_num_range(vv, param_name, _min, _max, i, strconv=strconv)
        else:
            self._verify_num_range(v, param_name, _min, _max, strconv=strconv)
    def _verify_num_range(self, v, param_name, _min, _max, input=-1, strconv=lambda x:'%d' % x):
        # Either bound may be None, meaning unbounded on that side.
        layer_name = self.dic['name'] if input < 0 else '%s[%d]' % (self.dic['name'], input)
        if _min is not None and _max is not None and (v < _min or v > _max):
            raise LayerParsingError("Layer '%s': parameter '%s' must be in the range %s-%s" % (layer_name, param_name, strconv(_min), strconv(_max)))
        elif _min is not None and v < _min:
            raise LayerParsingError("Layer '%s': parameter '%s' must be greater than or equal to %s" % (layer_name, param_name, strconv(_min)))
        elif _max is not None and v > _max:
            raise LayerParsingError("Layer '%s': parameter '%s' must be smaller than or equal to %s" % (layer_name, param_name, strconv(_max)))
    def verify_divisible(self, value, div, value_name, div_name=None, input_idx=0):
        layer_name = self.dic['name'] if len(self.dic['inputs']) == 0 else '%s[%d]' % (self.dic['name'], input_idx)
        if value % div != 0:
            raise LayerParsingError("Layer '%s': parameter '%s' must be divisible by %s" % (layer_name, value_name, str(div) if div_name is None else "'%s'" % div_name))
    def verify_str_in(self, value, param_name, lst, input_idx=-1):
        lname = self.dic['name'] if input_idx == -1 else ('%s[%d]' % (self.dic['name'], input_idx))
        if value not in lst:
            raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (lname, param_name, ", ".join("'%s'" % s for s in lst)))
    def verify_int_in(self, value, param_name, lst):
        if value not in lst:
            raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (self.dic['name'], param_name, ", ".join("'%d'" % s for s in lst)))
    def verify_all_ints_in(self, values, param_name, lst):
        if len([v for v in values if v not in lst]) > 0:
            raise LayerParsingError("Layer '%s': all parameters to '%s' must be among %s" % (self.dic['name'], param_name, ", ".join("'%d'" % s for s in lst)))
    def verify_input_dims(self, dims):
        """Check each input's dimensionality against *dims* (None = don't care)."""
        for i,d in enumerate(dims):
            if d is not None and self.dic['numInputs'][i] != d: # first input must be labels
                raise LayerParsingError("Layer '%s': dimensionality of input %d must be %d" % (self.dic['name'], i, d))
    # This looks for neuron=x arguments in various layers, and creates
    # separate layer definitions for them.
    @staticmethod
    def detach_neuron_layers(layers):
        for name,l in layers.items():
            if l['type'] != 'neuron' and 'neuron' in l and l['neuron']:
                NeuronLayerParser().detach_neuron_layer(name, layers)
    @staticmethod
    def _parse_layer_cfg(layer_cfg_path, model):
        """Parse a layer-definition file into an ordered name->dict mapping."""
        layers = {}
        mcp = MyConfigParser(dict_type=OrderedDict)
        mcp.readfp(open(layer_cfg_path))
        for name in mcp.sections():
            if not mcp.has_option(name, 'type'):
                raise LayerParsingError("Layer '%s': no type given" % name)
            ltype = mcp.safe_get(name, 'type')
            if ltype not in layer_parsers:
                raise LayerParsingError("Layer '%s': Unknown layer type: '%s'" % (name, ltype))
            layers[name] = layer_parsers[ltype]().parse(name, mcp, layers, model)
        LayerParser.detach_neuron_layers(layers)
        for l in layers.values():
            l['parser'].optimize(layers)
            del l['parser']
        # Every non-cost layer must feed at least one other layer.
        for name,l in layers.items():
            if not l['type'].startswith('cost.'):
                found = max(name in l2['inputs'] for l2 in layers.values() if 'inputs' in l2)
                if not found:
                    raise LayerParsingError("Layer '%s' of type '%s' is unused" % (name, l['type']))
        return layers
    # NOTE(review): mutable default argument `layers={}`; here it is only
    # length-checked and rebound (never mutated in-place), so it appears
    # harmless -- but confirm before relying on repeated calls.
    @staticmethod
    def parse_layers(layer_cfg_path, param_cfg_path, model, layers={}):
        """Parse layer definitions plus the matching parameter file; exits on error."""
        try:
            if not os.path.exists(layer_cfg_path):
                raise LayerParsingError("Layer definition file '%s' does not exist" % layer_cfg_path)
            if not os.path.exists(param_cfg_path):
                raise LayerParsingError("Layer parameter file '%s' does not exist" % param_cfg_path)
            if len(layers) > 0:
                pass
                # new_layers = LayerParser._parse_layer_cfg(layer_cfg_path, model)
                # for name, layer in new_layers.items():
                #     if name not in layers:
                #         raise LayerParsingError("Layer '%s' must already exist in loaded net" % name)
                #     if layer['type'] != layers[name]['type']:
                #         raise LayerParsingError("Layer '%s': cannot change layer type" % name)
                # # --- copy over relevant parameters
                # # copy neuron types
                # if layer['type'] == 'neuron':
                #     layers[name]['neuron'] = layer['neuron']
                #     layers[name]['usesActs'] = layer['usesActs']
                #     layers[name]['usesInputs'] = layer['usesInputs']
            elif len(layers) == 0:
                layers = LayerParser._parse_layer_cfg(layer_cfg_path, model)
            mcp = MyConfigParser(dict_type=OrderedDict)
            mcp.readfp(open(param_cfg_path))
            # mcp.convnet = model
            for name,l in layers.items():
                if not mcp.has_section(name) and l['requiresParams']:
                    raise LayerParsingError("Layer '%s' of type '%s' requires extra parameters, but none given in file '%s'." % (name, l['type'], param_cfg_path))
                lp = layer_parsers[l['type']]().init(l)
                lp.add_params(mcp)
        except LayerParsingError, e:
            print e
            sys.exit(1)
        return layers
    @staticmethod
    def register_layer_parser(ltype, cls):
        """Register *cls* as the parser for layer type *ltype* (no overwrites)."""
        if ltype in layer_parsers:
            raise LayerParsingError("Layer type '%s' already registered" % ltype)
        layer_parsers[ltype] = cls
# Any layer that takes an input (i.e. non-data layer)
class LayerWithInputParser(LayerParser):
    """Base parser for layers that consume the output of other layers.

    On top of LayerParser it resolves input layers, GPU/replica
    bookkeeping, and performs the activity/gradient matrix sharing
    optimization in optimize().
    """
    def __init__(self, num_inputs=-1):
        # num_inputs: required number of inputs, or -1 for "any number".
        LayerParser.__init__(self)
        self.num_inputs = num_inputs
    def verify_num_params(self, params, auto_expand=True):
        """Check that each named list parameter has one value per input.

        With auto_expand, a single value is broadcast to all inputs by
        list repetition; otherwise a length mismatch raises.
        """
        for param in params:
            if len(self.dic[param]) != len(self.dic['inputs']):
                if auto_expand and len(self.dic[param]) == 1:
                    self.dic[param] *= len(self.dic['inputs'])
                else:
                    raise LayerParsingError("Layer '%s': %s list length does not match number of inputs" % (self.dic['name'], param))
    # layers: dictionary: name -> layer
    def optimize(self, layers):
        """Memory optimization: when this layer is the *sole* consumer of an
        input of the same output size, it may write its activations (and,
        on the same GPU, its gradients) directly into that input's matrices.
        Records the chosen input index in 'actsTarget'/'actsGradTarget'."""
        LayerParser.optimize(self, layers)
        dic = self.dic
        # Check if I have an input that no one else uses.
        #print "Layer %s optimizing" % dic['name']
        if not dic['forceOwnActs']:
            for i, inp in enumerate(dic['inputLayers']):
                # Same output size, and no other layer reads this input.
                if inp['outputs'] == dic['outputs'] and sum(('inputs' in ll) and (inp['name'] in ll['inputs']) for ll in layers.itervalues()) == 1:
                    # I can share my activity matrix with this layer
                    # if it does not use its activity matrix, and I
                    # do not need to remember my inputs.
                    # TODO: a dropout layer should always be able to overwrite
                    # its input. Make it so.
                    #print "Layer %s(uses inputs=%d), input %s(uses acts = %d)" % (dic['name'], dic['usesInputs'], inp['name'], inp['usesActs'])
                    if not inp['usesActs'] and not dic['usesInputs']:
                        dic['actsTarget'] = i
                        print "Layer %s using acts from layer %s" % (dic['name'], inp['name'])
                        #print "Layer '%s' sharing activity matrix with layer '%s'" % (dic['name'], l['name'])
                    # I can share my gradient matrix with this layer if we're on the same GPU.
                    # This is different from the logic for actsTarget because this guy doesn't
                    # have an actsGrad matrix on my GPU if our GPUs are different, so there's
                    # nothing to share.
                    if dic['gpu'] == inp['gpu']:
                        dic['actsGradTarget'] = i
                    #print "Layer '%s' sharing activity gradient matrix with layer '%s'" % (dic['name'], l['name'])
    def parse(self, name, mcp, prev_layers, model=None):
        """Resolve inputs, GPUs and replica counts for this layer.

        Fills 'inputs', 'inputLayers', 'gpu'/'gpus', 'numReplicas',
        'numInputs' and the optional inline 'neuron' shortcut string.
        Raises LayerParsingError on any inconsistency.
        """
        dic = LayerParser.parse(self, name, mcp, prev_layers, model)
        dic['inputs'] = [inp.strip() for inp in mcp.safe_get(name, 'inputs').split(',')]
        for inp in dic['inputs']:
            if inp not in prev_layers:
                raise LayerParsingError("Layer '%s': input layer '%s' not defined" % (name, inp))
        dic['inputLayers'] = [prev_layers[inp] for inp in dic['inputs']]
        # Default GPU set is inherited from the first input; one replica per GPU.
        dic['gpu'] = mcp.safe_get_int_list(name, 'gpu', default=dic['inputLayers'][0]['gpu'])
        dic['gpus'] = ", ".join('%s' % d for d in dic['gpu'])
        dic['numReplicas'] = len(dic['gpu'])
        if len(set(dic['gpu'])) != len(dic['gpu']):
            raise LayerParsingError("Layer '%s': all replicas must run on different GPUs." % (name))
        for inp in dic['inputs']:
            # Data layers do not explicitly define how many replicas they have.
            # The number of replicas for a data layer is given by the number of replicas
            # in the next layer(s). So we set that here.
            inpl = prev_layers[inp]
            if inpl['type'] == 'data':
                inpl['numReplicas'] = dic['numReplicas']
            if inpl['numReplicas'] % dic['numReplicas'] != 0:
                raise LayerParsingError("Layer '%s': number of replicas (%d) must divide number of replicas in all input layers (input %s has %d replicas)." % (name, dic['numReplicas'], inpl['name'], inpl['numReplicas']))
        if len(set(inp['numReplicas'] for inp in dic['inputLayers'])) != 1:
            raise LayerParsingError("Layer '%s': all input layers must have equal numbers of replicas." % (name))
        # Need to also assert that all *next* layers have equal number of replicas but this is hard so it's done in Layer.optimize
        for inp in dic['inputLayers']:
            if inp['outputs'] == 0:
                raise LayerParsingError("Layer '%s': input layer '%s' does not produce any output" % (name, inp['name']))
        dic['numInputs'] = [inp['outputs'] for inp in dic['inputLayers']]
        # Layers can declare a neuron activation function to apply to their output, as a shortcut
        # to avoid declaring a separate neuron layer above themselves.
        dic['neuron'] = mcp.safe_get(name, 'neuron', default="")
        if self.num_inputs > 0 and len(dic['numInputs']) != self.num_inputs:
            raise LayerParsingError("Layer '%s': number of inputs must be %d" % (name, self.num_inputs))
        if model:
            self.verify_all_ints_in(dic['gpu'], 'gpu', range(len(model.op.get_value('gpu'))))
        return dic
    def verify_img_size(self):
        # The first input must factor into `channels` square images of
        # `imgSize` x `imgSize` pixels each.
        dic = self.dic
        if dic['numInputs'][0] % dic['imgPixels'] != 0 or dic['imgSize'] * dic['imgSize'] != dic['imgPixels']:
            raise LayerParsingError("Layer '%s': has %-d dimensional input, not interpretable as %d-channel images" % (dic['name'], dic['numInputs'][0], dic['channels']))
    @staticmethod
    def grad_consumers_below(dic):
        # True if this layer, or any layer beneath it, consumes gradients.
        # Returns None (falsy) when 'inputLayers' is absent and the layer
        # itself is not a gradient consumer.
        if dic['gradConsumer']:
            return True
        if 'inputLayers' in dic:
            return any(LayerWithInputParser.grad_consumers_below(l) for l in dic['inputLayers'])
    def verify_no_grads(self):
        # Layers that cannot backpropagate must not sit above trainable layers.
        if LayerWithInputParser.grad_consumers_below(self.dic):
            raise LayerParsingError("Layer '%s': layers of type '%s' cannot propagate gradient and must not be placed over layers with parameters." % (self.dic['name'], self.dic['type']))
class NailbedLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['stride'] = mcp.safe_get_int(name, 'stride')
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputsX'] = (dic['imgSize'] + dic['stride'] - 1) / dic['stride']
dic['start'] = (dic['imgSize'] - dic['stride'] * (dic['outputsX'] - 1)) / 2
dic['outputs'] = dic['channels'] * dic['outputsX']**2
self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
self.verify_img_size()
print "Initialized bed-of-nails layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['outputsX'], dic['outputsX'], dic['channels'])
return dic
class GaussianBlurLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['outputs'] = dic['numInputs'][0]
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['filterSize'] = mcp.safe_get_int(name, 'filterSize')
dic['stdev'] = mcp.safe_get_float(name, 'stdev')
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_int_in(dic['filterSize'], 'filterSize', [3, 5, 7, 9])
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['filter'] = n.array([exp(-(dic['filterSize']/2 - i)**2 / float(2 * dic['stdev']**2))
for i in xrange(dic['filterSize'])], dtype=n.float32).reshape(1, dic['filterSize'])
dic['filter'] /= dic['filter'].sum()
self.verify_img_size()
if dic['filterSize'] > dic['imgSize']:
raise LayerParsingError("Later '%s': filter size (%d) must be smaller than image size (%d)." % (dic['name'], dic['filterSize'], dic['imgSize']))
print "Initialized Gaussian blur layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class HorizontalReflectionLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['numInputs'][0]
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, 3)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
self.verify_img_size()
print "Initialized horizontal reflection layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class ResizeLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['scale'] = mcp.safe_get_float(name, 'scale')
dic['tgtSize'] = int(floor(dic['imgSize'] / dic['scale']))
dic['tgtPixels'] = dic['tgtSize']**2
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Really not recommended to use this for such severe scalings
self.verify_float_range(dic['scale'], 'scale', 0.5, 2)
dic['outputs'] = dic['channels'] * dic['tgtPixels']
self.verify_img_size()
self.verify_no_grads()
print "Initialized resize layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
return dic
class RandomScaleLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['maxScale'] = mcp.safe_get_float(name, 'maxScale')
dic['tgtSize'] = mcp.safe_get_int(name, 'tgtSize')
min_size = int(floor(dic['imgSize'] / dic['maxScale']))
max_size = dic['imgSize'] #int(floor(dic['imgSize'] * dic['maxScale']))
if dic['tgtSize'] < min_size:
raise LayerParsingError("Layer '%s': target size must be greater than minimum image size after rescaling (%d)" % (name, min_size))
if dic['tgtSize'] > max_size:
raise LayerParsingError("Layer '%s': target size must be smaller than maximum image size after rescaling (%d)" % (name, max_size))
dic['tgtPixels'] = dic['tgtSize']**2
self.verify_float_range(dic['maxScale'], 'maxScale', 1, 2)
dic['outputs'] = dic['channels'] * dic['tgtPixels']
self.verify_img_size()
self.verify_no_grads()
print "Initialized random scale layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
return dic
class CropLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, None)
dic['startX'] = mcp.safe_get_int(name, 'startX')
dic['startY'] = mcp.safe_get_int(name, 'startY', default=dic['startX'])
dic['sizeX'] = mcp.safe_get_int(name, 'sizeX')
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputs'] = dic['channels'] * (dic['sizeX']**2)
self.verify_num_range(dic['startX'], 'startX', 0, dic['imgSize']-1)
self.verify_num_range(dic['sizeX'], 'sizeX', 1, dic['imgSize'])
self.verify_num_range(dic['startY'], 'startY', 0, dic['imgSize']-1)
self.verify_img_size()
self.verify_no_grads()
if dic['startX'] + dic['sizeX'] > dic['imgSize']:
raise LayerParsingError("Layer '%s': startX (%d) + sizeX (%d) > imgSize (%d)" % (name, dic['startX'], dic['sizeX'], dic['imgSize']))
print "Initialized cropping layer '%s', producing %dx%d %d-channel output" % (name, dic['sizeX'], dic['sizeX'], dic['channels'])
return dic
class ColorTransformLayerParser(LayerWithInputParser):
    """Common base parser for fixed 3-channel color-space transform layers."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)
    def parse(self, name, mcp, prev_layers, model=None):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['forceOwnActs'] = False
        dic['usesActs'] = False
        dic['usesInputs'] = False
        # Computed values: input is always interpreted as 3-channel images.
        pixels = dic['numInputs'][0] / 3
        dic['imgPixels'] = pixels
        dic['imgSize'] = int(n.sqrt(pixels))
        dic['channels'] = 3
        dic['outputs'] = dic['numInputs'][0]
        self.verify_img_size()
        self.verify_no_grads()
        return dic
class RGBToYUVLayerParser(ColorTransformLayerParser):
    # RGB -> YUV color-space conversion layer; all geometry checks are
    # inherited from ColorTransformLayerParser.
    def __init__(self):
        ColorTransformLayerParser.__init__(self)
    def parse(self, name, mcp, prev_layers, model=None):
        dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
        print "Initialized RGB --> YUV layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
        return dic
class RGBToLABLayerParser(ColorTransformLayerParser):
    # RGB -> LAB color-space conversion layer; geometry checks inherited
    # from ColorTransformLayerParser.
    def __init__(self):
        ColorTransformLayerParser.__init__(self)
    def parse(self, name, mcp, prev_layers, model=None):
        dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
        # 'center': optional flag consumed by the transform implementation
        # (NOTE(review): presumably centers the output channels -- confirm
        # against the runtime side).
        dic['center'] = mcp.safe_get_bool(name, 'center', default=False)
        print "Initialized RGB --> LAB layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
        return dic
class NeuronLayerParser(LayerWithInputParser):
    """Parses explicit neuron (activation-function) layers, and detaches
    inline neuron=... shortcuts from other layers into real neuron layers."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)
    @staticmethod
    def get_unused_layer_name(layers, wish):
        # Return `wish` if free, otherwise wish.1, wish.2, ... up to wish.99.
        if wish not in layers:
            return wish
        for i in xrange(1, 100):
            name = '%s.%d' % (wish, i)
            if name not in layers:
                return name
        raise LayerParsingError("This is insane.")
    def parse_neuron(self, neuron_str):
        """Try each registered neuron parser on `neuron_str`; on success store
        the parsed neuron (and its acts/inputs usage flags) in self.dic.
        On failure raise a LayerParsingError listing all known neuron types."""
        for n in neuron_parsers:
            p = n.parse(neuron_str)
            if p: # Successfully parsed neuron, return it
                self.dic['neuron'] = p
                self.dic['usesActs'] = self.dic['neuron']['usesActs']
                self.dic['usesInputs'] = self.dic['neuron']['usesInputs']
                return
        # Could not parse neuron
        # Print available neuron types
        colnames = ['Neuron type', 'Function']
        m = max(len(colnames[0]), OptionsParser._longest_value(neuron_parsers, key=lambda x:x.type)) + 2
        ntypes = [OptionsParser._bold(colnames[0].ljust(m))] + [n.type.ljust(m) for n in neuron_parsers]
        fnames = [OptionsParser._bold(colnames[1])] + [n.func_str for n in neuron_parsers]
        usage_lines = NL.join(ntype + fname for ntype,fname in zip(ntypes, fnames))
        raise LayerParsingError("Layer '%s': unable to parse neuron type '%s'. Valid neuron types: %sWhere neurons have parameters, they must be floats." % (self.dic['name'], neuron_str, NL + usage_lines + NL))
    def detach_neuron_layer(self, src_name, layers):
        """Create a standalone neuron layer fed by `src_name` and rewire all
        consumers of `src_name` to consume the new layer instead (in place)."""
        dic = self.dic
        # self.set_defaults()
        dic['name'] = NeuronLayerParser.get_unused_layer_name(layers, '%s_neuron' % src_name)
        dic['type'] = 'neuron'
        dic['inputs'] = src_name
        dic['neuron'] = layers[src_name]['neuron']
        dic['gpu'] = layers[src_name]['gpu']
        # Yes it's not entirely correct to pass all of layers as prev_layers, but it's harmless
        dic = self.parse(dic['name'], FakeConfigParser(dic), layers)
        dic['src_layer'] = src_name
        # Link upper layers to this new one
        for l in layers.values():
            if 'inputs' in l:
                l['inputs'] = [inp if inp != src_name else dic['name'] for inp in l['inputs']]
                l['inputLayers'] = [inp if inp['name'] != src_name else dic for inp in l['inputLayers']]
        layers[dic['name']] = dic
    def parse(self, name, mcp, prev_layers, model=None):
        # A neuron layer passes its input through an activation function,
        # so its output size equals its input size.
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['outputs'] = dic['numInputs'][0]
        self.parse_neuron(dic['neuron'])
        dic['forceOwnActs'] = False
        print "Initialized neuron layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
        return dic
class EltwiseSumLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['coeffs'] = mcp.safe_get_float_list(name, 'coeffs', default=[1.0] * len(dic['inputs']))
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
if len(set(dic['numInputs'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
dic['outputs'] = dic['numInputs'][0]
dic['usesInputs'] = False
dic['usesActs'] = False
dic['forceOwnActs'] = False
dic['requiresParams'] = True
print "Initialized elementwise sum layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class EltwiseMaxLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
if len(dic['inputs']) < 2:
raise LayerParsingError("Layer '%s': elementwise max layer must have at least 2 inputs, got %d." % (name, len(dic['inputs'])))
if len(set(dic['numInputs'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
dic['outputs'] = dic['numInputs'][0]
print "Initialized elementwise max layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class SumLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['stride'] = mcp.safe_get_int(name, 'stride', default=1)
self.verify_divisible(dic['numInputs'][0], dic['stride'], 'input dimensionality', 'stride')
dic['outputs'] = dic['numInputs'][0] / dic['stride']
print "Initialized sum layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class DropoutLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['enable'] = mcp.safe_get_bool(name, 'enable', default=True)
dic['keep'] = mcp.safe_get_float(name, 'keep', default=0.5)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['usesInputs'] = False
dic['usesActs'] = False
dic['forceOwnActs'] = False
dic['outputs'] = dic['numInputs'][0]
print "Initialized %s layer '%s' on GPUs %s, producing %d outputs" % (dic['type'], name, dic['gpus'], dic['outputs'])
return dic
class Dropout2LayerParser(DropoutLayerParser):
    # Parsed exactly like 'dropout'; only the 'type' string differs.
    # NOTE(review): the behavioral difference presumably lives in the
    # runtime's dispatch on the type name -- confirm on the C++ side.
    def __init__(self):
        DropoutLayerParser.__init__(self)
class WeightLayerParser(LayerWithInputParser):
LAYER_PAT = re.compile(r'^\s*([^\s\[]+)(?:\[(\d+)\])?\s*$') # matches things like layername[5], etc
def __init__(self, num_inputs=-1):
LayerWithInputParser.__init__(self, num_inputs=num_inputs)
@staticmethod
def get_layer_name(name_str):
m = WeightLayerParser.LAYER_PAT.match(name_str)
if not m:
return None
return m.group(1), m.group(2)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['momW'] = mcp.safe_get_float_list(name, 'momW')
dic['momB'] = mcp.safe_get_float(name, 'momB')
dic['superEps'] = mcp.safe_get_float(name, 'superEps', default=0.0)
dic['superMom'] = mcp.safe_get_float(name, 'superMom', default=0.0)
dic['wc'] = mcp.safe_get_float_list(name, 'wc', default=[0.0] * len(dic['inputs']))
dic['wball'] = mcp.safe_get_float_list(name, 'wball', default=[0.0] * len(dic['inputs']))
self.verify_num_params(['momW', 'wc', 'wball'])
# dic['wballNormed'] = [wball * nweights for wball,nweights in zip(dic['wball'], dic['weightsPerFilter'])]
dic['wballNormed'] = dic['wball']
# Convert from old-style 0.001,0.02 hyperparam specification to new-stye
# const[base=0.001],const[base=0.02] and so forth
def convert_scalars_to_schedules(scalars):
parts = scalars.split(',')
for i,p in enumerate(parts):
p = p.strip()
if re.match('(?:\d*\.)?\d+$', p):
parts[i] = 'const[base=%s]' % p
return parts
dic['epsW'] = self.parse_params(convert_scalars_to_schedules(mcp.safe_get(name, 'epsW')), lrs_parsers, 'epsW', 'learning rate schedule', num_params=len(dic['inputs']))
dic['epsB'] = self.parse_params(convert_scalars_to_schedules(mcp.safe_get(name, 'epsB')), lrs_parsers, 'epsB', 'learning rate schedule', num_params=1)[0]
dic['updatePeriod'] = mcp.safe_get_int(name, 'updatePeriod', default=0) # 0 means update as often as possible
# TODO: assert that updatePeriod is a multiple of active pass period, which is unknown here.
# the assert has to go in some post-processing step..
dic['gradConsumer'] = dic['epsB']['params']['base'] > 0 or any(w['params']['base'] > 0 for w in dic['epsW'])
@staticmethod
def unshare_weights(layer, layers, matrix_idx=None):
def unshare(layer, layers, indices):
for i in indices:
if layer['weightSourceLayers'][i] >= 0:
src_matrix_idx = layer['weightSourceMatrixIndices'][i]
layer['weightSourceLayers'][i] = ""
layer['weightSourceMatrixIndices'][i] = -1
layer['weights'][i] = layer['weights'][i].copy()
layer['weightsInc'][i] = n.zeros_like(layer['weights'][i])
print "Unshared weight matrix %s[%d] from %s[%d]." % (layer['name'], i, layer['weightSourceLayers'][i], src_matrix_idx)
else:
print "Weight matrix %s[%d] already unshared." % (layer['name'], i)
if 'weightSourceLayers' in layer:
unshare(layer, layers, range(len(layer['inputs'])) if matrix_idx is None else [matrix_idx])
# Load weight/biases initialization module
def call_init_func(self, param_name, shapes, input_idx=-1):
dic = self.dic
func_pat = re.compile('^([^\.]+)\.([^\(\)]+)\s*(?:\(([^,]+(?:,[^,]+)*)\))?$')
m = func_pat.match(dic[param_name])
if not m:
raise LayerParsingError("Layer '%s': '%s' parameter must have format 'moduleName.functionName(param1,param2,...)'; got: %s." % (dic['name'], param_name, dic['initWFunc']))
module, func = m.group(1), m.group(2)
params = m.group(3).split(',') if m.group(3) is not None else []
try:
mod = __import__(module)
return getattr(mod, func)(dic['name'], input_idx, shapes, params=params) if input_idx >= 0 else getattr(mod, func)(dic['name'], shapes, params=params)
except (ImportError, AttributeError, TypeError), e:
raise LayerParsingError("Layer '%s': %s." % (dic['name'], e))
def make_weights(self, initW, rows, cols, order='C'):
dic = self.dic
dic['weights'], dic['weightsInc'] = [], []
if dic['initWFunc']: # Initialize weights from user-supplied python function
# Initialization function is supplied in the format
# module.func
for i in xrange(len(dic['inputs'])):
dic['weights'] += [self.call_init_func('initWFunc', (rows[i], cols[i]), input_idx=i)]
if type(dic['weights'][i]) != n.ndarray:
raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], i, dic['initWFunc'], type(dic['weights'][i])))
if dic['weights'][i].dtype != n.float32:
raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must weight matrices consisting of single-precision floats. Got: %s." % (dic['name'], i, dic['initWFunc'], dic['weights'][i].dtype))
if dic['weights'][i].shape != (rows[i], cols[i]):
raise LayerParsingError("Layer '%s[%d]': weight matrix returned by weight initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], i, dic['initWFunc'], (rows[i], cols[i]), dic['weights'][i].shape))
# Convert to desired order
dic['weights'][i] = n.require(dic['weights'][i], requirements=order)
dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]
print "Layer '%s[%d]' initialized weight matrices from function %s" % (dic['name'], i, dic['initWFunc'])
else:
for i in xrange(len(dic['inputs'])):
if dic['weightSourceLayers'][i] != '': # Shared weight matrix
src_layer = self.prev_layers[dic['weightSourceLayers'][i]] if dic['weightSourceLayers'][i] != dic['name'] else dic
dic['weights'] += [src_layer['weights'][dic['weightSourceMatrixIndices'][i]]]
dic['weightsInc'] += [src_layer['weightsInc'][dic['weightSourceMatrixIndices'][i]]]
if dic['weights'][i].shape != (rows[i], cols[i]):
raise LayerParsingError("Layer '%s': weight sharing source matrix '%s' has shape %dx%d; should be %dx%d."
% (dic['name'], dic['weightSource'][i], dic['weights'][i].shape[0], dic['weights'][i].shape[1], rows[i], cols[i]))
print "Layer '%s' initialized weight matrix %d from %s" % (dic['name'], i, dic['weightSource'][i])
else:
dic['weights'] += [n.array(initW[i] * nr.randn(rows[i], cols[i]), dtype=n.single, order=order)]
dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]
def make_biases(self, rows, cols, order='C'):
dic = self.dic
if dic['initBFunc']:
dic['biases'] = self.call_init_func('initBFunc', (rows, cols))
if type(dic['biases']) != n.ndarray:
raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], dic['initBFunc'], type(dic['biases'])))
if dic['biases'].dtype != n.float32:
raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object consisting of single-precision floats. Got: %s." % (dic['name'], dic['initBFunc'], dic['biases'].dtype))
if dic['biases'].shape != (rows, cols):
raise LayerParsingError("Layer '%s': bias vector returned by bias initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], dic['initBFunc'], (rows, cols), dic['biases'].shape))
dic['biases'] = n.require(dic['biases'], requirements=order)
print "Layer '%s' initialized bias vector from function %s" % (dic['name'], dic['initBFunc'])
else:
dic['biases'] = dic['initB'] * n.ones((rows, cols), order=order, dtype=n.single)
dic['biasesInc'] = n.zeros_like(dic['biases'])
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['gradConsumer'] = True
dic['usesActs'] = False
dic['initW'] = mcp.safe_get_float_list(name, 'initW', default=0.01)
dic['initB'] = mcp.safe_get_float(name, 'initB', default=0)
dic['initWFunc'] = mcp.safe_get(name, 'initWFunc', default="")
dic['initBFunc'] = mcp.safe_get(name, 'initBFunc', default="")
# Find shared weight matrices
dic['weightSource'] = mcp.safe_get_list(name, 'weightSource', default=[''] * len(dic['inputs']))
self.verify_num_params(['initW'])
self.verify_num_params(['weightSource'], auto_expand=False)
dic['weightSourceLayers'] = []
dic['weightSourceMatrixIndices'] = []
for i, src_name in enumerate(dic['weightSource']):
src_layer_matrix_idx = -1
src_layer_name = ''
if src_name != '':
src_layer_match = WeightLayerParser.get_layer_name(src_name)
if src_layer_match is None:
raise LayerParsingError("Layer '%s': unable to parse weight sharing source '%s'. Format is layer[idx] or just layer, in which case idx=0 is used." % (name, src_name))
src_layer_name = src_layer_match[0]
src_layer_matrix_idx = int(src_layer_match[1]) if src_layer_match[1] is not None else 0
if src_layer_name not in prev_layers and src_layer_name != name:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' does not exist." % (name, src_layer_name))
# src_layer_idx = prev_names.index(src_layer_name) if src_layer_name != name else len(prev_names)
src_layer = prev_layers[src_layer_name] if src_layer_name != name else dic
if src_layer['gpu'] != dic['gpu']:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' runs on GPUs %s, while '%s' runs on GPUs %s." % (name, src_layer_name, src_layer['gpu'], name, dic['gpu']))
if src_layer['type'] != dic['type']:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' is of type '%s'; should be '%s'." % (name, src_layer_name, src_layer['type'], dic['type']))
if src_layer_name != name and len(src_layer['weights']) <= src_layer_matrix_idx:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' has %d weight matrices, but '%s[%d]' requested." % (name, src_layer_name, len(src_layer['weights']), src_name, src_layer_matrix_idx))
if src_layer_name == name and src_layer_matrix_idx >= i:
raise LayerParsingError("Layer '%s': weight sharing source '%s[%d]' not defined yet." % (name, name, src_layer_matrix_idx))
dic['weightSourceLayers'] += [src_layer_name]
dic['weightSourceMatrixIndices'] += [src_layer_matrix_idx]
return dic
class FCLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = mcp.safe_get_int(name, 'outputs')
dic['weightsPerFilter'] = dic['numInputs']
self.verify_num_range(dic['outputs'], 'outputs', 1, None)
self.make_weights(dic['initW'], dic['numInputs'], [dic['outputs']] * len(dic['numInputs']), order='F')
self.make_biases(1, dic['outputs'], order='F')
print "Initialized fully-connected layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class SplitFCLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['parts'] = mcp.safe_get_int(name, 'parts')
dic['outputs'] = mcp.safe_get_int(name, 'outputs') * dic['parts']
dic['weightsPerFilter'] = dic['numInputs']
self.verify_num_range(dic['parts'], 'parts', 1, None)
self.make_weights(dic['initW'], dic['numInputs'], [dic['outputs']/dic['parts']] * len(dic['numInputs']), order='F')
self.make_biases(1, dic['outputs'], order='F')
for i in xrange(len(dic['numInputs'])):
self.verify_divisible(dic['numInputs'][i], dic['parts'], 'numInputs', 'parts', input_idx=i)
print "Initialized split fully-connected layer '%s' on GPUs %s, producing %d outputs in %d parts" % (name, dic['gpus'], dic['outputs'], dic['parts'])
return dic
class LocalLayerParser(WeightLayerParser):
    # Shared base for convolutional and locally-connected layer parsers
    # (the rest of the class continues below).
    def __init__(self):
        WeightLayerParser.__init__(self)
# Convert convolutional layer to unshared, locally-connected layer
@staticmethod
def conv_to_local(layers, lname):
layer = layers[lname]
if layer['type'] == 'conv':
layer['type'] = 'local'
for inp,inpname in enumerate(layer['inputs']):
src_layer_name = layer['weightSourceLayers'][inp]
if src_layer_name != '':
src_layer = layers[src_layer_name]
src_matrix_idx = layer['weightSourceMatrixIndices'][inp]
LocalLayerParser.conv_to_local(layers, src_layer_name)
for w in ('weights', 'weightsInc'):
layer[w][inp] = src_layer[w][src_matrix_idx]
else:
layer['weights'][inp] = n.require(n.reshape(n.tile(n.reshape(layer['weights'][inp], (1, n.prod(layer['weights'][inp].shape))), (layer['modules'], 1)),
(layer['modules'] * layer['filterChannels'][inp] * layer['filterPixels'][inp], layer['filters'])),
requirements='C')
layer['weightsInc'][inp] = n.zeros_like(layer['weights'][inp])
if layer['sharedBiases']:
layer['biases'] = n.require(n.repeat(layer['biases'], layer['modules'], axis=0), requirements='C')
layer['biasesInc'] = n.zeros_like(layer['biases'])
print "Converted layer '%s' from convolutional to unshared, locally-connected" % layer['name']
# Also call this function on any layers sharing my weights
for l in layers:
if 'weightSourceLayers' in l and lname in l['weightSourceLayers']:
LocalLayerParser.conv_to_local(layers, l)
return layer
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['usesActs'] = False
# Supplied values
dic['channels'] = mcp.safe_get_int_list(name, 'channels')
dic['padding'] = mcp.safe_get_int_list(name, 'padding', default=[0]*len(dic['inputs']))
dic['stride'] = mcp.safe_get_int_list(name, 'stride', default=[1]*len(dic['inputs']))
dic['filterSize'] = mcp.safe_get_int_list(name, 'filterSize')
dic['filters'] = mcp.safe_get_int_list(name, 'filters')
dic['groups'] = mcp.safe_get_int_list(name, 'groups', default=[1]*len(dic['inputs']))
dic['initW'] = mcp.safe_get_float_list(name, 'initW')
dic['initCFunc'] = mcp.safe_get(name, 'initCFunc', default='')
dic['modulesX'] = mcp.safe_get_int(name, 'modulesX', default=0)
self.verify_num_params(['channels', 'padding', 'stride', 'filterSize', \
'filters', 'groups', 'initW'])
self.verify_num_range(dic['stride'], 'stride', 1, None)
self.verify_num_range(dic['filterSize'],'filterSize', 1, None)
self.verify_num_range(dic['padding'], 'padding', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_num_range(dic['groups'], 'groups', 1, None)
self.verify_num_range(dic['modulesX'], 'modulesX', 0, None)
for i in xrange(len(dic['filters'])):
self.verify_divisible(dic['filters'][i], 16, 'filters', input_idx=i)
# Computed values
dic['imgPixels'] = [numInputs/channels for numInputs,channels in zip(dic['numInputs'], dic['channels'])]
dic['imgSize'] = [int(n.sqrt(imgPixels)) for imgPixels in dic['imgPixels']]
self.verify_num_range(dic['imgSize'], 'imgSize', 1, None)
dic['filters'] = [filters*groups for filters,groups in zip(dic['filters'], dic['groups'])]
dic['filterPixels'] = [filterSize**2 for filterSize in dic['filterSize']]
if dic['modulesX'] <= 0:
dic['modulesX'] = [1 + int(ceil((2*padding + imgSize - filterSize) / float(stride))) for padding,imgSize,filterSize,stride in zip(dic['padding'], dic['imgSize'], dic['filterSize'], dic['stride'])]
else:
dic['modulesX'] = [dic['modulesX']] * len(dic['inputs'])
dic['filterChannels'] = [channels/groups for channels,groups in zip(dic['channels'], dic['groups'])]
if len(set(dic['modulesX'])) != 1 or len(set(dic['filters'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must produce equally-dimensioned output. Dimensions are: %s." % (name, ", ".join("%dx%dx%d" % (filters, modulesX, modulesX) for filters,modulesX in zip(dic['filters'], dic['modulesX']))))
dic['modulesX'] = dic['modulesX'][0]
dic['modules'] = dic['modulesX']**2
dic['filters'] = dic['filters'][0]
dic['outputs'] = dic['modules'] * dic['filters']
# dic['filterConns'] = [[]] * len(dic['inputs'])
for i in xrange(len(dic['inputs'])):
if dic['numInputs'][i] % dic['imgPixels'][i] != 0 or dic['imgSize'][i] * dic['imgSize'][i] != dic['imgPixels'][i]:
raise LayerParsingError("Layer '%s[%d]': has %-d dimensional input, not interpretable as square %d-channel images" % (name, i, dic['numInputs'][i], dic['channels'][i]))
if dic['channels'][i] > 3 and dic['channels'][i] % 4 != 0:
raise LayerParsingError("Layer '%s[%d]': number of channels must be smaller than 4 or divisible by 4" % (name, i))
# if dic['filterSize'][i] > totalPadding[i] + dic['imgSize'][i]:
# raise LayerParsingError("Layer '%s[%d]': filter size (%d) greater than image size + padding (%d)" % (name, i, dic['filterSize'][i], dic['padding'][i] + dic['imgSize'][i]))
if -dic['padding'][i] + dic['stride'][i] * (dic['modulesX'] - 1) + dic['filterSize'][i] < dic['imgSize'][i]:
raise LayerParsingError("Layer '%s[%d]': %dx%d output map with padding=%d, stride=%d does not cover entire input image." % (name, i, dic['modulesX'], dic['outputsX'], dic['padding'][i], dic['stride'][i]))
if dic['groups'][i] > 1:
self.verify_divisible(dic['channels'][i], 4*dic['groups'][i], 'channels', '4 * groups', input_idx=i)
self.verify_divisible(dic['channels'][i], dic['groups'][i], 'channels', 'groups', input_idx=i)
self.verify_divisible(dic['filters'], 16*dic['groups'][i], 'filters * groups', input_idx=i)
dic['padding'][i] = -dic['padding'][i]
# dic['overSample'] = [groups*filterChannels/channels for groups,filterChannels,channels in zip(dic['groups'], dic['filterChannels'], dic['channels'])]
dic['weightsPerFilter'] = [fc * (fz**2) for fc, fz in zip(dic['filterChannels'], dic['filterSize'])]
return dic
class ConvLayerParser(LocalLayerParser):
def __init__(self):
LocalLayerParser.__init__(self)
def add_params(self, mcp):
LocalLayerParser.add_params(self, mcp)
self.dic['wcNormMax'] = mcp.safe_get_float_list(self.dic['name'], 'wcNormMax', default=[0.0] * len(self.dic['inputs']))
self.dic['wcNormMin'] = mcp.safe_get_float_list(self.dic['name'], 'wcNormMin', default=[0.0] * len(self.dic['inputs']))
self.verify_num_params(['wcNormMax', 'wcNormMin'])
for min,max in zip(self.dic['wcNormMin'], self.dic['wcNormMax']):
if min > max:
raise LayerParsingError("Layer '%s': wcNormMin must be <= wcNormMax." % (self.dic['name']))
def parse(self, name, mcp, prev_layers, model):
dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
dic['sumWidth'] = mcp.safe_get_int(name, 'sumWidth')
dic['sharedBiases'] = mcp.safe_get_bool(name, 'sharedBiases', default=True)
num_biases = dic['filters'] if dic['sharedBiases'] else dic['modules']*dic['filters']
eltmult = lambda list1, list2: [l1 * l2 for l1,l2 in zip(list1, list2)]
self.make_weights(dic['initW'], eltmult(dic['filterPixels'], dic['filterChannels']), [dic['filters']] * len(dic['inputs']), order='C')
self.make_biases(num_biases, 1, order='C')
print "Initialized convolutional layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['modulesX'], dic['modulesX'], dic['filters'])
return dic
class LocalUnsharedLayerParser(LocalLayerParser):
def __init__(self):
LocalLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
eltmult = lambda list1, list2: [l1 * l2 for l1,l2 in zip(list1, list2)]
scmult = lambda x, lst: [x * l for l in lst]
self.make_weights(dic['initW'], scmult(dic['modules'], eltmult(dic['filterPixels'], dic['filterChannels'])), [dic['filters']] * len(dic['inputs']), order='C')
self.make_biases(dic['modules'] * dic['filters'], 1, order='C')
print "Initialized locally-connected layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['modulesX'], dic['modulesX'], dic['filters'])
return dic
class DataLayerParser(LayerParser):
def __init__(self):
LayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerParser.parse(self, name, mcp, prev_layers, model)
dic['dataIdx'] = mcp.safe_get_int(name, 'dataIdx')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['end'] = mcp.safe_get_int(name, 'end', default=model.train_data_provider.get_data_dims(idx=dic['dataIdx']))
dic['outputs'] = dic['end'] - dic['start']
# dic['usesActs'] = False
print "Initialized data layer '%s', producing %d outputs" % (name, dic['outputs'])
return dic
class SoftmaxLayerParser(LayerWithInputParser):
    """Parser for 'softmax' layers: takes exactly one input and produces the
    same number of outputs (a normalization over the input units)."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)
    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        # Output dimensionality mirrors the single input layer's.
        dic['outputs'] = dic['inputLayers'][0]['outputs']
        print "Initialized softmax layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
        return dic
class ConcatentionLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = sum(l['outputs'] for l in dic['inputLayers'])
dic['copyOffsets'] = [sum(dic['inputLayers'][j]['outputs'] for j in xrange(i)) for i in xrange(len(dic['inputLayers']))]
print "Initialized concatenation layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class PassThroughLayerParser(LayerWithInputParser):
    """Parser for 'pass' layers, which forward their inputs unchanged."""
    def __init__(self):
        LayerWithInputParser.__init__(self)
    # Note: this doesn't verify all the necessary constraints. Layer construction may still fail in C++ code.
    # For example, it does not verify that every layer only has one pass-through parent. Obviously having
    # two such parents is incoherent.
    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        # if len(dic['inputLayers']) == 1:
        #     raise LayerParsingError("Layer %s: pass-through layer must have more than one input." % dic['name'])
        # Replica count must match the input layers'.
        if len(dic['gpu']) != len(dic['inputLayers'][0]['gpu']):
            raise LayerParsingError("Layer '%s': number of replicas in pass-through layer must be equivalent to number of replicas in input layers." % dic['name'])
        # Reject another 'pass' layer that consumes the same input on an
        # overlapping GPU set -- the two would clobber each other.
        for inp in dic['inputLayers']:
            conflicting_layers = [l for l in prev_layers.values() if l['type'] == 'pass' and inp['name'] in l['inputs'] and len(set(dic['gpu']).intersection(set(l['gpu']))) > 0]
            if len(conflicting_layers) > 0:
                raise LayerParsingError("Layer '%s' conflicts with layer '%s'. Both pass-through layers take layer '%s' as input and operate on an overlapping set of GPUs." % (dic['name'], conflicting_layers[0]['name'], inp['name']))
        # Output is the concatenation of all inputs.
        dic['outputs'] = sum(l['outputs'] for l in dic['inputLayers'])
        # dic['copyOffsets'] = [sum(dic['inputLayers'][j]['outputs'] for j in xrange(i)) for i in xrange(len(dic['inputLayers']))]
        print "Initialized pass-through layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
        return dic
class PoolLayerParser(LayerWithInputParser):
    """Parser for spatial pooling ('pool') layers: max, maxabs, or avg."""
    def __init__(self):
        LayerWithInputParser.__init__(self, num_inputs=1)
    def add_params(self, mcp):
        # No pool-specific runtime parameters; kept for symmetry with siblings.
        LayerWithInputParser.add_params(self, mcp)
        dic, name = self.dic, self.dic['name']
    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['channels'] = mcp.safe_get_int(name, 'channels')
        dic['sizeX'] = mcp.safe_get_int(name, 'sizeX')  # pooling window edge length
        dic['start'] = mcp.safe_get_int(name, 'start', default=0)  # offset of the first window
        dic['stride'] = mcp.safe_get_int(name, 'stride')
        dic['outputsX'] = mcp.safe_get_int(name, 'outputsX', default=0)  # 0 => computed below
        dic['pool'] = mcp.safe_get(name, 'pool')
        # Avg pooler does not use its acts or inputs
        dic['usesActs'] = dic['pool'] != 'avg'
        dic['usesInputs'] = dic['pool'] != 'avg'
        # Flat input reinterpreted as square `channels`-channel images
        # (Python 2 integer division).
        dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
        dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
        if dic['pool'] == 'avg':
            dic['sum'] = mcp.safe_get_bool(name, 'sum', default=False)  # sum instead of mean
        self.verify_num_range(dic['sizeX'], 'sizeX', 1, dic['imgSize'])
        self.verify_num_range(dic['stride'], 'stride', 1, dic['sizeX'])
        self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
        self.verify_num_range(dic['channels'], 'channels', 1, None)
        if LayerWithInputParser.grad_consumers_below(dic):
            self.verify_divisible(dic['channels'], 16, 'channels')
        self.verify_str_in(dic['pool'], 'pool', ['max', 'maxabs', 'avg'])
        self.verify_img_size()
        if dic['outputsX'] <= 0:
            # Default output grid: enough windows to reach the image edge.
            dic['outputsX'] = int(ceil((dic['imgSize'] - dic['start'] - dic['sizeX']) / float(dic['stride']))) + 1;
        dic['outputs'] = dic['outputsX']**2 * dic['channels']
        print "Initialized %s-pooling layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (dic['pool'], name, dic['gpus'], dic['outputsX'], dic['outputsX'], dic['channels'])
        return dic
class CrossMapPoolLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['size'] = mcp.safe_get_int(name, 'size')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['stride'] = mcp.safe_get_int(name, 'stride')
dic['outputChannels'] = mcp.safe_get_int(name, 'outputs', default=0)
dic['pool'] = mcp.safe_get(name, 'pool')
dic['requiresParams'] = False
# Avg pooler does not use its acts or inputs
dic['usesActs'] = 'pool' != 'avg'
dic['usesInputs'] = 'pool' != 'avg'
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputs'] = dic['outputChannels'] * dic['imgPixels']
self.verify_num_range(dic['size'], 'size', 1, dic['channels'])
self.verify_num_range(dic['stride'], 'stride', 1, dic['size'])
self.verify_num_range(dic['outputChannels'], 'outputChannels', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_num_range(dic['start'], 'start', None, 0)
self.verify_str_in(dic['pool'], 'pool', ['max'])
self.verify_img_size()
covered_chans = dic['start'] + (dic['outputChannels'] - 1) * dic['stride'] + dic['size']
if covered_chans < dic['channels']:
raise LayerParsingError("Layer '%s': cross-map pooling with start=%d, stride=%d, size=%d, outputs=%d covers only %d of %d input channels." % \
(name, dic['start'], dic['stride'], dic['size'], dic['outputChannels'], covered_chans, dic['channels']))
print "Initialized cross-map %s-pooling layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (dic['pool'], name, dic['gpus'], dic['imgSize'], dic['imgSize'], dic['outputChannels'])
return dic
class NormLayerParser(LayerWithInputParser):
    """Parser for normalization layers: response ('rnorm'), contrast
    ('cnorm'), and cross-map response ('cmrnorm') normalization.
    One parser class parameterized by `norm_type`."""
    RESPONSE_NORM = 'response'
    CONTRAST_NORM = 'contrast'
    CROSSMAP_RESPONSE_NORM = 'cross-map response'

    def __init__(self, norm_type):
        LayerWithInputParser.__init__(self, num_inputs=1)
        self.norm_type = norm_type

    def add_params(self, mcp):
        # Runs after parse(), so dic['size'] is already set here.
        LayerWithInputParser.add_params(self, mcp)
        dic, name = self.dic, self.dic['name']
        dic['scale'] = mcp.safe_get_float(name, 'scale')
        # Normalize scale by the neighborhood size: `size` channels for
        # cross-map, size x size pixels for spatial norms.
        dic['scale'] /= dic['size'] if self.norm_type == self.CROSSMAP_RESPONSE_NORM else dic['size']**2
        dic['pow'] = mcp.safe_get_float(name, 'pow')
        dic['minDiv'] = mcp.safe_get_float(name, 'minDiv', default=1.0)

    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['requiresParams'] = True
        dic['channels'] = mcp.safe_get_int(name, 'channels')
        dic['size'] = mcp.safe_get_int(name, 'size')
        dic['blocked'] = mcp.safe_get_bool(name, 'blocked', default=False)
        # Flat input reinterpreted as square `channels`-channel images
        # (Python 2 integer division).
        dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
        dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
        # Contrast normalization layer does not use its inputs
        dic['usesInputs'] = self.norm_type != self.CONTRAST_NORM
        self.verify_num_range(dic['channels'], 'channels', 1, None)
        if self.norm_type == self.CROSSMAP_RESPONSE_NORM:
            # `size` counts channels for cross-map normalization.
            self.verify_num_range(dic['size'], 'size', 2, dic['channels'])
            if dic['channels'] % 16 != 0:
                raise LayerParsingError("Layer '%s': number of channels must be divisible by 16 when using crossMap" % name)
        else:
            # `size` counts pixels for spatial normalization.
            self.verify_num_range(dic['size'], 'size', 1, dic['imgSize'])

        if self.norm_type != self.CROSSMAP_RESPONSE_NORM and dic['channels'] > 3 and dic['channels'] % 4 != 0:
            raise LayerParsingError("Layer '%s': number of channels must be smaller than 4 or divisible by 4" % name)

        self.verify_img_size()

        # Normalization preserves dimensionality.
        dic['outputs'] = dic['imgPixels'] * dic['channels']
        print "Initialized %s-normalization layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (self.norm_type, name, dic['gpus'], dic['imgSize'], dic['imgSize'], dic['channels'])
        return dic
class CostParser(LayerWithInputParser):
    """Base parser for cost (loss) layers; handles cost aggregation via the
    optional 'children' config list and the per-case output filter."""
    def __init__(self, num_inputs=-1):
        LayerWithInputParser.__init__(self, num_inputs=num_inputs)

    def parse(self, name, mcp, prev_layers, model):
        dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
        dic['requiresParams'] = True
        # Stored as string because python can't pickle lambda functions
        # (the filter divides each cost term by the number of cases).
        dic['outputFilter'] = 'lambda costs,num_cases: [c/num_cases for c in costs]'
        dic['children'] = mcp.safe_get_list(name, 'children', default=[])
        # Aggregated costs only produce outputs which are additive.
        for c in dic['children']:
            if c not in prev_layers:
                raise LayerParsingError("Layer '%s': child cost layer '%s' not defined" % (name, c))
            if prev_layers[c]['type'] != dic['type']:
                raise LayerParsingError("Layer '%s': child cost layer '%s' must have same type as parent" % (name, c))
            # Mark the child so it knows its output is folded into a parent.
            prev_layers[c]['aggregated'] = 1
        dic['aggregated'] = dic['children'] != []
        # Cost layers have no activation function.
        del dic['neuron']
        return dic

    def add_params(self, mcp):
        LayerWithInputParser.add_params(self, mcp)
        dic, name = self.dic, self.dic['name']
        dic['coeff'] = mcp.safe_get_float(name, 'coeff')
        # A zero coefficient means this cost contributes no gradient.
        dic['gradConsumer'] = dic['coeff'] > 0
class CrossEntCostParser(CostParser):
    """Parser for the multinomial cross-entropy ('cost.crossent') cost.

    Input 0 must be a one-per-class label distribution; input 1 must be a
    softmax layer with one output per class.
    """
    def __init__(self):
        CostParser.__init__(self, num_inputs=2)
    def parse(self, name, mcp, prev_layers, model):
        dic = CostParser.parse(self, name, mcp, prev_layers, model)
        if dic['numInputs'][0] != model.train_data_provider.get_num_classes(): # first input must be labels
            raise LayerParsingError("Layer '%s': Dimensionality of first input must be equal to number of labels" % name)
        if dic['inputLayers'][1]['type'] != 'softmax':
            raise LayerParsingError("Layer '%s': Second input must be softmax layer" % name)
        if dic['numInputs'][1] != model.train_data_provider.get_num_classes():
            raise LayerParsingError("Layer '%s': Softmax input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
                                    % (name, dic['inputs'][1], model.train_data_provider.get_num_classes()))
        print "Initialized cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
        return dic
class LogregCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def add_params(self, mcp):
CostParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['topk'] = mcp.safe_get_int(name, 'topk', default=1)
if dic['topk'] > dic['numInputs'][1]:
raise LayerParsingError("Layer '%s': parameter 'topk'must not have value greater than the number of classess." % (name))
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
if dic['numInputs'][0] != 1: # first input must be labels
raise LayerParsingError("Layer '%s': dimensionality of first input must be 1" % name)
if dic['inputLayers'][1]['type'] != 'softmax':
raise LayerParsingError("Layer '%s': second input must be softmax layer" % name)
if dic['numInputs'][1] != model.train_data_provider.get_num_classes():
raise LayerParsingError("Layer '%s': softmax input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
% (name, dic['inputs'][1], model.train_data_provider.get_num_classes()))
print "Initialized logistic regression cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class HingeLossCostParser(CostParser):
    """Parser for the hinge-loss ('cost.hingeloss') cost.

    Input 0 is a 1-dimensional label vector; input 1 produces one score per
    class (not required to be a softmax).
    """
    def __init__(self):
        CostParser.__init__(self, num_inputs=2)
    def add_params(self, mcp):
        CostParser.add_params(self, mcp)
        dic, name = self.dic, self.dic['name']
        # dic['topk'] = mcp.safe_get_int(name, 'topk', default=1)
        # if dic['topk'] > dic['numInputs'][1]:
        #     raise LayerParsingError("Layer '%s': parameter 'topk'must not have value greater than the number of classess." % (name))
    def parse(self, name, mcp, prev_layers, model):
        dic = CostParser.parse(self, name, mcp, prev_layers, model)
        dic['requiresParams'] = True
        if dic['numInputs'][0] != 1: # first input must be labels
            raise LayerParsingError("Layer '%s': dimensionality of first input must be 1" % name)
        # Unlike logreg, the second input is not required to be a softmax.
        # if dic['inputLayers'][1]['type'] != 'softmax':
        #     raise LayerParsingError("Layer '%s': second input must be softmax layer" % name)
        if dic['numInputs'][1] != model.train_data_provider.get_num_classes():
            raise LayerParsingError("Layer '%s': input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
                                    % (name, dic['inputs'][1], model.train_data_provider.get_num_classes()))
        print "Initialized hinge loss cost '%s' on GPUs %s" % (name, dic['gpus'])
        return dic
class BinomialCrossEntCostParser(CostParser):
    """Parser for the binomial cross-entropy ('cost.bce') cost; also the
    base class for the detection cross-entropy ('cost.dce') cost."""
    def __init__(self):
        CostParser.__init__(self, num_inputs=2)
    def add_params(self, mcp):
        CostParser.add_params(self, mcp)
        # posWeight > 1 up-weights the positive-label term of the loss.
        self.dic['posWeight'] = mcp.safe_get_float(self.dic['name'], 'posWeight', default=1.0)
    def parse(self, name, mcp, prev_layers, model):
        dic = CostParser.parse(self, name, mcp, prev_layers, model)
        if dic['numInputs'][0] != dic['numInputs'][1]:
            raise LayerParsingError("Layer '%s': both inputs must produce the same number of outputs" % (name))
        # BCE expects probabilities in (0,1); warn (don't fail) otherwise.
        if 'neuron' not in dic['inputLayers'][1] or dic['inputLayers'][1]['neuron'] != 'logistic':
            print "WARNING: Layer '%s': input '%s' is not logistic, results may not be what you intend." % (dic['name'], dic['inputs'][1])
        if dic['type'] == 'cost.bce':
            # Only print for the concrete 'cost.bce' type, not for subclasses.
            print "Initialized binomial cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
        dic['computeSoftmaxErrorRate'] = True
        return dic
class DetectionCrossEntCostParser(BinomialCrossEntCostParser):
    """Parser for the detection cross-entropy ('cost.dce') cost: binomial
    cross-entropy over one binary detection output per label/class."""
    def __init__(self):
        BinomialCrossEntCostParser.__init__(self)
    def parse(self, name, mcp, prev_layers, model):
        dic = BinomialCrossEntCostParser.parse(self, name, mcp, prev_layers, model)
        if dic['numInputs'][0] != model.train_data_provider.get_num_classes(): # first input must be labels
            raise LayerParsingError("Layer '%s': Dimensionality of first input must be equal to number of labels" % name)
        dic['computeSoftmaxErrorRate'] = False
        # Per-class output post-processing, stored as source strings because
        # lambdas can't be pickled (see CostParser). The filter turns the raw
        # per-class cost triples into ratios; the formatter renders them with
        # the dataset's label names.
        dic['outputFilter'] = 'lambda costs,num_cases: [c/num_cases for c in costs[:2]] + [(class_cost[2] / class_cost[j] if class_cost[j] > 0 else n.inf) for class_cost in [costs[2:][i*3:(i+1)*3] for i in range(len(costs[2:])/3)] for j in range(2)]'
        dic['outputFilterFormatter'] = 'lambda self,costs: "(crossent) %.6f, (err) %.6f, " % (costs[0], costs[1]) + ", ".join("(%s) %.6f, %.6f" % (self.train_data_provider.batch_meta["label_names"][i/2-1],costs[i],costs[i+1]) for i in xrange(2, len(costs), 2))'
        print "Initialized detection cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
        return dic
class SumOfSquaresCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
print "Initialized sum-of-squares cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
# All the layer parsers: maps config-file layer type -> factory producing a
# fresh parser instance. (Several referenced classes are defined elsewhere
# in this file.)
layer_parsers = {'data' :       lambda : DataLayerParser(),
                 'fc':          lambda : FCLayerParser(),
                 'sfc':         lambda : SplitFCLayerParser(),
                 'conv':        lambda : ConvLayerParser(),
                 'local':       lambda : LocalUnsharedLayerParser(),
                 'softmax':     lambda : SoftmaxLayerParser(),
                 'eltsum':      lambda : EltwiseSumLayerParser(),
                 'eltmax':      lambda : EltwiseMaxLayerParser(),
                 'sum':         lambda : SumLayerParser(),
                 'neuron':      lambda : NeuronLayerParser(),
                 'pool':        lambda : PoolLayerParser(),
                 'cmpool':      lambda : CrossMapPoolLayerParser(),
                 'rnorm':       lambda : NormLayerParser(NormLayerParser.RESPONSE_NORM),
                 'cnorm':       lambda : NormLayerParser(NormLayerParser.CONTRAST_NORM),
                 'cmrnorm':     lambda : NormLayerParser(NormLayerParser.CROSSMAP_RESPONSE_NORM),
                 'nailbed':     lambda : NailbedLayerParser(),
                 'blur':        lambda : GaussianBlurLayerParser(),
                 'href':        lambda : HorizontalReflectionLayerParser(),
                 'resize':      lambda : ResizeLayerParser(),
                 'rgb2yuv':     lambda : RGBToYUVLayerParser(),
                 'rgb2lab':     lambda : RGBToLABLayerParser(),
                 'rscale':      lambda : RandomScaleLayerParser(),
                 'crop':        lambda : CropLayerParser(),
                 'concat':      lambda : ConcatentionLayerParser(),
                 'pass':        lambda : PassThroughLayerParser(),
                 'dropout':     lambda : DropoutLayerParser(),
                 'dropout2':    lambda : Dropout2LayerParser(),
                 'cost.logreg': lambda : LogregCostParser(),
                 'cost.crossent': lambda : CrossEntCostParser(),
                 'cost.bce':    lambda : BinomialCrossEntCostParser(),
                 'cost.dce':    lambda : DetectionCrossEntCostParser(),
                 'cost.hingeloss': lambda : HingeLossCostParser(),
                 'cost.sum2':   lambda : SumOfSquaresCostParser()}
# All the neuron parsers
# This isn't a name --> parser mapping as the layer parsers above because neurons don't have fixed names.
# A user may write tanh[0.5,0.25], etc.
# Kept sorted by type so help/listing output is alphabetical.
neuron_parsers = sorted([
                         NeuronParser('ident', 'f(x) = x', uses_acts=False, uses_inputs=False),
                         NeuronParser('logistic', 'f(x) = 1 / (1 + e^-x)', uses_acts=True, uses_inputs=False),
                         NeuronParser('abs', 'f(x) = |x|', uses_acts=False, uses_inputs=True),
                         NeuronParser('relu', 'f(x) = max(0, x)', uses_acts=True, uses_inputs=False),
                         NeuronParser('nrelu', 'f(x) = max(0, x) + noise', uses_acts=True, uses_inputs=False),
                         NeuronParser('softrelu', 'f(x) = log(1 + e^x)', uses_acts=True, uses_inputs=False),
                         NeuronParser('square', 'f(x) = x^2', uses_acts=False, uses_inputs=True),
                         NeuronParser('sqrt', 'f(x) = sqrt(x)', uses_acts=True, uses_inputs=False),
                         # Parameterized neurons: bracketed values are user-supplied.
                         ParamNeuronParser('log[a]', 'f(x) = log(a + x)', uses_acts=False, uses_inputs=True),
                         ParamNeuronParser('tanh[a,b]', 'f(x) = a * tanh(b * x)', uses_acts=True, uses_inputs=False),
                         ParamNeuronParser('brelu[a]', 'f(x) = min(a, max(0, x))', uses_acts=True, uses_inputs=False),
                         ParamNeuronParser('linear[a,b]', 'f(x) = a * x + b', uses_acts=True, uses_inputs=False),
                         ParamNeuronParser('drelu[a]', 'f(x) = x - a * tanh(x / a)', uses_acts=False, uses_inputs=True),
                         ParamNeuronParser('softlif[m,t,r,a,g,n]', 'f(x) = m / (t + r * log1p(1/j)) + N(0, m*n), where j = g*softrelu(a*x/g)', uses_acts=False, uses_inputs=True),
                         ParamNeuronParser('softlifalpha[m,t,r,a,g,s]', 'softlif with alpha synapse noise', uses_acts=False, uses_inputs=True),
                         ParamNeuronParser('softlifalpharc[m,t,r,a,g,s]', 'softlif with alpha-RC synapse noise', uses_acts=False, uses_inputs=True),
                         ], key=lambda x:x.type)
# Learning rate schedules: bracketed parameters are prefixed with their type
# ('f' = float, 'i' = int) and separated by semicolons.
lrs_parsers = sorted([ParamParser('const[fbase]'),
                      ParamParser('linear[fbase;ftgtFactor]'),
                      ParamParser('exp[fbase;ftgtFactor]'),
                      ParamParser('dexp[fbase;ftgtFactor;inumSteps]')])
|
hunse/cuda-convnet2
|
layer.py
|
Python
|
apache-2.0
| 83,382
|
[
"Gaussian",
"NEURON"
] |
084df6139955a8cea96ae121e2cda24545ab15a0110f7a2cd06611f1790540a7
|
# __Author__: 'Brian Westerman'
# __Date__: 2/15/16
# __File__: view.py
import numpy as np
class View:
    """Holds view reference parameters and builds the 4x4 view transformation
    matrix (VTM) that maps view coordinates to screen pixels.

    Fields (numpy matrices/arrays):
      vrp    -- view reference point (center of the view volume's front)
      vpn    -- view plane normal (viewing direction)
      vup    -- view "up" vector
      u      -- view "right" axis
      extent -- view volume extent along u, vup, vpn
      screen -- screen size in pixels
      offset -- pixel buffer at the top/left window edges
    """
    # constructor
    def __init__(self):
        # automatically resets the view
        self.reset()

    def reset(self,
              vrp=np.matrix([0.5, 0.5, 1]),
              vpn=np.matrix([0, 0, -1]),
              vup=np.matrix([0, 1, 0]),
              u=np.matrix([-1, 0, 0]),
              extent=np.array([1, 1, 1]),
              screen=np.array([400, 400]),
              offset=np.array([20, 20])):
        # initializes default values
        # NOTE(review): the defaults are mutable numpy objects shared across
        # calls. The methods below only rebind (never mutate in place) these
        # fields, so this appears safe today -- confirm before adding any
        # in-place operations. Note also the defaults have integer dtype.
        self.vrp = vrp
        self.vpn = vpn
        self.vup = vup
        self.u = u
        self.extent = extent
        self.screen = screen
        self.offset = offset
        self.translationX = 0
        self.translationY = 0

    def build(self):
        """Assemble and return the 4x4 view transformation matrix (vtm)."""
        # Generate a 4x4 identity matrix, which will be the basis for the view matrix
        vtm = np.identity(4, dtype=float)
        # Generate a translation matrix to move the VRP to the origin and then premultiply the vtm by the translation matrix
        t1 = np.matrix([[1, 0, 0, -self.vrp[0, 0]],
                        [0, 1, 0, -self.vrp[0, 1]],
                        [0, 0, 1, -self.vrp[0, 2]],
                        [0, 0, 0, 1]])
        vtm = t1 * vtm
        # Calculate the view reference axes tu, tvup, tvpn
        # tu is the cross product (np.cross) of the vup and vpn vectors
        tu = np.cross(self.vup, self.vpn)
        # tvup is the cross product of the vpn and tu vectors
        tvup = np.cross(self.vpn, tu)
        # tvpn is a copy of the vpn vector
        # NOTE(review): this is actually an alias, not a copy; self.vpn is
        # rebound to it a few lines below, which is harmless.
        tvpn = self.vpn
        # Normalize the view axes tu, tvup, and tvpn to unit length
        # du, dv, and dz are all a part of the normalization process, make explicit?
        # Bruce's edits: (didn't work, caused an error, and rotation/translation/scaling still work fine for axes and data)
        # tu /= np.linalg.norm(tu)
        # tvup /= np.linalg.norm(tvup)
        # tvpn /= np.linalg.norm(tvpn)
        # NOTE(review): the three calls below compute norms and DISCARD the
        # results -- the axes are NOT normalized here. The commented-out
        # in-place version presumably failed because the default vectors have
        # integer dtype (in-place true division on int arrays raises) -- TODO
        # confirm; `tu = tu / np.linalg.norm(tu)` would avoid that. In
        # practice the axes stay near unit length because rotations are
        # orthonormal.
        np.linalg.norm(tu)
        np.linalg.norm(tvup)
        np.linalg.norm(tvpn)
        # Copy the orthonormal axes tu, tvup, and tvpn back to self.u, self.vup and self.vpn
        self.u = tu
        self.vup = tvup
        self.vpn = tvpn
        # align the axes: rotate so (u, vup, vpn) map onto (x, y, z)
        r1 = np.matrix([[tu[0,0], tu[0, 1], tu[0, 2], 0.0],
                        [tvup[0, 0], tvup[0, 1], tvup[0, 2], 0.0],
                        [tvpn[0, 0], tvpn[0, 1], tvpn[0, 2], 0.0],
                        [0.0, 0.0, 0.0, 1.0]])
        vtm = r1 * vtm
        # Perspective view transformation goes here (not implemented)
        #p = np.matrix([[1, 0, 0, 0],
        #               [0, 1, 0, 0],
        #               [0, 0, 1, 0],
        #               [0, 0, 1/d, 0]])
        #p = p ####
        # Translate the lower left corner of the view space to the origin. Since the axes are aligned, this is just a
        # translation by half the extent of the view volume in the X and Y view axes
        t2 = np.matrix([[1, 0, 0, 0.5*self.extent[0]],
                        [0, 1, 0, 0.5*self.extent[1]],
                        [0, 0, 1, 0],
                        [0, 0, 0, 1]])
        vtm = t2 * vtm
        # Use the extent and screen size values to scale to the screen
        # (negative X/Y scale flips the axes into screen orientation)
        s1 = np.matrix([[-self.screen[0]/self.extent[0], 0, 0, 0],
                        [0, -self.screen[1]/self.extent[1], 0, 0],
                        [0, 0, 1.0/self.extent[2], 0],
                        [0, 0, 0, 1]])
        vtm = s1 * vtm
        # Translate the lower left corner to the origin and add the view offset, which gives a little buffer around the
        # top and left edges of the window
        t3 = np.matrix([[1, 0, 0, self.screen[0]+self.offset[0]],
                        [0, 1, 0, self.screen[1]+self.offset[1]],
                        [0, 0, 1, 0],
                        [0, 0, 0, 1]])
        vtm = t3 * vtm
        return vtm

    def clone(self):
        """Return a deep copy of this View (fields copied, not aliased)."""
        # make a new View object
        clone = View()
        # copy all fields of the current View object to the new View object
        clone.vrp = np.copy(self.vrp)
        clone.vpn = np.copy(self.vpn)
        clone.vup = np.copy(self.vup)
        clone.u = np.copy(self.u)
        clone.extent = np.copy(self.extent)
        clone.screen = np.copy(self.screen)
        clone.offset = np.copy(self.offset)
        clone.translationX = self.translationX
        clone.translationY = self.translationY
        return clone

    def rotateVRC(self, thetaU, thetaVUP, thetaVPN):
        """Rotate the view reference coordinates by the given angles (radians)
        about the u, vup, and vpn axes, around the view volume's center."""
        # translate the center of rotation (the middle of the extent volume) to the origin, rotate around the Y axis,
        # rotate around the X axis, then translate back by the opposite of the first translation
        # Rows: vrp as a homogeneous point (w=1), then u/vup/vpn as vectors (w=0).
        tvrc = np.matrix([[self.vrp[0, 0], self.vrp[0, 1], self.vrp[0, 2], 1],
                          [self.u[0, 0], self.u[0, 1], self.u[0, 2], 0],
                          [self.vup[0, 0], self.vup[0, 1], self.vup[0, 2], 0],
                          [self.vpn[0, 0], self.vpn[0, 1], self.vpn[0, 2], 0]])
        # Center of rotation: vrp pushed halfway into the view volume along vpn.
        point = np.matrix(self.vrp + self.vpn * self.extent[2] * 0.5)
        t1 = np.matrix([[1, 0, 0, -point[0, 0]],
                        [0, 1, 0, -point[0, 1]],
                        [0, 0, 1, -point[0, 2]],
                        [0, 0, 0, 1]])
        # Change of basis into the view axes, so the rotations below act
        # about u, vup, and vpn respectively.
        Rxyz = np.matrix([[self.u[0,0], self.u[0, 1], self.u[0, 2], 0.0],
                          [self.vup[0, 0], self.vup[0, 1], self.vup[0, 2], 0.0],
                          [self.vpn[0, 0], self.vpn[0, 1], self.vpn[0, 2], 0.0],
                          [0.0, 0.0, 0.0, 1.0]])
        # Elementary rotations about x (thetaU), y (thetaVUP), z (thetaVPN).
        r1 = np.matrix([[1, 0, 0, 0],
                        [0, np.cos(thetaU), -np.sin(thetaU), 0],
                        [0, np.sin(thetaU), np.cos(thetaU), 0],
                        [0, 0, 0, 1]])
        r2 = np.matrix([[np.cos(thetaVUP), 0, np.sin(thetaVUP), 0],
                        [0, 1, 0, 0],
                        [-np.sin(thetaVUP), 0, np.cos(thetaVUP), 0],
                        [0, 0, 0, 1]])
        r3 = np.matrix([[np.cos(thetaVPN), -np.sin(thetaVPN), 0, 0],
                        [np.sin(thetaVPN), np.cos(thetaVPN), 0, 0],
                        [0, 0, 1, 0],
                        [0, 0, 0, 1]])
        t2 = np.matrix([[1, 0, 0, point[0, 0]],
                        [0, 1, 0, point[0, 1]],
                        [0, 0, 1, point[0, 2]],
                        [0, 0, 0, 1]])
        # Compose: to-origin, into view basis, rotate, out of view basis, back.
        tvrc = (t2 * Rxyz.T * r3 * r2 * r1 * Rxyz * t1 * tvrc.T).T
        # Copy values from tvrc back into VRP, U, VUP, and VPN
        self.vrp = tvrc[0, 0:3]
        self.u = tvrc[1, 0:3]
        self.vup = tvrc[2, 0:3]
        self.vpn = tvrc[3, 0:3]
        # Normalize U, VUP, and VPN
        # NOTE(review): as in build(), these calls discard their results and
        # do not actually normalize the axes -- TODO confirm intent.
        np.linalg.norm(self.u)
        np.linalg.norm(self.vup)
        np.linalg.norm(self.vpn)
|
bhwester/computer-science-projects
|
data_analysis_and_visualization_system/view.py
|
Python
|
mit
| 6,811
|
[
"Brian"
] |
b6fc1a9795420b94419d48dc02875a8ac7f7c0abd511f49b15e1e63a1b844d97
|
# Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""PhyloXML reader/parser, writer, and associated functions.
Instantiates tree elements from a parsed PhyloXML file, and constructs an XML
file from a `Bio.Phylo.PhyloXML` object.
About capitalization:
- phyloXML means the file format specification
- PhyloXML means the Biopython module `Bio.Phylo.PhyloXML` and its classes
- Phyloxml means the top-level class used by `PhyloXMLIO.read` (but not
`Bio.Phylo.read`!), containing a list of Phylogenies (objects derived from
`BaseTree.Tree`)
"""
import sys
from Bio._py3k import basestring
from Bio._py3k import unicode
from Bio.Phylo import PhyloXML as PX
# For speed try to use cElementTree rather than ElementTree
try:
if (3, 0) <= sys.version_info[:2] <= (3, 1):
# Workaround for bug in python 3.0 and 3.1,
# see http://bugs.python.org/issue9257
from xml.etree import ElementTree as ElementTree
else:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree as ElementTree
# Recognize the phyloXML namespace when parsing
# See http://effbot.org/zone/element-namespaces.htm
NAMESPACES = {
'phy': 'http://www.phyloxml.org',
}
try:
register_namespace = ElementTree.register_namespace
except AttributeError:
if not hasattr(ElementTree, '_namespace_map'):
# cElementTree needs the pure-Python xml.etree.ElementTree
from xml.etree import ElementTree as ET_py
ElementTree._namespace_map = ET_py._namespace_map
def register_namespace(prefix, uri):
ElementTree._namespace_map[uri] = prefix
for prefix, uri in NAMESPACES.items():
register_namespace(prefix, uri)
# Tell ElementTree how to write to text handles
DEFAULT_ENCODING = ("unicode" if sys.version_info[0] >= 3 else "utf-8")
class PhyloXMLError(Exception):
    """Exception raised when PhyloXML object construction cannot continue.

    XML syntax errors will be found and raised by the underlying ElementTree
    module; this exception is for valid XML that breaks the phyloXML
    specification.
    """
# ---------------------------------------------------------
# Public API
def read(file):
    """Parse a phyloXML file or stream and build a tree of Biopython objects.

    The children of the root node are phylogenies and possibly other arbitrary
    (non-phyloXML) objects.

    :returns: a single `Bio.Phylo.PhyloXML.Phyloxml` object.
    """
    parser = Parser(file)
    return parser.read()
def parse(file):
    """Iterate over the phylogenetic trees in a phyloXML file.

    This ignores any additional data stored at the top level, but may be more
    memory-efficient than the `read` function.

    :returns: a generator of `Bio.Phylo.PhyloXML.Phylogeny` objects.
    """
    parser = Parser(file)
    return parser.parse()
def write(obj, file, encoding=DEFAULT_ENCODING, indent=True):
    """Write a phyloXML file.

    :Parameters:
        obj
            an instance of `Phyloxml`, `Phylogeny` or `BaseTree.Tree`, or an
            iterable of either of the latter two. The object will be converted
            to a Phyloxml object before serialization.
        file
            either an open handle or a file name.
    """
    def _as_phylogeny(tree):
        # Coerce one tree-like object to a PX.Phylogeny.
        # NB: PX.Clade is tested before the BaseTree types, so a phyloXML
        # clade takes its own conversion path.
        if isinstance(tree, PX.Phylogeny):
            return tree
        if isinstance(tree, PX.Clade):
            return tree.to_phylogeny()
        if isinstance(tree, PX.BaseTree.Tree):
            return PX.Phylogeny.from_tree(tree)
        if isinstance(tree, PX.BaseTree.Clade):
            return PX.Phylogeny.from_tree(PX.BaseTree.Tree(root=tree))
        raise ValueError("iterable must contain Tree or Clade types")

    if isinstance(obj, PX.Phyloxml):
        phyloxml = obj
    elif isinstance(obj, (PX.BaseTree.Tree, PX.BaseTree.Clade)):
        phyloxml = _as_phylogeny(obj).to_phyloxml()
    elif hasattr(obj, '__iter__'):
        # Lazily convert each member; Phyloxml accepts a generator.
        phyloxml = PX.Phyloxml({}, phylogenies=(_as_phylogeny(t) for t in obj))
    else:
        raise ValueError("First argument must be a Phyloxml, Phylogeny, "
                         "Tree, or iterable of Trees or Phylogenies.")
    return Writer(phyloxml).write(file, encoding=encoding, indent=indent)
# ---------------------------------------------------------
# Functions I wish ElementTree had
def _local(tag):
"""Extract the local tag from a namespaced tag name."""
if tag[0] == '{':
return tag[tag.index('}') + 1:]
return tag
def _split_namespace(tag):
"""Split a tag into namespace and local tag strings."""
try:
return tag[1:].split('}', 1)
except:
return ('', tag)
def _ns(tag, namespace=NAMESPACES['phy']):
    """Format an XML tag with the given namespace (Clark notation)."""
    return '{' + namespace + '}' + tag
def _get_child_as(parent, tag, construct):
    """Find a child node by tag, and pass it through a constructor.

    Returns None if no matching child is found.
    """
    node = parent.find(_ns(tag))
    return construct(node) if node is not None else None
def _get_child_text(parent, tag, construct=unicode):
    """Find a child node by tag; pass its text through a constructor.

    Returns None if no matching child is found or the child has no text.
    """
    node = parent.find(_ns(tag))
    if node is None or not node.text:
        return None
    return construct(node.text)
def _get_children_as(parent, tag, construct):
    """Find child nodes by tag; pass each through a constructor.

    Returns an empty list if no matching child is found.
    """
    return [construct(node) for node in parent.findall(_ns(tag))]
def _get_children_text(parent, tag, construct=unicode):
    """Find child nodes by tag; pass each node's text through a constructor.

    Children with empty text are skipped.  Returns an empty list if no
    matching child is found.
    """
    return [construct(node.text)
            for node in parent.findall(_ns(tag)) if node.text]
def _indent(elem, level=0):
    """Add line breaks and indentation to ElementTree in-place.

    Recursively rewrites each element's ``text``/``tail`` so serialized
    output is pretty-printed with two-space indentation per level.
    Sources:
    - http://effbot.org/zone/element-lib.htm#prettyprint
    - http://infix.se/2007/02/06/gentlemen-indent-your-xml
    """
    # Indentation string for children of this element (one level deeper).
    i = "\n" + level * "  "
    if len(elem):
        # Element has children: open its text onto a new, deeper line.
        if not elem.text or not elem.text.strip():
            elem.text = i + "  "
        for e in elem:
            _indent(e, level + 1)
            if not e.tail or not e.tail.strip():
                e.tail = i + "  "
        # NB: 'e' still refers to the LAST child here -- its tail closes the
        # parent, so it gets the parent's (shallower) indent.
        if not e.tail or not e.tail.strip():
            e.tail = i
    else:
        # Leaf element: only adjust the tail, and never at the root (level 0).
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
# ---------------------------------------------------------
# INPUT
# ---------------------------------------------------------
def _str2bool(text):
if text == 'true' or text == '1':
return True
if text == 'false' or text == '0':
return False
raise ValueError('String could not be converted to boolean: ' + text)
def _dict_str2bool(dct, keys):
    """Return a copy of *dct* with the values at *keys* converted to bool."""
    result = dct.copy()
    for name in keys:
        if name in result:
            result[name] = _str2bool(result[name])
    return result
def _int(text):
if text is not None:
try:
return int(text)
except Exception:
return None
def _float(text):
if text is not None:
try:
return float(text)
except Exception:
return None
def _collapse_wspace(text):
"""Replace all spans of whitespace with a single space character.
Also remove leading and trailing whitespace. See "Collapse Whitespace
Policy" in the phyloXML spec glossary:
http://phyloxml.org/documentation/version_100/phyloxml.xsd.html#Glossary
"""
if text is not None:
return ' '.join(text.split())
# NB: Not currently used
def _replace_wspace(text):
"""Replace tab, LF and CR characters with spaces, but don't collapse.
See "Replace Whitespace Policy" in the phyloXML spec glossary:
http://phyloxml.org/documentation/version_100/phyloxml.xsd.html#Glossary
"""
for char in ('\t', '\n', '\r'):
if char in text:
text = text.replace(char, ' ')
return text
class Parser(object):
    """Methods for parsing all phyloXML nodes from an XML stream.

    To minimize memory use, the tree of ElementTree parsing events is cleared
    after completing each phylogeny, clade, and top-level 'other' element.
    Elements below the clade level are kept in memory until parsing of the
    current clade is finished -- this shouldn't be a problem because clade is
    the only recursive element, and non-clade nodes below this level are of
    bounded size.
    """
    def __init__(self, file):
        # Get an iterable context for XML parsing events
        context = iter(ElementTree.iterparse(file, events=('start', 'end')))
        # The first event is the 'start' of the document root element.
        event, root = next(context)
        self.root = root
        self.context = context
    def read(self):
        """Parse the phyloXML file and create a single Phyloxml object."""
        phyloxml = PX.Phyloxml(dict((_local(key), val)
                                    for key, val in self.root.items()))
        # Nesting depth within a non-phyloXML ("other") element subtree.
        other_depth = 0
        for event, elem in self.context:
            namespace, localtag = _split_namespace(elem.tag)
            if event == 'start':
                if namespace != NAMESPACES['phy']:
                    other_depth += 1
                    continue
                if localtag == 'phylogeny':
                    # _parse_phylogeny consumes events up to </phylogeny>.
                    phylogeny = self._parse_phylogeny(elem)
                    phyloxml.phylogenies.append(phylogeny)
            if event == 'end' and namespace != NAMESPACES['phy']:
                # Deal with items not specified by phyloXML
                other_depth -= 1
                if other_depth == 0:
                    # We're directly under the root node -- evaluate
                    otr = self.other(elem, namespace, localtag)
                    phyloxml.other.append(otr)
                    self.root.clear()
        return phyloxml
    def parse(self):
        """Parse the phyloXML file incrementally and return each phylogeny."""
        phytag = _ns('phylogeny')
        for event, elem in self.context:
            if event == 'start' and elem.tag == phytag:
                yield self._parse_phylogeny(elem)
    # Special parsing cases -- incremental, using self.context
    def _parse_phylogeny(self, parent):
        """Parse a single phylogeny within the phyloXML tree.

        Recursively builds a phylogenetic tree with help from parse_clade, then
        clears the XML event history for the phylogeny element and returns
        control to the top-level parsing function.
        """
        phylogeny = PX.Phylogeny(**_dict_str2bool(parent.attrib,
                                                  ['rooted', 'rerootable']))
        list_types = {
            # XML tag, plural attribute
            'confidence': 'confidences',
            'property': 'properties',
            'clade_relation': 'clade_relations',
            'sequence_relation': 'sequence_relations',
        }
        for event, elem in self.context:
            namespace, tag = _split_namespace(elem.tag)
            if event == 'start' and tag == 'clade':
                assert phylogeny.root is None, \
                    "Phylogeny object should only have 1 clade"
                phylogeny.root = self._parse_clade(elem)
                continue
            if event == 'end':
                if tag == 'phylogeny':
                    # End of this phylogeny: drop its event history.
                    parent.clear()
                    break
                # Handle the other non-recursive children
                if tag in list_types:
                    # e.g. self.confidence(elem) appended to .confidences
                    getattr(phylogeny, list_types[tag]).append(
                        getattr(self, tag)(elem))
                # Complex types
                elif tag in ('date', 'id'):
                    setattr(phylogeny, tag, getattr(self, tag)(elem))
                # Simple types
                elif tag in ('name', 'description'):
                    setattr(phylogeny, tag, _collapse_wspace(elem.text))
                # Unknown tags
                elif namespace != NAMESPACES['phy']:
                    phylogeny.other.append(self.other(elem, namespace, tag))
                    parent.clear()
                else:
                    # NB: This shouldn't happen in valid files
                    raise PhyloXMLError('Misidentified tag: ' + tag)
        return phylogeny
    # Child tags handled by a dedicated parser method of the same name.
    _clade_complex_types = ['color', 'events', 'binary_characters', 'date']
    # XML tag -> plural Clade attribute holding a list of parsed values.
    _clade_list_types = {
        'confidence': 'confidences',
        'distribution': 'distributions',
        'reference': 'references',
        'property': 'properties',
    }
    # All tags that belong to the clade itself (vs. a nested element).
    _clade_tracked_tags = set(_clade_complex_types).union(_clade_list_types.keys()).union(
        ['branch_length', 'name', 'node_id', 'width'])
    def _parse_clade(self, parent):
        """Parse a Clade node and its children, recursively."""
        clade = PX.Clade(**parent.attrib)
        if clade.branch_length is not None:
            clade.branch_length = float(clade.branch_length)
        # NB: Only evaluate nodes at the current level
        tag_stack = []
        for event, elem in self.context:
            namespace, tag = _split_namespace(elem.tag)
            if event == 'start':
                if tag == 'clade':
                    clade.clades.append(self._parse_clade(elem))
                    continue
                if tag == 'taxonomy':
                    clade.taxonomies.append(self._parse_taxonomy(elem))
                    continue
                if tag == 'sequence':
                    clade.sequences.append(self._parse_sequence(elem))
                    continue
                if tag in self._clade_tracked_tags:
                    # Track nesting so 'end' events of identically named tags
                    # inside sub-elements don't get attributed to this clade.
                    tag_stack.append(tag)
            if event == 'end':
                if tag == 'clade':
                    elem.clear()
                    break
                if tag != tag_stack[-1]:
                    continue
                tag_stack.pop()
                # Handle the other non-recursive children
                if tag in self._clade_list_types:
                    getattr(clade, self._clade_list_types[tag]).append(
                        getattr(self, tag)(elem))
                elif tag in self._clade_complex_types:
                    setattr(clade, tag, getattr(self, tag)(elem))
                elif tag == 'branch_length':
                    # NB: possible collision with the attribute
                    if clade.branch_length is not None:
                        raise PhyloXMLError(
                            'Attribute branch_length was already set '
                            'for this Clade.')
                    clade.branch_length = _float(elem.text)
                elif tag == 'width':
                    clade.width = _float(elem.text)
                elif tag == 'name':
                    clade.name = _collapse_wspace(elem.text)
                elif tag == 'node_id':
                    clade.node_id = PX.Id(elem.text.strip(),
                                          elem.attrib.get('provider'))
                elif namespace != NAMESPACES['phy']:
                    clade.other.append(self.other(elem, namespace, tag))
                    elem.clear()
                else:
                    raise PhyloXMLError('Misidentified tag: ' + tag)
        return clade
    def _parse_sequence(self, parent):
        """Parse a sequence element and its children incrementally."""
        sequence = PX.Sequence(**parent.attrib)
        for event, elem in self.context:
            namespace, tag = _split_namespace(elem.tag)
            if event == 'end':
                if tag == 'sequence':
                    parent.clear()
                    break
                if tag in ('accession', 'mol_seq', 'uri',
                           'domain_architecture'):
                    setattr(sequence, tag, getattr(self, tag)(elem))
                elif tag == 'annotation':
                    sequence.annotations.append(self.annotation(elem))
                elif tag == 'name':
                    sequence.name = _collapse_wspace(elem.text)
                elif tag in ('symbol', 'location'):
                    setattr(sequence, tag, elem.text)
                elif namespace != NAMESPACES['phy']:
                    sequence.other.append(self.other(elem, namespace, tag))
                    parent.clear()
        return sequence
    def _parse_taxonomy(self, parent):
        """Parse a taxonomy element and its children incrementally."""
        taxonomy = PX.Taxonomy(**parent.attrib)
        for event, elem in self.context:
            namespace, tag = _split_namespace(elem.tag)
            if event == 'end':
                if tag == 'taxonomy':
                    parent.clear()
                    break
                if tag in ('id', 'uri'):
                    setattr(taxonomy, tag, getattr(self, tag)(elem))
                elif tag == 'common_name':
                    taxonomy.common_names.append(_collapse_wspace(elem.text))
                elif tag == 'synonym':
                    taxonomy.synonyms.append(elem.text)
                elif tag in ('code', 'scientific_name', 'authority', 'rank'):
                    # ENH: check_str on rank
                    setattr(taxonomy, tag, elem.text)
                elif namespace != NAMESPACES['phy']:
                    taxonomy.other.append(self.other(elem, namespace, tag))
                    parent.clear()
        return taxonomy
    def other(self, elem, namespace, localtag):
        """Create an Other object (non-phyloXML data), recursively."""
        return PX.Other(localtag, namespace, elem.attrib,
                        value=elem.text and elem.text.strip() or None,
                        children=[self.other(child, *_split_namespace(child.tag))
                                  for child in elem])
    # Complex types
    # NB: several method names below (id, property, type...) intentionally
    # match phyloXML tag names -- they are dispatched via getattr(self, tag).
    def accession(self, elem):
        """Parse an accession element into a PX.Accession."""
        return PX.Accession(elem.text.strip(), elem.get('source'))
    def annotation(self, elem):
        """Parse an annotation element into a PX.Annotation."""
        return PX.Annotation(
            desc=_collapse_wspace(_get_child_text(elem, 'desc')),
            confidence=_get_child_as(elem, 'confidence', self.confidence),
            properties=_get_children_as(elem, 'property', self.property),
            uri=_get_child_as(elem, 'uri', self.uri),
            **elem.attrib)
    def binary_characters(self, elem):
        """Parse a binary_characters element into a PX.BinaryCharacters."""
        def bc_getter(elem):
            return _get_children_text(elem, 'bc')
        return PX.BinaryCharacters(
            type=elem.get('type'),
            gained_count=_int(elem.get('gained_count')),
            lost_count=_int(elem.get('lost_count')),
            present_count=_int(elem.get('present_count')),
            absent_count=_int(elem.get('absent_count')),
            # Flatten BinaryCharacterList sub-nodes into lists of strings
            gained=_get_child_as(elem, 'gained', bc_getter),
            lost=_get_child_as(elem, 'lost', bc_getter),
            present=_get_child_as(elem, 'present', bc_getter),
            absent=_get_child_as(elem, 'absent', bc_getter))
    def clade_relation(self, elem):
        """Parse a clade_relation element into a PX.CladeRelation."""
        return PX.CladeRelation(
            elem.get('type'), elem.get('id_ref_0'), elem.get('id_ref_1'),
            distance=elem.get('distance'),
            confidence=_get_child_as(elem, 'confidence', self.confidence))
    def color(self, elem):
        """Parse a color element into a PX.BranchColor."""
        red, green, blue = (_get_child_text(elem, color, int) for color in
                            ('red', 'green', 'blue'))
        return PX.BranchColor(red, green, blue)
    def confidence(self, elem):
        """Parse a confidence element into a PX.Confidence."""
        return PX.Confidence(
            _float(elem.text),
            elem.get('type'))
    def date(self, elem):
        """Parse a date element into a PX.Date."""
        return PX.Date(
            unit=elem.get('unit'),
            desc=_collapse_wspace(_get_child_text(elem, 'desc')),
            value=_get_child_text(elem, 'value', float),
            minimum=_get_child_text(elem, 'minimum', float),
            maximum=_get_child_text(elem, 'maximum', float),
        )
    def distribution(self, elem):
        """Parse a distribution element into a PX.Distribution."""
        return PX.Distribution(
            desc=_collapse_wspace(_get_child_text(elem, 'desc')),
            points=_get_children_as(elem, 'point', self.point),
            polygons=_get_children_as(elem, 'polygon', self.polygon))
    def domain(self, elem):
        """Parse a domain element into a PX.ProteinDomain.

        The 'from' coordinate is shifted to 0-based indexing.
        """
        return PX.ProteinDomain(elem.text.strip(),
                                int(elem.get('from')) - 1,
                                int(elem.get('to')),
                                confidence=_float(elem.get('confidence')),
                                id=elem.get('id'))
    def domain_architecture(self, elem):
        """Parse a domain_architecture element into a PX.DomainArchitecture."""
        return PX.DomainArchitecture(
            length=int(elem.get('length')),
            domains=_get_children_as(elem, 'domain', self.domain))
    def events(self, elem):
        """Parse an events element into a PX.Events."""
        return PX.Events(
            type=_get_child_text(elem, 'type'),
            duplications=_get_child_text(elem, 'duplications', int),
            speciations=_get_child_text(elem, 'speciations', int),
            losses=_get_child_text(elem, 'losses', int),
            confidence=_get_child_as(elem, 'confidence', self.confidence))
    def id(self, elem):
        """Parse an id element into a PX.Id."""
        # 'type' is accepted as a legacy alias for 'provider'.
        provider = elem.get('provider') or elem.get('type')
        return PX.Id(elem.text.strip(), provider)
    def mol_seq(self, elem):
        """Parse a mol_seq element into a PX.MolSeq."""
        is_aligned = elem.get('is_aligned')
        if is_aligned is not None:
            is_aligned = _str2bool(is_aligned)
        return PX.MolSeq(elem.text.strip(), is_aligned=is_aligned)
    def point(self, elem):
        """Parse a point element into a PX.Point."""
        return PX.Point(
            elem.get('geodetic_datum'),
            _get_child_text(elem, 'lat', float),
            _get_child_text(elem, 'long', float),
            alt=_get_child_text(elem, 'alt', float),
            alt_unit=elem.get('alt_unit'))
    def polygon(self, elem):
        """Parse a polygon element into a PX.Polygon."""
        return PX.Polygon(
            points=_get_children_as(elem, 'point', self.point))
    def property(self, elem):
        """Parse a property element into a PX.Property."""
        return PX.Property(elem.text.strip(),
                           elem.get('ref'),
                           elem.get('applies_to'),
                           elem.get('datatype'),
                           unit=elem.get('unit'),
                           id_ref=elem.get('id_ref'))
    def reference(self, elem):
        """Parse a reference element into a PX.Reference."""
        return PX.Reference(
            doi=elem.get('doi'),
            desc=_get_child_text(elem, 'desc'))
    def sequence_relation(self, elem):
        """Parse a sequence_relation element into a PX.SequenceRelation."""
        return PX.SequenceRelation(
            elem.get('type'), elem.get('id_ref_0'), elem.get('id_ref_1'),
            distance=_float(elem.get('distance')),
            confidence=_get_child_as(elem, 'confidence', self.confidence))
    def uri(self, elem):
        """Parse a uri element into a PX.Uri."""
        return PX.Uri(elem.text.strip(),
                      desc=_collapse_wspace(elem.get('desc')),
                      type=elem.get('type'))
# ---------------------------------------------------------
# OUTPUT
# ---------------------------------------------------------
def _serialize(value):
    """Convert a Python primitive to a phyloXML-compatible Unicode string."""
    # Floats first: phyloXML wants 'INF'/'NAN' uppercased.
    if isinstance(value, float):
        return unicode(value).upper()
    # Booleans render as lowercase 'true'/'false'.
    if isinstance(value, bool):
        return unicode(value).lower()
    return unicode(value)
def _clean_attrib(obj, attrs):
    """Create a dictionary from an object's specified, non-None attributes."""
    result = {}
    for name in attrs:
        value = getattr(obj, name)
        if value is not None:
            result[name] = _serialize(value)
    return result
def _handle_complex(tag, attribs, subnodes, has_text=False):
    """Build a Writer method that serializes a complex phyloXML element."""
    def wrapped(self, obj):
        elem = ElementTree.Element(tag, _clean_attrib(obj, attribs))
        for subn in subnodes:
            if not isinstance(subn, basestring):
                # (method name, plural attribute) pair: serialize each item
                method, plural = subn
                for item in getattr(obj, plural):
                    elem.append(getattr(self, method)(item))
            else:
                # singular object: method and attribute names are the same
                value = getattr(obj, subn)
                if value is not None:
                    elem.append(getattr(self, subn)(value))
        if has_text:
            elem.text = _serialize(obj.value)
        return elem
    wrapped.__doc__ = "Serialize a %s and its subnodes, in order." % tag
    return wrapped
def _handle_simple(tag):
    """Build a Writer method that serializes a text-only phyloXML element."""
    def wrapped(self, obj):
        node = ElementTree.Element(tag)
        node.text = _serialize(obj)
        return node
    wrapped.__doc__ = "Serialize a simple %s node." % tag
    return wrapped
class Writer(object):
    """Methods for serializing a PhyloXML object to XML.

    Most element serializers are generated declaratively via _handle_complex
    (attributes + ordered sub-elements) and _handle_simple (text-only nodes).
    """
    def __init__(self, phyloxml):
        """Build an ElementTree from a PhyloXML object."""
        assert isinstance(phyloxml, PX.Phyloxml), "Not a Phyloxml object"
        self._tree = ElementTree.ElementTree(self.phyloxml(phyloxml))
    def write(self, file, encoding=DEFAULT_ENCODING, indent=True):
        """Serialize the tree to *file*; return the number of phylogenies."""
        if indent:
            _indent(self._tree.getroot())
        self._tree.write(file, encoding)
        return len(self._tree.getroot())
    # Convert classes to ETree elements
    def phyloxml(self, obj):
        """Serialize the top-level Phyloxml container element."""
        elem = ElementTree.Element('phyloxml', obj.attributes)  # Namespaces
        for tree in obj.phylogenies:
            elem.append(self.phylogeny(tree))
        for otr in obj.other:
            elem.append(self.other(otr))
        return elem
    def other(self, obj):
        """Serialize a non-phyloXML (Other) element, recursively."""
        elem = ElementTree.Element(_ns(obj.tag, obj.namespace), obj.attributes)
        elem.text = obj.value
        for child in obj.children:
            elem.append(self.other(child))
        return elem
    # Generated serializers: (tag, attribute names, ordered sub-elements).
    # A bare string sub-element is a singular attribute; a (method, plural)
    # pair serializes each item of a list attribute.
    phylogeny = _handle_complex('phylogeny',
                                ('rooted', 'rerootable',
                                 'branch_length_unit', 'type'),
                                ('name',
                                 'id',
                                 'description',
                                 'date',
                                 ('confidence', 'confidences'),
                                 'clade',
                                 ('clade_relation', 'clade_relations'),
                                 ('sequence_relation',
                                  'sequence_relations'),
                                 ('property', 'properties'),
                                 ('other', 'other'),
                                 ))
    clade = _handle_complex('clade', ('id_source',),
                            ('name',
                             'branch_length',
                             ('confidence', 'confidences'),
                             'width',
                             'color',
                             'node_id',
                             ('taxonomy', 'taxonomies'),
                             ('sequence', 'sequences'),
                             'events',
                             'binary_characters',
                             ('distribution', 'distributions'),
                             'date',
                             ('reference', 'references'),
                             ('property', 'properties'),
                             ('clade', 'clades'),
                             ('other', 'other'),
                             ))
    accession = _handle_complex('accession', ('source',),
                                (), has_text=True)
    annotation = _handle_complex('annotation',
                                 ('ref', 'source', 'evidence', 'type'),
                                 ('desc',
                                  'confidence',
                                  ('property', 'properties'),
                                  'uri',
                                  ))
    def binary_characters(self, obj):
        """Serialize a binary_characters node and its subnodes."""
        elem = ElementTree.Element('binary_characters',
                                   _clean_attrib(obj,
                                                 ('type', 'gained_count', 'lost_count',
                                                  'present_count', 'absent_count')))
        # Each list attribute becomes a wrapper node of <bc> children.
        for subn in ('gained', 'lost', 'present', 'absent'):
            subelem = ElementTree.Element(subn)
            for token in getattr(obj, subn):
                subelem.append(self.bc(token))
            elem.append(subelem)
        return elem
    clade_relation = _handle_complex('clade_relation',
                                     ('id_ref_0', 'id_ref_1',
                                      'distance', 'type'),
                                     ('confidence',))
    color = _handle_complex('color', (), ('red', 'green', 'blue'))
    confidence = _handle_complex('confidence', ('type',),
                                 (), has_text=True)
    date = _handle_complex('date', ('unit',),
                           ('desc', 'value', 'minimum', 'maximum'))
    distribution = _handle_complex('distribution', (),
                                   ('desc',
                                    ('point', 'points'),
                                    ('polygon', 'polygons'),
                                    ))
    def domain(self, obj):
        """Serialize a domain node (start is converted back to 1-based)."""
        elem = ElementTree.Element('domain',
                                   {'from': str(obj.start + 1), 'to': str(obj.end)})
        if obj.confidence is not None:
            elem.set('confidence', _serialize(obj.confidence))
        if obj.id is not None:
            elem.set('id', obj.id)
        elem.text = _serialize(obj.value)
        return elem
    domain_architecture = _handle_complex('domain_architecture',
                                          ('length',),
                                          (('domain', 'domains'),))
    events = _handle_complex('events', (),
                             ('type',
                              'duplications',
                              'speciations',
                              'losses',
                              'confidence',
                              ))
    id = _handle_complex('id', ('provider',), (), has_text=True)
    mol_seq = _handle_complex('mol_seq', ('is_aligned',),
                              (), has_text=True)
    node_id = _handle_complex('node_id', ('provider',), (), has_text=True)
    point = _handle_complex('point', ('geodetic_datum', 'alt_unit'),
                            ('lat', 'long', 'alt'))
    polygon = _handle_complex('polygon', (), (('point', 'points'),))
    property = _handle_complex('property',
                               ('ref', 'unit', 'datatype',
                                'applies_to', 'id_ref'),
                               (), has_text=True)
    reference = _handle_complex('reference', ('doi',), ('desc',))
    sequence = _handle_complex('sequence',
                               ('type', 'id_ref', 'id_source'),
                               ('symbol',
                                'accession',
                                'name',
                                'location',
                                'mol_seq',
                                'uri',
                                ('annotation', 'annotations'),
                                'domain_architecture',
                                ('other', 'other'),
                                ))
    sequence_relation = _handle_complex('sequence_relation',
                                        ('id_ref_0', 'id_ref_1',
                                         'distance', 'type'),
                                        ('confidence',))
    taxonomy = _handle_complex('taxonomy',
                               ('id_source',),
                               ('id',
                                'code',
                                'scientific_name',
                                'authority',
                                ('common_name', 'common_names'),
                                ('synonym', 'synonyms'),
                                'rank',
                                'uri',
                                ('other', 'other'),
                                ))
    uri = _handle_complex('uri', ('desc', 'type'), (), has_text=True)
    # Primitive types
    # Floating point
    alt = _handle_simple('alt')
    branch_length = _handle_simple('branch_length')
    lat = _handle_simple('lat')
    long = _handle_simple('long')
    maximum = _handle_simple('maximum')
    minimum = _handle_simple('minimum')
    value = _handle_simple('value')
    width = _handle_simple('width')
    # Integers
    blue = _handle_simple('blue')
    duplications = _handle_simple('duplications')
    green = _handle_simple('green')
    losses = _handle_simple('losses')
    red = _handle_simple('red')
    speciations = _handle_simple('speciations')
    # Strings
    bc = _handle_simple('bc')
    code = _handle_simple('code')
    common_name = _handle_simple('common_name')
    desc = _handle_simple('desc')
    description = _handle_simple('description')
    location = _handle_simple('location')
    name = _handle_simple('name')
    rank = _handle_simple('rank')
    scientific_name = _handle_simple('scientific_name')
    symbol = _handle_simple('symbol')
    synonym = _handle_simple('synonym')
    type = _handle_simple('type')
|
zjuchenyuan/BioWeb
|
Lib/Bio/Phylo/PhyloXMLIO.py
|
Python
|
mit
| 33,305
|
[
"Biopython"
] |
3fbfbd6a3f49089d13f92cdeb4dfe687c6770a4a215d9509a07a39526bfa304b
|
import numpy as np
from scipy import stats
from scipy.special import expit
from scipy.stats import multivariate_normal
def exp_cosh(H):
    """Elementwise transfer function 0.5 * exp(beta*H) / cosh(beta*H).

    With beta = 1 this equals the logistic function of 2*H.
    """
    beta = 1.0
    scaled = beta * H
    return 0.5 * np.exp(scaled) / np.cosh(scaled)
def gaussian(H, variance=1.0):
    """Draw one sample from N(H[0], variance * I).

    :param H: numpy.ndarray (1, N) -- only the first row is used as the mean.
    :param variance: float, diagonal variance of the isotropic covariance
        (generalizes the previously hard-coded value 1; default preserves
        the old behavior).
    :return: numpy.ndarray (N,) random sample.
    """
    cov = np.diag(np.repeat(variance, H.shape[1]))
    return np.random.multivariate_normal(H[0], cov)
def kinetic_ising_model(S, J, energy_function):
    """ Returns probabilities of S[t+1,n] being one.
    :param S: numpy.ndarray (T,N)
        Binary data where an entry is either 1 ('spike') or -1 ('silence').
    :param J: numpy.ndarray (N, N)
        Coupling matrix
    :param energy_function: callable or str
        Transfer function applied to the fields, or one of the names
        'exp_cosh', 'gaussian', 'logistic'.
    :return: numpy.ndarray (T,N)
        Probabilities that at time point t+1 neuron n fires
    """
    # compute fields
    H = compute_fields(S, J)
    # If a string was passed as the energy function use the function that is mapped to it
    string_to_func = {'exp_cosh': exp_cosh, 'gaussian': gaussian, 'logistic': expit}
    # Membership test directly on the dict (no redundant .keys()).
    if energy_function in string_to_func:
        energy_function = string_to_func[energy_function]
    # compute probabilities by applying the transfer function to the fields
    return energy_function(H)
def compute_fields(S, J):
    """ Computes the fields for given data and couplings
    :param S: numpy.ndarray (T,N)
        Binary data where an entry is either 1 ('spike') or -1 ('silence').
    :param J: numpy.ndarray (N, N)
        Coupling matrix.
    :return: numpy.ndarray (T,N)
        Fields at time point t+1 on neuron n
    """
    return np.dot(S, J)
def spike_and_slab(ro, N, bias, v_s=1.0, bias_mean=0):
    """Generate couplings from a spike-and-slab prior.

    :param ro: sparsity (Bernoulli probability of a nonzero coupling)
    :param N: number of neurons
    :param bias: 1 if a bias row is included in the model, 0 otherwise
    :param v_s: standard deviation of the slab (normal) component
    :param bias_mean: mean of the bias row's normal component
    :return: numpy.ndarray (N + bias, N) coupling matrix
    """
    # NB: draw order (bernoulli, normal, conditional normal) is part of the
    # reproducibility contract for seeded runs -- do not reorder.
    mask = stats.bernoulli.rvs(p=ro, size=(N + bias, N))
    slab = np.random.normal(0.0, v_s, (N + bias, N))
    if bias:
        # The bias row is always present: never zeroed by the spike mask.
        mask[N, :] = 1
        slab[N, :] = np.random.normal(bias_mean, v_s, N)
    return mask * slab
def generate_spikes(N, T, S0, J, energy_function, bias, no_spike=-1):
    """ Generates spike data according to kinetic Ising model
    :param N: int
        Number of neurons.
    :param T: int
        Length of trajectory that is generated.
    :param S0: numpy.ndarray (N)
        Initial pattern that is sampling started from.
    :param J: numpy.ndarray (N, N)
        Coupling matrix.
    :param energy_function: callable or str
        Transfer function (or its name) passed to kinetic_ising_model.
    :param bias: 1 if bias is included in the model. 0 other wise.
    :param no_spike: what number should represent 'no_spike'. Default is -1.
    :return: numpy.ndarray (T, N)
        Binary data where an entry is either 1 ('spike') or -1 ('silence'). First row is only ones for external fields.
    """
    # Initialize array for data
    S = np.empty([T, N + bias])
    # Set initial spike pattern
    # NOTE(review): when no_spike != -1 the caller-provided S0 is ignored and
    # an all-zero start is used -- confirm this is intended.
    S[0] = S0 if no_spike == -1 else np.zeros(N + bias)
    # Last column in the activity matrix is of the bias and should be 1 at all times
    if bias:
        S[:, N] = 1
    # Generate random numbers
    X = np.random.rand(T - 1, N)
    #X = np.random.normal(size=(T-1, N))
    # Iterate through all time points
    for i in range(1, T):
        # Compute probabilities of neuron firing
        p = kinetic_ising_model(np.array([S[i - 1]]), J, energy_function)
        if energy_function == 'gaussian':
            # Gaussian mode: p is already a continuous sample, store directly.
            S[i, :N] = p
        else:
            # Check if spike or not
            if no_spike == -1:
                # Map boolean spikes to the {-1, +1} representation.
                S[i, :N] = 2 * (X[i - 1] < p) - 1
            else:
                # {0, 1} representation: 2*b/2.0 is just b as a float.
                S[i, :N] = 2 * (X[i - 1] < p) / 2.0
    return S
|
noashin/kinetic_ising_model_neurons
|
spikes_activity_generator.py
|
Python
|
mit
| 3,550
|
[
"Gaussian",
"NEURON"
] |
2403154facdd0b0cdb34376fd2305c4e97546f4e3516ccc1689c65430660c81c
|
"""
Base file for YAPWAF application
"""
import yapwaf as Y
import conf.env as env
import conf.db as db
import conf.routes as routes
# For testing purposes, add a user to the database
from app.models.user import User
Y.Base.metadata.create_all(Y.engine)
user = User('Brian', 'Stack')
Y.session.add(user)
Y.session.commit()
app = Y.App(env, db, routes)
|
bis12/yapwaf
|
examples/hello/hello.py
|
Python
|
mit
| 354
|
[
"Brian"
] |
12542c5526ae73a5a301d7ac71da9019d2583b45ee153e19970c1ecbd9578915
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 30 20:08:47 2016
@author: boldingd
"""
import SnnBase
import Stdp
import DopamineStdp
import random
class Cluster:
    """A simple group of neurons treated as one unit for wiring."""

    def __init__(self):
        # Member neurons, kept in insertion order.
        self.neurons = []

    def add_neuron(self, neuron):
        """Append *neuron* to this cluster."""
        self.neurons.append(neuron)
def create_pulsar_cluster(count, total_power, freq_min, freq_max):
    """Build a Cluster of *count* Pulsars with frequencies spread over
    [freq_min, freq_max] and *total_power* divided evenly among them.
    """
    if count < 1:
        raise ValueError("Pulsar count must be positive")
    frequencies = SnnBase.linspace(freq_min, freq_max, count)  # will throw if freqs are wrong
    cluster = Cluster()
    power_per_pulsar = total_power / count
    for frequency in frequencies:
        # Spread each pulsar's share of power over its pulses.
        power_per_pulse = power_per_pulsar / frequency
        cluster.add_neuron(SnnBase.Pulsar(power_per_pulse, frequency))
    return cluster
def create_spiking_cluster(count, threshold, magnitude, leak_eql, leak_tau):
    """Build a Cluster of *count* identical SnnBase.SpikingNeuron objects."""
    cluster = Cluster()
    for _ in range(count):
        cluster.add_neuron(
            SnnBase.SpikingNeuron(threshold, magnitude, leak_eql, leak_tau))
    return cluster
def create_poisson_cluster(count, total_power, freq_min, freq_max):
    """Build a Cluster of *count* PoissonSpikers with frequencies spread over
    [freq_min, freq_max] and *total_power* divided evenly among them.
    """
    if count < 1:
        raise ValueError("Pulsar count must be positive")
    frequencies = SnnBase.linspace(freq_min, freq_max, count)
    cluster = Cluster()
    power_per_spiker = total_power / count  # divide total power of spikers
    for frequency in frequencies:
        # divide spiker power over pulses
        power_per_spike = power_per_spiker / frequency
        cluster.add_neuron(SnnBase.PoissonSpiker(power_per_spike, frequency))
    return cluster
class BasicSynapseConnector:
    """Creates plain synapses with a uniformly random efficiency."""

    def __init__(self, delay, min_efficiency, max_efficiency):
        self.delay = delay
        self.min_efficiency = min_efficiency
        self.max_efficiency = max_efficiency

    def connect(self, source, target):
        """Create and return a SnnBase.Synapse from *source* to *target*."""
        efficiency = random.uniform(self.min_efficiency, self.max_efficiency)
        return SnnBase.Synapse.connect(source=source, target=target,
                                       delay=self.delay, efficiency=efficiency)
class StdpSynapseConnector:
    """Creates STDP (plastic) synapses with a uniformly random efficiency."""

    def __init__(self, delay, min_efficiency, max_efficiency):
        self.delay = delay
        self.min_efficiency = min_efficiency
        self.max_efficiency = max_efficiency

    def connect(self, source, target):
        """Create and return an Stdp.StdpSynapse from *source* to *target*."""
        efficiency = random.uniform(self.min_efficiency, self.max_efficiency)
        return Stdp.StdpSynapse.connect(
            source=source, target=target, delay=self.delay,
            efficiency=efficiency, min_efficiency=self.min_efficiency,
            max_efficiency=self.max_efficiency)
class DopamineStdpSynapseConnector:
    """Creates dopamine-modulated STDP synapses sharing one reward manager."""

    def __init__(self, delay, min_efficiency, max_efficiency, reward_manager):
        self.delay = delay
        self.min_efficiency = min_efficiency
        self.max_efficiency = max_efficiency
        self.reward_manager = reward_manager

    def connect(self, source, target):
        """Create and return a DopamineStdp.DopamineStdpSynapse."""
        efficiency = random.uniform(self.min_efficiency, self.max_efficiency)
        return DopamineStdp.DopamineStdpSynapse.connect(
            source=source, target=target, delay=self.delay,
            efficiency=efficiency, min_efficiency=self.min_efficiency,
            max_efficiency=self.max_efficiency,
            reward_manager=self.reward_manager)
# NOTE: Network manages connection, but not state. For now, just yield your entities and let something else run the sim
class Network:
    """Owns clusters and the synapses wired between them.

    Manages connectivity only, not simulation state: callers retrieve the
    entities via get_entities() and step them externally.
    """

    def __init__(self):
        self.clusters = []
        self.synapses = []

    def get_new_cluster(self):
        """Add a new empty cluster and return it.

        It's assumed the caller will populate it externally.
        """
        cluster = Cluster()
        self.clusters.append(cluster)
        return cluster

    def add_cluster(self, cluster):
        """Register an externally built cluster; duplicates are rejected."""
        if cluster in self.clusters:
            raise ValueError("Network already contains cluster")
        self.clusters.append(cluster)

    def connect_clusters(self, source_cluster, target_cluster, connector):
        """All-to-all connect two member clusters using *connector*."""
        if source_cluster not in self.clusters:
            raise ValueError("source cluster must be in this network")
        if target_cluster not in self.clusters:
            raise ValueError("target cluster must be in this network")
        if len(source_cluster.neurons) < 1:
            raise ValueError("Source cluster is empty")
        if len(target_cluster.neurons) < 1:
            raise ValueError("Target cluster is empty")
        for src in source_cluster.neurons:
            for dst in target_cluster.neurons:
                self.synapses.append(connector.connect(src, dst))

    def get_entities(self):
        """Return every neuron (cluster order) followed by every synapse."""
        members = []
        for cluster in self.clusters:
            members += cluster.neurons
        members += self.synapses
        return members
    # TODO: could track synapses more closely
    # TODO: could keep track of which clusters have had synapses attached and prevent operations that don't make sense
|
boldingd/BadSnn
|
SpikingNetwork.py
|
Python
|
bsd-3-clause
| 5,110
|
[
"NEURON"
] |
f1d5aae7be79db61179678bc0c4adb2776a8bf04e79e603a21295c18effc4a59
|
#!/usr/bin/env python -i
# Drive LAMMPS through its Python library interface: run an input script,
# perturb one atom's x-coordinate, and compare the resulting force as seen
# via extract_atom vs. a LAMMPS variable.
# NOTE(review): Python 2 syntax (print statements) -- kept byte-identical.
infile = 'in.simple'
me = 0
from lammps import lammps
lmp = lammps()
# run infile one line at a time
lines = open(infile,'r').readlines()
for line in lines: lmp.command(line)
lmp.command("run 10")
# Gather per-atom coordinates (name "x", type 1, 3 values per atom).
x = lmp.gather_atoms("x",1,3)
# Displace the first atom's x-coordinate by a small amount.
epsilon = 0.1
x[0] += epsilon
lmp.scatter_atoms("x",1,3,x)
lmp.command("run 1");
# Per-atom force array after the single-step run.
f = lmp.extract_atom("f",3)
print "Force on 1 atom via extract_atom: ",f[0][0]
# Same force read back through a LAMMPS-side variable "fx".
fx = lmp.extract_variable("fx","all",1)
print "Force on 1 atom via extract_variable:",fx[0]
|
crtrott/lammps
|
test/lib_python/in.simple-001.py
|
Python
|
gpl-2.0
| 512
|
[
"LAMMPS"
] |
a3c77a5845dcc1cce388c70940a2bfedfb3a551f7465d4a49266f53ba2cf364b
|
#! /usr/bin/env python
# CSS Localiser (CSS Localizer)
# Version 1.0
#
# Copyright (C) Ben McGinnes, 2014
# Email: ben@adversary.org
# GPG Key: 0x321E4E2373590E5D
# License: GPLv3 or any later version
# Website: https://github.com/adversary-org/stylish-styles
# Bitcoin: 17VvZDeLjhiH1ccU6rZWLZc41UiZd8eh6F
#
# Takes a file, downloads all the GIFs, JPGs and PNGs, then copies or
# moves them to a custom directory (here specific to the Stylish
# extension) and rewrites the file with the images local URLs called.
# Intended for use with CSS files and specifically for Stylish themes,
# but will run on any file it can open (though it includes the CSS
# data tags, so it might not help with other file types really).
#
# Since it's basically the same as localise.py except without the
# base64 encoding, the very slight name change seemed appropriate.
#
# Written and tested with Python 2.7, probably works with some earlier
# versions and will almost certainly require some degree of
# modification to work with Python 3. There are a ridiculous number
# of references to The Chronicles of Amber by Roger Zelazny too, do
# yourself a favour and go read that instead of this code.
#
# Usage: ./localize.py your_theme.css
#
# Output will be: new_your_theme.css and the image files downloaded to
# the specified directory (a hidden folder on Linux and OS X).
#
#
# CSS Localizer: downloads, coverts and embeds images as base64 data.
# Copyright (C) 2014 Ben McGinnes
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The license is available in the GitHub repository along with the
# rest of my CSS theme work:
#
# https://github.com/adversary-org/stylish-styles/blob/master/LICENSE.txt
##
import os
import os.path
import re
import requests
import sys
# Command-line argument: the stylesheet to localize.
sa = sys.argv[1]
home = os.path.expanduser("~")
windir = "\\Stylish\\"
nixdir = "/.stylish/"
curdir = os.path.abspath(".")
infile = os.path.abspath(sa)
outfile = os.path.abspath(curdir + "/new_" + sa)

# Pick the platform-specific Stylish data directory.
# BUG FIX: the original tested `sys.platform is "linux" or "linux2" or "darwin"`,
# which is always true (`is` compares identity, and the bare literal "linux2"
# is truthy), so the win32 branch could never be reached. Use real equality.
if sys.platform.startswith("linux") or sys.platform == "darwin":
    styledir = home + nixdir
    themedir = styledir + sa.replace(".", "-") + "/"
elif sys.platform == "win32":
    styledir = home + windir
    themedir = styledir + sa.replace(".", "-") + "\\"
else:
    # Unknown platform: assume a POSIX-style layout.
    styledir = home + nixdir
    themedir = styledir + sa.replace(".", "-") + "/"

if not os.path.exists(styledir):
    os.mkdir(styledir)
if not os.path.exists(themedir):
    os.mkdir(themedir)

# Collect every http(s) URL in the stylesheet, stripping the trailing ")"
# or ");" left over from CSS url(...) syntax.
with open(infile, "r") as f:
    lines = f.readlines()
unicorn = []
for line in lines:
    for horn in re.findall(r"(https?://[^\s]+)", line):
        if horn.endswith(");"):
            unicorn.append(horn[:-2])
        elif horn.endswith(")"):
            unicorn.append(horn[:-1])
        else:
            unicorn.append(horn)

# Keep only image URLs (the assets a Stylish theme embeds).
amber = [blood for blood in unicorn
         if blood.lower().endswith((".jpg", ".jpeg", ".gif", ".png"))]

# Download each image into the theme directory and remember the mapping
# from remote URL to its local file:/// URL. Non-200 responses and non-image
# content types are skipped, as in the original.
patternfall = {}
for order in amber:
    head, tail = os.path.split(order)
    rorder = requests.get(order)
    if rorder.status_code != 200:
        continue
    if not rorder.headers["content-type"].startswith("image"):
        continue
    pit = themedir + tail
    with open(pit, "wb") as trump:
        trump.write(rorder.content)
    patternfall[order] = "file:///" + pit

# Rewrite the stylesheet once, replacing each remote URL with its local copy.
# BUG FIX: the original rewrote the output file inside the download loop
# (once per image) and used re.sub, which treats URL characters such as "."
# and "+" as regex metacharacters; a single pass with str.replace is both
# correct and sufficient. As before, no output file is created when no
# image was successfully localized.
if patternfall:
    with open(infile, "r") as abyss, open(outfile, "w") as logrus:
        for serpent in abyss:
            for order, chaos in patternfall.items():
                if order in serpent:
                    serpent = serpent.replace(order, chaos)
            logrus.write(serpent)
|
adversary-org/stylish-styles
|
localise/localize.py
|
Python
|
gpl-3.0
| 5,123
|
[
"Amber"
] |
ad7435faa40e3360560bce7f846c06d486cb9cd4e09861db5022af3f73849f04
|
import os, sys, getopt
try:
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
except ImportError:
try:
from PySide import QtCore, QtGui
QtCore.QString = str
except ImportError:
raise ImportError("Cannot load either PyQt or PySide")
import vtk
from vtk.util.colors import peacock, tomato, red, white, black
from ExodusActor import ExodusActor
from ClippedActor import ClippedActor
from MeshRenderer import MeshRenderer
# Qt4's QString.fromUtf8 does not exist under PySide (where QString was
# aliased to str above); fall back to the identity function so _fromUtf8
# is always callable.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s
class ExodusMap:
    """Indices into the vtkMultiBlockDataSet produced by vtkExodusIIReader."""
    # These are the blocks from the multiblockdataset that correspond to each item
    element_vtk_block = 0   # element blocks
    sideset_vtk_block = 4   # side sets
    nodeset_vtk_block = 7   # node sets
class ExodusRenderer(MeshRenderer):
    """Render an Exodus II mesh file, building one actor (plus a clipped
    twin) per element block, side set and node set.

    NOTE(review): uses Python 2 `xrange`; the surrounding file targets
    Python 2 (PyQt4/PySide-era imports).
    """
    def __init__(self, render_widget, mesh_item_data):
        """Read the mesh file named in mesh_item_data['file'] and build actors."""
        MeshRenderer.__init__(self, render_widget, mesh_item_data)
        self.file_name = mesh_item_data['file']
        self.buildActors(self.file_name)
    def buildActors(self, file_name):
        """Load the Exodus file and populate the actor dictionaries.

        Builds id<->name lookup tables for side sets, node sets and element
        blocks, then creates an ExodusActor and a ClippedActor for each.
        All actors are also appended to self.all_actors.
        """
        reader = vtk.vtkExodusIIReader()
        # NOTE: self.file_name (set in __init__), not the file_name argument.
        reader.SetFileName(self.file_name)
        # Request every array so names/ids are available after UpdateInformation.
        reader.SetAllArrayStatus(vtk.vtkExodusIIReader.NODAL, 1)
        reader.SetAllArrayStatus(vtk.vtkExodusIIReader.EDGE_SET, 1)
        reader.SetAllArrayStatus(vtk.vtkExodusIIReader.SIDE_SET, 1)
        reader.SetAllArrayStatus(vtk.vtkExodusIIReader.NODE_SET, 1)
        reader.SetAllArrayStatus(vtk.vtkExodusIIReader.NODAL_TEMPORAL, 1)
        reader.UpdateInformation()
        reader.SetObjectStatus(vtk.vtkExodusIIReader.NODE_SET, 0, 1)
        num_sidesets = reader.GetNumberOfSideSetArrays()
        num_nodesets = reader.GetNumberOfNodeSetArrays()
        num_blocks = reader.GetNumberOfElementBlockArrays()
        # --- side sets: record ids, enable reading, map names <-> ids ---
        self.sidesets = []
        self.sideset_id_to_exodus_block = {}
        self.sideset_id_to_name = {}
        self.name_to_sideset_id = {}
        for i in xrange(num_sidesets):
            sideset_id = reader.GetObjectId(vtk.vtkExodusIIReader.SIDE_SET,i)
            self.sidesets.append(sideset_id)
            self.sideset_id_to_exodus_block[sideset_id] = i
            reader.SetObjectStatus(vtk.vtkExodusIIReader.SIDE_SET, i, 1)
            # Only keep names the file actually provides ("Unnamed set ...")
            name = reader.GetObjectName(vtk.vtkExodusIIReader.SIDE_SET,i).split(' ')
            if 'Unnamed' not in name:
                self.sideset_id_to_name[sideset_id] = name[0]
                self.name_to_sideset_id[name[0]] = sideset_id
        # --- node sets: same bookkeeping as side sets ---
        self.nodesets = []
        self.nodeset_id_to_exodus_block = {}
        self.nodeset_id_to_name = {}
        self.name_to_nodeset_id = {}
        for i in xrange(num_nodesets):
            nodeset_id = reader.GetObjectId(vtk.vtkExodusIIReader.NODE_SET,i)
            self.nodesets.append(nodeset_id)
            self.nodeset_id_to_exodus_block[nodeset_id] = i
            reader.SetObjectStatus(vtk.vtkExodusIIReader.NODE_SET, i, 1)
            name = reader.GetObjectName(vtk.vtkExodusIIReader.NODE_SET,i).split(' ')
            if 'Unnamed' not in name:
                self.nodeset_id_to_name[nodeset_id] = name[0]
                self.name_to_nodeset_id[name[0]] = nodeset_id
        # --- element blocks: already enabled by default, just map ids/names ---
        self.blocks = []
        self.block_id_to_exodus_block = {}
        self.block_id_to_name = {}
        self.name_to_block_id = {}
        for i in xrange(num_blocks):
            block_id = reader.GetObjectId(vtk.vtkExodusIIReader.ELEM_BLOCK,i)
            self.blocks.append(block_id)
            self.block_id_to_exodus_block[block_id] = i
            name = reader.GetObjectName(vtk.vtkExodusIIReader.ELEM_BLOCK,i).split(' ')
            if 'Unnamed' not in name:
                self.block_id_to_name[block_id] = name[0]
                self.name_to_block_id[name[0]] = block_id
        reader.SetTimeStep(1)
        reader.Update()
        self.data = reader.GetOutput()
        # Build a normal actor and a clipped twin for every set/block; the
        # actor dictionaries (e.g. self.sideset_actors) are keyed by the
        # stringified Exodus id. Dictionaries are presumably created by
        # MeshRenderer.__init__ -- TODO confirm.
        for i in xrange(num_sidesets):
            actor = ExodusActor(self.renderer, self.data, ExodusMap.sideset_vtk_block, i)
            self.sideset_actors[str(self.sidesets[i])] = actor
            self.all_actors.append(actor)
            clipped_actor = ClippedActor(actor, self.plane)
            self.clipped_sideset_actors[str(self.sidesets[i])] = clipped_actor
            self.all_actors.append(clipped_actor)
        for i in xrange(num_nodesets):
            actor = ExodusActor(self.renderer, self.data, ExodusMap.nodeset_vtk_block, i)
            self.nodeset_actors[str(self.nodesets[i])] = actor
            self.all_actors.append(actor)
            clipped_actor = ClippedActor(actor, self.plane)
            self.clipped_nodeset_actors[str(self.nodesets[i])] = clipped_actor
            self.all_actors.append(clipped_actor)
        for i in xrange(num_blocks):
            actor = ExodusActor(self.renderer, self.data, ExodusMap.element_vtk_block, i)
            self.block_actors[str(self.blocks[i])] = actor
            self.all_actors.append(actor)
            clipped_actor = ClippedActor(actor, self.plane)
            self.clipped_block_actors[str(self.blocks[i])] = clipped_actor
            self.all_actors.append(clipped_actor)
|
gleicher27/Tardigrade
|
moose/gui/vtk/ExodusRenderer.py
|
Python
|
lgpl-2.1
| 4,695
|
[
"VTK"
] |
b24b0d254d86d4a1f446555da6a40b3702bc815bc47f47624677357fe8f9b8d3
|
"""
Copyright (c) 2015 Andreea Georgescu
Created on Wed Nov 19 00:18:55 2014
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import absolute_import
from __future__ import division
import numpy as np
pi = np.pi
name = "DAMA2010Na"
modulated = True
energy_resolution_type = "Gaussian"
def EnergyResolution(e):
    """Detector energy resolution sigma(E) = 0.448*sqrt(E) + 0.0091*E.

    Accepts a scalar or NumPy array energy e and returns the same shape.
    """
    sqrt_part = 0.448 * np.sqrt(e)
    linear_part = 0.0091 * e
    return sqrt_part + linear_part
# Form-factor choices: Gaussian form factor for the spin-dependent (SD)
# channels, Helm form factor for spin-independent (SI).
FFSD = 'GaussianFFSD'
FFSI = 'HelmFF'
FF = {'SI': FFSI,
      'SDPS': FFSD,
      'SDAV': FFSD,
      }
# Target nuclide Na-23: one row of [A, Z, C] (C presumably the mass
# fraction in the NaI crystal -- TODO confirm against the framework).
target_nuclide_AZC_list = np.array([[23, 11, 0.153373]])
# One row of [J, Sp-term, Sn-term] spin-structure inputs; the sqrt(5/3/pi)
# normalization matches the framework's convention -- TODO confirm.
target_nuclide_JSpSn_list = \
    np.array([[3./2, 0.2477 * np.sqrt(5./3 / pi), .0198 * np.sqrt(5./3 / pi)]])
# Nuclide mass (units per framework convention).
target_nuclide_mass_list = np.array([21.4148])
num_target_nuclides = target_nuclide_mass_list.size
def QuenchingFactor(e):
    """Constant quenching factor of 0.4, broadcast to the shape of e."""
    broadcast_ones = np.ones_like(e)
    return broadcast_ones * 0.4

def QuenchingFactorOfEee(e):
    """Quenching factor as a function of Eee.

    Identical to QuenchingFactor because the factor is constant.
    """
    return QuenchingFactor(e)
# Analysis energy window (keVee) and maximum recoil energy considered.
Ethreshold = 2.
Emaximum = 1000.
ERmaximum = 2500.

def Efficiency_ER(er):
    """Unit detection efficiency for every recoil energy in er."""
    return np.ones_like(er)

def Efficiency(e):
    """Step-function efficiency for a scalar measured energy e.

    Returns array(1.) inside [Ethreshold, Emaximum), array(0.) outside.
    """
    in_window = Ethreshold <= e < Emaximum
    return np.array(1.) if in_window else np.array(0.)
# Exposure: 1.33 (ton yr) * 1000 (kg/ton) * 365.25 (day/yr) -> kg day,
# presumably -- TODO confirm units against the framework.
Exposure = 1.33 * 1000 * 365.25
# No individual recoil events: this is a binned (modulation) data set.
ERecoilList = np.array([])
# Bin edges in measured energy, with per-bin data values and error.
BinEdges = np.array([2., 6., 10.])
BinData = np.array([0.0116, 0.000])
BinError = np.array([0.0013])
|
SamWitte/Codds_DarkMatter
|
src/Data/DAMA2010Na.py
|
Python
|
gpl-2.0
| 1,823
|
[
"Gaussian"
] |
ee883136419b3df0148dcdaefb6c782dd510131056edca549e0553a7307eeeeb
|
"""Support for package tracking sensors from 17track.net."""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LOCATION,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle, slugify
_LOGGER = logging.getLogger(__name__)
# State-attribute keys exposed by the sensors below.
ATTR_DESTINATION_COUNTRY = "destination_country"
ATTR_FRIENDLY_NAME = "friendly_name"
ATTR_INFO_TEXT = "info_text"
ATTR_ORIGIN_COUNTRY = "origin_country"
ATTR_PACKAGES = "packages"
ATTR_PACKAGE_TYPE = "package_type"
ATTR_STATUS = "status"
ATTR_TRACKING_INFO_LANGUAGE = "tracking_info_language"
ATTR_TRACKING_NUMBER = "tracking_number"
# Platform configuration option names.
CONF_SHOW_ARCHIVED = "show_archived"
CONF_SHOW_DELIVERED = "show_delivered"
DATA_PACKAGES = "package_data"
DATA_SUMMARY = "summary_data"
DEFAULT_ATTRIBUTION = "Data provided by 17track.net"
DEFAULT_SCAN_INTERVAL = timedelta(minutes=10)
# Format templates; filled below with (account id, tracking number) or
# (tracking number) respectively.
UNIQUE_ID_TEMPLATE = "package_{0}_{1}"
ENTITY_ID_TEMPLATE = "sensor.seventeentrack_package_{0}"
NOTIFICATION_DELIVERED_ID = "package_delivered_{0}"
NOTIFICATION_DELIVERED_TITLE = "Package {0} delivered"
NOTIFICATION_DELIVERED_MESSAGE = (
    "Package Delivered: {0}<br />" + "Visit 17.track for more information: "
    "https://t.17track.net/track#nums={1}"
)
VALUE_DELIVERED = "Delivered"
# 17track credentials are required; archived/delivered visibility is opt-in.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_SHOW_ARCHIVED, default=False): cv.boolean,
        vol.Optional(CONF_SHOW_DELIVERED, default=False): cv.boolean,
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Configure the platform and add the sensors.

    Logs in to 17track.net with the configured credentials, then creates a
    SeventeenTrackData coordinator whose first update registers the package
    and summary sensors. Setup aborts (with an error log) on bad credentials
    or an API failure.
    """
    # Imported lazily so the py17track dependency loads only with the platform.
    from py17track import Client
    from py17track.errors import SeventeenTrackError
    websession = aiohttp_client.async_get_clientsession(hass)
    client = Client(websession)
    try:
        login_result = await client.profile.login(
            config[CONF_USERNAME], config[CONF_PASSWORD]
        )
        if not login_result:
            _LOGGER.error("Invalid username and password provided")
            return
    except SeventeenTrackError as err:
        _LOGGER.error("There was an error while logging in: %s", err)
        return
    scan_interval = config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
    data = SeventeenTrackData(
        client,
        async_add_entities,
        scan_interval,
        config[CONF_SHOW_ARCHIVED],
        config[CONF_SHOW_DELIVERED],
    )
    # First update adds entities as a side effect (see SeventeenTrackData).
    await data.async_update()
class SeventeenTrackSummarySensor(Entity):
    """Define a summary sensor.

    Reports the number of packages in one 17track status (e.g. "Delivered"),
    with the matching packages listed in the state attributes.
    """
    def __init__(self, data, status, initial_state):
        """Initialize with the shared data coordinator, the status this
        sensor summarizes, and the initial package count."""
        self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
        self._data = data
        self._state = initial_state
        self._status = status
    @property
    def available(self):
        """Return whether the entity is available."""
        return self._state is not None
    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        return self._attrs
    @property
    def icon(self):
        """Return the icon."""
        return "mdi:package"
    @property
    def name(self):
        """Return the name."""
        return "Seventeentrack Packages {0}".format(self._status)
    @property
    def state(self):
        """Return the state."""
        return self._state
    @property
    def unique_id(self):
        """Return a unique, HASS-friendly identifier for this entity."""
        return "summary_{0}_{1}".format(self._data.account_id, slugify(self._status))
    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return "packages"
    async def async_update(self):
        """Update the sensor.

        Refreshes the shared data, rebuilds the attribute list of packages
        currently in this sensor's status, and updates the count.
        """
        await self._data.async_update()
        package_data = []
        for package in self._data.packages.values():
            if package.status != self._status:
                continue
            package_data.append(
                {
                    ATTR_FRIENDLY_NAME: package.friendly_name,
                    ATTR_INFO_TEXT: package.info_text,
                    ATTR_STATUS: package.status,
                    ATTR_TRACKING_NUMBER: package.tracking_number,
                }
            )
        # NOTE: a previously set ATTR_PACKAGES is kept (not cleared) when the
        # current update finds no matching packages.
        if package_data:
            self._attrs[ATTR_PACKAGES] = package_data
        self._state = self._data.summary.get(self._status)
class SeventeenTrackPackageSensor(Entity):
    """Define an individual package sensor.

    The sensor state is the package's current 17track status; the package
    removes its own entity once it disappears upstream or is delivered
    (when delivered packages are hidden).
    """
    def __init__(self, data, package):
        """Initialize from the shared data coordinator and a py17track
        package object."""
        self._attrs = {
            ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,
            ATTR_DESTINATION_COUNTRY: package.destination_country,
            ATTR_INFO_TEXT: package.info_text,
            ATTR_LOCATION: package.location,
            ATTR_ORIGIN_COUNTRY: package.origin_country,
            ATTR_PACKAGE_TYPE: package.package_type,
            ATTR_TRACKING_INFO_LANGUAGE: package.tracking_info_language,
            ATTR_TRACKING_NUMBER: package.tracking_number,
        }
        self._data = data
        self._friendly_name = package.friendly_name
        self._state = package.status
        self._tracking_number = package.tracking_number
        self.entity_id = ENTITY_ID_TEMPLATE.format(self._tracking_number)
    @property
    def available(self):
        """Return whether the entity is available."""
        # Available only while the coordinator still tracks this number.
        return self._data.packages.get(self._tracking_number) is not None
    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        return self._attrs
    @property
    def icon(self):
        """Return the icon."""
        return "mdi:package"
    @property
    def name(self):
        """Return the name."""
        # Fall back to the tracking number when no friendly name is set.
        name = self._friendly_name
        if not name:
            name = self._tracking_number
        return "Seventeentrack Package: {0}".format(name)
    @property
    def state(self):
        """Return the state."""
        return self._state
    @property
    def unique_id(self):
        """Return a unique, HASS-friendly identifier for this entity."""
        return UNIQUE_ID_TEMPLATE.format(self._data.account_id, self._tracking_number)
    async def async_update(self):
        """Update the sensor.

        Removes the entity when the package vanished upstream, notifies and
        removes on delivery (if delivered packages are hidden), otherwise
        refreshes the mutable attributes and status.
        """
        await self._data.async_update()
        if not self.available:
            # Entity removal must happen outside this coroutine.
            self.hass.async_create_task(self._remove())
            return
        package = self._data.packages.get(self._tracking_number, None)
        # If the user has elected to not see delivered packages and one gets
        # delivered, post a notification:
        if package.status == VALUE_DELIVERED and not self._data.show_delivered:
            self._notify_delivered()
            self.hass.async_create_task(self._remove())
            return
        self._attrs.update(
            {ATTR_INFO_TEXT: package.info_text, ATTR_LOCATION: package.location}
        )
        self._state = package.status
        self._friendly_name = package.friendly_name
    async def _remove(self):
        """Remove entity itself."""
        await self.async_remove()
        # Also purge the entity-registry entry so the id can be reused.
        reg = await self.hass.helpers.entity_registry.async_get_registry()
        entity_id = reg.async_get_entity_id(
            "sensor",
            "seventeentrack",
            UNIQUE_ID_TEMPLATE.format(self._data.account_id, self._tracking_number),
        )
        if entity_id:
            reg.async_remove(entity_id)
    def _notify_delivered(self):
        """Notify when package is delivered."""
        _LOGGER.info("Package delivered: %s", self._tracking_number)
        identification = (
            self._friendly_name if self._friendly_name else self._tracking_number
        )
        message = NOTIFICATION_DELIVERED_MESSAGE.format(
            self._tracking_number, identification
        )
        title = NOTIFICATION_DELIVERED_TITLE.format(identification)
        notification_id = NOTIFICATION_DELIVERED_TITLE.format(self._tracking_number)
        self.hass.components.persistent_notification.create(
            message, title=title, notification_id=notification_id
        )
class SeventeenTrackData:
    """Define a data handler for 17track.net.

    Shared by all sensors of this platform: holds the latest package dict
    (keyed by tracking number) and the per-status summary, throttled to the
    configured scan interval, and creates new sensor entities as packages
    and summary statuses appear.
    """
    def __init__(
        self, client, async_add_entities, scan_interval, show_archived, show_delivered
    ):
        """Initialize."""
        self._async_add_entities = async_add_entities
        self._client = client
        self._scan_interval = scan_interval
        self._show_archived = show_archived
        self.account_id = client.profile.account_id
        self.packages = {}
        self.show_delivered = show_delivered
        self.summary = {}
        # Rate-limit updates: all sensors share this throttled coroutine.
        self.async_update = Throttle(self._scan_interval)(self._async_update)
        self.first_update = True
    async def _async_update(self):
        """Get updated data from 17track.net."""
        from py17track.errors import SeventeenTrackError
        try:
            packages = await self._client.profile.packages(
                show_archived=self._show_archived
            )
            _LOGGER.debug("New package data received: %s", packages)
            new_packages = {p.tracking_number: p for p in packages}
            # Create sensors only for tracking numbers we have not seen yet.
            to_add = set(new_packages) - set(self.packages)
            _LOGGER.debug("Will add new tracking numbers: %s", to_add)
            if to_add:
                self._async_add_entities(
                    [
                        SeventeenTrackPackageSensor(self, new_packages[tracking_number])
                        for tracking_number in to_add
                    ],
                    True,
                )
            self.packages = new_packages
        except SeventeenTrackError as err:
            _LOGGER.error("There was an error retrieving packages: %s", err)
        try:
            self.summary = await self._client.profile.summary(
                show_archived=self._show_archived
            )
            _LOGGER.debug("New summary data received: %s", self.summary)
            # creating summary sensors on first update
            if self.first_update:
                self.first_update = False
                self._async_add_entities(
                    [
                        SeventeenTrackSummarySensor(self, status, quantity)
                        for status, quantity in self.summary.items()
                    ],
                    True,
                )
        except SeventeenTrackError as err:
            _LOGGER.error("There was an error retrieving the summary: %s", err)
            # Fall back to an empty summary so sensors read "unavailable".
            self.summary = {}
|
fbradyirl/home-assistant
|
homeassistant/components/seventeentrack/sensor.py
|
Python
|
apache-2.0
| 10,883
|
[
"VisIt"
] |
efdd262925dda82df82a206654fbe077514d0129096360ddda8f650536c28d4e
|
# coding: utf-8
# Copyright 2017 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
**Unit-tests for the Beam Profile class.
Run as python testBeamProfileObject.py in console or via travis. **
:Authors: **Danilo Quartullo**
'''
# General imports
# -----------------
from __future__ import division, print_function
import unittest
import numpy as np
import os
# import matplotlib.pyplot as plt
# BLonD imports
# --------------
from blond.beam.beam import Beam
from blond.input_parameters.ring import Ring
import blond.beam.profile as profileModule
from blond.beam.beam import Proton
from blond.input_parameters.rf_parameters import RFStation
class testProfileClass(unittest.TestCase):
    """Regression test for BLonD beam Profile slicing, fitting and filtering.

    Four Profile objects are built from the same pre-generated Gaussian
    bunch and compared bin-by-bin against reference values.
    """
    # Run before every test
    def setUp(self):
        """
        Slicing of the same Gaussian profile using four distinct settings to
        test different features.
        """
        # Ring parameters
        n_turns = 1
        ring_length = 125
        alpha = 0.001
        momentum = 1e9
        # Ring object initialization
        self.ring = Ring(ring_length, alpha, momentum, Proton(), n_turns)
        # RF object initialization
        self.rf_params = RFStation(Ring=self.ring, harmonic=[1],
                                   voltage=[7e6], phi_rf_d=[0.],
                                   n_rf=1)
        # Beam parameters
        n_macroparticles = 100000
        intensity = 1e10
        # Beam object parameters
        dir_path = os.path.dirname(os.path.realpath(__file__))
        my_beam = Beam(self.ring, n_macroparticles, intensity)
        # Arrival times loaded from disk so the slicing is reproducible.
        my_beam.dt = np.load(dir_path+'/dt_coordinates.npz')['arr_0']
        # First profile object initialization and tracking
        # (all-default options: plain histogram)
        self.profile1 = profileModule.Profile(my_beam)
        self.profile1.track()
        # Second profile object initialization and tracking
        # (cuts given in RF radians, FWHM bunch-length fit, direct slicing)
        n_slices = 200
        CutOptions = profileModule.CutOptions(
            cut_left=0, cut_right=2*np.pi,
            n_slices=n_slices,
            cuts_unit='rad',
            RFSectionParameters=self.rf_params)
        FitOptions = profileModule.FitOptions(
            fit_option='fwhm',
            fitExtraOptions=None)
        FilterOptions = profileModule.FilterOptions(
            filterMethod=None,
            filterExtraOptions=None)
        OtherSlicesOptions = profileModule.OtherSlicesOptions(
            smooth=False,
            direct_slicing=True)
        self.profile2 = profileModule.Profile(
            my_beam,
            CutOptions=CutOptions,
            FitOptions=FitOptions,
            FilterOptions=FilterOptions,
            OtherSlicesOptions=OtherSlicesOptions)
        # Third profile object initialization and tracking
        # (cuts in seconds, RMS fit, smoothed histogram, explicit track())
        n_slices = 150
        CutOptions = profileModule.CutOptions(
            cut_left=0,
            cut_right=self.ring.t_rev[0],
            n_slices=n_slices,
            cuts_unit='s')
        FitOptions = profileModule.FitOptions(
            fit_option='rms',
            fitExtraOptions=None)
        FilterOptions = profileModule.FilterOptions(
            filterMethod=None,
            filterExtraOptions=None)
        OtherSlicesOptions = profileModule.OtherSlicesOptions(
            smooth=True,
            direct_slicing=False)
        self.profile3 = profileModule.Profile(
            my_beam,
            CutOptions=CutOptions,
            FitOptions=FitOptions,
            FilterOptions=FilterOptions,
            OtherSlicesOptions=OtherSlicesOptions)
        self.profile3.track()
        # Fourth profile object initialization and tracking
        # (Gaussian fit plus Chebyshev low-pass filter)
        n_slices = 100
        CutOptions = profileModule.CutOptions(
            cut_left=0,
            cut_right=self.ring.t_rev[0],
            n_slices=n_slices,
            cuts_unit='s')
        FitOptions = profileModule.FitOptions(
            fit_option='gaussian',
            fitExtraOptions=None)
        filter_option = {'pass_frequency': 1e7,
                         'stop_frequency': 1e8,
                         'gain_pass': 1,
                         'gain_stop': 2,
                         'transfer_function_plot': False}
        FilterOptions = profileModule.FilterOptions(
            filterMethod='chebishev',
            filterExtraOptions=filter_option)
        OtherSlicesOptions = profileModule.OtherSlicesOptions(
            smooth=False,
            direct_slicing=True)
        self.profile4 = profileModule.Profile(
            my_beam,
            CutOptions=CutOptions,
            FitOptions=FitOptions,
            FilterOptions=FilterOptions,
            OtherSlicesOptions=OtherSlicesOptions)
    def test(self):
        """Compare all four profiles and the fitted bunch position/length
        against frozen reference values."""
        rtol = 1e-6             # relative tolerance
        atol = 0                # absolute tolerance
        delta = 1e-14
        self.assertAlmostEqual(self.ring.t_rev[0], 5.71753954209e-07,
                               delta=delta, msg='Ring: t_rev[0] not correct')
        # Profiles 1 and 2 are raw histograms: exact integer counts expected.
        self.assertSequenceEqual(
            self.profile1.n_macroparticles.tolist(),
            [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 2.0, 2.0, 5.0,
             7.0, 10.0, 9.0, 21.0, 20.0, 35.0, 40.0, 59.0, 70.0, 97.0, 146.0,
             164.0, 183.0, 246.0, 324.0, 412.0, 422.0, 523.0, 637.0, 797.0,
             909.0, 1074.0, 1237.0, 1470.0, 1641.0, 1916.0, 1991.0, 2180.0,
             2464.0, 2601.0, 3010.0, 3122.0, 3233.0, 3525.0, 3521.0, 3657.0,
             3650.0, 3782.0, 3845.0, 3770.0, 3763.0, 3655.0, 3403.0, 3433.0,
             3066.0, 3038.0, 2781.0, 2657.0, 2274.0, 2184.0, 1934.0, 1724.0,
             1452.0, 1296.0, 1111.0, 985.0, 803.0, 694.0, 546.0, 490.0, 429.0,
             324.0, 239.0, 216.0, 147.0, 122.0, 87.0, 83.0, 62.0, 48.0, 28.0,
             26.0, 14.0, 18.0, 12.0, 6.0, 7.0, 4.0, 2.0, 2.0, 1.0, 1.0, 2.0,
             0.0, 0.0, 0.0, 0.0],
            msg="Profile1 not correct")
        self.assertSequenceEqual(
            self.profile2.n_macroparticles.tolist(),
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
             0.0, 0.0, 1.0, 0.0, 4.0, 6.0, 11.0, 13.0, 24.0, 31.0, 50.0, 71.0,
             99.0, 154.0, 212.0, 246.0, 365.0, 502.0, 560.0, 741.0, 942.0,
             1183.0, 1436.0, 1824.0, 2114.0, 2504.0, 2728.0, 3132.0, 3472.0,
             4011.0, 4213.0, 4592.0, 4667.0, 4742.0, 4973.0, 4973.0, 5001.0,
             4737.0, 4470.0, 4245.0, 4031.0, 3633.0, 3234.0, 2877.0, 2540.0,
             2138.0, 1754.0, 1466.0, 1224.0, 966.0, 746.0, 632.0, 468.0, 347.0,
             272.0, 178.0, 122.0, 100.0, 75.0, 49.0, 31.0, 18.0, 20.0, 10.0,
             9.0, 4.0, 2.0, 1.0, 3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            msg="Profile2 not correct")
        # Profiles 3 and 4 are smoothed/filtered: float comparison needed.
        np.testing.assert_allclose(
            self.profile3.n_macroparticles.tolist(),
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.823322140525,
             0.176677859475, 0.0, -0.255750723657, 1.58255526749, 5.591836814,
             8.52273531218, 26.7796551186, 34.3314288768, 51.0732749092,
             96.1902354197, 157.367580381, 225.376283353, 374.772960616,
             486.894110696, 747.13998452, 949.483971664, 1340.35510048,
             1824.62356727, 2240.79797642, 2904.90319468, 3577.06628686,
             4007.96759936, 4751.84403436, 5592.30491982, 5865.72609993,
             6254.85130914, 6437.0578678, 6667.35522794, 6505.87820056,
             6175.35102937, 5744.45114657, 5166.14734563, 4570.9693115,
             3838.66240227, 3311.19755852, 2643.39729925, 2057.56970401,
             1570.88713405, 1167.90898075, 812.225096261, 674.455899421,
             401.231764382, 254.280874938, 178.332275974, 128.130564109,
             70.1303218524, 41.1867595808, 24.1789148058, 15.6635863931,
             8.08619267781, 2.85660788584, 4.39836119938, 1.71862177506,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            rtol=rtol, atol=atol,
            err_msg="Profile3 not correct")
        np.testing.assert_allclose(
            self.profile4.n_macroparticles.tolist(),
            [3.04390342739e-10, 6.42161174461e-10, 1.3547439458e-09,
             2.8580537592e-09, 6.0295314962e-09, 1.27202820964e-08,
             2.68355139555e-08, 5.66139024119e-08, 1.19436279537e-07,
             2.51970351131e-07, 5.31572635175e-07, 1.12143934871e-06,
             2.36585958272e-06, 4.99116744172e-06, 1.05296834238e-05,
             2.22140880466e-05, 4.68642491784e-05, 9.88677926569e-05,
             0.000208577766554, 0.000440028886368, 0.000928312849628,
             0.00195842767028, 0.0041316232359, 0.0087163344465,
             0.0183885320237, 0.0387936135382, 0.0818414677914,
             0.172657951641, 0.364250166442, 0.768445255442, 1.62116085321,
             3.42010376585, 7.06934966864, 14.4070273867, 29.1322663172,
             57.0673409924, 107.367201129, 195.236438646, 343.216047763,
             576.683888858, 925.972171496, 1429.04196501, 2121.53804001,
             3024.03256787, 4107.04688385, 5292.37720345, 6501.56643031,
             7614.78027336, 8457.77133102, 8917.59371854, 8940.7377173,
             8510.50678563, 7697.68236675, 6617.05862601, 5396.53357642,
             4175.00669728, 3071.53152854, 2158.0602225, 1451.53603502,
             931.07854988, 569.13404729, 335.967612417, 192.376031593,
             106.641608202, 57.6152988997, 30.252175918, 15.4392857786,
             7.59503067718, 3.60011486661, 1.70648778177, 0.808891009658,
             0.383421828445, 0.181745496949, 0.0861490484128,
             0.0408354466385, 0.0193563798194, 0.00917510326339,
             0.00434908390304, 0.00206150604006, 0.000977172951353,
             0.000463189027003, 0.000219555887664, 0.000104071523714,
             4.93308658832e-05, 2.33832872042e-05, 1.10838946506e-05,
             5.25386869489e-06, 2.49038240919e-06, 1.18046432148e-06,
             5.59551018822e-07, 2.65232364082e-07, 1.25722596556e-07,
             5.95936750584e-08, 2.82479541435e-08, 1.33897925887e-08,
             6.34688728235e-09, 3.00848674436e-09, 1.42605775497e-09,
             6.75979730414e-10, 3.20452612975e-10],
            rtol=rtol, atol=atol,
            err_msg="Profile4 not correct")
        # Bunch position/length from the fwhm, rms and gaussian fits.
        np.testing.assert_allclose(
            np.array([self.profile2.bunchPosition,
                      self.profile3.bunchPosition,
                      self.profile4.bunchPosition]),
            [2.86004598801e-07, 2.86942707778e-07, 2.86090181555e-07],
            rtol=rtol, atol=atol,
            err_msg='Bunch position values not correct')
        np.testing.assert_allclose(
            np.array([self.profile2.bunchLength,
                      self.profile3.bunchLength,
                      self.profile4.bunchLength]),
            [9.27853156526e-08, 9.24434506817e-08, 9.18544356769e-08],
            rtol=rtol, atol=atol,
            err_msg='Bunch length values not correct')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
dquartul/BLonD
|
unittests/beam_profile/test_beam_profile_object.py
|
Python
|
gpl-3.0
| 12,422
|
[
"Gaussian"
] |
6df566c2ab07e84b569a8114ec6ea737191486d49cf4fb32e563457d2002f603
|
import base64
import json
from django.contrib.auth import get_user_model
from django.http import HttpRequest
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.test.utils import override_settings
from guardian.shortcuts import assign_perm, get_anonymous_user
from geonode.geoserver.helpers import OGC_Servers_Handler
from geonode.base.populate_test_data import create_models
from geonode.layers.populate_layers_data import create_layer_data
from geonode.layers.models import Layer
class LayerTests(TestCase):
fixtures = ['bobby']
    def setUp(self):
        """Store admin credentials and seed layer fixtures before each test."""
        self.user = 'admin'
        self.passwd = 'admin'
        # Populate test layers and their associated attribute/style data.
        create_models(type='layer')
        create_layer_data()
    def test_style_manager(self):
        """
        Ensures the layer_style_manage route returns a 200.
        """
        layer = Layer.objects.all()[0]
        # Grant bobby per-object style permission so the view authorizes him.
        bob = get_user_model().objects.get(username='bobby')
        assign_perm('change_layer_style', bob, layer)
        c = Client()
        logged_in = c.login(username='bobby', password='bob')
        self.assertEquals(logged_in, True)
        response = c.get(reverse('layer_style_manage', args=(layer.typename,)))
        self.assertEqual(response.status_code, 200)
def test_feature_edit_check(self):
"""Verify that the feature_edit_check view is behaving as expected
"""
# Setup some layer names to work with
valid_layer_typename = Layer.objects.all()[0].typename
invalid_layer_typename = "n0ch@nc3"
c = Client()
# Test that an invalid layer.typename is handled for properly
response = c.post(
reverse(
'feature_edit_check',
args=(
invalid_layer_typename,
)))
self.assertEquals(response.status_code, 404)
# First test un-authenticated
response = c.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], False)
# Next Test with a user that does NOT have the proper perms
logged_in = c.login(username='bobby', password='bob')
self.assertEquals(logged_in, True)
response = c.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], False)
# Login as a user with the proper permission and test the endpoint
logged_in = c.login(username='admin', password='admin')
self.assertEquals(logged_in, True)
response = c.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
# Test that the method returns 401 because it's not a datastore
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], False)
layer = Layer.objects.all()[0]
layer.storeType = "dataStore"
layer.save()
# Test that the method returns authorized=True if it's a datastore
if settings.OGC_SERVER['default']['DATASTORE']:
# The check was moved from the template into the view
response = c.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], True)
def test_layer_acls(self):
""" Verify that the layer_acls view is behaving as expected
"""
# Test that HTTP_AUTHORIZATION in request.META is working properly
valid_uname_pw = '%s:%s' % ('bobby', 'bob')
invalid_uname_pw = '%s:%s' % ('n0t', 'v@l1d')
valid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' + base64.b64encode(valid_uname_pw),
}
invalid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' +
base64.b64encode(invalid_uname_pw),
}
bob = get_user_model().objects.get(username='bobby')
layer_ca = Layer.objects.get(typename='geonode:CA')
assign_perm('change_layer_data', bob, layer_ca)
# Test that requesting when supplying the geoserver credentials returns
# the expected json
expected_result = {
u'email': u'bobby@bob.com',
u'fullname': u'bobby',
u'is_anonymous': False,
u'is_superuser': False,
u'name': u'bobby',
u'ro': [u'geonode:layer2',
u'geonode:mylayer',
u'geonode:foo',
u'geonode:whatever',
u'geonode:fooey',
u'geonode:quux',
u'geonode:fleem'],
u'rw': [u'geonode:CA']
}
c = Client()
response = c.get(reverse('layer_acls'), **valid_auth_headers)
response_json = json.loads(response.content)
self.assertEquals(expected_result, response_json)
# Test that requesting when supplying invalid credentials returns the
# appropriate error code
response = c.get(reverse('layer_acls'), **invalid_auth_headers)
self.assertEquals(response.status_code, 401)
# Test logging in using Djangos normal auth system
c.login(username='admin', password='admin')
# Basic check that the returned content is at least valid json
response = c.get(reverse('layer_acls'))
response_json = json.loads(response.content)
self.assertEquals('admin', response_json['fullname'])
self.assertEquals('', response_json['email'])
# TODO Lots more to do here once jj0hns0n understands the ACL system
# better
def test_resolve_user(self):
"""Verify that the resolve_user view is behaving as expected
"""
# Test that HTTP_AUTHORIZATION in request.META is working properly
valid_uname_pw = "%s:%s" % ('admin', 'admin')
invalid_uname_pw = "%s:%s" % ("n0t", "v@l1d")
valid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' + base64.b64encode(valid_uname_pw),
}
invalid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' +
base64.b64encode(invalid_uname_pw),
}
c = Client()
response = c.get(reverse('layer_resolve_user'), **valid_auth_headers)
response_json = json.loads(response.content)
self.assertEquals({'geoserver': False,
'superuser': True,
'user': 'admin',
'fullname': 'admin',
'email': ''},
response_json)
# Test that requesting when supplying invalid credentials returns the
# appropriate error code
response = c.get(reverse('layer_acls'), **invalid_auth_headers)
self.assertEquals(response.status_code, 401)
# Test logging in using Djangos normal auth system
c.login(username='admin', password='admin')
# Basic check that the returned content is at least valid json
response = c.get(reverse('layer_resolve_user'))
response_json = json.loads(response.content)
self.assertEquals('admin', response_json['user'])
self.assertEquals('admin', response_json['fullname'])
self.assertEquals('', response_json['email'])
class UtilsTests(TestCase):
    """Unit tests for the OGC_Servers_Handler settings wrapper."""
    def setUp(self):
        # Baseline settings dictionaries mirroring geonode's shipped defaults.
        self.OGC_DEFAULT_SETTINGS = {
            'default': {
                'BACKEND': 'geonode.geoserver',
                'LOCATION': 'http://localhost:8080/geoserver/',
                'USER': 'admin',
                'PASSWORD': 'geoserver',
                'MAPFISH_PRINT_ENABLED': True,
                'PRINT_NG_ENABLED': True,
                'GEONODE_SECURITY_ENABLED': True,
                'GEOGIT_ENABLED': False,
                'WMST_ENABLED': False,
                'BACKEND_WRITE_ENABLED': True,
                'WPS_ENABLED': False,
                'DATASTORE': str(),
                'GEOGIT_DATASTORE_DIR': str(),
            }
        }
        self.UPLOADER_DEFAULT_SETTINGS = {
            'BACKEND': 'geonode.rest',
            'OPTIONS': {
                'TIME_ENABLED': False,
                'GEOGIT_ENABLED': False}}
        self.DATABASE_DEFAULT_SETTINGS = {
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
                'NAME': 'development.db'}}
    def test_ogc_server_settings(self):
        """
        Tests the OGC Servers Handler class.
        """
        with override_settings(OGC_SERVER=self.OGC_DEFAULT_SETTINGS, UPLOADER=self.UPLOADER_DEFAULT_SETTINGS):
            OGC_SERVER = self.OGC_DEFAULT_SETTINGS.copy()
            OGC_SERVER.update(
                {'PUBLIC_LOCATION': 'http://localhost:8080/geoserver/'})
            ogc_settings = OGC_Servers_Handler(OGC_SERVER)['default']
            default = OGC_SERVER.get('default')
            # Every configured key should surface as an attribute on the handler.
            self.assertEqual(ogc_settings.server, default)
            self.assertEqual(ogc_settings.BACKEND, default.get('BACKEND'))
            self.assertEqual(ogc_settings.LOCATION, default.get('LOCATION'))
            self.assertEqual(
                ogc_settings.PUBLIC_LOCATION,
                default.get('PUBLIC_LOCATION'))
            self.assertEqual(ogc_settings.USER, default.get('USER'))
            self.assertEqual(ogc_settings.PASSWORD, default.get('PASSWORD'))
            self.assertEqual(ogc_settings.DATASTORE, str())
            self.assertEqual(ogc_settings.credentials, ('admin', 'geoserver'))
            self.assertTrue(ogc_settings.MAPFISH_PRINT_ENABLED)
            self.assertTrue(ogc_settings.PRINT_NG_ENABLED)
            self.assertTrue(ogc_settings.GEONODE_SECURITY_ENABLED)
            self.assertFalse(ogc_settings.GEOGIT_ENABLED)
            self.assertFalse(ogc_settings.WMST_ENABLED)
            self.assertTrue(ogc_settings.BACKEND_WRITE_ENABLED)
            self.assertFalse(ogc_settings.WPS_ENABLED)
    def test_ogc_server_defaults(self):
        """
        Tests that OGC_SERVER_SETTINGS are built if they do not exist in the settings.
        """
        OGC_SERVER = {'default': dict()}
        defaults = self.OGC_DEFAULT_SETTINGS.get('default')
        ogc_settings = OGC_Servers_Handler(OGC_SERVER)['default']
        self.assertEqual(ogc_settings.server, defaults)
        self.assertEqual(ogc_settings.rest, defaults['LOCATION'] + 'rest')
        self.assertEqual(ogc_settings.ows, defaults['LOCATION'] + 'ows')
        # Make sure we get None vs a KeyError when the key does not exist
        self.assertIsNone(ogc_settings.SFDSDFDSF)
    def test_importer_configuration(self):
        """
        Tests that the OGC_Servers_Handler throws an ImproperlyConfigured exception when using the importer
        backend without a vector database and a datastore configured.
        """
        database_settings = self.DATABASE_DEFAULT_SETTINGS.copy()
        ogc_server_settings = self.OGC_DEFAULT_SETTINGS.copy()
        uploader_settings = self.UPLOADER_DEFAULT_SETTINGS.copy()
        uploader_settings['BACKEND'] = 'geonode.importer'
        # BUG FIX: the old `assertTrue([...])` asserted on a non-empty list,
        # which is always truthy, so the check could never fail. Assert the
        # membership directly instead.
        self.assertNotIn('geonode_imports', database_settings)
        with self.settings(UPLOADER=uploader_settings, OGC_SERVER=ogc_server_settings, DATABASES=database_settings):
            # Test the importer backend without specifying a datastore or
            # corresponding database.
            with self.assertRaises(ImproperlyConfigured):
                OGC_Servers_Handler(ogc_server_settings)['default']
        ogc_server_settings['default']['DATASTORE'] = 'geonode_imports'
        # Test the importer backend with a datastore but no corresponding
        # database.
        with self.settings(UPLOADER=uploader_settings, OGC_SERVER=ogc_server_settings, DATABASES=database_settings):
            with self.assertRaises(ImproperlyConfigured):
                OGC_Servers_Handler(ogc_server_settings)['default']
        database_settings['geonode_imports'] = database_settings[
            'default'].copy()
        database_settings['geonode_imports'].update(
            {'NAME': 'geonode_imports'})
        # Test the importer backend with a datastore and a corresponding
        # database, no exceptions should be thrown.
        with self.settings(UPLOADER=uploader_settings, OGC_SERVER=ogc_server_settings, DATABASES=database_settings):
            OGC_Servers_Handler(ogc_server_settings)['default']
class SecurityTest(TestCase):
    """
    Tests for the Geonode security app.
    """
    def setUp(self):
        # NOTE(review): get_or_create stores the raw 'admin' string as the
        # password field (it is not hashed via set_password); confirm whether
        # the later c.login() calls rely on fixture data instead.
        self.admin, created = get_user_model().objects.get_or_create(
            username='admin', password='admin', is_superuser=True)
    def test_login_middleware(self):
        """
        Tests the Geonode login required authentication middleware.
        """
        from geonode.security.middleware import LoginRequiredMiddleware
        middleware = LoginRequiredMiddleware()
        # URLs that must stay reachable without authentication.
        white_list = [
            reverse('account_ajax_login'),
            reverse('account_confirm_email', kwargs=dict(key='test')),
            reverse('account_login'),
            reverse('account_password_reset'),
            reverse('forgot_username'),
            reverse('layer_acls'),
            reverse('layer_resolve_user'),
        ]
        # URLs that require a logged-in user.
        black_list = [
            reverse('account_signup'),
            reverse('document_browse'),
            reverse('maps_browse'),
            reverse('layer_browse'),
            reverse('layer_detail', kwargs=dict(layername='geonode:Test')),
            reverse('layer_remove', kwargs=dict(layername='geonode:Test')),
            reverse('profile_browse'),
        ]
        request = HttpRequest()
        request.user = get_anonymous_user()
        # Un-authenticated requests to black-listed urls should be redirected
        # (302) to the middleware's `redirect_to` path.
        for path in black_list:
            request.path = path
            response = middleware.process_request(request)
            self.assertEqual(response.status_code, 302)
            self.assertTrue(
                response.get('Location').startswith(
                    middleware.redirect_to))
        # The middleware should return None when an un-authenticated user
        # attempts to visit a white-listed url.
        for path in white_list:
            request.path = path
            response = middleware.process_request(request)
            self.assertIsNone(
                response,
                msg="Middleware activated for white listed path: {0}".format(path))
        c = Client()
        c.login(username='admin', password='admin')
        self.assertTrue(self.admin.is_authenticated())
        request.user = self.admin
        # The middleware should return None when an authenticated user attempts
        # to visit a black-listed url.
        for path in black_list:
            request.path = path
            response = middleware.process_request(request)
            self.assertIsNone(response)
|
boedy1996/SPARC
|
geonode/geoserver/tests.py
|
Python
|
gpl-3.0
| 15,606
|
[
"VisIt"
] |
1de41f7834eeb4dacdc718d99772a09e04d6b04e0c07fb27b593da001457c556
|
import logging
import yaml
from .bot import Espresso
# Load the bot configuration.
# BUG FIX: the original used the Python-2-only `file()` builtin (removed in
# Python 3), never closed the handle, and called yaml.load() without an
# explicit Loader -- which is deprecated and unsafe (arbitrary object
# construction) on untrusted input. Use a context manager and safe_load.
with open('botconfig.yaml', 'r') as config_file:
    config = yaml.safe_load(config_file)
# Configure root logging from the config's logging.level name (e.g. "DEBUG").
logging.basicConfig(level=getattr(logging, config['logging']['level']),
                    format="%(levelname)s from %(filename)s at %(asctime)s | %(message)s")
logging.debug("config is %s", config)
robot = Espresso(config)
|
ratchetrobotics/espresso
|
espresso/main.py
|
Python
|
bsd-3-clause
| 332
|
[
"ESPResSo"
] |
81ec9390ac5ead78c992e1d7fd65756cbc183ffdebe33a03c15b5d7808bd879c
|
# Copyright 2008 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of Crunchberry Pie.
#
# Crunchberry Pie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Crunchberry Pie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Crunchberry Pie. If not, see <http://www.gnu.org/licenses/>.
from settings_global import *
# Site-specific overrides layered on top of the shared settings_global module.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# People who receive code-error notifications by e-mail.
ADMINS = (
    ('Your Name', 'yourname@example.com'),
)
DEFAULT_FROM_EMAIL = "website@example.com"
MANAGERS = ADMINS
# Public URL of the site and filesystem root of the installation.
ROOT_URL = 'http://example.com'
PIE_ROOT = '/var/www/pie'
STATIC_DOC_ROOT = PIE_ROOT + '/public'
MEDIA_ROOT = PIE_ROOT + '/public/'
TEMPLATE_DIRS = (
    PIE_ROOT + '/templates',
)
# Logging helps debugging
#import logging
#logging.basicConfig(
#    level = logging.INFO,
#    format = '%(asctime)s %(levelname)s %(message)s',
#    filename = PIE_ROOT + '/newsmixer.log',
#    filemode = 'a'
#)
# Database connection settings; fill in for your deployment.
DATABASE_ENGINE = ''           # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = ''             # Or path to database file if using sqlite3.
DATABASE_USER = ''             # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.
# Replace with keys from Facebook
FACEBOOK_API_KEY = '00000000000000000000000000000000'
FACEBOOK_SECRET_KEY = '00000000000000000000000000000000'
FACEBOOK_INTERNAL = False
# Make this unique, and don't share it with anybody.
SECRET_KEY = '00000000000000000000000000000000000000000000000000'
# Please setup caching
#CACHE_BACKEND = 'db://newsmixer_cache'
# If you're using Pie as a backend for widgets, turn this on. Little things will change
#WIDGET_MODE = True
|
brianboyer/newsmixer
|
social/settings.example.py
|
Python
|
gpl-3.0
| 2,370
|
[
"Brian"
] |
55cf65763710158364da1f411516ba31f4fac00b0164974122f5fdd9c274d5fc
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
from itertools import chain
from MyHearthStone import ext
from MyHearthStone.ext import blank_minion
from MyHearthStone.ext import std_events, std_triggers
from MyHearthStone.ext import enc_common
from MyHearthStone.ext import Spell, Hero, HeroPower, Enchantment
from MyHearthStone.utils.game import order_of_play, Zone, DHBonusEventType
__author__ = 'fyabc'
#############
# Druid (1) #
#############
# Druid hero (id 0) *
class StdDruid(Hero):
    # Standard Druid hero: klass 1, linked to hero power id 0 (Shapeshift below).
    data = {
        'id': 0,
        'klass': 1, 'hero_power': 0,
    }
class Enc_变形(Enchantment):
    # Enchantment attached by the Shapeshift (变形) hero power.
    data = {'id': 10000}
    def __init__(self, game, target, **kwargs):
        super().__init__(game, target, **kwargs)
        # Detach (expire) automatically when the turn ends.
        std_triggers.DetachOnTurnEnd(self.game, self)
    # apply/apply_imm grant +1 Attack (from enc_common.apply_fn_add_attack).
    apply, apply_imm = enc_common.apply_fn_add_attack(1)
class 变形(HeroPower):
    # Shapeshift: the basic Druid hero power (cost 2).
    data = {
        'id': 0,
        'klass': 1, 'is_basic': True, 'cost': 2,
    }
    def run(self, target, **kwargs):
        hero = self.game.get_hero(self.player_id)
        # Attach the one-turn +1 Attack enchantment, then gain 1 Armor.
        Enc_变形.from_card(self, self.game, hero)
        return [std_events.GainArmor(self.game, self, hero, 1)]
# Ironbark Protector (埃隆巴克保护者, 10000): vanilla 8-cost 8/8 with Taunt.
blank_minion({
    'id': 10000,
    'klass': 1, 'cost': 8, 'attack': 8, 'health': 8,
    'taunt': True,
})
# Moonfire (月火术, 10001)
class 月火术(Spell):
    data = {
        'id': 10001,
        'type': 1, 'klass': 1, 'cost': 0,
        'po_tree': '$HaveTarget',
    }
    # Register base damage 1 via the damage/healing bonus helper.
    ext.add_dh_bonus_data(data, 1)
    # Deal the (bonus-adjusted) base damage to the chosen target.
    run = ext.damage_fn(data.get('dh_values', [])[0])
# Innervate (激活, 10002)
class 激活(Spell):
    data = {
        'id': 10002,
        'type': 1, 'klass': 1, 'cost': 0,
    }
    def run(self, target, **kwargs):
        # Add 1 mana crystal of kind 'T' (presumably temporary/this-turn;
        # cf. Wild Growth below which uses 'M').
        self.game.add_mana(1, 'T', self.player_id)
        return []
# Claw (爪击, 10003) *
class Enc_爪击(Enchantment):
    data = {'id': 10001}
    def __init__(self, game, target, **kwargs):
        super().__init__(game, target, **kwargs)
        # Buff expires at the end of the turn.
        std_triggers.DetachOnTurnEnd(self.game, self)
    # apply/apply_imm grant +2 Attack (from enc_common.apply_fn_add_attack).
    apply, apply_imm = enc_common.apply_fn_add_attack(2)
class 爪击(Spell):
    # Claw: give your hero +2 Attack this turn and 2 Armor.
    data = {
        'id': 10003,
        'type': 1, 'klass': 1, 'cost': 1,
    }
    def run(self, target, **kwargs):
        hero = self.game.get_hero(self.player_id)
        Enc_爪击.from_card(self, self.game, hero)
        return [std_events.GainArmor(self.game, self, hero, 2)]
# Mark of the Wild (野性印记, 10004) *
# Enchantment: +2/+2 and sets the target's 'taunt' flag (via set_target_attr_temp).
Enc_野性印记 = ext.create_enchantment(
    {'id': 10002}, *enc_common.apply_fn_add_a_h(2, 2, apply_other=enc_common.set_target_attr_temp('taunt', True)))
class 野性印记(Spell):
    # Mark of the Wild: buff a minion with +2/+2 and Taunt.
    data = {
        'id': 10004,
        'type': 1, 'klass': 1, 'cost': 2,
        'po_tree': '$HaveTarget',
    }
    # Only minions are valid targets.
    check_target = ext.checker_minion
    def run(self, target, **kwargs):
        Enc_野性印记.from_card(self, self.game, target)
        return []
# Wild Growth (野性成长, 10005)
class 野性成长(Spell):
    data = {
        'id': 10005,
        'type': 1, 'klass': 1, 'cost': 2,
    }
    def run(self, target, **kwargs):
        """Run this spell.
        See <https://hearthstone.gamepedia.com/Wild_Growth#Notes> for more details.
        1. If, after paying this card's Cost, the casting player has 10 available and/or maximum mana,
        this card will generate an Excess Mana card in the player's hand.
        2. Otherwise, it will give an empty Mana Crystal to the casting player.
        """
        player = self.game.get_player(self.player_id)
        if player.displayed_mana() >= player.ManaMax or player.max_mana >= player.ManaMax:
            # TODO: Test the card generation.
            # "10010" is the Excess Mana derivative card defined at the end of this file.
            _, status = player.generate(Zone.Hand, 'last', "10010")
            return status['events']
        else:
            # Add 1 mana crystal of kind 'M' (presumably a permanent/maximum
            # crystal; cf. 'T' used by Innervate above).
            player.add_mana(1, 'M')
            return []
# Healing Touch (治疗之触, 10006)
class 治疗之触(Spell):
    data = {
        'id': 10006,
        'type': 1, 'klass': 1, 'cost': 3,
        'po_tree': '$HaveTarget',
    }
    # Base healing value 8, registered as a healing-type bonus event.
    ext.add_dh_bonus_data(data, 8, DHBonusEventType.Healing)
    def run(self, target, **kwargs):
        return [std_events.Healing(self.game, self, target, self.dh_values[0])]
# Savage Roar (野蛮咆哮, 10007) *
class Enc_野蛮咆哮(Enchantment):
    data = {'id': 10003}
    def __init__(self, game, target, **kwargs):
        super().__init__(game, target, **kwargs)
        # Buff is removed automatically at the end of the turn.
        std_triggers.DetachOnTurnEnd(self.game, self)
    # apply/apply_imm grant +2 Attack (from enc_common.apply_fn_add_attack).
    apply, apply_imm = enc_common.apply_fn_add_attack(2)
class 野蛮咆哮(Spell):
    # Savage Roar: give the caster's characters +2 Attack this turn.
    data = {
        'id': 10007,
        'type': 1, 'klass': 1, 'cost': 3,
    }
    def run(self, target, **kwargs):
        # Collect the casting player's entities (in order of play) and attach
        # the temporary buff to each.
        targets = ext.collect_1p(self, oop=True, player_id=self.player_id)
        for e in targets:
            Enc_野蛮咆哮.from_card(self, self.game, e)
        return []
# Swipe (横扫, 10008)
class 横扫(Spell):
    data = {
        'id': 10008,
        'type': 1, 'klass': 1, 'cost': 4,
        'po_tree': '$HaveTarget',
    }
    # Damage values: 4 to the primary target, 1 to every other enemy.
    ext.add_dh_bonus_data(data, [4, 1])
    check_target = ext.checker_enemy_character
    def run(self, target, **kwargs):
        """See <https://hearthstone.gamepedia.com/Swipe#Notes> for more details.
        Like most area of effect damaging effect, Swipe deals all its damage before any on-damage
        triggered effects are activated. However, Swipe creates Damage Events in an unusual order:
        first for the targeted character, and then for all other enemy characters in reverse order
        of play; then, all Damage Events are resolved in the same order.
        """
        targets = [target]
        values = [self.dh_values[0]]
        # Enemy minions and hero in reverse order of play, skipping the primary target.
        for entity in order_of_play(
                chain(self.game.get_zone(Zone.Play, 1 - self.player_id),
                      self.game.get_zone(Zone.Hero, 1 - self.player_id)), reverse=True):
            if entity is target:
                continue
            targets.append(entity)
            values.append(self.dh_values[1])
        return [std_events.AreaDamage(self.game, self, targets, values)]
# Starfire (星火术, 10009)
class 星火术(Spell):
    data = {
        'id': 10009,
        'type': 1, 'klass': 1, 'cost': 6,
        'po_tree': '$HaveTarget',
    }
    # Base damage 5.
    ext.add_dh_bonus_data(data, 5)
    def run(self, target, **kwargs):
        # Deal damage to the target, then draw a card.
        return [std_events.Damage(self.game, self, target, self.dh_values[0]),
                std_events.DrawCard(self.game, self, self.player_id)]
# Excess Mana (法力过剩, 10010) -- derivative card generated by Wild Growth.
class 法力过剩(Spell):
    data = {
        'id': 10010,
        'type': 1, 'klass': 1, 'rarity': -1, 'cost': 0,
        'derivative': True,
    }
    # Effect: draw one card.
    run = ext.draw_card_fn(1)
|
fyabc/MiniGames
|
HearthStone2/MyHearthStone/data/packages/basic/basic_druid.py
|
Python
|
mit
| 6,554
|
[
"CRYSTAL"
] |
f21ca22fe75f5b8e071529355b6a8b52de3738a4dbb21310ec26a9b1f5f9b191
|
import numpy as np
import parabem
from parabem.pan2d import vortex_2, vortex_2_v
from parabem.vtk_export import VtkWriter
from parabem.utils import check_path
# Sample a 2D point vortex located at the origin on a regular n x n grid and
# export the resulting velocity and potential fields as a structured-grid VTK file.
source = parabem.Vector2(0, 0)
direction = parabem.Vector2(1, 0)
n = 20
a = np.linspace(-10, 10, n).tolist()
# Cartesian product of the 1D coordinates -> n*n evaluation points.
b = [parabem.Vector2(i, j) for i in a for j in a]
vel = [vortex_2_v(target, source) for target in b]
pot = [vortex_2(target, source, direction) for target in b]
writer = VtkWriter()
with open(check_path("results/vortex_2_field.vtk"), "w") as _file:
    writer.structed_grid(_file, "vortex", [n, n, 1])
    writer.points(_file, b)
    writer.data(_file, vel, name="velocity", _type="VECTORS", data_type="POINT_DATA")
    writer.data(_file, pot, name="potential", _type="SCALARS", data_type="POINT_DATA")
|
booya-at/paraBEM
|
examples/vtk/vtk_vortex_2.py
|
Python
|
gpl-3.0
| 775
|
[
"VTK"
] |
9ec2857e473bb2e8e643aa30aa53234f7caa299f46ae8ead2c3470b5d959ceb3
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
#import os
#from unittest import TestCase
#from exatomic import Universe
#from exatomic.gaussian import Output, Input
#from exatomic.gaussian.inputs import _handle_args
#class TestInput(TestCase):
# """Tests the input file generation functionality for Gaussian."""
# pass
# def setUp(self):
# fl = Output(os.sep.join(__file__.split(os.sep)[:-1]
# + ['gaussian-uo2.out']))
# self.uni = Universe(atom=fl.atom)
# self.keys = ['link0', 'route', 'basis', 'ecp']
# self.lisopt = [('key1', 'value1'), ('key2', 'value2')]
# self.dicopt = {'key1': 'value1', 'key2': 'value2'}
# self.tupopt = (('key1', 'value1'), ('key2', 'value2'))
# self.stropt = 'value'
#
# def test_from_universe(self):
# """Test the from_universe class method for input generation."""
# fl = Input.from_universe(self.uni, link0=self.lisopt,
# route=self.dicopt, basis=self.tupopt)
# self.assertEqual(fl[0][0], '%')
# self.assertEqual(fl[2][0], '#')
# self.assertEqual(len(fl.find('****')), 2)
# self.assertEqual(len(fl), 18)
#
# def test__handle_args(self):
# """Test the argument handler helper function."""
# for key in self.keys:
# lval = _handle_args(key, self.lisopt)
# self.assertEqual(lval, _handle_args(key, self.tupopt))
# self.assertEqual(self.stropt, _handle_args(key, self.stropt))
|
exa-analytics/atomic
|
exatomic/gaussian/tests/test_inputs.py
|
Python
|
apache-2.0
| 1,613
|
[
"Gaussian"
] |
4a1416ced33f8b127964eb174ba11e6a0ce7f99400d92dace11ee40e757353b0
|
import os
import sys
import math
import collections
## Distribution ########################################################################################
class Distribution(object):
    """Abstract base class for all distributions.

    Subclasses implement `mleEstimate` (maximum-likelihood fit) and, where
    available, `momEstimate` (method-of-moments fit).
    """
    def __init__(self):
        raise NotImplementedError("Subclasses should override.")
    @classmethod
    def mleEstimate(cls, points):
        """Fit the distribution to the sample `points` by maximum likelihood."""
        raise NotImplementedError("Subclasses should override.")
    @classmethod
    def momEstimate(cls, points):
        """Fit the distribution to the sample `points` by the method of moments."""
        raise NotImplementedError("Subclasses should override.")
## ContinuousDistribution ##############################################################################
class ContinuousDistribution(Distribution):
    """Base class for continuous distributions; exposes pdf() and cdf()."""
    def pdf(self, value):
        """Probability density evaluated at `value`."""
        raise NotImplementedError("Subclasses should override.")
    def cdf(self, value):
        """Cumulative probability P(X <= value)."""
        raise NotImplementedError("Subclasses should override.")
## Uniform #############################################################################################
class Uniform(ContinuousDistribution):
    """Continuous uniform distribution on the closed interval [alpha, beta]."""
    def __init__(self, alpha, beta):
        if alpha == beta:
            raise ParametrizationError("alpha and beta cannot be equivalent")
        self.alpha = alpha
        self.beta = beta
        self.range = beta - alpha
    def pdf(self, value):
        # Constant density inside the support, zero outside it.
        return 1.0 / self.range if self.alpha <= value <= self.beta else 0.0
    def cdf(self, value):
        if value < self.alpha:
            return 0.0
        if value >= self.beta:
            return 1.0
        return (value - self.alpha) / self.range
    def __str__(self):
        return "Continuous Uniform distribution: alpha = %s, beta = %s" % (self.alpha, self.beta)
    @classmethod
    def mleEstimate(cls, points):
        """Fit the support to the sample extremes (the MLE for a uniform)."""
        return cls(min(points), max(points))
## Gaussian ############################################################################################
class Gaussian(ContinuousDistribution):
    """Continuous Gaussian (normal) distribution parametrized by mean and stdev."""
    def __init__(self, mean, stdev):
        self.mean = mean
        self.stdev = stdev
        if stdev == 0.0:
            raise ParametrizationError("standard deviation must be non-zero")
        if stdev < 0.0:
            raise ParametrizationError("standard deviation must be positive")
        self.variance = math.pow(stdev, 2.0)
    def pdf(self, value):
        """Density of N(mean, stdev^2) evaluated at `value`."""
        z = float(value - self.mean) / self.stdev
        return math.exp(-math.pow(z, 2.0) / 2.0) / math.sqrt(2 * math.pi * self.variance)
    def cdf(self, value):
        """P(X <= value), computed via the error function."""
        return 0.5 * (1.0 + math.erf((value - self.mean) / math.sqrt(2.0 * self.variance)))
    def __str__(self):
        return "Continuous Gaussian (Normal) distribution: mean = %s, standard deviation = %s" % (self.mean, self.stdev)
    @classmethod
    def mleEstimate(cls, points):
        """Fit the sample mean and the (n-1)-normalized standard deviation."""
        numPoints = float(len(points))
        if numPoints <= 1:
            raise EstimationError("must provide at least 2 training points")
        mean = sum(points) / numPoints
        variance = sum(math.pow(float(p) - mean, 2.0) for p in points) / (numPoints - 1.0)
        return cls(mean, math.sqrt(variance))
## TruncatedGaussian ##################################################################################
class TruncatedGaussian(ContinuousDistribution):
    """Gaussian truncated (and renormalized) to the closed interval [alpha, beta].

    `mean` and `stdev` describe the untruncated parent Gaussian; `alpha` and
    `beta` are the lower and upper truncation bounds.
    """
    def __init__(self, mean, stdev, alpha, beta):
        self.mean = mean
        if stdev == 0.0:
            raise ParametrizationError("standard deviation must be non-zero")
        if stdev < 0.0:
            raise ParametrizationError("standard deviation must be positive")
        self.stdev = stdev
        self.variance = math.pow(stdev, 2.0)
        self.alpha = alpha
        self.beta = beta
    def pdf(self, value):
        """Density at `value`: the parent density renormalized by the mass in [alpha, beta]."""
        if self.alpha == self.beta or self.__phi(self.alpha) == self.__phi(self.beta):
            # Degenerate truncation window: treated as a point mass at alpha.
            if value == self.alpha:
                return 1.0
            else:
                return 0.0
        else:
            numerator = math.exp(-math.pow((value - self.mean) / self.stdev, 2.0) / 2.0)
            denominator = math.sqrt(2 * math.pi) * self.stdev * (self.__phi(self.beta) - self.__phi(self.alpha))
            return numerator / denominator
    def cdf(self, value):
        """Cumulative probability P(X <= value) under the truncated distribution."""
        if value < self.alpha:
            return 0.0
        elif value > self.beta:
            # BUG FIX: values above the upper bound previously returned 0.0;
            # a CDF must saturate at 1.0 past the support (cf. Uniform.cdf).
            return 1.0
        else:
            # BUG FIX: __phi() already standardizes its argument internally, so
            # the old __phi((value - mean) / stdev) applied the transform twice.
            numerator = self.__phi(value) - self.__phi(self.alpha)
            denominator = self.__phi(self.beta) - self.__phi(self.alpha)
            return numerator / denominator
    def __str__(self):
        return "Continuous Truncated Gaussian (Normal) distribution: mean = %s, standard deviation = %s, lower bound = %s, upper bound = %s" % (self.mean, self.stdev, self.alpha, self.beta)
    @classmethod
    def mleEstimate(cls, points):
        """Fit the parent Gaussian's moments and truncate to the sample extremes."""
        numPoints = float(len(points))
        if numPoints <= 1:
            raise EstimationError("must provide at least 2 training points")
        mean = sum(points) / numPoints
        variance = sum(math.pow(float(p) - mean, 2.0) for p in points) / (numPoints - 1.0)
        stdev = math.sqrt(variance)
        return cls(mean, stdev, min(points), max(points))
    def __phi(self, value):
        # CDF of the untruncated parent Gaussian N(mean, stdev^2) at `value`.
        return 0.5 * (1.0 + math.erf((value - self.mean) / (self.stdev * math.sqrt(2.0))))
## LogNormal ###########################################################################################
class LogNormal(ContinuousDistribution):
    """Continuous log-normal distribution: log(X) ~ N(mean, stdev^2)."""
    def __init__(self, mean, stdev):
        self.mean = mean
        self.stdev = stdev
        if stdev == 0.0:
            raise ParametrizationError("standard deviation must be non-zero")
        if stdev < 0.0:
            raise ParametrizationError("standard deviation must be positive")
        self.variance = math.pow(stdev, 2.0)
    def pdf(self, value):
        # The support is strictly positive values.
        if value <= 0:
            return 0.0
        z = float(math.log(value) - self.mean) / self.stdev
        return math.exp(-math.pow(z, 2.0) / 2.0) / (value * math.sqrt(2 * math.pi * self.variance))
    def cdf(self, value):
        return 0.5 + 0.5 * math.erf((math.log(value) - self.mean) / math.sqrt(2.0 * self.variance))
    def __str__(self):
        return "Continuous Log Normal distribution: mean = %s, standard deviation = %s" % (self.mean, self.stdev)
    @classmethod
    def mleEstimate(cls, points):
        """Fit by log-transforming the sample and estimating the underlying normal."""
        numPoints = float(len(points))
        if numPoints <= 1:
            raise EstimationError("must provide at least 2 training points")
        logs = [math.log(float(p)) for p in points]
        mean = sum(logs) / numPoints
        variance = sum(math.pow(lp - mean, 2.0) for lp in logs) / (numPoints - 1.0)
        return cls(mean, math.sqrt(variance))
## Exponential ########################################################################################
class Exponential(ContinuousDistribution):
    """Continuous exponential distribution with rate parameter `lambdaa`."""
    def __init__(self, lambdaa):
        # Spelled with a double "a" to avoid the `lambda` keyword.
        self.lambdaa = lambdaa
    def mean(self):
        return 1.0 / self.lambdaa
    def variance(self):
        return 1.0 / pow(self.lambdaa, 2.0)
    def pdf(self, value):
        decay = math.exp(-self.lambdaa * value)
        return self.lambdaa * decay
    def cdf(self, value):
        decay = math.exp(-self.lambdaa * value)
        return 1.0 - decay
    def __str__(self):
        return "Continuous Exponential distribution: lamda = %s" % self.lambdaa
    @classmethod
    def mleEstimate(cls, points):
        """Fit the rate as the reciprocal of the sample mean."""
        if len(points) == 0:
            raise EstimationError("Must provide at least one point.")
        if min(points) < 0.0:
            raise EstimationError("Exponential distribution only supports non-negative values.")
        mean = float(sum(points)) / float(len(points))
        if mean == 0.0:
            raise ParametrizationError("Mean of points must be positive.")
        return cls(1.0 / mean)
## KernelDensityEstimate ##############################################################################
class KernelDensityEstimate(ContinuousDistribution):
    """Nonparametric density: an equal-weight mixture of Gaussian kernels, one
    centered at each observed point, with bandwidth 1/sqrt(n).

    See http://www.cs.iastate.edu/~honavar/bayes-continuous.pdf for background
    on using Gaussian kernel density estimation with the Naive Bayes classifier.
    """
    def __init__(self, observedPoints):
        self.observedPoints = observedPoints
        self.numObservedPoints = float(len(observedPoints))
        # Bandwidth shrinks as more data is observed.
        self.stdev = 1.0 / math.sqrt(self.numObservedPoints)
    def pdf(self, value):
        total = 0.0
        for center in self.observedPoints:
            total += self.__normalPdf(center, self.stdev, value)
        return total / self.numObservedPoints
    def __normalPdf(self, mean, stdev, value):
        # Density of N(mean, stdev^2) at `value`.
        z = float(value - mean) / stdev
        return math.exp(-math.pow(z, 2.0) / 2.0) / math.sqrt(2 * math.pi * math.pow(stdev, 2.0))
    def cdf(self, value):
        raise NotImplementedError("Not implemented")
    def __str__(self):
        return "Continuous Gaussian Kernel Density Estimate distribution"
    @classmethod
    def mleEstimate(cls, points):
        return cls(points)
## DiscreteDistribution ###############################################################################
class DiscreteDistribution(Distribution):
    """Base class for discrete distributions; exposes probability()."""
    def probability(self, value):
        """Probability mass P(X = value)."""
        raise NotImplementedError("Subclasses should override.")
## Uniform ############################################################################################
class DiscreteUniform(DiscreteDistribution):
    # Uniform distribution over the discrete range [alpha, beta].
    def __init__(self, alpha, beta):
        # NOTE(review): raises a bare Exception, unlike the other constructors
        # in this file which raise ParametrizationError -- confirm intent.
        if alpha == beta: raise Exception("alpha and beta cannot be equivalent")
        self.alpha = float(alpha)
        self.beta = float(beta)
        # NOTE(review): an integer support {alpha, ..., beta} has
        # (beta - alpha + 1) outcomes, so 1 / (beta - alpha) looks like an
        # off-by-one; confirm before changing, as callers may depend on it.
        self.prob = 1.0 / (self.beta - self.alpha)
    def probability(self, value):
        if value < self.alpha or value > self.beta: return 0.0
        else: return self.prob
    def __str__(self):
        return "Discrete Uniform distribution: alpha = %s, beta = %s" % (self.alpha, self.beta)
    @classmethod
    def mleEstimate(cls, points):
        # Fit the support to the sample extremes.
        return cls(min(points), max(points))
## Poissoin ###########################################################################################
class Poisson(DiscreteDistribution):
    """Discrete Poisson distribution with rate `lambdaa`."""
    def __init__(self, lambdaa):
        # Double "a" avoids clashing with the `lambda` keyword.
        self.lambdaa = lambdaa
    def probability(self, value):
        """P(X = value) = lambda^value * e^(-lambda) / value!."""
        try:
            mass = float(math.pow(self.lambdaa, value)) / float(math.factorial(value))
            return mass * float(math.exp(-float(self.lambdaa)))
        except OverflowError:
            # Intermediate terms overflow only for extremely unlikely events;
            # approximate their probability as zero.
            return 0.0
    def __str__(self):
        return "Discrete Poisson distribution: lamda = %s" % self.lambdaa
    @classmethod
    def mleEstimate(cls, points):
        """The MLE of a Poisson rate is the sample mean."""
        sampleMean = float(sum(points)) / float(len(points))
        return cls(sampleMean)
## Multinomial #######################################################################################
class Multinomial(DiscreteDistribution):
    """Discrete categorical distribution with additive (Laplace) smoothing."""
    def __init__(self, categoryCounts, smoothingFactor = 1.0):
        self.categoryCounts = categoryCounts
        self.numPoints = float(sum(categoryCounts.values()))
        self.numCategories = float(len(categoryCounts))
        self.smoothingFactor = float(smoothingFactor)
    def probability(self, value):
        # NOTE(review): categories never seen in training get probability 0
        # even with smoothing enabled -- smoothing only rescales known ones.
        if value not in self.categoryCounts:
            return 0.0
        smoothed = float(self.categoryCounts[value]) + self.smoothingFactor
        total = self.numPoints + self.numCategories * self.smoothingFactor
        return smoothed / total
    def __str__(self):
        return "Discrete Multinomial distribution: buckets = %s" % self.categoryCounts
    @classmethod
    def mleEstimate(cls, points):
        """Count category occurrences and build a smoothed distribution."""
        categoryCounts = collections.Counter(points)
        return cls(categoryCounts)
## Binary ############################################################################################
class Binary(Multinomial):
    """Two-category Multinomial over the boolean outcomes True/False."""
    def __init__(self, trueCount, falseCount, smoothingFactor = 1.0):
        counts = { True : trueCount, False : falseCount }
        Multinomial.__init__(self, counts, smoothingFactor)
    def __str__(self):
        return "Discrete Binary distribution: true count = %s, false count = %s" % (self.categoryCounts[True], self.categoryCounts[False])
    @classmethod
    def mleEstimate(cls, points, smoothingFactor = 1.0):
        """Fit from an iterable of truthy/falsy observations."""
        trueCount = sum(1 for point in points if point)
        return cls(trueCount, len(points) - trueCount, smoothingFactor)
## Errors ############################################################################################
class EstimationError(Exception):
    """Raised when a distribution cannot be estimated from the data."""
    def __init__(self, value):
        # Keep the offending value around for handlers to inspect.
        self.value = value
    def __str__(self):
        return "%r" % (self.value,)
class ParametrizationError(Exception):
    """Raised when a distribution is constructed with invalid parameters."""
    def __init__(self, value):
        # Keep the offending value around for handlers to inspect.
        self.value = value
    def __str__(self):
        return "%r" % (self.value,)
|
ashkonf/MixedNaiveBayes
|
src/distributions.py
|
Python
|
mit
| 13,124
|
[
"Gaussian"
] |
94ee169dcfd5a5127069e394d07ec66c72a9f35128fbe8f91389ba6f19df24a5
|
from Errors import error, message
import ExprNodes
import Nodes
import Builtin
import PyrexTypes
from Cython import Utils
from PyrexTypes import py_object_type, unspecified_type
from Visitor import CythonTransform, EnvTransform
class TypedExprNode(ExprNodes.ExprNode):
    # Used for declaring assignments of a specified type without a known entry.
    def __init__(self, type):
        # Only the result type matters here; no position or entry is tracked.
        self.type = type
# Shared placeholder expression meaning "some generic Python object".
object_expr = TypedExprNode(py_object_type)
class MarkParallelAssignments(EnvTransform):
    # Collects assignments inside parallel blocks prange, with parallel.
    # Perhaps it's better to move it to ControlFlowAnalysis.
    # tells us whether we're in a normal loop
    in_loop = False
    # set once a nesting error has been reported, to avoid error cascades
    parallel_errors = False
    def __init__(self, context):
        # Track the parallel block scopes (with parallel, for i in prange())
        self.parallel_block_stack = []
        super(MarkParallelAssignments, self).__init__(context)
    def mark_assignment(self, lhs, rhs, inplace_op=None):
        # Record that *lhs* is assigned; inside a parallel block this also
        # registers the assignment (and any in-place reduction operator)
        # on the innermost enclosing parallel node.
        if isinstance(lhs, (ExprNodes.NameNode, Nodes.PyArgDeclNode)):
            if lhs.entry is None:
                # TODO: This shouldn't happen...
                return
            if self.parallel_block_stack:
                parallel_node = self.parallel_block_stack[-1]
                previous_assignment = parallel_node.assignments.get(lhs.entry)
                # If there was a previous assignment to the variable, keep the
                # previous assignment position
                if previous_assignment:
                    pos, previous_inplace_op = previous_assignment
                    if (inplace_op and previous_inplace_op and
                            inplace_op != previous_inplace_op):
                        # x += y; x *= y
                        t = (inplace_op, previous_inplace_op)
                        error(lhs.pos,
                              "Reduction operator '%s' is inconsistent "
                              "with previous reduction operator '%s'" % t)
                else:
                    pos = lhs.pos
                parallel_node.assignments[lhs.entry] = (pos, inplace_op)
                parallel_node.assigned_nodes.append(lhs)
        elif isinstance(lhs, ExprNodes.SequenceNode):
            # Tuple/list unpacking: mark each target recursively as a
            # generic object assignment.
            for arg in lhs.args:
                self.mark_assignment(arg, object_expr)
        else:
            # Could use this info to infer cdef class attributes...
            pass
    def visit_WithTargetAssignmentStatNode(self, node):
        self.mark_assignment(node.lhs, node.rhs)
        self.visitchildren(node)
        return node
    def visit_SingleAssignmentNode(self, node):
        self.mark_assignment(node.lhs, node.rhs)
        self.visitchildren(node)
        return node
    def visit_CascadedAssignmentNode(self, node):
        # a = b = c: every target receives the same rhs.
        for lhs in node.lhs_list:
            self.mark_assignment(lhs, node.rhs)
        self.visitchildren(node)
        return node
    def visit_InPlaceAssignmentNode(self, node):
        # Pass the operator along so reduction consistency can be checked.
        self.mark_assignment(node.lhs, node.create_binop_node(), node.operator)
        self.visitchildren(node)
        return node
    def visit_ForInStatNode(self, node):
        # TODO: Remove redundancy with range optimization...
        is_special = False
        sequence = node.iterator.sequence
        target = node.target
        if isinstance(sequence, ExprNodes.SimpleCallNode):
            function = sequence.function
            if sequence.self is None and function.is_name:
                entry = self.current_env().lookup(function.name)
                if not entry or entry.is_builtin:
                    # Unwrap builtin reversed()/enumerate() around the iterable.
                    if function.name == 'reversed' and len(sequence.args) == 1:
                        sequence = sequence.args[0]
                    elif function.name == 'enumerate' and len(sequence.args) == 1:
                        if target.is_sequence_constructor and len(target.args) == 2:
                            iterator = sequence.args[0]
                            if iterator.is_name:
                                iterator_type = iterator.infer_type(self.current_env())
                                if iterator_type.is_builtin_type:
                                    # assume that builtin types have a length within Py_ssize_t
                                    self.mark_assignment(
                                        target.args[0],
                                        ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
                                                          type=PyrexTypes.c_py_ssize_t_type))
                                    target = target.args[1]
                                    sequence = sequence.args[0]
        if isinstance(sequence, ExprNodes.SimpleCallNode):
            function = sequence.function
            if sequence.self is None and function.is_name:
                entry = self.current_env().lookup(function.name)
                if not entry or entry.is_builtin:
                    if function.name in ('range', 'xrange'):
                        # The loop target spans the start/stop arguments
                        # (and start+step, when a step is given).
                        is_special = True
                        for arg in sequence.args[:2]:
                            self.mark_assignment(target, arg)
                        if len(sequence.args) > 2:
                            self.mark_assignment(
                                target,
                                ExprNodes.binop_node(node.pos,
                                                     '+',
                                                     sequence.args[0],
                                                     sequence.args[2]))
        if not is_special:
            # A for-loop basically translates to subsequent calls to
            # __getitem__(), so using an IndexNode here allows us to
            # naturally infer the base type of pointers, C arrays,
            # Python strings, etc., while correctly falling back to an
            # object type when the base type cannot be handled.
            self.mark_assignment(target, ExprNodes.IndexNode(
                node.pos,
                base=sequence,
                index=ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
                                        type=PyrexTypes.c_py_ssize_t_type)))
        self.visitchildren(node)
        return node
    def visit_ForFromStatNode(self, node):
        # "for i from a <= i < b [by step]": the target spans the bounds.
        self.mark_assignment(node.target, node.bound1)
        if node.step is not None:
            self.mark_assignment(node.target,
                                 ExprNodes.binop_node(node.pos,
                                                      '+',
                                                      node.bound1,
                                                      node.step))
        self.visitchildren(node)
        return node
    def visit_WhileStatNode(self, node):
        self.visitchildren(node)
        return node
    def visit_ExceptClauseNode(self, node):
        # An exception target is always a generic Python object.
        if node.target is not None:
            self.mark_assignment(node.target, object_expr)
        self.visitchildren(node)
        return node
    def visit_FromCImportStatNode(self, node):
        pass # Can't be assigned to...
    def visit_FromImportStatNode(self, node):
        for name, target in node.items:
            if name != "*":
                self.mark_assignment(target, object_expr)
        self.visitchildren(node)
        return node
    def visit_DefNode(self, node):
        # use fake expressions with the right result type
        if node.star_arg:
            self.mark_assignment(
                node.star_arg, TypedExprNode(Builtin.tuple_type))
        if node.starstar_arg:
            self.mark_assignment(
                node.starstar_arg, TypedExprNode(Builtin.dict_type))
        EnvTransform.visit_FuncDefNode(self, node)
        return node
    def visit_DelStatNode(self, node):
        for arg in node.args:
            self.mark_assignment(arg, arg)
        self.visitchildren(node)
        return node
    def visit_ParallelStatNode(self, node):
        # Maintain the stack of enclosing parallel blocks and flag
        # invalid nesting (only prange() may be nested).
        if self.parallel_block_stack:
            node.parent = self.parallel_block_stack[-1]
        else:
            node.parent = None
        nested = False
        if node.is_prange:
            if not node.parent:
                node.is_parallel = True
            else:
                node.is_parallel = (node.parent.is_prange or not
                                    node.parent.is_parallel)
                nested = node.parent.is_prange
        else:
            node.is_parallel = True
            # Note: nested with parallel() blocks are handled by
            # ParallelRangeTransform!
            # nested = node.parent
            nested = node.parent and node.parent.is_prange
        self.parallel_block_stack.append(node)
        nested = nested or len(self.parallel_block_stack) > 2
        if not self.parallel_errors and nested and not node.is_prange:
            error(node.pos, "Only prange() may be nested")
            self.parallel_errors = True
        if node.is_prange:
            # Visit only the body/target/args children; the else clause
            # (if any) is visited after the block is popped off the stack.
            child_attrs = node.child_attrs
            node.child_attrs = ['body', 'target', 'args']
            self.visitchildren(node)
            node.child_attrs = child_attrs
            self.parallel_block_stack.pop()
            if node.else_clause:
                node.else_clause = self.visit(node.else_clause)
        else:
            self.visitchildren(node)
            self.parallel_block_stack.pop()
        self.parallel_errors = False
        return node
    def visit_YieldExprNode(self, node):
        if self.parallel_block_stack:
            error(node.pos, "Yield not allowed in parallel sections")
        return node
    def visit_ReturnStatNode(self, node):
        # Remember whether the return occurs inside a parallel section.
        node.in_parallel = bool(self.parallel_block_stack)
        return node
class MarkOverflowingArithmetic(CythonTransform):
    # It may be possible to integrate this with the above for
    # performance improvements (though likely not worth it).
    # True while visiting a subtree whose result may feed into
    # overflow-prone arithmetic.
    might_overflow = False
    def __call__(self, root):
        self.env_stack = []
        self.env = root.scope
        return super(MarkOverflowingArithmetic, self).__call__(root)
    def visit_safe_node(self, node):
        # This node's result cannot overflow: clear the flag while
        # descending, then restore it.
        self.might_overflow, saved = False, self.might_overflow
        self.visitchildren(node)
        self.might_overflow = saved
        return node
    def visit_neutral_node(self, node):
        # Propagate the current flag into the children unchanged.
        self.visitchildren(node)
        return node
    def visit_dangerous_node(self, node):
        # Anything below this node may participate in overflowing
        # arithmetic: set the flag while descending, then restore it.
        self.might_overflow, saved = True, self.might_overflow
        self.visitchildren(node)
        self.might_overflow = saved
        return node
    def visit_FuncDefNode(self, node):
        # Enter the function's local scope for name lookups.
        self.env_stack.append(self.env)
        self.env = node.local_scope
        self.visit_safe_node(node)
        self.env = self.env_stack.pop()
        return node
    def visit_NameNode(self, node):
        # A name used in a dangerous context gets its entry marked.
        if self.might_overflow:
            entry = node.entry or self.env.lookup(node.name)
            if entry:
                entry.might_overflow = True
        return node
    def visit_BinopNode(self, node):
        # Bitwise &, |, ^ are treated as non-overflowing; other binary
        # operators are treated as potentially overflowing.
        if node.operator in '&|^':
            return self.visit_neutral_node(node)
        else:
            return self.visit_dangerous_node(node)
    visit_UnopNode = visit_neutral_node
    visit_UnaryMinusNode = visit_dangerous_node
    visit_InPlaceAssignmentNode = visit_dangerous_node
    visit_Node = visit_safe_node
    def visit_assignment(self, lhs, rhs):
        # A large integer literal assigned to a name forces that name to
        # be treated as possibly overflowing.
        if (isinstance(rhs, ExprNodes.IntNode)
                and isinstance(lhs, ExprNodes.NameNode)
                and Utils.long_literal(rhs.value)):
            entry = lhs.entry or self.env.lookup(lhs.name)
            if entry:
                entry.might_overflow = True
    def visit_SingleAssignmentNode(self, node):
        self.visit_assignment(node.lhs, node.rhs)
        self.visitchildren(node)
        return node
    def visit_CascadedAssignmentNode(self, node):
        for lhs in node.lhs_list:
            self.visit_assignment(lhs, node.rhs)
        self.visitchildren(node)
        return node
class PyObjectTypeInferer(object):
    """
    Trivial fallback inferer: anything not declared becomes a PyObject.
    """
    def infer_types(self, scope):
        """
        Replace every unspecified entry type in *scope* with py_object_type.
        """
        for entry in scope.entries.values():
            if entry.type is unspecified_type:
                entry.type = py_object_type
class SimpleAssignmentTypeInferer(object):
    """
    Very basic type inference.
    Note: in order to support cross-closure type inference, this must be
    applied to nested scopes in top-down order.
    """
    def set_entry_type(self, entry, entry_type):
        # Propagate the inferred type to all closure copies of the entry.
        entry.type = entry_type
        for e in entry.all_entries():
            e.type = entry_type
    def infer_types(self, scope):
        # Infer a type for every unspecified entry in *scope* by
        # resolving assignments to a fixed point, with a partial-inference
        # fallback for circular dependencies.
        enabled = scope.directives['infer_types']
        verbose = scope.directives['infer_types.verbose']
        if enabled == True:
            spanning_type = aggressive_spanning_type
        elif enabled is None: # safe mode
            spanning_type = safe_spanning_type
        else:
            # Inference disabled: every unspecified entry becomes object.
            for entry in scope.entries.values():
                if entry.type is unspecified_type:
                    self.set_entry_type(entry, py_object_type)
            return
        # Set of assignments
        assignments = set([])
        assmts_resolved = set([])
        dependencies = {}
        assmt_to_names = {}
        # Build the dependency graph: each assignment depends on the
        # assignments that feed the names it reads.
        for name, entry in scope.entries.items():
            for assmt in entry.cf_assignments:
                names = assmt.type_dependencies()
                assmt_to_names[assmt] = names
                assmts = set()
                for node in names:
                    assmts.update(node.cf_state)
                dependencies[assmt] = assmts
            if entry.type is unspecified_type:
                assignments.update(entry.cf_assignments)
            else:
                assmts_resolved.update(entry.cf_assignments)
        def infer_name_node_type(node):
            # Final inference for a name use: span all reaching assignments.
            types = [assmt.inferred_type for assmt in node.cf_state]
            if not types:
                node_type = py_object_type
            else:
                entry = node.entry
                node_type = spanning_type(
                    types, entry.might_overflow, entry.pos)
            node.inferred_type = node_type
        def infer_name_node_type_partial(node):
            # Partial inference: span only the already-inferred reaching
            # assignments; returns None when nothing is known yet.
            types = [assmt.inferred_type for assmt in node.cf_state
                     if assmt.inferred_type is not None]
            if not types:
                return
            entry = node.entry
            return spanning_type(types, entry.might_overflow, entry.pos)
        def resolve_assignments(assignments):
            resolved = set()
            for assmt in assignments:
                deps = dependencies[assmt]
                # All assignments are resolved
                if assmts_resolved.issuperset(deps):
                    for node in assmt_to_names[assmt]:
                        infer_name_node_type(node)
                    # Resolve assmt
                    inferred_type = assmt.infer_type()
                    assmts_resolved.add(assmt)
                    resolved.add(assmt)
            assignments.difference_update(resolved)
            return resolved
        def partial_infer(assmt):
            # Infer an assignment from partially-known dependencies;
            # returns False if any dependency is completely unknown.
            partial_types = []
            for node in assmt_to_names[assmt]:
                partial_type = infer_name_node_type_partial(node)
                if partial_type is None:
                    return False
                partial_types.append((node, partial_type))
            for node, partial_type in partial_types:
                node.inferred_type = partial_type
            assmt.infer_type()
            return True
        partial_assmts = set()
        def resolve_partial(assignments):
            # try to handle circular references
            partials = set()
            for assmt in assignments:
                if assmt in partial_assmts:
                    continue
                if partial_infer(assmt):
                    partials.add(assmt)
                    assmts_resolved.add(assmt)
            partial_assmts.update(partials)
            return partials
        # Infer assignments
        while True:
            if not resolve_assignments(assignments):
                if not resolve_partial(assignments):
                    break
        inferred = set()
        # First pass
        for entry in scope.entries.values():
            if entry.type is not unspecified_type:
                continue
            entry_type = py_object_type
            if assmts_resolved.issuperset(entry.cf_assignments):
                types = [assmt.inferred_type for assmt in entry.cf_assignments]
                if types and Utils.all(types):
                    entry_type = spanning_type(
                        types, entry.might_overflow, entry.pos)
                    inferred.add(entry)
            self.set_entry_type(entry, entry_type)
        def reinfer():
            # Re-run inference over the already-inferred entries; returns
            # True when any entry changed (so another pass is needed).
            dirty = False
            for entry in inferred:
                types = [assmt.infer_type()
                         for assmt in entry.cf_assignments]
                new_type = spanning_type(types, entry.might_overflow, entry.pos)
                if new_type != entry.type:
                    self.set_entry_type(entry, new_type)
                    dirty = True
            return dirty
        # types propagation
        while reinfer():
            pass
        if verbose:
            for entry in inferred:
                message(entry.pos, "inferred '%s' to be of type '%s'" % (
                    entry.name, entry.type))
def find_spanning_type(type1, type2):
    """Return a single type that both *type1* and *type2* can coerce to."""
    if type1 is type2:
        spanned = type1
    else:
        if type1 is PyrexTypes.c_bint_type or type2 is PyrexTypes.c_bint_type:
            # type inference can break the coercion back to a Python bool
            # if it returns an arbitrary int type here
            return py_object_type
        spanned = PyrexTypes.spanning_type(type1, type2)
    float_like = (PyrexTypes.c_double_type, PyrexTypes.c_float_type,
                  Builtin.float_type)
    if spanned in float_like:
        # Python's float type is just a C double, so it's safe to
        # use the C type instead
        return PyrexTypes.c_double_type
    return spanned
def aggressive_spanning_type(types, might_overflow, pos):
    """Pick the tightest type spanning *types*, stripping reference and
    const wrappers; C++ classes must be default-constructible."""
    spanned = reduce(find_spanning_type, types)
    if spanned.is_reference:
        spanned = spanned.ref_base_type
    if spanned.is_const:
        spanned = spanned.const_base_type
    if spanned.is_cpp_class:
        spanned.check_nullary_constructor(pos)
    return spanned
def safe_spanning_type(types, might_overflow, pos):
    # Span *types* but only return a non-object type when coercion back
    # to a Python object is known to be safe; otherwise fall back to
    # py_object_type.
    result_type = reduce(find_spanning_type, types)
    if result_type.is_const:
        result_type = result_type.const_base_type
    if result_type.is_reference:
        result_type = result_type.ref_base_type
    if result_type.is_cpp_class:
        result_type.check_nullary_constructor(pos)
    if result_type.is_pyobject:
        # In theory, any specific Python type is always safe to
        # infer. However, inferring str can cause some existing code
        # to break, since we are also now much more strict about
        # coercion from str to char *. See trac #553.
        if result_type.name == 'str':
            return py_object_type
        else:
            return result_type
    elif result_type is PyrexTypes.c_double_type:
        # Python's float type is just a C double, so it's safe to use
        # the C type instead
        return result_type
    elif result_type is PyrexTypes.c_bint_type:
        # find_spanning_type() only returns 'bint' for clean boolean
        # operations without other int types, so this is safe, too
        return result_type
    elif result_type.is_ptr:
        # Any pointer except (signed|unsigned|) char* can't implicitly
        # become a PyObject, and inferring char* is now accepted, too.
        return result_type
    elif result_type.is_cpp_class:
        # These can't implicitly become Python objects either.
        return result_type
    elif result_type.is_struct:
        # Though we have struct -> object for some structs, this is uncommonly
        # used, won't arise in pure Python, and there shouldn't be side
        # effects, so I'm declaring this safe.
        return result_type
    # TODO: double complex should be OK as well, but we need
    # to make sure everything is supported.
    elif (result_type.is_int or result_type.is_enum) and not might_overflow:
        return result_type
    return py_object_type
def get_type_inferer():
    # Factory for the inferer used by the compiler pipeline; the simple
    # assignment-based inferer is the only non-trivial implementation.
    return SimpleAssignmentTypeInferer()
|
Teamxrtc/webrtc-streaming-node
|
third_party/webrtc/src/chromium/src/third_party/cython/src/Cython/Compiler/TypeInference.py
|
Python
|
mit
| 20,732
|
[
"VisIt"
] |
1dcfedb1ffe1ddd26a2a4f5a0efe7d5273250c02d8fd9276f036dd522428fdb7
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import array as pyarray
import warnings
# Py2/Py3 compatibility: code below uses the Python 2 names
# xrange/basestring, so alias them on Python 3.
if sys.version > '3':
    xrange = range
    basestring = str
from math import exp, log
from numpy import array, random, tile
from collections import namedtuple
from pyspark import SparkContext, since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.mllib.common import JavaModelWrapper, callMLlibFunc, callJavaFunc, _py2java, _java2py
from pyspark.mllib.linalg import SparseVector, _convert_to_vector, DenseVector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.stat.distribution import MultivariateGaussian
from pyspark.mllib.util import Saveable, Loader, inherit_doc, JavaLoader, JavaSaveable
from pyspark.streaming import DStream
# Public API of this module.
__all__ = ['BisectingKMeansModel', 'BisectingKMeans', 'KMeansModel', 'KMeans',
           'GaussianMixtureModel', 'GaussianMixture', 'PowerIterationClusteringModel',
           'PowerIterationClustering', 'StreamingKMeans', 'StreamingKMeansModel',
           'LDA', 'LDAModel']
@inherit_doc
class BisectingKMeansModel(JavaModelWrapper):
    """
    A clustering model derived from the bisecting k-means method.
    >>> data = array([0.0,0.0, 1.0,1.0, 9.0,8.0, 8.0,9.0]).reshape(4, 2)
    >>> bskm = BisectingKMeans()
    >>> model = bskm.train(sc.parallelize(data, 2), k=4)
    >>> p = array([0.0, 0.0])
    >>> model.predict(p)
    0
    >>> model.k
    4
    >>> model.computeCost(p)
    0.0
    .. versionadded:: 2.0.0
    """
    def __init__(self, java_model):
        super(BisectingKMeansModel, self).__init__(java_model)
        # Cache the centers locally as NumPy arrays (a single JVM call).
        self.centers = [c.toArray() for c in self.call("clusterCenters")]
    @property
    @since('2.0.0')
    def clusterCenters(self):
        """Get the cluster centers, represented as a list of NumPy
        arrays."""
        return self.centers
    @property
    @since('2.0.0')
    def k(self):
        """Get the number of clusters"""
        return self.call("k")
    @since('2.0.0')
    def predict(self, x):
        """
        Find the cluster that each of the points belongs to in this
        model.
        :param x:
          A data point (or RDD of points) to determine cluster index.
        :return:
          Predicted cluster index or an RDD of predicted cluster indices
          if the input is an RDD.
        """
        # RDD input is predicted distributedly on the JVM side.
        if isinstance(x, RDD):
            vecs = x.map(_convert_to_vector)
            return self.call("predict", vecs)
        x = _convert_to_vector(x)
        return self.call("predict", x)
    @since('2.0.0')
    def computeCost(self, x):
        """
        Return the Bisecting K-means cost (sum of squared distances of
        points to their nearest center) for this model on the given
        data. If provided with an RDD of points returns the sum.
        :param point:
          A data point (or RDD of points) to compute the cost(s).
        """
        if isinstance(x, RDD):
            vecs = x.map(_convert_to_vector)
            return self.call("computeCost", vecs)
        return self.call("computeCost", _convert_to_vector(x))
class BisectingKMeans(object):
    """
    A bisecting k-means algorithm based on the paper "A comparison of
    document clustering techniques" by Steinbach, Karypis, and Kumar,
    with modification to fit Spark.
    The algorithm starts from a single cluster that contains all points.
    Iteratively it finds divisible clusters on the bottom level and
    bisects each of them using k-means, until there are `k` leaf
    clusters in total or no leaf clusters are divisible.
    The bisecting steps of clusters on the same level are grouped
    together to increase parallelism. If bisecting all divisible
    clusters on the bottom level would result more than `k` leaf
    clusters, larger clusters get higher priority.
    Based on
    U{http://glaros.dtc.umn.edu/gkhome/fetch/papers/docclusterKDDTMW00.pdf}
    Steinbach, Karypis, and Kumar, A comparison of document clustering
    techniques, KDD Workshop on Text Mining, 2000.
    .. versionadded:: 2.0.0
    """
    @classmethod
    @since('2.0.0')
    def train(cls, rdd, k=4, maxIterations=20, minDivisibleClusterSize=1.0, seed=-1888008604):
        """
        Runs the bisecting k-means algorithm return the model.
        :param rdd:
          Training points as an `RDD` of `Vector` or convertible
          sequence types.
        :param k:
          The desired number of leaf clusters. The actual number could
          be smaller if there are no divisible leaf clusters.
          (default: 4)
        :param maxIterations:
          Maximum number of iterations allowed to split clusters.
          (default: 20)
        :param minDivisibleClusterSize:
          Minimum number of points (if >= 1.0) or the minimum proportion
          of points (if < 1.0) of a divisible cluster.
          (default: 1)
        :param seed:
          Random seed value for cluster initialization.
          (default: -1888008604 from classOf[BisectingKMeans].getName.##)
        """
        # FIX: this is a classmethod, so the first parameter is the class;
        # it was previously (misleadingly) named "self". Renamed to "cls"
        # with no change in behavior or call signature.
        java_model = callMLlibFunc(
            "trainBisectingKMeans", rdd.map(_convert_to_vector),
            k, maxIterations, minDivisibleClusterSize, seed)
        return BisectingKMeansModel(java_model)
@inherit_doc
class KMeansModel(Saveable, Loader):
    """A clustering model derived from the k-means method.
    >>> data = array([0.0,0.0, 1.0,1.0, 9.0,8.0, 8.0,9.0]).reshape(4, 2)
    >>> model = KMeans.train(
    ...     sc.parallelize(data), 2, maxIterations=10, initializationMode="random",
    ...                    seed=50, initializationSteps=5, epsilon=1e-4)
    >>> model.predict(array([0.0, 0.0])) == model.predict(array([1.0, 1.0]))
    True
    >>> model.predict(array([8.0, 9.0])) == model.predict(array([9.0, 8.0]))
    True
    >>> model.k
    2
    >>> model.computeCost(sc.parallelize(data))
    2.0000000000000004
    >>> model = KMeans.train(sc.parallelize(data), 2)
    >>> sparse_data = [
    ...     SparseVector(3, {1: 1.0}),
    ...     SparseVector(3, {1: 1.1}),
    ...     SparseVector(3, {2: 1.0}),
    ...     SparseVector(3, {2: 1.1})
    ... ]
    >>> model = KMeans.train(sc.parallelize(sparse_data), 2, initializationMode="k-means||",
    ...                                     seed=50, initializationSteps=5, epsilon=1e-4)
    >>> model.predict(array([0., 1., 0.])) == model.predict(array([0, 1.1, 0.]))
    True
    >>> model.predict(array([0., 0., 1.])) == model.predict(array([0, 0, 1.1]))
    True
    >>> model.predict(sparse_data[0]) == model.predict(sparse_data[1])
    True
    >>> model.predict(sparse_data[2]) == model.predict(sparse_data[3])
    True
    >>> isinstance(model.clusterCenters, list)
    True
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = KMeansModel.load(sc, path)
    >>> sameModel.predict(sparse_data[0]) == model.predict(sparse_data[0])
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass
    >>> data = array([-383.1,-382.9, 28.7,31.2, 366.2,367.3]).reshape(3, 2)
    >>> model = KMeans.train(sc.parallelize(data), 3, maxIterations=0,
    ...     initialModel = KMeansModel([(-1000.0,-1000.0),(5.0,5.0),(1000.0,1000.0)]))
    >>> model.clusterCenters
    [array([-1000., -1000.]), array([ 5.,  5.]), array([ 1000.,  1000.])]
    .. versionadded:: 0.9.0
    """
    def __init__(self, centers):
        self.centers = centers
    @property
    @since('1.0.0')
    def clusterCenters(self):
        """Get the cluster centers, represented as a list of NumPy arrays."""
        return self.centers
    @property
    @since('1.4.0')
    def k(self):
        """Total number of clusters."""
        return len(self.centers)
    @since('0.9.0')
    def predict(self, x):
        """
        Find the cluster that each of the points belongs to in this
        model.
        :param x:
          A data point (or RDD of points) to determine cluster index.
        :return:
          Predicted cluster index or an RDD of predicted cluster indices
          if the input is an RDD.
        """
        # RDD input: predict each element recursively on the workers.
        if isinstance(x, RDD):
            return x.map(self.predict)
        x = _convert_to_vector(x)
        # FIX: these initializers used to run before the RDD early-return,
        # where they were dead code; they only matter for the local scan.
        best = 0
        best_distance = float("inf")
        # Linear scan over centers for the nearest one.
        for i in xrange(len(self.centers)):
            distance = x.squared_distance(self.centers[i])
            if distance < best_distance:
                best = i
                best_distance = distance
        return best
    @since('1.4.0')
    def computeCost(self, rdd):
        """
        Return the K-means cost (sum of squared distances of points to
        their nearest center) for this model on the given
        data.
        :param rdd:
          The RDD of points to compute the cost on.
        """
        cost = callMLlibFunc("computeCostKmeansModel", rdd.map(_convert_to_vector),
                             [_convert_to_vector(c) for c in self.centers])
        return cost
    @since('1.4.0')
    def save(self, sc, path):
        """
        Save this model to the given path.
        """
        java_centers = _py2java(sc, [_convert_to_vector(c) for c in self.centers])
        java_model = sc._jvm.org.apache.spark.mllib.clustering.KMeansModel(java_centers)
        java_model.save(sc._jsc.sc(), path)
    @classmethod
    @since('1.4.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.clustering.KMeansModel.load(sc._jsc.sc(), path)
        return KMeansModel(_java2py(sc, java_model.clusterCenters()))
class KMeans(object):
    """
    .. versionadded:: 0.9.0
    """
    @classmethod
    @since('0.9.0')
    def train(cls, rdd, k, maxIterations=100, runs=1, initializationMode="k-means||",
              seed=None, initializationSteps=2, epsilon=1e-4, initialModel=None):
        """
        Train a k-means clustering model.
        :param rdd:
          Training points as an `RDD` of `Vector` or convertible
          sequence types.
        :param k:
          Number of clusters to create.
        :param maxIterations:
          Maximum number of iterations allowed.
          (default: 100)
        :param runs:
          This param has no effect since Spark 2.0.0.
        :param initializationMode:
          The initialization algorithm. This can be either "random" or
          "k-means||".
          (default: "k-means||")
        :param seed:
          Random seed value for cluster initialization. Set as None to
          generate seed based on system time.
          (default: None)
        :param initializationSteps:
          Number of steps for the k-means|| initialization mode.
          This is an advanced setting -- the default of 2 is almost
          always enough.
          (default: 2)
        :param epsilon:
          Distance threshold within which a center will be considered to
          have converged. If all centers move less than this Euclidean
          distance, iterations are stopped.
          (default: 1e-4)
        :param initialModel:
          Initial cluster centers can be provided as a KMeansModel object
          rather than using the random or k-means|| initializationModel.
          (default: None)
        """
        # "runs" is kept only for backward signature compatibility.
        if runs != 1:
            warnings.warn("The param `runs` has no effect since Spark 2.0.0.")
        clusterInitialModel = []
        if initialModel is not None:
            if not isinstance(initialModel, KMeansModel):
                raise Exception("initialModel is of "+str(type(initialModel))+". It needs "
                                "to be of <type 'KMeansModel'>")
            clusterInitialModel = [_convert_to_vector(c) for c in initialModel.clusterCenters]
        # Delegate the actual training to the JVM implementation.
        model = callMLlibFunc("trainKMeansModel", rdd.map(_convert_to_vector), k, maxIterations,
                              runs, initializationMode, seed, initializationSteps, epsilon,
                              clusterInitialModel)
        # Pull the centers back to the driver as NumPy arrays.
        centers = callJavaFunc(rdd.context, model.clusterCenters)
        return KMeansModel([c.toArray() for c in centers])
@inherit_doc
class GaussianMixtureModel(JavaModelWrapper, JavaSaveable, JavaLoader):
    """
    A clustering model derived from the Gaussian Mixture Model method.
    >>> from pyspark.mllib.linalg import Vectors, DenseMatrix
    >>> from numpy.testing import assert_equal
    >>> from shutil import rmtree
    >>> import os, tempfile
    >>> clusterdata_1 = sc.parallelize(array([-0.1,-0.05,-0.01,-0.1,
    ...                                         0.9,0.8,0.75,0.935,
    ...                                        -0.83,-0.68,-0.91,-0.76 ]).reshape(6, 2), 2)
    >>> model = GaussianMixture.train(clusterdata_1, 3, convergenceTol=0.0001,
    ...                                 maxIterations=50, seed=10)
    >>> labels = model.predict(clusterdata_1).collect()
    >>> labels[0]==labels[1]
    False
    >>> labels[1]==labels[2]
    False
    >>> labels[4]==labels[5]
    True
    >>> model.predict([-0.1,-0.05])
    0
    >>> softPredicted = model.predictSoft([-0.1,-0.05])
    >>> abs(softPredicted[0] - 1.0) < 0.001
    True
    >>> abs(softPredicted[1] - 0.0) < 0.001
    True
    >>> abs(softPredicted[2] - 0.0) < 0.001
    True
    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = GaussianMixtureModel.load(sc, path)
    >>> assert_equal(model.weights, sameModel.weights)
    >>> mus, sigmas = list(
    ...     zip(*[(g.mu, g.sigma) for g in model.gaussians]))
    >>> sameMus, sameSigmas = list(
    ...     zip(*[(g.mu, g.sigma) for g in sameModel.gaussians]))
    >>> mus == sameMus
    True
    >>> sigmas == sameSigmas
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass
    >>> data = array([-5.1971, -2.5359, -3.8220,
    ...               -5.2211, -5.0602,  4.7118,
    ...                6.8989, 3.4592,  4.6322,
    ...                5.7048,  4.6567, 5.5026,
    ...                4.5605,  5.2043,  6.2734])
    >>> clusterdata_2 = sc.parallelize(data.reshape(5,3))
    >>> model = GaussianMixture.train(clusterdata_2, 2, convergenceTol=0.0001,
    ...                               maxIterations=150, seed=4)
    >>> labels = model.predict(clusterdata_2).collect()
    >>> labels[0]==labels[1]
    True
    >>> labels[2]==labels[3]==labels[4]
    True
    .. versionadded:: 1.3.0
    """
    @property
    @since('1.4.0')
    def weights(self):
        """
        Weights for each Gaussian distribution in the mixture, where weights[i] is
        the weight for Gaussian i, and weights.sum == 1.
        """
        return array(self.call("weights"))
    @property
    @since('1.4.0')
    def gaussians(self):
        """
        Array of MultivariateGaussian where gaussians[i] represents
        the Multivariate Gaussian (Normal) Distribution for Gaussian i.
        """
        # Each JVM-side gaussian comes back as a (mu, sigma) pair.
        return [
            MultivariateGaussian(gaussian[0], gaussian[1])
            for gaussian in self.call("gaussians")]
    @property
    @since('1.4.0')
    def k(self):
        """Number of gaussians in mixture."""
        return len(self.weights)
    @since('1.3.0')
    def predict(self, x):
        """
        Find the cluster to which the point 'x' or each point in RDD 'x'
        has maximum membership in this model.
        :param x:
          A feature vector or an RDD of vectors representing data points.
        :return:
          Predicted cluster label or an RDD of predicted cluster labels
          if the input is an RDD.
        """
        # Hard prediction = index of the largest soft-membership value.
        if isinstance(x, RDD):
            cluster_labels = self.predictSoft(x).map(lambda z: z.index(max(z)))
            return cluster_labels
        else:
            z = self.predictSoft(x)
            return z.argmax()
    @since('1.3.0')
    def predictSoft(self, x):
        """
        Find the membership of point 'x' or each point in RDD 'x' to all mixture components.
        :param x:
          A feature vector or an RDD of vectors representing data points.
        :return:
          The membership value to all mixture components for vector 'x'
          or each vector in RDD 'x'.
        """
        if isinstance(x, RDD):
            # Ship the mixture parameters to the workers once, then score
            # the whole RDD on the JVM side.
            means, sigmas = zip(*[(g.mu, g.sigma) for g in self.gaussians])
            membership_matrix = callMLlibFunc("predictSoftGMM", x.map(_convert_to_vector),
                                              _convert_to_vector(self.weights), means, sigmas)
            return membership_matrix.map(lambda x: pyarray.array('d', x))
        else:
            return self.call("predictSoft", _convert_to_vector(x)).toArray()
    @classmethod
    @since('1.5.0')
    def load(cls, sc, path):
        """Load the GaussianMixtureModel from disk.
        :param sc:
          SparkContext.
        :param path:
          Path to where the model is stored.
        """
        model = cls._load_java(sc, path)
        # Wrap the raw JVM model so self.call(...) dispatches correctly.
        wrapper = sc._jvm.org.apache.spark.mllib.api.python.GaussianMixtureModelWrapper(model)
        return cls(wrapper)
class GaussianMixture(object):
    """
    Learning algorithm for Gaussian Mixtures using the expectation-maximization algorithm.
    .. versionadded:: 1.3.0
    """
    @classmethod
    @since('1.3.0')
    def train(cls, rdd, k, convergenceTol=1e-3, maxIterations=100, seed=None, initialModel=None):
        """
        Train a Gaussian Mixture clustering model.
        :param rdd:
          Training points as an `RDD` of `Vector` or convertible
          sequence types.
        :param k:
          Number of independent Gaussians in the mixture model.
        :param convergenceTol:
          Maximum change in log-likelihood at which convergence is
          considered to have occurred.
          (default: 1e-3)
        :param maxIterations:
          Maximum number of iterations allowed.
          (default: 100)
        :param seed:
          Random seed for initial Gaussian distribution. Set as None to
          generate seed based on system time.
          (default: None)
        :param initialModel:
          Initial GMM starting point, bypassing the random
          initialization.
          (default: None)
        """
        # Unpack the (optional) warm-start model into plain parameter
        # lists that can be passed through the Py4J bridge.
        initialModelWeights = None
        initialModelMu = None
        initialModelSigma = None
        if initialModel is not None:
            if initialModel.k != k:
                raise Exception("Mismatched cluster count, initialModel.k = %s, however k = %s"
                                % (initialModel.k, k))
            initialModelWeights = list(initialModel.weights)
            initialModelMu = [initialModel.gaussians[i].mu for i in range(initialModel.k)]
            initialModelSigma = [initialModel.gaussians[i].sigma for i in range(initialModel.k)]
        java_model = callMLlibFunc("trainGaussianMixtureModel", rdd.map(_convert_to_vector),
                                   k, convergenceTol, maxIterations, seed,
                                   initialModelWeights, initialModelMu, initialModelSigma)
        return GaussianMixtureModel(java_model)
class PowerIterationClusteringModel(JavaModelWrapper, JavaSaveable, JavaLoader):
    """
    Model produced by [[PowerIterationClustering]].
    >>> import math
    >>> def genCircle(r, n):
    ...     points = []
    ...     for i in range(0, n):
    ...         theta = 2.0 * math.pi * i / n
    ...         points.append((r * math.cos(theta), r * math.sin(theta)))
    ...     return points
    >>> def sim(x, y):
    ...     dist2 = (x[0] - y[0]) * (x[0] - y[0]) + (x[1] - y[1]) * (x[1] - y[1])
    ...     return math.exp(-dist2 / 2.0)
    >>> r1 = 1.0
    >>> n1 = 10
    >>> r2 = 4.0
    >>> n2 = 40
    >>> n = n1 + n2
    >>> points = genCircle(r1, n1) + genCircle(r2, n2)
    >>> similarities = [(i, j, sim(points[i], points[j])) for i in range(1, n) for j in range(0, i)]
    >>> rdd = sc.parallelize(similarities, 2)
    >>> model = PowerIterationClustering.train(rdd, 2, 40)
    >>> model.k
    2
    >>> result = sorted(model.assignments().collect(), key=lambda x: x.id)
    >>> result[0].cluster == result[1].cluster == result[2].cluster == result[3].cluster
    True
    >>> result[4].cluster == result[5].cluster == result[6].cluster == result[7].cluster
    True
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = PowerIterationClusteringModel.load(sc, path)
    >>> sameModel.k
    2
    >>> result = sorted(model.assignments().collect(), key=lambda x: x.id)
    >>> result[0].cluster == result[1].cluster == result[2].cluster == result[3].cluster
    True
    >>> result[4].cluster == result[5].cluster == result[6].cluster == result[7].cluster
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass
    .. versionadded:: 1.5.0
    """

    @property
    @since('1.5.0')
    def k(self):
        """
        Returns the number of clusters.
        """
        # Delegates to the wrapped JVM model.
        return self.call("k")

    @since('1.5.0')
    def assignments(self):
        """
        Returns the cluster assignments of this model.
        """
        # Each JVM-side (id, cluster) pair is re-wrapped in the Python
        # Assignment namedtuple for attribute-style access.
        return self.call("getAssignments").map(
            lambda x: (PowerIterationClustering.Assignment(*x)))

    @classmethod
    @since('1.5.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        # Load the Scala model, then wrap it so its RDD-valued members are
        # accessible from Python.
        model = cls._load_java(sc, path)
        wrapper =\
            sc._jvm.org.apache.spark.mllib.api.python.PowerIterationClusteringModelWrapper(model)
        return PowerIterationClusteringModel(wrapper)
class PowerIterationClustering(object):
    """
    Power Iteration Clustering (PIC), a scalable graph clustering algorithm
    developed by [[http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf Lin and Cohen]].
    From the abstract: PIC finds a very low-dimensional embedding of a
    dataset using truncated power iteration on a normalized pair-wise
    similarity matrix of the data.

    .. versionadded:: 1.5.0
    """

    @classmethod
    @since('1.5.0')
    def train(cls, rdd, k, maxIterations=100, initMode="random"):
        """
        :param rdd:
          An RDD of (i, j, s\ :sub:`ij`\) tuples representing the
          affinity matrix, which is the matrix A in the PIC paper. The
          similarity s\ :sub:`ij`\ must be nonnegative. This is a symmetric
          matrix and hence s\ :sub:`ij`\ = s\ :sub:`ji`\ For any (i, j) with
          nonzero similarity, there should be either (i, j, s\ :sub:`ij`\) or
          (j, i, s\ :sub:`ji`\) in the input. Tuples with i = j are ignored,
          because it is assumed s\ :sub:`ij`\ = 0.0.
        :param k:
          Number of clusters.
        :param maxIterations:
          Maximum number of iterations of the PIC algorithm.
          (default: 100)
        :param initMode:
          Initialization mode. This can be either "random" to use
          a random vector as vertex properties, or "degree" to use
          normalized sum similarities.
          (default: "random")
        """
        # k and maxIterations are coerced to int so callers may pass
        # numeric strings or floats.
        wrapper = callMLlibFunc("trainPowerIterationClusteringModel",
                                rdd.map(_convert_to_vector), int(k),
                                int(maxIterations), initMode)
        return PowerIterationClusteringModel(wrapper)

    class Assignment(namedtuple("Assignment", ["id", "cluster"])):
        """
        Represents an (id, cluster) tuple.

        .. versionadded:: 1.5.0
        """
class StreamingKMeansModel(KMeansModel):
    """
    Clustering model which can perform an online update of the centroids.
    The update formula for each centroid is given by
    * c_t+1 = ((c_t * n_t * a) + (x_t * m_t)) / (n_t + m_t)
    * n_t+1 = n_t * a + m_t
    where
    * c_t: Centroid at the n_th iteration.
    * n_t: Number of samples (or) weights associated with the centroid
      at the n_th iteration.
    * x_t: Centroid of the new data closest to c_t.
    * m_t: Number of samples (or) weights of the new data closest to c_t
    * c_t+1: New centroid.
    * n_t+1: New number of weights.
    * a: Decay Factor, which gives the forgetfulness.
    .. note:: If a is set to 1, it is the weighted mean of the previous
        and new data. If it set to zero, the old centroids are completely
        forgotten.
    :param clusterCenters:
      Initial cluster centers.
    :param clusterWeights:
      List of weights assigned to each cluster.
    >>> initCenters = [[0.0, 0.0], [1.0, 1.0]]
    >>> initWeights = [1.0, 1.0]
    >>> stkm = StreamingKMeansModel(initCenters, initWeights)
    >>> data = sc.parallelize([[-0.1, -0.1], [0.1, 0.1],
    ...                        [0.9, 0.9], [1.1, 1.1]])
    >>> stkm = stkm.update(data, 1.0, u"batches")
    >>> stkm.centers
    array([[ 0.,  0.],
           [ 1.,  1.]])
    >>> stkm.predict([-0.1, -0.1])
    0
    >>> stkm.predict([0.9, 0.9])
    1
    >>> stkm.clusterWeights
    [3.0, 3.0]
    >>> decayFactor = 0.0
    >>> data = sc.parallelize([DenseVector([1.5, 1.5]), DenseVector([0.2, 0.2])])
    >>> stkm = stkm.update(data, 0.0, u"batches")
    >>> stkm.centers
    array([[ 0.2,  0.2],
           [ 1.5,  1.5]])
    >>> stkm.clusterWeights
    [1.0, 1.0]
    >>> stkm.predict([0.2, 0.2])
    0
    >>> stkm.predict([1.5, 1.5])
    1
    .. versionadded:: 1.5.0
    """

    def __init__(self, clusterCenters, clusterWeights):
        # KMeansModel owns the centers; the per-cluster weights are kept
        # here so the online update can blend old and new data.
        super(StreamingKMeansModel, self).__init__(centers=clusterCenters)
        self._clusterWeights = list(clusterWeights)

    @property
    @since('1.5.0')
    def clusterWeights(self):
        """Return the cluster weights."""
        return self._clusterWeights

    @ignore_unicode_prefix
    @since('1.5.0')
    def update(self, data, decayFactor, timeUnit):
        """Update the centroids, according to data

        :param data:
          RDD with new data for the model update.
        :param decayFactor:
          Forgetfulness of the previous centroids.
        :param timeUnit:
          Can be "batches" or "points". If points, then the decay factor
          is raised to the power of number of new points and if batches,
          then decay factor will be used as is.
        """
        # Validate arguments before shipping anything to the JVM.
        if not isinstance(data, RDD):
            raise TypeError("Data should be of an RDD, got %s." % type(data))
        data = data.map(_convert_to_vector)
        decayFactor = float(decayFactor)
        if timeUnit not in ["batches", "points"]:
            raise ValueError(
                "timeUnit should be 'batches' or 'points', got %s." % timeUnit)
        vectorCenters = [_convert_to_vector(center) for center in self.centers]
        # The JVM call returns (new centers, new weights); both replace
        # the local state in place and the model is returned for chaining.
        updatedModel = callMLlibFunc(
            "updateStreamingKMeansModel", vectorCenters, self._clusterWeights,
            data, decayFactor, timeUnit)
        self.centers = array(updatedModel[0])
        self._clusterWeights = list(updatedModel[1])
        return self
class StreamingKMeans(object):
    """
    Configurable front-end for streaming k-means: holds k, the decay
    factor and the time unit, and wires a StreamingKMeansModel to
    incoming dstreams for training and prediction. See the docs of
    StreamingKMeansModel for how centroids are updated.

    :param k:
      Number of clusters.
      (default: 2)
    :param decayFactor:
      Forgetfulness of the previous centroids.
      (default: 1.0)
    :param timeUnit:
      Can be "batches" or "points". If points, then the decay factor is
      raised to the power of number of new points and if batches, then
      decay factor will be used as is.
      (default: "batches")

    .. versionadded:: 1.5.0
    """

    def __init__(self, k=2, decayFactor=1.0, timeUnit="batches"):
        self._k = k
        self._decayFactor = decayFactor
        if timeUnit not in ["batches", "points"]:
            raise ValueError(
                "timeUnit should be 'batches' or 'points', got %s." % timeUnit)
        self._timeUnit = timeUnit
        self._model = None

    @since('1.5.0')
    def latestModel(self):
        """Return the latest model"""
        return self._model

    def _validate(self, dstream):
        # A model must exist before any stream can be consumed, and the
        # input must really be a DStream.
        if self._model is None:
            raise ValueError(
                "Initial centers should be set either by setInitialCenters "
                "or setRandomCenters.")
        if not isinstance(dstream, DStream):
            raise TypeError(
                "Expected dstream to be of type DStream, "
                "got type %s" % type(dstream))

    @since('1.5.0')
    def setK(self, k):
        """Set number of clusters."""
        self._k = k
        return self

    @since('1.5.0')
    def setDecayFactor(self, decayFactor):
        """Set decay factor."""
        self._decayFactor = decayFactor
        return self

    @since('1.5.0')
    def setHalfLife(self, halfLife, timeUnit):
        """
        Set number of batches after which the centroids of that
        particular batch has half the weightage.
        """
        # A half-life h corresponds to decay a with a**h == 0.5.
        self._timeUnit = timeUnit
        self._decayFactor = exp(log(0.5) / halfLife)
        return self

    @since('1.5.0')
    def setInitialCenters(self, centers, weights):
        """
        Set initial centers. Should be set before calling trainOn.
        """
        self._model = StreamingKMeansModel(centers, weights)
        return self

    @since('1.5.0')
    def setRandomCenters(self, dim, weight, seed):
        """
        Set the initial centres to be random samples from
        a gaussian population with constant weights.
        """
        rng = random.RandomState(seed)
        random_centers = rng.randn(self._k, dim)
        uniform_weights = tile(weight, self._k)
        self._model = StreamingKMeansModel(random_centers, uniform_weights)
        return self

    @since('1.5.0')
    def trainOn(self, dstream):
        """Train the model on the incoming dstream."""
        self._validate(dstream)

        def _absorb_batch(rdd):
            self._model.update(rdd, self._decayFactor, self._timeUnit)

        dstream.foreachRDD(_absorb_batch)

    @since('1.5.0')
    def predictOn(self, dstream):
        """
        Make predictions on a dstream.
        Returns a transformed dstream object
        """
        self._validate(dstream)
        # self._model is looked up lazily inside the lambda so predictions
        # always use the most recently trained model.
        return dstream.map(lambda point: self._model.predict(point))

    @since('1.5.0')
    def predictOnValues(self, dstream):
        """
        Make predictions on a keyed dstream.
        Returns a transformed dstream object.
        """
        self._validate(dstream)
        return dstream.mapValues(lambda point: self._model.predict(point))
class LDAModel(JavaModelWrapper, JavaSaveable, Loader):
    """ A clustering model derived from the LDA method.
    Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
    Terminology
    - "word" = "term": an element of the vocabulary
    - "token": instance of a term appearing in a document
    - "topic": multinomial distribution over words representing some concept
    References:
    - Original LDA paper (journal version):
      Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
    >>> from pyspark.mllib.linalg import Vectors
    >>> from numpy.testing import assert_almost_equal, assert_equal
    >>> data = [
    ...     [1, Vectors.dense([0.0, 1.0])],
    ...     [2, SparseVector(2, {0: 1.0})],
    ... ]
    >>> rdd = sc.parallelize(data)
    >>> model = LDA.train(rdd, k=2, seed=1)
    >>> model.vocabSize()
    2
    >>> model.describeTopics()
    [([1, 0], [0.5..., 0.49...]), ([0, 1], [0.5..., 0.49...])]
    >>> model.describeTopics(1)
    [([1], [0.5...]), ([0], [0.5...])]
    >>> topics = model.topicsMatrix()
    >>> topics_expect = array([[0.5, 0.5], [0.5, 0.5]])
    >>> assert_almost_equal(topics, topics_expect, 1)
    >>> import os, tempfile
    >>> from shutil import rmtree
    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = LDAModel.load(sc, path)
    >>> assert_equal(sameModel.topicsMatrix(), model.topicsMatrix())
    >>> sameModel.vocabSize() == model.vocabSize()
    True
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass
    .. versionadded:: 1.5.0
    """

    @since('1.5.0')
    def topicsMatrix(self):
        """Inferred topics, where each topic is represented by a distribution over terms."""
        # Converted from the JVM Matrix to a numpy array.
        return self.call("topicsMatrix").toArray()

    @since('1.5.0')
    def vocabSize(self):
        """Vocabulary size (number of terms or terms in the vocabulary)"""
        return self.call("vocabSize")

    @since('1.6.0')
    def describeTopics(self, maxTermsPerTopic=None):
        """Return the topics described by weighted terms.

        WARNING: If vocabSize and k are large, this can return a large object!

        :param maxTermsPerTopic:
          Maximum number of terms to collect for each topic.
          (default: vocabulary size)
        :return:
          Array over topics. Each topic is represented as a pair of
          matching arrays: (term indices, term weights in topic).
          Each topic's terms are sorted in order of decreasing weight.
        """
        # The JVM overload without the argument defaults to the full vocabulary.
        if maxTermsPerTopic is None:
            topics = self.call("describeTopics")
        else:
            topics = self.call("describeTopics", maxTermsPerTopic)
        return topics

    @classmethod
    @since('1.5.0')
    def load(cls, sc, path):
        """Load the LDAModel from disk.

        :param sc:
          SparkContext.
        :param path:
          Path to where the model is stored.
        """
        # Fail fast with clear messages before handing off to the JVM loader.
        if not isinstance(sc, SparkContext):
            raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
        if not isinstance(path, basestring):
            raise TypeError("path should be a basestring, got type %s" % type(path))
        model = callMLlibFunc("loadLDAModel", sc, path)
        return LDAModel(model)
class LDA(object):
    """
    Train Latent Dirichlet Allocation (LDA) topic models.

    .. versionadded:: 1.5.0
    """

    @classmethod
    @since('1.5.0')
    def train(cls, rdd, k=10, maxIterations=20, docConcentration=-1.0,
              topicConcentration=-1.0, seed=None, checkpointInterval=10, optimizer="em"):
        """Train a LDA model.

        :param rdd:
          RDD of documents, which are tuples of document IDs and term
          (word) count vectors. The term count vectors are "bags of
          words" with a fixed-size vocabulary (where the vocabulary size
          is the length of the vector). Document IDs must be unique
          and >= 0.
        :param k:
          Number of topics to infer, i.e., the number of soft cluster
          centers.
          (default: 10)
        :param maxIterations:
          Maximum number of iterations allowed.
          (default: 20)
        :param docConcentration:
          Concentration parameter (commonly named "alpha") for the prior
          placed on documents' distributions over topics ("theta").
          (default: -1.0)
        :param topicConcentration:
          Concentration parameter (commonly named "beta" or "eta") for
          the prior placed on topics' distributions over terms.
          (default: -1.0)
        :param seed:
          Random seed for cluster initialization. Set as None to generate
          seed based on system time.
          (default: None)
        :param checkpointInterval:
          Period (in iterations) between checkpoints.
          (default: 10)
        :param optimizer:
          LDAOptimizer used to perform the actual calculation. Currently
          "em", "online" are supported.
          (default: "em")
        """
        # All heavy lifting happens on the JVM side; this call blocks
        # until the model has been fit.
        java_model = callMLlibFunc("trainLDAModel", rdd, k, maxIterations,
                                   docConcentration, topicConcentration, seed,
                                   checkpointInterval, optimizer)
        return LDAModel(java_model)
def _test():
    """Run this module's doctests against a local SparkContext and exit
    with a non-zero status if any example fails."""
    import doctest
    import sys
    import pyspark.mllib.clustering
    globs = pyspark.mllib.clustering.__dict__.copy()
    # The doctests expect a live SparkContext named `sc` in their namespace.
    globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        # `exit` is only injected by the `site` module and may be absent
        # (e.g. under `python -S`); sys.exit is always available.
        sys.exit(-1)
# Run the module's doctest suite when executed directly.
if __name__ == "__main__":
    _test()
|
ron8hu/spark
|
python/pyspark/mllib/clustering.py
|
Python
|
apache-2.0
| 36,951
|
[
"Gaussian"
] |
1de707560e50c0ae16b37aa71c2c4fd78dee33d3c1ad732ee30f8619a0c24ad1
|
#!/usr/bin/python
# script preparing tab file to plot in R for small RNA profiling
# version 1 29-1-2012
# Usage plotter.py <bowtie input> <min size> <max size> <normalization factor> <tabular output>
import sys
def acquisition(file2parse, sizerange):
    """Parse a bowtie tabular alignment file and count read ends per coordinate.

    Expected columns per line (whitespace separated): read name, strand
    ('+'/'-'), reference, 0-based offset, read sequence, ... — only columns
    1, 3 and 4 are used.

    :param file2parse: path to the bowtie output file.
    :param sizerange: iterable of read lengths to keep (e.g. range(min, max + 1)).
    :return: (plus_table, minus_table) dicts mapping coordinate -> read count.
    """
    plus_table = {}
    minus_table = {}
    # `with` guarantees the input handle is closed (the original leaked it).
    with open(file2parse) as F:
        for line in F:
            field = line.split()
            coordinate = int(field[3])
            strand = field[1]
            sequence = field[4]
            size = len(sequence)
            if strand == "+" and size in sizerange:
                plus_table[coordinate] = plus_table.get(coordinate, 0) + 1
            if strand == "-" and size in sizerange:
                # Bowtie reports the leftmost coordinate; for minus-strand
                # reads shift to the rightmost base of the alignment.
                # (historical note kept from the original: this shift was
                # once missing and altered earlier published maps)
                coordinate = coordinate + size - 1
                minus_table[coordinate] = minus_table.get(coordinate, 0) + 1
    return plus_table, minus_table
def output_table(plus_table, minus_table, Nfactor, output):
    """Write a 3-column tabular file (coord, plus, minus) normalised by Nfactor.

    Minus-strand counts are written as negative values so they plot below
    the axis in R.

    :param plus_table: dict coordinate -> plus-strand read count.
    :param minus_table: dict coordinate -> minus-strand read count.
    :param Nfactor: normalization factor applied to every count.
    :param output: path of the tabular file to write.
    """
    Nfactor = float(Nfactor)
    plus_coordinates = set(plus_table.keys())
    minus_coordinates = set(minus_table.keys())
    coords = sorted(plus_coordinates.union(minus_coordinates))
    ## added 23-2-2013 to have, instead, exaustive coordinates
    ## coords = range (min(coords), max(coords) + 1)
    ##
    # `with` ensures the handle is flushed and closed (the original left it
    # open), and write() replaces the Python-2-only `print >>` syntax while
    # producing byte-identical output.
    with open(output, "w") as OUT:
        OUT.write("coord\tplus\tminus\n")
        for coordinate in coords:
            OUT.write("%s\t%s\t%s\n" % (coordinate,
                                        plus_table.get(coordinate, 0) * Nfactor,
                                        - minus_table.get(coordinate, 0) * Nfactor))
def sizing(minsize, maxsize):
    """Return the inclusive range of read sizes [minsize, maxsize] as ints."""
    return range(int(minsize), int(maxsize) + 1)
# Command-line entry point:
#   plotter.py <bowtie input> <min size> <max size> <normalization factor> <tabular output>
plus_table, minus_table = acquisition (sys.argv[1], sizing ( sys.argv[2], sys.argv[3] ) )
output_table ( plus_table, minus_table, sys.argv[4], sys.argv[5] )
|
mvdbeek/tools-artbio
|
unstable/local_tools/plotter.py
|
Python
|
mit
| 1,722
|
[
"Bowtie"
] |
3b7754702a69ce59f24e05bbb8b6e010cbad3d3bf2e0c7ae4f82718990d3dc51
|
import json
import uuid
from datetime import timedelta, datetime
import responses
from django.contrib.auth.models import User
from django.test import TestCase
from django.db.models.signals import post_save
from django.conf import settings
from django.core.cache import cache
from rest_framework import status
from rest_framework.test import APIClient
from rest_framework.authtoken.models import Token
from rest_hooks.models import model_saved, Hook
from requests_testadapter import TestAdapter, TestSession
from go_http.metrics import MetricsApiClient
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from registrations import tasks
from .models import (Source, Registration, SubscriptionRequest,
registration_post_save, fire_created_metric,
fire_language_metric, fire_source_metric)
from .tasks import (
validate_registration, send_location_reminders,
is_valid_date, is_valid_uuid, is_valid_lang, is_valid_msg_type,
is_valid_msg_receiver, is_valid_loss_reason, is_valid_name,
repopulate_metrics)
from familyconnect_registration import utils
def override_get_today():
    """Fixed 'today' (2015-08-17) so date-dependent tests are deterministic."""
    return datetime(2015, 8, 17)
class RecordingAdapter(TestAdapter):
    """ Record the request that was handled by the adapter.
    """

    def __init__(self, *args, **kwargs):
        # Log of every request this adapter has served, in order.
        self.requests = []
        super(RecordingAdapter, self).__init__(*args, **kwargs)

    def send(self, request, *args, **kw):
        # Capture the request before delegating to the stock adapter.
        self.requests.append(request)
        return super(RecordingAdapter, self).send(request, *args, **kw)
# Required field names per registration type; "hw_pre" is a health-worker
# initiated prebirth registration.
REG_FIELDS = {
    "hw_pre": [
        "hoh_id", "operator_id", "language", "msg_type",
        "last_period_date", "msg_receiver", "hoh_name", "hoh_surname",
        "mama_name", "mama_surname"],
}

# Canned registration payloads used by the test cases in this module.
# Key naming: hw_* = health-worker source, pbl_* = public source,
# bad_* = payloads that should fail validation.
REG_DATA = {
    # Messages go to the head of household.
    "hw_pre_hoh": {
        "hoh_id": "hoh00001-63e2-4acc-9b94-26663b9bc267",
        "receiver_id": "hoh00001-63e2-4acc-9b94-26663b9bc267",
        "operator_id": "hcw00001-63e2-4acc-9b94-26663b9bc267",
        "language": "eng_UG",
        "msg_type": "text",
        "last_period_date": "20150202",
        "msg_receiver": "head_of_household",
        "hoh_name": "bob",
        "hoh_surname": "the builder",
        "mama_name": "sue",
        "mama_surname": "zin",
    },
    # Messages go directly to the mother-to-be.
    "hw_pre_mother": {
        "hoh_id": "hoh00001-63e2-4acc-9b94-26663b9bc267",
        "receiver_id": "mother01-63e2-4acc-9b94-26663b9bc267",
        "operator_id": "hcw00001-63e2-4acc-9b94-26663b9bc267",
        "language": "eng_UG",
        "msg_type": "text",
        "last_period_date": "20150202",  # 28 weeks pregnant
        "msg_receiver": "mother_to_be",
        "hoh_name": "bob",
        "hoh_surname": "the builder",
        "mama_name": "sue",
        "mama_surname": "zin",
    },
    # Messages go to a family member.
    "hw_pre_family": {
        "hoh_id": "hoh00001-63e2-4acc-9b94-26663b9bc267",
        "receiver_id": "friend01-63e2-4acc-9b94-26663b9bc267",
        "operator_id": "hcw00001-63e2-4acc-9b94-26663b9bc267",
        "language": "eng_UG",
        "msg_type": "text",
        "last_period_date": "20150202",
        "msg_receiver": "family_member",
        "hoh_name": "bob",
        "hoh_surname": "the builder",
        "mama_name": "sue",
        "mama_surname": "zin",
    },
    # Messages go to a trusted friend.
    "hw_pre_friend": {
        "hoh_id": "hoh00001-63e2-4acc-9b94-26663b9bc267",
        "receiver_id": "friend01-63e2-4acc-9b94-26663b9bc267",
        "operator_id": "hcw00001-63e2-4acc-9b94-26663b9bc267",
        "language": "eng_UG",
        "msg_type": "text",
        "last_period_date": "20150202",
        "msg_receiver": "trusted_friend",
        "hoh_name": "bob",
        "hoh_surname": "the builder",
        "mama_name": "sue",
        "mama_surname": "zin",
    },
    # Public (self) prebirth registration: no operator.
    "pbl_pre": {
        "hoh_id": "hoh00001-63e2-4acc-9b94-26663b9bc267",
        "receiver_id": "mother01-63e2-4acc-9b94-26663b9bc267",
        "operator_id": None,
        "language": "eng_UG",
        "msg_type": "text",
        "last_period_date": "20150202",
        "msg_receiver": "mother_to_be",
        "parish": "Kawaaga",
        "vht_id": "vht00001-63e2-4acc-9b94-26663b9bc267"
    },
    # Loss registration (no pregnancy fields, only a loss reason).
    "pbl_loss": {
        "hoh_id": "hoh00001-63e2-4acc-9b94-26663b9bc267",
        "receiver_id": "friend01-63e2-4acc-9b94-26663b9bc267",
        "operator_id": "hcw00001-63e2-4acc-9b94-26663b9bc267",
        "language": "eng_UG",
        "msg_type": "text",
        "loss_reason": "miscarriage"
    },
    # Invalid: mama_name/mama_surname missing for this receiver type.
    "bad_data_combination": {
        "hoh_id": "hoh00001-63e2-4acc-9b94-26663b9bc267",
        "receiver_id": "friend01-63e2-4acc-9b94-26663b9bc267",
        "operator_id": "hcw00001-63e2-4acc-9b94-26663b9bc267",
        "language": "eng_UG",
        "msg_type": "text",
        "last_period_date": "20150202",
        "msg_receiver": "trusted_friend",
        "hoh_name": "bob",
        "hoh_surname": "the builder",
    },
    # Invalid: malformed date (7 digits) and msg_receiver with a space.
    "bad_fields": {
        "hoh_id": "hoh00001-63e2-4acc-9b94-26663b9bc267",
        "receiver_id": "friend01-63e2-4acc-9b94-26663b9bc267",
        "operator_id": "hcw00001-63e2-4acc-9b94-26663b9bc267",
        "language": "eng_UG",
        "msg_type": "text",
        "last_period_date": "2015020",
        "msg_receiver": "trusted friend",
        "hoh_name": "bob",
        "hoh_surname": "the builder",
        "mama_name": "sue",
        "mama_surname": "zin",
    },
    # Invalid: last period date implausibly far in the past.
    "bad_lmp": {
        "hoh_id": "hoh00001-63e2-4acc-9b94-26663b9bc267",
        "receiver_id": "friend01-63e2-4acc-9b94-26663b9bc267",
        "operator_id": "hcw00001-63e2-4acc-9b94-26663b9bc267",
        "language": "eng_UG",
        "msg_type": "text",
        "last_period_date": "20140202",
        "msg_receiver": "trusted_friend",
        "hoh_name": "bob",
        "hoh_surname": "the builder",
        "mama_name": "sue",
        "mama_surname": "zin",
    },
}
class APITestCase(TestCase):
    """Base test case providing API clients for the different privilege levels."""

    def setUp(self):
        # Separate clients so tests can exercise admin, normal and
        # unknown-user behaviour independently.
        self.adminclient = APIClient()
        self.normalclient = APIClient()
        self.otherclient = APIClient()
        self.session = TestSession()
        # Pin "today" so date-based calculations are deterministic.
        utils.get_today = override_get_today
class AuthenticatedAPITestCase(APITestCase):
    """API test base: detaches Registration post_save hooks, stubs the
    metrics client, and creates token-authenticated admin/normal users."""

    def _replace_post_save_hooks(self):
        # Detach every Registration post_save receiver so creating fixtures
        # does not fire validation tasks, web hooks or metrics.
        def has_listeners():
            return post_save.has_listeners(Registration)
        assert has_listeners(), (
            "Registration model has no post_save listeners. Make sure"
            " helpers cleaned up properly in earlier tests.")
        post_save.disconnect(receiver=registration_post_save,
                             sender=Registration)
        post_save.disconnect(receiver=model_saved,
                             dispatch_uid='instance-saved-hook')
        post_save.disconnect(receiver=fire_created_metric, sender=Registration)
        post_save.disconnect(receiver=fire_language_metric,
                             sender=Registration)
        post_save.disconnect(receiver=fire_source_metric, sender=Registration)
        assert not has_listeners(), (
            "Registration model still has post_save listeners. Make sure"
            " helpers cleaned up properly in earlier tests.")

    def _restore_post_save_hooks(self):
        # Reconnect the receivers removed above.
        # NOTE(review): model_saved ('instance-saved-hook') is not
        # reconnected here — confirm whether that is intentional.
        def has_listeners():
            return post_save.has_listeners(Registration)
        assert not has_listeners(), (
            "Registration model still has post_save listeners. Make sure"
            " helpers removed them properly in earlier tests.")
        post_save.connect(registration_post_save, sender=Registration)
        post_save.connect(receiver=fire_created_metric, sender=Registration)
        post_save.connect(receiver=fire_language_metric, sender=Registration)
        post_save.connect(receiver=fire_source_metric, sender=Registration)

    def _replace_get_metric_client(self, session=None):
        # Test double for tasks.get_metric_client. The `session` argument
        # is accepted for signature compatibility but the shared test
        # session is always used.
        return MetricsApiClient(
            auth_token=settings.METRICS_AUTH_TOKEN,
            api_url=settings.METRICS_URL,
            session=self.session)

    def make_source_adminuser(self):
        # Source owned by the admin user with full health-worker authority.
        data = {
            "name": "test_source_adminuser",
            "authority": "hw_full",
            "user": User.objects.get(username='testadminuser')
        }
        return Source.objects.create(**data)

    def make_source_normaluser(self):
        # Source owned by the normal user with patient-level authority.
        data = {
            "name": "test_source_normaluser",
            "authority": "patient",
            "user": User.objects.get(username='testnormaluser')
        }
        return Source.objects.create(**data)

    def make_registration_adminuser(self):
        # Unvalidated prebirth registration attached to the admin source.
        data = {
            "stage": "prebirth",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": {
                "test_adminuser_reg_key": "test_adminuser_reg_value",
                "language": "eng_UG"
            },
            "source": self.make_source_adminuser()
        }
        return Registration.objects.create(**data)

    def make_registration_normaluser(self):
        # Unvalidated prebirth registration attached to the normal-user source.
        data = {
            "stage": "prebirth",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": {"test_normaluser_reg_key": "test_normaluser_reg_value"},
            "source": self.make_source_normaluser()
        }
        return Registration.objects.create(**data)

    def setUp(self):
        super(AuthenticatedAPITestCase, self).setUp()
        self._replace_post_save_hooks()
        # Route metrics calls through the stubbed client above.
        tasks.get_metric_client = self._replace_get_metric_client
        # Normal User setup
        self.normalusername = 'testnormaluser'
        self.normalpassword = 'testnormalpass'
        self.normaluser = User.objects.create_user(
            self.normalusername,
            'testnormaluser@example.com',
            self.normalpassword)
        normaltoken = Token.objects.create(user=self.normaluser)
        self.normaltoken = normaltoken.key
        self.normalclient.credentials(
            HTTP_AUTHORIZATION='Token ' + self.normaltoken)
        # Admin User setup
        self.adminusername = 'testadminuser'
        self.adminpassword = 'testadminpass'
        self.adminuser = User.objects.create_superuser(
            self.adminusername,
            'testadminuser@example.com',
            self.adminpassword)
        admintoken = Token.objects.create(user=self.adminuser)
        self.admintoken = admintoken.key
        self.adminclient.credentials(
            HTTP_AUTHORIZATION='Token ' + self.admintoken)

    def tearDown(self):
        # Put the signal receivers back so later test classes start clean.
        self._restore_post_save_hooks()
class TestLogin(AuthenticatedAPITestCase):
    """Token-auth endpoint tests for each user type."""

    def test_login_normaluser(self):
        """ Test that normaluser can login successfully
        """
        # Setup
        post_auth = {"username": "testnormaluser",
                     "password": "testnormalpass"}
        # Execute
        request = self.client.post(
            '/api/token-auth/', post_auth)
        token = request.data.get('token', None)
        # Check
        self.assertIsNotNone(
            token, "Could not receive authentication token on login post.")
        self.assertEqual(
            request.status_code, 200,
            "Status code on /api/token-auth was %s (should be 200)."
            % request.status_code)

    def test_login_adminuser(self):
        """ Test that adminuser can login successfully
        """
        # Setup
        post_auth = {"username": "testadminuser",
                     "password": "testadminpass"}
        # Execute
        request = self.client.post(
            '/api/token-auth/', post_auth)
        token = request.data.get('token', None)
        # Check
        self.assertIsNotNone(
            token, "Could not receive authentication token on login post.")
        self.assertEqual(
            request.status_code, 200,
            "Status code on /api/token-auth was %s (should be 200)."
            % request.status_code)

    def test_login_adminuser_wrong_password(self):
        """ Test that adminuser cannot log in with wrong password
        """
        # Setup
        post_auth = {"username": "testadminuser",
                     "password": "wrongpass"}
        # Execute
        request = self.client.post(
            '/api/token-auth/', post_auth)
        token = request.data.get('token', None)
        # Check
        # NOTE(review): the failure message below was copied from the
        # positive tests and is misleading — it fires when a token WAS
        # unexpectedly returned.
        self.assertIsNone(
            token, "Could not receive authentication token on login post.")
        self.assertEqual(request.status_code, status.HTTP_400_BAD_REQUEST)

    def test_login_otheruser(self):
        """ Test that an unknown user cannot log in
        """
        # Setup
        post_auth = {"username": "testotheruser",
                     "password": "testotherpass"}
        # Execute
        request = self.otherclient.post(
            '/api/token-auth/', post_auth)
        token = request.data.get('token', None)
        # Check
        # NOTE(review): same misleading failure message as in
        # test_login_adminuser_wrong_password.
        self.assertIsNone(
            token, "Could not receive authentication token on login post.")
        self.assertEqual(request.status_code, status.HTTP_400_BAD_REQUEST)
class TestSourceAPI(AuthenticatedAPITestCase):
    """Source endpoint permission and CRUD tests."""

    def test_get_source_adminuser(self):
        # Setup
        source = self.make_source_adminuser()
        # Execute
        response = self.adminclient.get('/api/v1/source/%s/' % source.id,
                                        format='json',
                                        content_type='application/json')
        # Check
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["authority"], "hw_full")
        self.assertEqual(response.data["name"], "test_source_adminuser")

    def test_get_source_normaluser(self):
        # Setup
        source = self.make_source_normaluser()
        # Execute
        response = self.normalclient.get('/api/v1/source/%s/' % source.id,
                                         content_type='application/json')
        # Check
        # Non-admin users may not read source records.
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_create_source_adminuser(self):
        # Setup
        user = User.objects.get(username='testadminuser')
        post_data = {
            "name": "test_source_name",
            "authority": "patient",
            "user": "/api/v1/user/%s/" % user.id
        }
        # Execute
        response = self.adminclient.post('/api/v1/source/',
                                         json.dumps(post_data),
                                         content_type='application/json')
        # Check
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        d = Source.objects.last()
        self.assertEqual(d.name, 'test_source_name')
        self.assertEqual(d.authority, "patient")

    def test_create_source_normaluser(self):
        # Setup
        user = User.objects.get(username='testnormaluser')
        post_data = {
            "name": "test_source_name",
            "authority": "hw_full",
            "user": "/api/v1/user/%s/" % user.id
        }
        # Execute
        response = self.normalclient.post('/api/v1/source/',
                                          json.dumps(post_data),
                                          content_type='application/json')
        # Check
        # Non-admin users may not create sources.
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class TestRegistrationAPI(AuthenticatedAPITestCase):
def test_get_registration_adminuser(self):
# Setup
registration = self.make_registration_adminuser()
# Execute
response = self.adminclient.get(
'/api/v1/registrations/%s/' % registration.id,
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["stage"], "prebirth")
self.assertEqual(response.data["data"]["test_adminuser_reg_key"],
"test_adminuser_reg_value")
def test_get_registration_normaluser(self):
# Setup
registration = self.make_registration_normaluser()
# Execute
response = self.normalclient.get(
'/api/v1/registrations/%s/' % registration.id,
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["stage"], "prebirth")
self.assertEqual(response.data["data"]["test_normaluser_reg_key"],
"test_normaluser_reg_value")
def test_create_registration_adminuser(self):
# Setup
self.make_source_adminuser()
post_data = {
"stage": "prebirth",
"mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"data": {"test_key1": "test_value1"}
}
# Execute
response = self.adminclient.post('/api/v1/registration/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
d = Registration.objects.last()
self.assertEqual(d.source.name, 'test_source_adminuser')
self.assertEqual(d.stage, 'prebirth')
self.assertEqual(d.mother_id, "mother01-63e2-4acc-9b94-26663b9bc267")
self.assertEqual(d.validated, False)
self.assertEqual(d.data, {"test_key1": "test_value1"})
def test_create_registration_normaluser(self):
# Setup
self.make_source_normaluser()
post_data = {
"stage": "prebirth",
"mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"data": {"test_key1": "test_value1"}
}
# Execute
response = self.normalclient.post('/api/v1/registration/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
d = Registration.objects.last()
self.assertEqual(d.source.name, 'test_source_normaluser')
self.assertEqual(d.stage, 'prebirth')
self.assertEqual(d.mother_id, "mother01-63e2-4acc-9b94-26663b9bc267")
self.assertEqual(d.validated, False)
self.assertEqual(d.data, {"test_key1": "test_value1"})
def test_create_registration_set_readonly_field(self):
    """Clients cannot set the read-only 'validated' field on create;
    the posted value is silently ignored."""
    self.make_source_adminuser()
    payload = {
        "stage": "prebirth",
        "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
        "data": {"test_key1": "test_value1"},
        "validated": True,  # read-only: must be dropped by the serializer
    }

    resp = self.adminclient.post(
        '/api/v1/registration/',
        json.dumps(payload),
        content_type='application/json')

    self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
    created = Registration.objects.last()
    self.assertEqual(created.source.name, 'test_source_adminuser')
    self.assertEqual(created.stage, 'prebirth')
    self.assertEqual(
        created.mother_id, "mother01-63e2-4acc-9b94-26663b9bc267")
    self.assertEqual(created.validated, False)  # Should ignore True post_data
    self.assertEqual(created.data, {"test_key1": "test_value1"})
def test_list_registrations(self):
    """Listing registrations returns all of them, creation order first."""
    reg_a = self.make_registration_normaluser()
    reg_b = self.make_registration_adminuser()

    resp = self.normalclient.get(
        '/api/v1/registrations/', content_type='application/json')

    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    self.assertEqual(resp.data["count"], 2)
    first, second = resp.data["results"]
    self.assertEqual(first["id"], str(reg_a.id))
    self.assertEqual(second["id"], str(reg_b.id))
def make_different_registrations(self):
    """Create two contrasting registrations for the filter tests.

    Returns:
        tuple: (registration1, registration2) where registration1 is a
        validated prebirth registration from the admin source and
        registration2 is an unvalidated postbirth registration from the
        normal-user source — so each filter test can tell them apart by
        stage, mother_id, validated flag and source.
    """
    # NOTE: the original also called make_source_adminuser() once more and
    # discarded the result, creating a redundant extra source; removed.
    registration1 = Registration.objects.create(
        stage="prebirth",
        mother_id="mother01-63e2-4acc-9b94-26663b9bc267",
        data=REG_DATA["hw_pre_hoh"].copy(),
        source=self.make_source_adminuser(),
        validated=True,
    )
    registration2 = Registration.objects.create(
        stage="postbirth",
        mother_id="mother02-63e2-4acc-9b94-26663b9bc267",
        data=REG_DATA["hw_pre_hoh"].copy(),
        source=self.make_source_normaluser(),
        validated=False,
    )
    return (registration1, registration2)
def test_filter_registration_mother_id(self):
    """Filtering by mother_id returns only the matching registration."""
    reg_match, _ = self.make_different_registrations()

    resp = self.adminclient.get(
        '/api/v1/registrations/?mother_id=%s' % reg_match.mother_id,
        content_type='application/json')

    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    self.assertEqual(resp.data["count"], 1)
    [result] = resp.data["results"]
    self.assertEqual(result["id"], str(reg_match.id))
def test_filter_registration_stage(self):
    """Filtering by stage returns only the matching registration."""
    _, reg_match = self.make_different_registrations()

    resp = self.adminclient.get(
        '/api/v1/registrations/?stage=%s' % reg_match.stage,
        content_type='application/json')

    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    self.assertEqual(resp.data["count"], 1)
    [result] = resp.data["results"]
    self.assertEqual(result["id"], str(reg_match.id))
def test_filter_registration_validated(self):
    """Filtering by the validated flag returns only matching rows."""
    reg_match, _ = self.make_different_registrations()

    resp = self.adminclient.get(
        '/api/v1/registrations/?validated=%s' % reg_match.validated,
        content_type='application/json')

    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    self.assertEqual(resp.data["count"], 1)
    [result] = resp.data["results"]
    self.assertEqual(result["id"], str(reg_match.id))
def test_filter_registration_source(self):
    """Filtering by source id returns only that source's registration."""
    _, reg_match = self.make_different_registrations()

    resp = self.adminclient.get(
        '/api/v1/registrations/?source=%s' % reg_match.source.id,
        content_type='application/json')

    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    self.assertEqual(resp.data["count"], 1)
    [result] = resp.data["results"]
    self.assertEqual(result["id"], str(reg_match.id))
def test_filter_registration_created_after(self):
    """created_after excludes registrations created before the cutoff."""
    _, newer_reg = self.make_different_registrations()
    # While the '+00:00' is valid according to ISO 8601, the version of
    # django-filter we are using does not support it
    date_string = newer_reg.created_at.isoformat().replace("+00:00", "Z")

    resp = self.adminclient.get(
        '/api/v1/registrations/?created_after=%s' % date_string,
        content_type='application/json')

    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    self.assertEqual(resp.data["count"], 1)
    [result] = resp.data["results"]
    self.assertEqual(result["id"], str(newer_reg.id))
def test_filter_registration_created_before(self):
    """created_before excludes registrations created after the cutoff."""
    older_reg, _ = self.make_different_registrations()
    # While the '+00:00' is valid according to ISO 8601, the version of
    # django-filter we are using does not support it
    date_string = older_reg.created_at.isoformat().replace("+00:00", "Z")

    resp = self.adminclient.get(
        '/api/v1/registrations/?created_before=%s' % date_string,
        content_type='application/json')

    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    self.assertEqual(resp.data["count"], 1)
    [result] = resp.data["results"]
    self.assertEqual(result["id"], str(older_reg.id))
def test_filter_registration_no_matches(self):
    """A filter value that matches nothing returns an empty page."""
    self.make_different_registrations()

    resp = self.adminclient.get(
        '/api/v1/registrations/?mother_id=test_id',
        content_type='application/json')

    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    self.assertEqual(resp.data["count"], 0)
def test_filter_registration_unknown_filter(self):
    """An unrecognised query parameter is ignored, returning everything."""
    self.make_different_registrations()

    resp = self.adminclient.get(
        '/api/v1/registrations/?something=test_id',
        content_type='application/json')

    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    self.assertEqual(resp.data["count"], 2)
class TestFieldValidation(AuthenticatedAPITestCase):
    """Unit tests for the standalone field-validation helper functions."""

    def test_is_valid_date(self):
        """YYYYMMDD dates must actually exist on the calendar."""
        self.assertEqual(is_valid_date("19820315"), True)
        # 1983 was not a leap year, so Feb 29 does not exist.
        self.assertEqual(is_valid_date("19830229"), False)
        # Too short to be a date at all.
        self.assertEqual(is_valid_date("1234"), False)

    def test_is_valid_uuid(self):
        """Only full-length UUID strings are accepted."""
        self.assertEqual(is_valid_uuid(str(uuid.uuid4())), True)
        # One character short of a valid UUID.
        self.assertEqual(
            is_valid_uuid("f9bfa2d7-5b62-4011-8eac-76bca34781a"), False)

    def test_is_valid_lang(self):
        """Languages must be ISO-style codes, not language names."""
        self.assertEqual(is_valid_lang("lug_UG"), True)
        self.assertEqual(is_valid_lang("lusoga"), False)

    def test_is_valid_msg_type(self):
        """Only the text message type is supported."""
        self.assertEqual(is_valid_msg_type("text"), True)
        self.assertEqual(is_valid_msg_type("voice"), False)

    def test_is_valid_msg_receiver(self):
        """Receivers must come from the known receiver vocabulary."""
        self.assertEqual(is_valid_msg_receiver("head_of_household"), True)
        self.assertEqual(is_valid_msg_receiver("mama"), False)

    def test_is_valid_loss_reason(self):
        """Loss reasons must come from the known vocabulary."""
        self.assertEqual(is_valid_loss_reason("miscarriage"), True)
        self.assertEqual(is_valid_loss_reason("other"), False)

    def test_is_valid_name(self):
        """Names must be strings; digit-only strings currently pass."""
        self.assertEqual(is_valid_name("Namey"), True)
        self.assertEqual(is_valid_name("Zoé"), True)
        self.assertEqual(is_valid_name("1234"), True)  # TODO reject
        self.assertEqual(is_valid_name(10375075), False)

    def test_check_field_values(self):
        """check_field_values returns the names of the invalid fields."""
        good_data = REG_DATA["hw_pre_mother"]
        bad_data = REG_DATA["hw_pre_mother"].copy()
        bad_data["msg_receiver"] = "somebody"

        cfv_valid = validate_registration.check_field_values(
            REG_FIELDS["hw_pre"], good_data)
        cfv_invalid = validate_registration.check_field_values(
            REG_FIELDS["hw_pre"], bad_data)

        self.assertEqual(cfv_valid, [])
        self.assertEqual(cfv_invalid, ['msg_receiver'])
class TestRegistrationValidation(AuthenticatedAPITestCase):
    """Tests for validate_registration: field checks, the derived fields
    it writes back into registration.data (reg_type, preg_week), the VHT
    notification SMSes for public registrations, and the celery task
    entry point (apply_async).
    """

    def test_validate_hw_prebirth_hoh(self):
        """A health-worker prebirth registration addressed to the head of
        household validates and derives reg_type/preg_week."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": REG_DATA["hw_pre_hoh"].copy(),
            "source": self.make_source_adminuser()
        }
        registration = Registration.objects.create(**registration_data)
        # Execute
        v = validate_registration.validate(registration)
        # Check: validate() mutates registration.data in place.
        self.assertEqual(v, True)
        self.assertEqual(registration.data["reg_type"], "hw_pre")
        self.assertEqual(registration.data["preg_week"], 28)
        self.assertEqual(registration.validated, True)

    def test_validate_hw_prebirth_mother(self):
        """Same as above, with the mother as the message receiver."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": REG_DATA["hw_pre_mother"].copy(),
            "source": self.make_source_adminuser()
        }
        registration = Registration.objects.create(**registration_data)
        # Execute
        v = validate_registration.validate(registration)
        # Check
        self.assertEqual(v, True)
        self.assertEqual(registration.data["reg_type"], "hw_pre")
        self.assertEqual(registration.data["preg_week"], 28)
        self.assertEqual(registration.validated, True)

    def test_validate_hw_prebirth_family(self):
        """Same as above, with a family member as the message receiver."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": REG_DATA["hw_pre_family"].copy(),
            "source": self.make_source_adminuser()
        }
        registration = Registration.objects.create(**registration_data)
        # Execute
        v = validate_registration.validate(registration)
        # Check
        self.assertEqual(v, True)
        self.assertEqual(registration.data["reg_type"], "hw_pre")
        self.assertEqual(registration.data["preg_week"], 28)
        self.assertEqual(registration.validated, True)

    def test_validate_hw_prebirth_friend(self):
        """Same as above, with a friend as the message receiver."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": REG_DATA["hw_pre_friend"].copy(),
            "source": self.make_source_adminuser()
        }
        registration = Registration.objects.create(**registration_data)
        # Execute
        v = validate_registration.validate(registration)
        # Check
        self.assertEqual(v, True)
        self.assertEqual(registration.data["reg_type"], "hw_pre")
        self.assertEqual(registration.data["preg_week"], 28)
        self.assertEqual(registration.validated, True)

    @responses.activate
    def test_validate_pbl_prebirth_vht(self):
        """A public prebirth registration naming a VHT validates and sends
        one notification SMS to the VHT's default msisdn."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": REG_DATA["pbl_pre"].copy(),
            "source": self.make_source_normaluser()
        }
        registration = Registration.objects.create(**registration_data)
        # mock vht identity lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/%s/' % registration_data[
                "data"]["vht_id"],
            json={"id": "vht00001-63e2-4acc-9b94-26663b9bc267"}
        )
        # mock mother address lookup
        responses.add(
            responses.GET,
            ('http://localhost:8001/api/v1/identities/%s/addresses/msisdn?'
             'default=True') % registration_data["mother_id"],
            json={"results": [{"address": "+4321"}]},
            match_querystring=True,
        )
        # mock vht address lookup
        responses.add(
            responses.GET,
            ('http://localhost:8001/api/v1/identities/%s/addresses/msisdn?'
             'default=True') % registration_data["data"]["vht_id"],
            json={"results": [{"address": "+1234"}]},
            match_querystring=True,
        )
        # mock message send
        responses.add(
            responses.POST,
            'http://localhost:8006/api/v1/outbound/',
            json={'id': 1})
        # Execute
        v = validate_registration.validate(registration)
        # Check
        self.assertEqual(v, True)
        self.assertEqual(registration.data["reg_type"], "pbl_pre")
        self.assertEqual(registration.data["preg_week"], 28)
        self.assertEqual(registration.validated, True)
        # The SMS send is the last outbound HTTP call made by validate().
        sms_http_call = responses.calls[-1].request
        self.assertEqual(json.loads(sms_http_call.body), {
            "content": (
                "There is a new pregnancy in your parish. "
                "Call +4321 and visit the mother to update her registration."),
            "to_addr": "+1234",
            "metadata": {}})

    @responses.activate
    def test_validate_pbl_prebirth_location(self):
        """A public prebirth registration without a vht_id falls back to a
        parish lookup and notifies every VHT found in that parish."""
        # Setup
        data = REG_DATA["pbl_pre"].copy()
        data.pop('vht_id')  # force the parish-based lookup path
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": data,
            "source": self.make_source_normaluser()
        }
        registration = Registration.objects.create(**registration_data)
        # mock vht identities lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/search?details__has_key'
            '=personnel_code&details_parish=%s' % registration_data[
                "data"]["parish"],
            json={"results": [
                {"id": "vht00001-63e2-4acc-9b94-26663b9bc267"},
                {"id": "vht00002-63e2-4acc-9b94-26663b9bc267"},
            ]},
            match_querystring=True,
        )
        # mock mother address lookup
        responses.add(
            responses.GET,
            ('http://localhost:8001/api/v1/identities/%s/addresses/msisdn?'
             'default=True') % registration_data["mother_id"],
            json={"results": [{"address": "+4321"}]},
            match_querystring=True,
        )
        # mock vht1 address lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/vht00001-63e2-4acc-9b94'
            '-26663b9bc267/addresses/msisdn?default=True',
            json={"results": [{"address": "+1234"}]},
            match_querystring=True,
        )
        # mock vht2 address lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/vht00002-63e2-4acc-9b94'
            '-26663b9bc267/addresses/msisdn?default=True',
            json={"results": [{"address": "+2234"}]},
            match_querystring=True,
        )
        # mock message send
        responses.add(
            responses.POST,
            'http://localhost:8006/api/v1/outbound/',
            json={'id': 1})
        # Execute
        v = validate_registration.validate(registration)
        # Check
        self.assertEqual(v, True)
        self.assertEqual(registration.data["reg_type"], "pbl_pre")
        self.assertEqual(registration.data["preg_week"], 28)
        self.assertEqual(registration.validated, True)
        # Pick out just the two SMS sends from the recorded HTTP calls.
        [sms01, sms02] = filter(
            lambda r: r.request.url == 'http://localhost:8006/api/v1/'
            'outbound/', responses.calls)
        self.assertEqual(json.loads(sms01.request.body), {
            "content": (
                "There is a new pregnancy in your parish. "
                "Call +4321 and visit the mother to update her registration."),
            "to_addr": "+1234",
            "metadata": {}})
        self.assertEqual(json.loads(sms02.request.body), {
            "content": (
                "There is a new pregnancy in your parish. "
                "Call +4321 and visit the mother to update her registration."),
            "to_addr": "+2234",
            "metadata": {}})

    def test_validate_pbl_loss(self):
        """A public loss registration validates with reg_type pbl_loss."""
        # Setup
        registration_data = {
            "stage": "loss",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": REG_DATA["pbl_loss"].copy(),
            "source": self.make_source_normaluser()
        }
        registration = Registration.objects.create(**registration_data)
        # Execute
        v = validate_registration.validate(registration)
        # Check
        self.assertEqual(v, True)
        self.assertEqual(registration.data["reg_type"], "pbl_loss")
        self.assertEqual(registration.validated, True)

    def test_validate_pregnancy_too_long(self):
        """An implausibly old last_period_date fails validation."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": REG_DATA["hw_pre_mother"].copy(),
            "source": self.make_source_adminuser()
        }
        registration_data["data"]["last_period_date"] = "20130101"
        registration = Registration.objects.create(**registration_data)
        # Execute
        v = validate_registration.validate(registration)
        # Check
        self.assertEqual(v, False)
        self.assertEqual(registration.validated, False)

    def test_validate_pregnancy_too_short(self):
        """A last_period_date too close to now fails validation."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": REG_DATA["hw_pre_mother"].copy(),
            "source": self.make_source_adminuser()
        }
        registration_data["data"]["last_period_date"] = "20150816"
        registration = Registration.objects.create(**registration_data)
        # Execute
        v = validate_registration.validate(registration)
        # Check
        self.assertEqual(v, False)
        self.assertEqual(registration.validated, False)

    @responses.activate
    def test_validate_registration_run_success(self):
        """The full validation task succeeds end-to-end and creates a
        SubscriptionRequest with the expected messageset/sequence/lang."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": REG_DATA["hw_pre_mother"].copy(),
            "source": self.make_source_adminuser()
        }
        registration = Registration.objects.create(**registration_data)
        # mock messageset lookup
        query_string = '?short_name=prebirth.mother.hw_full'
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/messageset/%s' % query_string,
            json={
                "count": 1,
                "next": None,
                "previous": None,
                "results": [{
                    "id": 1,
                    "short_name": 'prebirth.mother.hw_full',
                    "default_schedule": 1
                }]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock schedule lookup
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/schedule/1/',
            json={"id": 1, "day_of_week": "1,4"},
            status=200, content_type='application/json',
        )
        # mock mother identity lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/%s/' % registration_data[
                "mother_id"],
            json={
                "id": registration_data["mother_id"],
                "version": 1,
                "details": {
                    "default_addr_type": "msisdn",
                    "addresses": {
                        "msisdn": {}
                    },
                    "receiver_role": "mother_to_be",
                    "health_id": 9999999999
                },
                "created_at": "2015-07-10T06:13:29.693272Z",
                "updated_at": "2015-07-10T06:13:29.693298Z"
            },
            status=200, content_type='application/json'
        )
        # mock Mother MSISDN lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/mother01-63e2-4acc-9b94-26663b9bc267/addresses/msisdn?default=True',  # noqa
            json={
                "count": 1, "next": None, "previous": None,
                "results": [{"address": "+256123"}]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock SMS send
        responses.add(
            responses.POST,
            'http://localhost:8006/api/v1/outbound/',
            json={"id": 1},
            status=200, content_type='application/json',
        )
        # Execute: run the celery task (eager in tests).
        result = validate_registration.apply_async(args=[registration.id])
        # Check
        self.assertEqual(result.get(), "Validation completed - Success")
        d = SubscriptionRequest.objects.last()
        self.assertEqual(d.identity, "mother01-63e2-4acc-9b94-26663b9bc267")
        self.assertEqual(d.messageset, 1)
        self.assertEqual(d.next_sequence_number, 48)  # (28-4)*2
        self.assertEqual(d.lang, "eng_UG")
        self.assertEqual(d.schedule, 1)

    def test_validate_registration_run_failure_bad_combination(self):
        """A field combination matching no known template fails with a
        descriptive invalid_fields message."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": REG_DATA["bad_data_combination"].copy(),
            "source": self.make_source_adminuser()
        }
        registration = Registration.objects.create(**registration_data)
        # Execute
        result = validate_registration.apply_async(args=[registration.id])
        # Check
        self.assertEqual(result.get(), "Validation completed - Failure")
        d = Registration.objects.get(id=registration.id)
        self.assertEqual(d.data["invalid_fields"],
                         "Invalid combination of fields")

    def test_validate_registration_run_failure_bad_fields(self):
        """Individually invalid fields are listed in invalid_fields."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": REG_DATA["bad_fields"].copy(),
            "source": self.make_source_adminuser()
        }
        registration = Registration.objects.create(**registration_data)
        # Execute
        result = validate_registration.apply_async(args=[registration.id])
        # Check
        self.assertEqual(result.get(), "Validation completed - Failure")
        d = Registration.objects.get(id=registration.id)
        self.assertEqual(sorted(d.data["invalid_fields"]),
                         sorted(["msg_receiver", "last_period_date"]))

    def test_validate_registration_run_failure_bad_lmp(self):
        """An out-of-range last period date fails with a range message."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": REG_DATA["bad_lmp"].copy(),
            "source": self.make_source_adminuser()
        }
        registration = Registration.objects.create(**registration_data)
        # Execute
        result = validate_registration.apply_async(args=[registration.id])
        # Check
        self.assertEqual(result.get(), "Validation completed - Failure")
        d = Registration.objects.get(id=registration.id)
        self.assertEqual(d.data["invalid_fields"],
                         ["last_period_date out of range"])
class TestSubscriptionRequest(AuthenticatedAPITestCase):
    """Tests for create_subscriptionrequests, which turns a registration
    into a SubscriptionRequest aimed at the message sender service."""

    @responses.activate
    def test_hoh_prebirth_patient(self):
        """A head-of-household prebirth registration from a patient source
        yields a request against the household messageset, with the
        sequence number derived from the pregnancy week."""
        # Setup
        # prepare registration data
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
            "data": REG_DATA["hw_pre_hoh"].copy(),
            "source": self.make_source_normaluser()
        }
        registration_data["data"]["preg_week"] = 15
        registration = Registration.objects.create(**registration_data)
        # mock messageset lookup
        query_string = '?short_name=prebirth.household.patient'
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/messageset/%s' % query_string,
            json={
                "count": 1,
                "next": None,
                "previous": None,
                "results": [{
                    "id": 2,
                    "short_name": 'prebirth.household.patient',
                    "default_schedule": 2
                }]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock schedule lookup
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/schedule/2/',
            json={"id": 2, "day_of_week": "1"},
            status=200, content_type='application/json',
        )
        # mock mother identity lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/%s/' % registration_data[
                "mother_id"],
            json={
                "id": registration_data["mother_id"],
                "version": 1,
                "details": {
                    "default_addr_type": "msisdn",
                    "addresses": {
                        "msisdn": {}
                    },
                    "receiver_role": "mother_to_be",
                    "health_id": 7777777777
                },
                "created_at": "2015-07-10T06:13:29.693272Z",
                "updated_at": "2015-07-10T06:13:29.693298Z"
            },
            status=200, content_type='application/json'
        )
        # mock HOH MSISDN lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/hoh00001-63e2-4acc-9b94-26663b9bc267/addresses/msisdn?default=True',  # noqa
            json={
                "count": 1, "next": None, "previous": None,
                "results": [{"address": "+256124"}]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock SMS send
        responses.add(
            responses.POST,
            'http://localhost:8006/api/v1/outbound/',
            json={"id": 1},
            status=200, content_type='application/json',
        )
        # Execute
        result = validate_registration.create_subscriptionrequests(
            registration)
        # Check
        self.assertEqual(result, "SubscriptionRequest created")
        d = SubscriptionRequest.objects.last()
        self.assertEqual(d.identity, "mother01-63e2-4acc-9b94-26663b9bc267")
        self.assertEqual(d.messageset, 2)
        self.assertEqual(d.next_sequence_number, 11)  # (15-4)*1
        self.assertEqual(d.lang, "eng_UG")
        self.assertEqual(d.schedule, 2)
class TestSubscriptionRequestWebhook(AuthenticatedAPITestCase):
    """Tests for webhook registration against subscriptionrequest events."""

    def test_create_webhook(self):
        """An admin can register a webhook target for the
        subscriptionrequest.added event."""
        # Setup
        user = User.objects.get(username='testadminuser')
        post_data = {
            "target": "http://example.com/registration/",
            "event": "subscriptionrequest.added"
        }
        # Execute
        response = self.adminclient.post('/api/v1/webhook/',
                                         json.dumps(post_data),
                                         content_type='application/json')
        # Check
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        d = Hook.objects.last()
        self.assertEqual(d.target, 'http://example.com/registration/')
        self.assertEqual(d.user, user)

    # This test is not working despite the code working fine
    # If you run these same steps below interactively the webhook will fire
    # @responses.activate
    # def test_mother_only_webhook(self):
    #     # Setup
    #     post_save.connect(receiver=model_saved, sender=SubscriptionRequest,
    #                       dispatch_uid='instance-saved-hook')
    #     Hook.objects.create(user=self.adminuser,
    #                         event='subscriptionrequest.added',
    #                         target='http://example.com/registration/')
    #
    #     expected_webhook = {
    #         "hook": {
    #             "target": "http://example.com/registration/",
    #             "event": "subscriptionrequest.added",
    #             "id": 3
    #         },
    #         "data": {
    #             "messageset": 1,
    #             "updated_at": "2016-02-17T07:59:42.831568+00:00",
    #             "identity": "mother01-63e2-4acc-9b94-26663b9bc267",
    #             "lang": "eng_NG",
    #             "created_at": "2016-02-17T07:59:42.831533+00:00",
    #             "id": "5282ed58-348f-4a54-b1ff-f702e36ec3cc",
    #             "next_sequence_number": 1,
    #             "schedule": 1
    #         }
    #     }
    #     responses.add(
    #         responses.POST,
    #         "http://example.com/registration/",
    #         json.dumps(expected_webhook),
    #         status=200, content_type='application/json')
    #     registration_data = {
    #         "stage": "prebirth",
    #         "mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
    #         "data": REG_DATA["hw_pre_id_mother"].copy(),
    #         "source": self.make_source_adminuser()
    #     }
    #     registration = Registration.objects.create(**registration_data)
    #     # Execute
    #     result = validate_registration.create_subscriptionrequests(
    #         registration)
    #     # Check
    #     self.assertEqual(result, "SubscriptionRequest created")
    #     d = SubscriptionRequest.objects.last()
    #     self.assertEqual(d.identity,
    #                      "mother01-63e2-4acc-9b94-26663b9bc267")
    #     self.assertEqual(d.messageset, 1)
    #     self.assertEqual(d.next_sequence_number, 1)
    #     self.assertEqual(d.lang, "eng_NG")
    #     self.assertEqual(d.schedule, 1)
    #     self.assertEqual(responses.calls[0].request.url,
    #                      "http://example.com/registration/")
class TestRegistrationModel(AuthenticatedAPITestCase):
    """Tests for the custom Registration queryset helpers."""

    def test_validated_filter(self):
        """
        The validated queryset filter should only return validated
        registrations.
        """
        validated_reg = self.make_registration_adminuser()
        validated_reg.validated = True
        validated_reg.save()

        unvalidated_reg = self.make_registration_adminuser()
        self.assertFalse(unvalidated_reg.validated)

        [found] = Registration.objects.validated()
        self.assertEqual(found.pk, validated_reg.pk)

    def test_public_registrations_filter(self):
        """
        The public registrations filter should only return registrations
        made through public (patient-authority) sources.
        """
        public_reg = self.make_registration_normaluser()
        self.assertEqual(public_reg.source.authority, 'patient')

        staff_reg = self.make_registration_adminuser()
        self.assertEqual(staff_reg.source.authority, 'hw_full')

        [found] = Registration.objects.public_registrations()
        self.assertEqual(found.pk, public_reg.pk)
class TestSendLocationRemindersTask(AuthenticatedAPITestCase):
    """Tests for the periodic task that reminds registrants without a
    recorded location to dial in and set one."""

    @responses.activate
    def test_send_location_reminder(self):
        """
        The send_location_reminder should send the correct message according
        to the given recipient and language.
        """
        # mock outbound message send
        responses.add(
            responses.POST,
            'http://localhost:8006/api/v1/outbound/',
            json={'id': 1})
        # mock recipient msisdn lookup
        responses.add(
            responses.GET,
            ('http://localhost:8001/api/v1/identities/%s/addresses/msisdn?'
             'default=True') % 'mother01-63e2-4acc-9b94-26663b9bc267',
            json={"results": [{"address": "+4321"}]},
            match_querystring=True,
        )
        send_location_reminders.send_location_reminder(
            'mother01-63e2-4acc-9b94-26663b9bc267', 'eng_UG')
        # The outbound send is the last recorded HTTP call.
        sms_http_call = responses.calls[-1].request
        self.assertEqual(sms_http_call.body, json.dumps({
            "content": (
                "To make sure you can receive care from your local VHT, please"
                " dial in to *XXX*X# and add your location. FamilyConnect"),
            "to_addr": "+4321",
            "metadata": {}}))

    def test_send_locations_task(self):
        """
        The send_locations_reminder task should look up registrations, and send
        messages to the correct ones: validated, public, and missing a parish.
        """
        # Should be called
        r1 = self.make_registration_normaluser()
        r1.validated = True
        r1.data['receiver_id'] = 'mother01-63e2-4acc-9b94-26663b9bc267'
        r1.data['language'] = 'eng_UG'
        r1.save()
        # Not public, shouldn't be called
        r2 = self.make_registration_adminuser()
        self.assertEqual(r2.source.authority, 'hw_full')
        # Should be called
        r3 = self.make_registration_normaluser()
        r3.validated = True
        r3.data['receiver_id'] = 'mother03-63e2-4acc-9b94-26663b9bc267'
        r3.data['language'] = 'cgg_UG'
        r3.data['parish'] = None
        r3.save()
        # Not validated, shouldn't be called
        r4 = self.make_registration_normaluser()
        self.assertFalse(r4.validated)
        # Has location, shouldn't be called
        r5 = self.make_registration_normaluser()
        r5.validated = True
        r5.data['parish'] = 'Kawaaga'
        r5.save()
        # Patch the per-recipient sender so we only assert the selection
        # logic, not the HTTP side effects.
        with patch.object(send_location_reminders, 'send_location_reminder') \
                as send_location_reminder:
            send_location_reminders.run()
            self.assertEqual(send_location_reminder.call_count, 2)
            send_location_reminder.assert_any_call(
                'mother01-63e2-4acc-9b94-26663b9bc267', 'eng_UG')
            send_location_reminder.assert_any_call(
                'mother03-63e2-4acc-9b94-26663b9bc267', 'cgg_UG')
class TestUserCreation(AuthenticatedAPITestCase):
    """Tests for the user/token creation endpoint and its permissions."""

    def test_create_user_and_token(self):
        """An admin can create a user and receives its auth token."""
        payload = {"email": "test@example.org"}

        resp = self.adminclient.post('/api/v1/user/token/', payload)
        token = resp.json().get('token', None)

        self.assertIsNotNone(
            token, "Could not receive authentication token on post.")
        self.assertEqual(
            resp.status_code, 201,
            "Status code on /api/v1/user/token/ was %s (should be 201)."
            % resp.status_code)

    def test_create_user_and_token_fail_nonadmin(self):
        """Non-admin users are denied access to token creation."""
        payload = {"email": "test@example.org"}

        resp = self.normalclient.post('/api/v1/user/token/', payload)
        error = resp.json().get('detail', None)

        self.assertIsNotNone(
            error, "Could not receive error on post.")
        self.assertEqual(
            error, "You do not have permission to perform this action.",
            "Error message was unexpected: %s."
            % error)

    def test_create_user_and_token_not_created(self):
        """Requesting a token twice for one email returns the same token
        rather than recreating it."""
        payload = {"email": "test@example.org"}

        first = self.adminclient.post('/api/v1/user/token/', payload)
        token = first.json().get('token', None)
        second = self.adminclient.post('/api/v1/user/token/', payload)
        token2 = second.json().get('token', None)

        self.assertEqual(
            token, token2,
            "Tokens are not equal, should be the same as not recreated.")

    def test_create_user_new_token_nonadmin(self):
        """A user created through the endpoint is not itself an admin and
        cannot create further tokens."""
        payload = {"email": "test@example.org"}
        resp = self.adminclient.post('/api/v1/user/token/', payload)
        token = resp.json().get('token', None)
        cleanclient = APIClient()
        cleanclient.credentials(HTTP_AUTHORIZATION='Token %s' % token)

        resp = cleanclient.post('/api/v1/user/token/', payload)
        error = resp.json().get('detail', None)

        # new user should not be admin
        self.assertIsNotNone(
            error, "Could not receive error on post.")
        self.assertEqual(
            error, "You do not have permission to perform this action.",
            "Error message was unexpected: %s."
            % error)
class TestMetricsAPI(AuthenticatedAPITestCase):
    """Tests for the metrics listing and triggering API endpoints."""

    def test_metrics_read(self):
        """GET /api/metrics/ lists every metric this service can fire."""
        self.make_source_normaluser()
        self.make_source_adminuser()

        resp = self.adminclient.get(
            '/api/metrics/', content_type='application/json')

        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        expected_metrics = [
            'registrations.created.sum',
            'registrations.created.total.last',
            'registrations.language.eng_UG.sum',
            'registrations.language.cgg_UG.sum',
            'registrations.language.xog_UG.sum',
            'registrations.language.lug_UG.sum',
            'registrations.language.eng_UG.total.last',
            'registrations.language.cgg_UG.total.last',
            'registrations.language.xog_UG.total.last',
            'registrations.language.lug_UG.total.last',
            'registrations.source.patient.sum',
            'registrations.source.patient.total.last',
            'registrations.source.advisor.sum',
            'registrations.source.advisor.total.last',
            'registrations.source.hw_limited.sum',
            'registrations.source.hw_limited.total.last',
            'registrations.source.hw_full.sum',
            'registrations.source.hw_full.total.last',
        ]
        self.assertEqual(
            sorted(resp.data["metrics_available"]), sorted(expected_metrics))

    @responses.activate
    def test_post_metrics(self):
        """POST /api/metrics/ kicks off the scheduled metrics tasks."""
        # deactivate Testsession for this test
        self.session = None
        responses.add(responses.POST,
                      "http://metrics-url/metrics/",
                      json={"foo": "bar"},
                      status=200, content_type='application/json')

        resp = self.adminclient.post(
            '/api/metrics/', content_type='application/json')

        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        self.assertEqual(resp.data["scheduled_metrics_initiated"], True)
class TestMetrics(AuthenticatedAPITestCase):
def _check_request(
        self, request, method, params=None, data=None, headers=None):
    """Assert a recorded HTTP request matches the expectation.

    Args:
        request: the recorded request object (from RecordingAdapter).
        method: expected HTTP method string, e.g. 'POST'.
        params: expected query-string params as a dict, or None to skip.
        data: expected JSON body as a dict, or None to assert no body.
        headers: expected header subset as a dict, or None to skip.
    """
    self.assertEqual(request.method, method)
    if params is not None:
        url = urlparse.urlparse(request.url)
        qs = urlparse.parse_qsl(url.query)
        self.assertEqual(dict(qs), params)
    if headers is not None:
        for key, value in headers.items():
            self.assertEqual(request.headers[key], value)
    if data is None:
        # No expected payload: the request must have no body at all.
        self.assertEqual(request.body, None)
    else:
        self.assertEqual(json.loads(request.body), data)
def _mount_session(self):
    """Mount a recording adapter on the metrics endpoint and return it,
    so tests can inspect the requests the metric tasks make."""
    canned_response = [{
        'name': 'foo',
        'value': 9000,
        'aggregator': 'bar',
    }]
    adapter = RecordingAdapter(json.dumps(canned_response).encode('utf-8'))
    self.session.mount("http://metrics-url/metrics/", adapter)
    return adapter
def test_direct_fire(self):
# Setup
adapter = self._mount_session()
# Execute
result = tasks.fire_metric.apply_async(kwargs={
"metric_name": 'foo.last',
"metric_value": 1,
"session": self.session
})
# Check
[request] = adapter.requests
self._check_request(
request, 'POST',
data={"foo.last": 1.0}
)
self.assertEqual(result.get(),
"Fired metric <foo.last> with value <1.0>")
def test_created_metric(self):
# Setup
adapter = self._mount_session()
# reconnect metric post_save hook
post_save.connect(fire_created_metric, sender=Registration)
# Execute
self.make_registration_adminuser()
self.make_registration_adminuser()
# Check
[request1, request2, request3, request4] = adapter.requests
self._check_request(
request1, 'POST',
data={"registrations.created.sum": 1.0}
)
self._check_request(
request2, 'POST',
data={"registrations.created.total.last": 1}
)
self._check_request(
request3, 'POST',
data={"registrations.created.sum": 1.0}
)
self._check_request(
request4, 'POST',
data={"registrations.created.total.last": 2}
)
# remove post_save hooks to prevent teardown errors
post_save.disconnect(fire_created_metric, sender=Registration)
def test_language_metric(self):
"""
When creating a registration, two metrics should be fired for the
receiver type that the registration is created for. One of type sum
with a value of 1, and one of type last with the current total.
"""
adapter = self._mount_session()
post_save.connect(fire_language_metric, sender=Registration)
cache.clear()
self.make_registration_adminuser()
self.make_registration_adminuser()
[r_sum1, r_total1, r_sum2, r_total2] = adapter.requests
self._check_request(
r_sum1, 'POST',
data={"registrations.language.eng_UG.sum": 1.0}
)
self._check_request(
r_total1, 'POST',
data={"registrations.language.eng_UG.total.last": 1.0}
)
self._check_request(
r_sum2, 'POST',
data={"registrations.language.eng_UG.sum": 1.0}
)
self._check_request(
r_total2, 'POST',
data={"registrations.language.eng_UG.total.last": 2.0}
)
post_save.disconnect(fire_language_metric, sender=Registration)
def test_source_metric(self):
"""
When creating a registration, two metrics should be fired for the
receiver type that the registration is created for. One of type sum
with a value of 1, and one of type last with the current total.
"""
adapter = self._mount_session()
post_save.connect(fire_source_metric, sender=Registration)
cache.clear()
self.make_registration_adminuser()
self.make_registration_adminuser()
[r_sum1, r_total1, r_sum2, r_total2] = adapter.requests
self._check_request(
r_sum1, 'POST',
data={"registrations.source.hw_full.sum": 1.0}
)
self._check_request(
r_total1, 'POST',
data={"registrations.source.hw_full.total.last": 1.0}
)
self._check_request(
r_sum2, 'POST',
data={"registrations.source.hw_full.sum": 1.0}
)
self._check_request(
r_total2, 'POST',
data={"registrations.source.hw_full.total.last": 2.0}
)
post_save.disconnect(fire_source_metric, sender=Registration)
class TestRepopulateMetricsTask(TestCase):
    """Tests for the repopulate_metrics celery task."""

    @patch('registrations.tasks.pika')
    @patch('registrations.tasks.RepopulateMetrics.generate_and_send')
    def test_run_repopulate_metrics(self, mock_repopulate, mock_pika):
        """
        The repopulate metrics task should create an amqp connection, and call
        generate_and_send with the appropriate parameters.
        """
        repopulate_metrics.delay(
            'amqp://test', 'prefix', ['metric.foo', 'metric.bar'], '30s:1m')
        args = [args for args, _ in mock_repopulate.call_args_list]
        # Relative instead of absolute times: subtract the earliest start so
        # the expectations below are independent of the wall clock.
        start = min(args, key=lambda a: a[3])[3]
        args = [[a, p, m, s-start, e-start] for a, p, m, s, e in args]
        connection = mock_pika.BlockingConnection.return_value
        channel = connection.channel.return_value
        # '30s:1m' => two 30-second buckets per metric.
        expected = [
            [channel, 'prefix', 'metric.foo',
                timedelta(seconds=0), timedelta(seconds=30)],
            [channel, 'prefix', 'metric.foo',
                timedelta(seconds=30), timedelta(seconds=60)],
            [channel, 'prefix', 'metric.bar',
                timedelta(seconds=0), timedelta(seconds=30)],
            [channel, 'prefix', 'metric.bar',
                timedelta(seconds=30), timedelta(seconds=60)],
        ]
        self.assertEqual(sorted(expected), sorted(args))
        # Assert that the amqp parameters were set from the correct url
        [url], _ = mock_pika.URLParameters.call_args
        self.assertEqual(url, 'amqp://test')
        # Assert that the connection was created with the generated parameters
        [parameters], _ = mock_pika.BlockingConnection.call_args
        self.assertEqual(parameters, mock_pika.URLParameters.return_value)

    @patch('registrations.tasks.MetricGenerator.generate_metric')
    @patch('registrations.tasks.send_metric')
    def test_generate_and_send(
            self, mock_send_metric, mock_metric_generator):
        """
        The generate_and_send function should use the metric generator to
        generate the appropriate metric, then send that metric to Graphite.
        """
        mock_metric_generator.return_value = 17.2
        repopulate_metrics.generate_and_send(
            'amqp://foo', 'prefix', 'foo.bar',
            datetime.utcfromtimestamp(300.0), datetime.utcfromtimestamp(500.0))
        mock_metric_generator.assert_called_once_with(
            'foo.bar', datetime.utcfromtimestamp(300),
            datetime.utcfromtimestamp(500))
        # The metric is sent at the midpoint of the [start, end) bucket.
        mock_send_metric.assert_called_once_with(
            'amqp://foo', 'prefix', 'foo.bar', 17.2,
            datetime.utcfromtimestamp(400))
|
praekelt/familyconnect-registration
|
registrations/tests.py
|
Python
|
bsd-3-clause
| 66,498
|
[
"VisIt"
] |
bc49471160614e67223530e31c953182713ca1a5c2e073c4d76ff6ad9f17cdec
|
# Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
# NOTE: this file is Python 2 ONLY.  It deliberately exercises syntax that
# was removed in Python 3 (`<>`, backquotes, `print` statements, octal
# literals like 0377, `exec` statements, `raise E, msg`) -- do not modernise.
from test_support import *
print '1. Parser'
print '1.1 Tokens'
print '1.1.1 Backslashes'
# Backslash means line continuation:
x = 1 \
+ 1
if x <> 2: raise TestFailed, 'backslash for line continuation'
# Backslash does not mean continuation in comments :\
x = 0
if x <> 0: raise TestFailed, 'backslash ending comment'
print '1.1.2 Numeric literals'
print '1.1.2.1 Plain integers'
if 0xff <> 255: raise TestFailed, 'hex int'
if 0377 <> 255: raise TestFailed, 'octal int'
if 2147483647 != 017777777777: raise TestFailed, 'large positive int'
try:
    from sys import maxint
except ImportError:
    maxint = 2147483647
if maxint == 2147483647:
    if -2147483647-1 != 020000000000: raise TestFailed, 'max negative int'
    # XXX -2147483648
    if 037777777777 != -1: raise TestFailed, 'oct -1'
    if 0xffffffff != -1: raise TestFailed, 'hex -1'
    for s in '2147483648', '040000000000', '0x100000000':
        try:
            x = eval(s)
        except OverflowError:
            continue
        # NOTE: the failure was downgraded to a warning in this distribution.
##        raise TestFailed, \
        print \
              'No OverflowError on huge integer literal ' + `s`
elif eval('maxint == 9223372036854775807'):
    if eval('-9223372036854775807-1 != 01000000000000000000000'):
        raise TestFailed, 'max negative int'
    if eval('01777777777777777777777') != -1: raise TestFailed, 'oct -1'
    if eval('0xffffffffffffffff') != -1: raise TestFailed, 'hex -1'
    for s in '9223372036854775808', '02000000000000000000000', \
             '0x10000000000000000':
        try:
            x = eval(s)
        except OverflowError:
            continue
        raise TestFailed, \
              'No OverflowError on huge integer literal ' + `s`
else:
    print 'Weird maxint value', maxint
print '1.1.2.2 Long integers'
x = 0L
x = 0l
x = 0xffffffffffffffffL
x = 0xffffffffffffffffl
x = 077777777777777777L
x = 077777777777777777l
x = 123456789012345678901234567890L
x = 123456789012345678901234567890l
print '1.1.2.3 Floating point'
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
print '1.1.3 String literals'
##def assert(s):
##    if not s: raise TestFailed, 'see traceback'
x = ''; y = ""; assert(len(x) == 0 and x == y)
x = '\''; y = "'"; assert(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; assert(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
assert(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
assert(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
assert(x == y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''; assert(x == y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"; assert(x == y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'; assert(x == y)
print '1.2 Grammar'
print 'single_input' # NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
print 'file_input' # (NEWLINE | stmt)* ENDMARKER
# Being tested as this very moment this very module
print 'expr_input' # testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
print 'eval_input' # testlist ENDMARKER
x = eval('1, 0 or 1')
print 'funcdef'
### 'def' NAME parameters ':' suite
### parameters: '(' [varargslist] ')'
### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
###              | ('**'|'*' '*') NAME)
###            | fpdef ['=' test] (',' fpdef ['=' test])* [',']
### fpdef: NAME | '(' fplist ')'
### fplist: fpdef (',' fpdef)* [',']
### arglist: (argument ',')* (argument | *' test [',' '**' test] | '**' test)
### argument: [test '='] test   # Really [keyword '='] test
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
def f4(two, (compound, (argument, list))): pass
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
def v3(a, (b, c), *rest): pass
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
### stmt: simple_stmt | compound_stmt
# Tested below
### simple_stmt: small_stmt (';' small_stmt)* [';']
print 'simple_stmt'
x = 1; pass; del x
### small_stmt: expr_stmt | print_stmt  | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
# Tested below
print 'expr_stmt' # (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
# NB these variables are deleted below
print 'print_stmt' # 'print' (test ',')* [test]
print 1, 2, 3
print 1, 2, 3,
print
print 0 or 1, 0 or 1,
print 0 or 1
print 'extended print_stmt' # 'print' '>>' test ','
import sys
print >> sys.stdout, 1, 2, 3
print >> sys.stdout, 1, 2, 3,
print >> sys.stdout
print >> sys.stdout, 0 or 1, 0 or 1,
print >> sys.stdout, 0 or 1
# test printing to an instance
class Gulp:
    def write(self, msg): pass

gulp = Gulp()
print >> gulp, 1, 2, 3
print >> gulp, 1, 2, 3,
print >> gulp
print >> gulp, 0 or 1, 0 or 1,
print >> gulp, 0 or 1
# test print >> None
def driver():
    oldstdout = sys.stdout
    sys.stdout = Gulp()
    try:
        tellme(Gulp())
        tellme()
    finally:
        sys.stdout = oldstdout

# we should see this once
def tellme(file=sys.stdout):
    print >> file, 'hello world'

driver()

# we should not see this at all
def tellme(file=None):
    print >> file, 'goodbye universe'

driver()

# syntax errors
def check_syntax(statement):
    try:
        compile(statement, '<string>', 'exec')
    except SyntaxError:
        pass
    else:
        print 'Missing SyntaxError: "%s"' % statement

check_syntax('print ,')
check_syntax('print >> x,')
print 'del_stmt' # 'del' exprlist
del abc
del x, y, (z, xyz)
print 'pass_stmt' # 'pass'
pass
print 'flow_stmt' # break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
print 'break_stmt' # 'break'
while 1: break
print 'continue_stmt' # 'continue'
i = 1
while i: i = 0; continue
print 'return_stmt' # 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
print 'raise_stmt' # 'raise' test [',' test]
try: raise RuntimeError, 'just testing'
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
print 'import_stmt' # 'import' NAME (',' NAME)* | 'from' NAME 'import' ('*' | NAME (',' NAME)*)
import sys
import time, sys
from time import time
from sys import *
from sys import path, argv
print 'global_stmt' # 'global' NAME (',' NAME)*
def f():
    global a
    global a, b
    global one, two, three, four, five, six, seven, eight, nine, ten

print 'exec_stmt' # 'exec' expr ['in' expr [',' expr]]
def f():
    z = None
    del z
    exec 'z=1+1\n'
    if z <> 2: raise TestFailed, 'exec \'z=1+1\'\\n'
    del z
    exec 'z=1+1'
    if z <> 2: raise TestFailed, 'exec \'z=1+1\''
    z = None
    del z
    exec u'z=1+1\n'
    if z <> 2: raise TestFailed, 'exec u\'z=1+1\'\\n'
    del z
    exec u'z=1+1'
    if z <> 2: raise TestFailed, 'exec u\'z=1+1\''

f()
g = {}
exec 'z = 1' in g
if g.has_key('__builtins__'): del g['__builtins__']
if g <> {'z': 1}: raise TestFailed, 'exec \'z = 1\' in g'
g = {}
l = {}
exec 'global a; a = 1; b = 2' in g, l
if g.has_key('__builtins__'): del g['__builtins__']
if l.has_key('__builtins__'): del l['__builtins__']
if (g, l) <> ({'a':1}, {'b':2}): raise TestFailed, 'exec ... in g, l'
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
print 'if_stmt' # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
print 'while_stmt' # 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
print 'for_stmt' # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
    def __init__(self, max):
        self.max = max
        self.sofar = []

    def __len__(self): return len(self.sofar)

    def __getitem__(self, i):
        if not 0 <= i < self.max: raise IndexError
        n = len(self.sofar)
        while n <= i:
            self.sofar.append(n*n)
            n = n+1
        return self.sofar[i]

n = 0
for x in Squares(10): n = n+x
if n != 285: raise TestFailed, 'for over growing sequence'
print 'try_stmt'
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
###         | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr [',' expr]]
try:
    1/0
except ZeroDivisionError:
    pass
else:
    pass
try: 1/0
except EOFError: pass
except TypeError, msg: pass
except RuntimeError, msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError), msg: pass
try: pass
finally: pass
print 'suite' # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
    pass
if 1:
    #
    #
    #
    pass
    pass
    #
    pass
    #
print 'test'
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
print 'comparison'
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 <> 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
print 'binary mask ops'
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
print 'shift ops'
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
print 'additive ops'
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
print 'multiplicative ops'
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
print 'unary ops'
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
print 'selectors'
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
f4(1, (2, (3, 4)))
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
v3(1,(2,3))
v3(1,(2,3),4)
v3(1,(2,3),4,5,6,7,8,9,0)
print
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
print 'atoms'
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
### dictmaker: test ':' test (',' test ':' test)* [',']
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = `x`
x = `1 or 2 or 3`
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
print 'classdef' # 'class' NAME ['(' testlist ')'] ':' suite
class B: pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
    def meth1(self): pass
    def meth2(self, arg): pass
    def meth3(self, a1, a2): pass

# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
print [s.strip() for s in spcs]
print [3 * x for x in nums]
print [x for x in nums if x > 2]
print [(i, s) for i in nums for s in strs]
print [(i, s) for i in nums for s in [f for f in strs if "n" in f]]
try:
    eval("[i, s for i in nums for s in strs]")
    print "FAIL: should have raised a SyntaxError!"
except SyntaxError:
    print "good: got a SyntaxError as expected"
try:
    eval("[x if y]")
    print "FAIL: should have raised a SyntaxError!"
except SyntaxError:
    print "good: got a SyntaxError as expected"
suppliers = [
  (1, "Boeing"),
  (2, "Ford"),
  (3, "Macdonalds")
]
parts = [
  (10, "Airliner"),
  (20, "Engine"),
  (30, "Cheeseburger")
]
suppart = [
  (1, 10), (1, 20), (2, 20), (3, 30)
]
print [
  (sname, pname)
  for (sno, sname) in suppliers
  for (pno, pname) in parts
  for (sp_sno, sp_pno) in suppart
  if sno == sp_sno and pno == sp_pno
]
|
atmark-techno/atmark-dist
|
user/python/Lib/test/test_grammar.py
|
Python
|
gpl-2.0
| 13,663
|
[
"GULP"
] |
4c201bfd81d165e65528bf0f00ac87adcf9dee1932464de01837a8d61779c51d
|
"""
Just-in-time compilation support.
"""
import abc
import copy
import os
import re
import ast
import logging
import inspect
import hashlib
import json
from collections import namedtuple
import tempfile
import ctree
from ctree.nodes import Project
from ctree.analyses import VerifyOnlyCtreeNodes
from ctree.frontend import get_ast
from ctree.transforms import DeclarationFiller
from ctree.c.nodes import CFile, MultiNode
if ctree.OCL_ENABLED:
from ctree.ocl.nodes import OclFile
from ctree.nodes import File
log = logging.getLogger(__name__)
def getFile(filepath):
    """Return a specialized File instance (CFile, OclFile, ...) for *filepath*.

    The concrete class is chosen by the file extension; a KeyError is raised
    for an extension no registered File subclass claims.
    """
    candidates = [CFile]
    if ctree.OCL_ENABLED:
        candidates.append(OclFile)
    directory, basename = os.path.split(filepath)
    stem, extension = os.path.splitext(basename)
    by_extension = {'.' + cls._ext: cls for cls in candidates}
    return by_extension[extension](name=stem, path=directory)
class JitModule(object):
    """
    Manages compilation of multiple ASTs.

    Each instance owns a fresh scratch directory under $TMPDIR/ctree/run-XXXX
    where compiled artifacts are placed.
    """
    def __init__(self):
        import os
        # All runs share $TMPDIR/ctree; every JitModule gets its own
        # run-XXXX subdirectory inside it.
        base_dir = os.path.join(tempfile.gettempdir(), "ctree")
        if not os.path.exists(base_dir):
            os.mkdir(base_dir)
        self.compilation_dir = tempfile.mkdtemp(prefix="run-", dir=base_dir)
        self.ll_module = None
        self.exec_engine = None

    def _link_in(self, submodule):
        # Remember the path of the compiled shared object so that
        # get_callable() can load it later.
        self.so_file_name = submodule

    def get_callable(self, entry_point_name, entry_point_typesig):
        """
        Returns a python callable that dispatches to the requested C function.
        """
        import ctypes
        library = ctypes.cdll.LoadLibrary(self.so_file_name)
        fn = getattr(library, entry_point_name)
        # Attach the ctypes signature so argument conversion is automatic.
        fn.argtypes = entry_point_typesig._argtypes_
        fn.restype = entry_point_typesig._restype_
        return fn
class ConcreteSpecializedFunction(object):
    """
    A function backed by generated code.

    Subclasses implement __call__ to marshal Python arguments into the
    compiled entry point returned by _compile().
    """
    # NOTE(review): `__metaclass__` only has effect on Python 2; under
    # Python 3 it is ignored, so @abstractmethod is not enforced there --
    # confirm whether ABCMeta enforcement is still intended.
    __metaclass__ = abc.ABCMeta
    def _compile(self, entry_point_name, project_node, entry_point_typesig,
                 **kwargs):
        """
        Returns a python callable.

        Verifies the tree contains only ctree nodes, runs code generation on
        the Project, and resolves the named entry point with the given
        ctypes signature.
        """
        assert isinstance(project_node, Project), \
            "Expected a Project but it got a %s." % type(project_node)
        VerifyOnlyCtreeNodes().visit(project_node)
        self._module = project_node.codegen(**kwargs)
        # if log.getEffectiveLevel() == 'debug':
        #    highlighted = highlight(str(self._module.ll_module), 'llvm')
        #    log.debug("full LLVM program is: <<<\n%s\n>>>" % highlighted)
        return self._module.get_callable(entry_point_name, entry_point_typesig)
    @abc.abstractmethod
    def __call__(self, *args, **kwargs):
        pass
class LazySpecializedFunction(object):
    """
    A callable object that will produce executable
    code just-in-time.

    On each call it derives a ProgramConfig from the arguments and tuner,
    then either reuses a cached ConcreteSpecializedFunction or runs
    transform()/finalize() to build one.
    """
    # (args_subconfig, tuner_subconfig) pair identifying one concrete build.
    ProgramConfig = namedtuple('ProgramConfig',
                               ['args_subconfig', 'tuner_subconfig'])
    # Attribute paths appended to the on-disk cache directory name.
    _directory_fields = ['__class__.__name__', 'backend_name']

    class NameExtractor(ast.NodeVisitor):
        """
        Extracts the first functiondef name found
        """
        def visit_FunctionDef(self, node):
            return node.name

        def generic_visit(self, node):
            # Unlike NodeVisitor.generic_visit, propagate the first non-None
            # result back up so visit() returns the discovered name.
            for field, value in ast.iter_fields(node):
                if isinstance(value, list):
                    for item in value:
                        if isinstance(item, ast.AST):
                            res = self.visit(item)
                            if res:
                                return res
                elif isinstance(value, ast.AST):
                    res = self.visit(value)
                    if res:
                        return res

    def __init__(self, py_ast=None, sub_dir=None, backend_name="default"):
        # A specializer either overrides apply() or passes an AST -- not both.
        if py_ast is not None and \
                self.apply is not LazySpecializedFunction.apply:
            raise TypeError('Cannot define apply and pass py_ast')
        self.original_tree = py_ast or \
            (get_ast(self.apply)
             if self.apply is not LazySpecializedFunction.apply else None)
        self.concrete_functions = {}  # config -> callable map
        self._tuner = self.get_tuning_driver()
        # Cache sub-directory: explicit name, else the first function name
        # in the AST, else a hash-derived fallback.
        self.sub_dir = sub_dir or \
            self.NameExtractor().visit(self.original_tree) or \
            hex(hash(self))[2:]
        self.backend_name = backend_name

    @property
    def original_tree(self):
        # Hand out a deep copy so callers cannot mutate the canonical tree.
        return copy.deepcopy(self._original_tree)

    @original_tree.setter
    def original_tree(self, value):
        if not hasattr(self, '_original_tree'):
            self._original_tree = value
        elif ast.dump(self._original_tree, True, True) != \
                ast.dump(value, True, True):
            # BUG FIX: this previously read `self.__original_tree`, which
            # name-mangles to _LazySpecializedFunction__original_tree (never
            # set), so ANY second assignment raised AttributeError -- even
            # re-assigning an identical tree, which is meant to be a no-op.
            raise AttributeError('Cannot redefine the ast')

    @property
    def info_filename(self):
        """Name of the per-directory cache metadata file."""
        return 'info.json'

    def get_info(self, path):
        """Load cache metadata from *path*, or an empty record if absent."""
        info_filepath = os.path.join(path, self.info_filename)
        if not os.path.exists(info_filepath):
            return {'hash': None, 'files': []}
        with open(info_filepath) as info_file:
            return json.load(info_file)

    def set_info(self, path, dictionary):
        """Write cache metadata *dictionary* into *path*."""
        info_filepath = os.path.join(path, self.info_filename)
        with open(info_filepath, 'w') as info_file:
            return json.dump(dictionary, info_file)

    @staticmethod
    def _hash(o):
        # Dicts are unhashable; fold their items into a frozenset of hashes.
        # Everything else hashes via its string form.
        if isinstance(o, dict):
            return hash(frozenset(
                LazySpecializedFunction._hash(item) for item in o.items()
            ))
        else:
            return hash(str(o))

    def __hash__(self):
        # Hash the source of every LazySpecializedFunction subclass in the
        # MRO plus the original AST, so cache entries are invalidated when
        # either the specializer code or the specialized function changes.
        mro = type(self).mro()
        result = hashlib.sha512(''.encode())
        for klass in mro:
            if issubclass(klass, LazySpecializedFunction):
                try:
                    result.update(inspect.getsource(klass).encode())
                except IOError:
                    # means source can't be found. Well, can't do anything
                    # about that I don't think
                    pass
            else:
                pass
        if self.original_tree is not None:
            tree_str = ast.dump(self.original_tree,
                                annotate_fields=True, include_attributes=True)
            result.update(tree_str.encode())
        return int(result.hexdigest(), 16)

    def config_to_dirname(self, program_config):
        """Returns the subdirectory name under .compiled/funcname"""
        # fixes the directory names and squishes invalid chars
        regex_filter = re.compile(r"""[/\?%*:|"<>()'{} -]""")

        def deep_getattr(obj, s):
            # Resolve a dotted attribute path like '__class__.__name__'.
            parts = s.split('.')
            for part in parts:
                obj = getattr(obj, part)
            return obj

        path_parts = [
            self.sub_dir,
            str(self._hash(program_config.args_subconfig)),
            str(self._hash(program_config.tuner_subconfig))
        ]
        for attrib in self._directory_fields:
            path_parts.append(str(deep_getattr(self, attrib)))
        filtered_parts = [
            str(re.sub(regex_filter, '_', part)) for part in path_parts]
        compile_path = str(ctree.CONFIG.get('jit', 'COMPILE_PATH'))
        path = os.path.join(compile_path, *filtered_parts)
        return re.sub('_+', '_', path)

    def get_program_config(self, args, kwargs):
        """Build the ProgramConfig for this invocation from args + tuner."""
        # Don't break old specializers that don't support kwargs
        try:
            args_subconfig = self.args_to_subconfig(args, kwargs)
        except TypeError:
            args_subconfig = self.args_to_subconfig(args)
        tuner_subconfig = next(self._tuner.configs)
        log.info("tuner subconfig: %s", tuner_subconfig)
        log.info("arguments subconfig: %s", args_subconfig)
        return self.ProgramConfig(args_subconfig, tuner_subconfig)

    def get_transform_result(self, program_config, dir_name, cache=True):
        """Return the transformed Files, from disk cache when possible."""
        info = self.get_info(dir_name)
        # check to see if the necessary code is in the persistent cache;
        # note the precedence: (hash-miss AND have-ast) OR cache-disabled.
        if hash(self) != info['hash'] and self.original_tree is not None \
                or not cache:
            # need to run transform() for code generation
            log.info('Hash miss. Running Transform')
            ctree.STATS.log("Filesystem cache miss")
            transform_result = self.run_transform(program_config)
            # Saving files to cache directory
            for source_file in transform_result:
                assert isinstance(source_file, File), \
                    "Transform must return an iterable of Files"
                source_file.path = dir_name
            new_info = {'hash': hash(self),
                        'files': [os.path.join(f.path, f.get_filename())
                                  for f in transform_result]}
            self.set_info(dir_name, new_info)
        else:
            log.info('Hash hit. Skipping transform')
            ctree.STATS.log('Filesystem cache hit')
            files = [getFile(path) for path in info['files']]
            transform_result = files
        return transform_result

    def __call__(self, *args, **kwargs):
        """
        Determines the program_configuration to be run. If it has yet to be
        built, build it. Then, execute it. If the selected
        program_configuration for this function has already been code
        generated for, this method draws from the cache.
        """
        ctree.STATS.log("specialized function call")
        log.info("detected specialized function call with arg types: %s",
                 [type(a) for a in args] +
                 [type(kwargs[key]) for key in kwargs])
        program_config = self.get_program_config(args, kwargs)
        dir_name = self.config_to_dirname(program_config)
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)
        config_hash = dir_name
        # checks to see if the necessary code is in the run-time cache
        if ctree.CONFIG.getboolean('jit', 'CACHE') and \
                config_hash in self.concrete_functions:
            ctree.STATS.log("specialized function cache hit")
            log.info("specialized function cache hit!")
            csf = self.concrete_functions[config_hash]
        else:
            ctree.STATS.log("specialized function cache miss")
            log.info("specialized function cache miss.")
            transform_result = self.get_transform_result(
                program_config, dir_name)
            csf = self.finalize(transform_result, program_config)
            assert isinstance(csf, ConcreteSpecializedFunction), \
                "Expected a ctree.jit.ConcreteSpecializedFunction, \
                but got a %s." % type(csf)
            self.concrete_functions[config_hash] = csf
        return csf(*args, **kwargs)

    def run_transform(self, program_config):
        """Run transform() and normalise its result to a list of Files,
        filling missing declarations into any CFile."""
        transform_result = self.transform(
            self.original_tree,
            program_config
        )
        if not isinstance(transform_result, (tuple, list)):
            transform_result = (transform_result,)
        transform_result = [DeclarationFiller().visit(source_file)
                            if isinstance(source_file, CFile) else source_file
                            for source_file in transform_result]
        return transform_result

    @classmethod
    def from_function(cls, func, folder_name=''):
        """Build a specializer from a plain function by renaming it and all
        self-references to 'apply' in its AST."""
        class Replacer(ast.NodeTransformer):
            def visit_Module(self, node):
                return MultiNode(body=[self.visit(i) for i in node.body])

            def visit_FunctionDef(self, node):
                if node.name == func.__name__:
                    node.name = 'apply'
                node.body = [self.visit(item) for item in node.body]
                return node

            def visit_Name(self, node):
                if node.id == func.__name__:
                    node.id = 'apply'
                return node

        func_ast = Replacer().visit(get_ast(func))
        return cls(py_ast=func_ast, sub_dir=folder_name or func.__name__)

    def report(self, *args, **kwargs):
        """
        Records the performance of the most recent configuration.
        """
        return self._tuner.report(*args, **kwargs)

    # =====================================================
    # Methods to be overridden by the user

    def transform(self, tree, program_config):
        """
        Convert the AST 'tree' into a C AST, optionally taking advantage of the
        actual runtime arguments.
        """
        raise NotImplementedError()

    def finalize(self, transform_result, program_config):
        """
        This function will be passed the result of transform. The specializer
        should return an ConcreteSpecializedFunction.
        """
        raise NotImplementedError("Finalize must be implemented")

    def get_tuning_driver(self):
        """
        Define the space of possible implementations.
        """
        from ctree.tune import ConstantTuningDriver
        return ConstantTuningDriver()

    def args_to_subconfig(self, args):
        """
        Extract features from the arguments to define uniqueness of
        this particular invocation. The return value must be a hashable
        object, or a dictionary of hashable objects.
        """
        # log.warn is a deprecated alias; warning() emits the same record.
        log.warning("arguments will not influence program_config. " +
                    "Consider overriding args_to_subconfig() in %s.",
                    type(self).__name__)
        return dict()

    @staticmethod
    def apply(*args):
        raise NotImplementedError()
|
mbdriscoll/ctree
|
ctree/jit.py
|
Python
|
bsd-2-clause
| 14,292
|
[
"VisIt"
] |
0f8c175c13cd14fac5840bb46e55f096c0d8c728b88afffd8ab6595f5b28183c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.